xref: /linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c (revision 397692eab35cbbd83681880c6a2dbcdb9fd84386)
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25 
26 #define HCLGE_NAME			"hclge"
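/* HCLGE_STATS_READ() reads a u64 counter at a byte offset within a stats
 * struct; HCLGE_MAC_STATS_FIELD_OFF() maps a field of struct hclge_mac_stats
 * to that byte offset, which is how the g_mac_stats_string table below is
 * indexed.
 */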
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29 
30 #define HCLGE_BUF_SIZE_UNIT	256U
31 #define HCLGE_BUF_MUL_BY	2
32 #define HCLGE_BUF_DIV_BY	2
33 #define NEED_RESERVE_TC_NUM	2
34 #define BUF_MAX_PERCENT		100
35 #define BUF_RESERVE_PERCENT	90
36 
37 #define HCLGE_RESET_MAX_FAIL_CNT	5
38 #define HCLGE_RESET_SYNC_TIME		100
39 #define HCLGE_PF_RESET_SYNC_TIME	20
40 #define HCLGE_PF_RESET_SYNC_CNT		1500
41 
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55 
56 #define HCLGE_LINK_STATUS_MS	10
57 
58 #define HCLGE_VF_VPORT_START_NUM	1
59 
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
66 			       u16 *allocated_size, bool is_alloc);
67 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
68 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
69 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
70 						   unsigned long *addr);
71 static int hclge_set_default_loopback(struct hclge_dev *hdev);
72 
73 static struct hnae3_ae_algo ae_algo;
74 
75 static struct workqueue_struct *hclge_wq;
76 
77 static const struct pci_device_id ae_algo_pci_tbl[] = {
78 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
79 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
85 	/* required last entry */
86 	{0, }
87 };
88 
89 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
90 
91 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
92 					 HCLGE_CMDQ_TX_ADDR_H_REG,
93 					 HCLGE_CMDQ_TX_DEPTH_REG,
94 					 HCLGE_CMDQ_TX_TAIL_REG,
95 					 HCLGE_CMDQ_TX_HEAD_REG,
96 					 HCLGE_CMDQ_RX_ADDR_L_REG,
97 					 HCLGE_CMDQ_RX_ADDR_H_REG,
98 					 HCLGE_CMDQ_RX_DEPTH_REG,
99 					 HCLGE_CMDQ_RX_TAIL_REG,
100 					 HCLGE_CMDQ_RX_HEAD_REG,
101 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
102 					 HCLGE_CMDQ_INTR_STS_REG,
103 					 HCLGE_CMDQ_INTR_EN_REG,
104 					 HCLGE_CMDQ_INTR_GEN_REG};
105 
106 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
107 					   HCLGE_VECTOR0_OTER_EN_REG,
108 					   HCLGE_MISC_RESET_STS_REG,
109 					   HCLGE_MISC_VECTOR_INT_STS,
110 					   HCLGE_GLOBAL_RESET_REG,
111 					   HCLGE_FUN_RST_ING,
112 					   HCLGE_GRO_EN_REG};
113 
114 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
115 					 HCLGE_RING_RX_ADDR_H_REG,
116 					 HCLGE_RING_RX_BD_NUM_REG,
117 					 HCLGE_RING_RX_BD_LENGTH_REG,
118 					 HCLGE_RING_RX_MERGE_EN_REG,
119 					 HCLGE_RING_RX_TAIL_REG,
120 					 HCLGE_RING_RX_HEAD_REG,
121 					 HCLGE_RING_RX_FBD_NUM_REG,
122 					 HCLGE_RING_RX_OFFSET_REG,
123 					 HCLGE_RING_RX_FBD_OFFSET_REG,
124 					 HCLGE_RING_RX_STASH_REG,
125 					 HCLGE_RING_RX_BD_ERR_REG,
126 					 HCLGE_RING_TX_ADDR_L_REG,
127 					 HCLGE_RING_TX_ADDR_H_REG,
128 					 HCLGE_RING_TX_BD_NUM_REG,
129 					 HCLGE_RING_TX_PRIORITY_REG,
130 					 HCLGE_RING_TX_TC_REG,
131 					 HCLGE_RING_TX_MERGE_EN_REG,
132 					 HCLGE_RING_TX_TAIL_REG,
133 					 HCLGE_RING_TX_HEAD_REG,
134 					 HCLGE_RING_TX_FBD_NUM_REG,
135 					 HCLGE_RING_TX_OFFSET_REG,
136 					 HCLGE_RING_TX_EBD_NUM_REG,
137 					 HCLGE_RING_TX_EBD_OFFSET_REG,
138 					 HCLGE_RING_TX_BD_ERR_REG,
139 					 HCLGE_RING_EN_REG};
140 
141 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
142 					     HCLGE_TQP_INTR_GL0_REG,
143 					     HCLGE_TQP_INTR_GL1_REG,
144 					     HCLGE_TQP_INTR_GL2_REG,
145 					     HCLGE_TQP_INTR_RL_REG};
146 
147 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
148 	"App    Loopback test",
149 	"Serdes serial Loopback test",
150 	"Serdes parallel Loopback test",
151 	"Phy    Loopback test"
152 };
153 
154 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
155 	{"mac_tx_mac_pause_num",
156 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
157 	{"mac_rx_mac_pause_num",
158 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
159 	{"mac_tx_control_pkt_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
161 	{"mac_rx_control_pkt_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
163 	{"mac_tx_pfc_pkt_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
165 	{"mac_tx_pfc_pri0_pkt_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
167 	{"mac_tx_pfc_pri1_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
169 	{"mac_tx_pfc_pri2_pkt_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
171 	{"mac_tx_pfc_pri3_pkt_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
173 	{"mac_tx_pfc_pri4_pkt_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
175 	{"mac_tx_pfc_pri5_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
177 	{"mac_tx_pfc_pri6_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
179 	{"mac_tx_pfc_pri7_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
181 	{"mac_rx_pfc_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
183 	{"mac_rx_pfc_pri0_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
185 	{"mac_rx_pfc_pri1_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
187 	{"mac_rx_pfc_pri2_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
189 	{"mac_rx_pfc_pri3_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
191 	{"mac_rx_pfc_pri4_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
193 	{"mac_rx_pfc_pri5_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
195 	{"mac_rx_pfc_pri6_pkt_num",
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
197 	{"mac_rx_pfc_pri7_pkt_num",
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
199 	{"mac_tx_total_pkt_num",
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
201 	{"mac_tx_total_oct_num",
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
203 	{"mac_tx_good_pkt_num",
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
205 	{"mac_tx_bad_pkt_num",
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
207 	{"mac_tx_good_oct_num",
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
209 	{"mac_tx_bad_oct_num",
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
211 	{"mac_tx_uni_pkt_num",
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
213 	{"mac_tx_multi_pkt_num",
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
215 	{"mac_tx_broad_pkt_num",
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
217 	{"mac_tx_undersize_pkt_num",
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
219 	{"mac_tx_oversize_pkt_num",
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
221 	{"mac_tx_64_oct_pkt_num",
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
223 	{"mac_tx_65_127_oct_pkt_num",
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
225 	{"mac_tx_128_255_oct_pkt_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
227 	{"mac_tx_256_511_oct_pkt_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
229 	{"mac_tx_512_1023_oct_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
231 	{"mac_tx_1024_1518_oct_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
233 	{"mac_tx_1519_2047_oct_pkt_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
235 	{"mac_tx_2048_4095_oct_pkt_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
237 	{"mac_tx_4096_8191_oct_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
239 	{"mac_tx_8192_9216_oct_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
241 	{"mac_tx_9217_12287_oct_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
243 	{"mac_tx_12288_16383_oct_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
245 	{"mac_tx_1519_max_good_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
247 	{"mac_tx_1519_max_bad_pkt_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
249 	{"mac_rx_total_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
251 	{"mac_rx_total_oct_num",
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
253 	{"mac_rx_good_pkt_num",
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
255 	{"mac_rx_bad_pkt_num",
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
257 	{"mac_rx_good_oct_num",
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
259 	{"mac_rx_bad_oct_num",
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
261 	{"mac_rx_uni_pkt_num",
262 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
263 	{"mac_rx_multi_pkt_num",
264 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
265 	{"mac_rx_broad_pkt_num",
266 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
267 	{"mac_rx_undersize_pkt_num",
268 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
269 	{"mac_rx_oversize_pkt_num",
270 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
271 	{"mac_rx_64_oct_pkt_num",
272 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
273 	{"mac_rx_65_127_oct_pkt_num",
274 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
275 	{"mac_rx_128_255_oct_pkt_num",
276 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
277 	{"mac_rx_256_511_oct_pkt_num",
278 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
279 	{"mac_rx_512_1023_oct_pkt_num",
280 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
281 	{"mac_rx_1024_1518_oct_pkt_num",
282 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
283 	{"mac_rx_1519_2047_oct_pkt_num",
284 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
285 	{"mac_rx_2048_4095_oct_pkt_num",
286 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
287 	{"mac_rx_4096_8191_oct_pkt_num",
288 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
289 	{"mac_rx_8192_9216_oct_pkt_num",
290 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
291 	{"mac_rx_9217_12287_oct_pkt_num",
292 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
293 	{"mac_rx_12288_16383_oct_pkt_num",
294 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
295 	{"mac_rx_1519_max_good_pkt_num",
296 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
297 	{"mac_rx_1519_max_bad_pkt_num",
298 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
299 
300 	{"mac_tx_fragment_pkt_num",
301 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
302 	{"mac_tx_undermin_pkt_num",
303 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
304 	{"mac_tx_jabber_pkt_num",
305 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
306 	{"mac_tx_err_all_pkt_num",
307 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
308 	{"mac_tx_from_app_good_pkt_num",
309 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
310 	{"mac_tx_from_app_bad_pkt_num",
311 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
312 	{"mac_rx_fragment_pkt_num",
313 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
314 	{"mac_rx_undermin_pkt_num",
315 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
316 	{"mac_rx_jabber_pkt_num",
317 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
318 	{"mac_rx_fcs_err_pkt_num",
319 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
320 	{"mac_rx_send_app_good_pkt_num",
321 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
322 	{"mac_rx_send_app_bad_pkt_num",
323 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
324 };
325 
326 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
327 	{
328 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
329 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
330 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
331 		.i_port_bitmap = 0x1,
332 	},
333 };
334 
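/* default RSS hash key; this is the well-known Toeplitz key that many NIC
 * drivers use as their default rather than a value specific to this hardware
 */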
335 static const u8 hclge_hash_key[] = {
336 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
337 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
338 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
339 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
340 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
341 };
342 
343 static const u32 hclge_dfx_bd_offset_list[] = {
344 	HCLGE_DFX_BIOS_BD_OFFSET,
345 	HCLGE_DFX_SSU_0_BD_OFFSET,
346 	HCLGE_DFX_SSU_1_BD_OFFSET,
347 	HCLGE_DFX_IGU_BD_OFFSET,
348 	HCLGE_DFX_RPU_0_BD_OFFSET,
349 	HCLGE_DFX_RPU_1_BD_OFFSET,
350 	HCLGE_DFX_NCSI_BD_OFFSET,
351 	HCLGE_DFX_RTC_BD_OFFSET,
352 	HCLGE_DFX_PPP_BD_OFFSET,
353 	HCLGE_DFX_RCB_BD_OFFSET,
354 	HCLGE_DFX_TQP_BD_OFFSET,
355 	HCLGE_DFX_SSU_2_BD_OFFSET
356 };
357 
358 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
359 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
360 	HCLGE_OPC_DFX_SSU_REG_0,
361 	HCLGE_OPC_DFX_SSU_REG_1,
362 	HCLGE_OPC_DFX_IGU_EGU_REG,
363 	HCLGE_OPC_DFX_RPU_REG_0,
364 	HCLGE_OPC_DFX_RPU_REG_1,
365 	HCLGE_OPC_DFX_NCSI_REG,
366 	HCLGE_OPC_DFX_RTC_REG,
367 	HCLGE_OPC_DFX_PPP_REG,
368 	HCLGE_OPC_DFX_RCB_REG,
369 	HCLGE_OPC_DFX_TQP_REG,
370 	HCLGE_OPC_DFX_SSU_REG_2
371 };
372 
373 static const struct key_info meta_data_key_info[] = {
374 	{ PACKET_TYPE_ID, 6},
375 	{ IP_FRAGEMENT, 1},
376 	{ ROCE_TYPE, 1},
377 	{ NEXT_KEY, 5},
378 	{ VLAN_NUMBER, 2},
379 	{ SRC_VPORT, 12},
380 	{ DST_VPORT, 12},
381 	{ TUNNEL_PACKET, 1},
382 };
383 
384 static const struct key_info tuple_key_info[] = {
385 	{ OUTER_DST_MAC, 48},
386 	{ OUTER_SRC_MAC, 48},
387 	{ OUTER_VLAN_TAG_FST, 16},
388 	{ OUTER_VLAN_TAG_SEC, 16},
389 	{ OUTER_ETH_TYPE, 16},
390 	{ OUTER_L2_RSV, 16},
391 	{ OUTER_IP_TOS, 8},
392 	{ OUTER_IP_PROTO, 8},
393 	{ OUTER_SRC_IP, 32},
394 	{ OUTER_DST_IP, 32},
395 	{ OUTER_L3_RSV, 16},
396 	{ OUTER_SRC_PORT, 16},
397 	{ OUTER_DST_PORT, 16},
398 	{ OUTER_L4_RSV, 32},
399 	{ OUTER_TUN_VNI, 24},
400 	{ OUTER_TUN_FLOW_ID, 8},
401 	{ INNER_DST_MAC, 48},
402 	{ INNER_SRC_MAC, 48},
403 	{ INNER_VLAN_TAG_FST, 16},
404 	{ INNER_VLAN_TAG_SEC, 16},
405 	{ INNER_ETH_TYPE, 16},
406 	{ INNER_L2_RSV, 16},
407 	{ INNER_IP_TOS, 8},
408 	{ INNER_IP_PROTO, 8},
409 	{ INNER_SRC_IP, 32},
410 	{ INNER_DST_IP, 32},
411 	{ INNER_L3_RSV, 16},
412 	{ INNER_SRC_PORT, 16},
413 	{ INNER_DST_PORT, 16},
414 	{ INNER_L4_RSV, 32},
415 };
416 
417 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
418 {
419 #define HCLGE_MAC_CMD_NUM 21
420 
421 	u64 *data = (u64 *)(&hdev->mac_stats);
422 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
423 	__le64 *desc_data;
424 	int i, k, n;
425 	int ret;
426 
427 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
428 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
429 	if (ret) {
430 		dev_err(&hdev->pdev->dev,
431 			"Get MAC pkt stats fail, status = %d.\n", ret);
432 
433 		return ret;
434 	}
435 
436 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
437 		/* for special opcode 0032, only the first desc has the head */
438 		if (unlikely(i == 0)) {
439 			desc_data = (__le64 *)(&desc[i].data[0]);
440 			n = HCLGE_RD_FIRST_STATS_NUM;
441 		} else {
442 			desc_data = (__le64 *)(&desc[i]);
443 			n = HCLGE_RD_OTHER_STATS_NUM;
444 		}
445 
446 		for (k = 0; k < n; k++) {
447 			*data += le64_to_cpu(*desc_data);
448 			data++;
449 			desc_data++;
450 		}
451 	}
452 
453 	return 0;
454 }
455 
456 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
457 {
458 	u64 *data = (u64 *)(&hdev->mac_stats);
459 	struct hclge_desc *desc;
460 	__le64 *desc_data;
461 	u16 i, k, n;
462 	int ret;
463 
464 	/* This may be called inside atomic sections,
465 	 * so GFP_ATOMIC is more suitable here
466 	 */
467 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
468 	if (!desc)
469 		return -ENOMEM;
470 
471 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
472 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
473 	if (ret) {
474 		kfree(desc);
475 		return ret;
476 	}
477 
478 	for (i = 0; i < desc_num; i++) {
479 		/* for special opcode 0034, only the first desc has the head */
480 		if (i == 0) {
481 			desc_data = (__le64 *)(&desc[i].data[0]);
482 			n = HCLGE_RD_FIRST_STATS_NUM;
483 		} else {
484 			desc_data = (__le64 *)(&desc[i]);
485 			n = HCLGE_RD_OTHER_STATS_NUM;
486 		}
487 
488 		for (k = 0; k < n; k++) {
489 			*data += le64_to_cpu(*desc_data);
490 			data++;
491 			desc_data++;
492 		}
493 	}
494 
495 	kfree(desc);
496 
497 	return 0;
498 }
499 
500 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
501 {
502 	struct hclge_desc desc;
503 	__le32 *desc_data;
504 	u32 reg_num;
505 	int ret;
506 
507 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
508 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
509 	if (ret)
510 		return ret;
511 
512 	desc_data = (__le32 *)(&desc.data[0]);
513 	reg_num = le32_to_cpu(*desc_data);
514 
515 	*desc_num = 1 + ((reg_num - 3) >> 2) +
516 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
517 
518 	return 0;
519 }
520 
521 static int hclge_mac_update_stats(struct hclge_dev *hdev)
522 {
523 	u32 desc_num;
524 	int ret;
525 
526 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
527 
528 	/* if the query succeeds, the firmware supports the new stats acquisition method */
529 	if (!ret)
530 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
531 	else if (ret == -EOPNOTSUPP)
532 		ret = hclge_mac_update_stats_defective(hdev);
533 	else
534 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
535 
536 	return ret;
537 }
538 
539 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
540 {
541 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
542 	struct hclge_vport *vport = hclge_get_vport(handle);
543 	struct hclge_dev *hdev = vport->back;
544 	struct hnae3_queue *queue;
545 	struct hclge_desc desc[1];
546 	struct hclge_tqp *tqp;
547 	int ret, i;
548 
549 	for (i = 0; i < kinfo->num_tqps; i++) {
550 		queue = handle->kinfo.tqp[i];
551 		tqp = container_of(queue, struct hclge_tqp, q);
552 		/* command : HCLGE_OPC_QUERY_RX_STATUS */
553 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
554 					   true);
555 
556 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
557 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
558 		if (ret) {
559 			dev_err(&hdev->pdev->dev,
560 				"Query tqp stat fail, status = %d,queue = %d\n",
561 				ret, i);
562 			return ret;
563 		}
564 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
565 			le32_to_cpu(desc[0].data[1]);
566 	}
567 
568 	for (i = 0; i < kinfo->num_tqps; i++) {
569 		queue = handle->kinfo.tqp[i];
570 		tqp = container_of(queue, struct hclge_tqp, q);
571 		/* command : HCLGE_OPC_QUERY_TX_STATUS */
572 		hclge_cmd_setup_basic_desc(&desc[0],
573 					   HCLGE_OPC_QUERY_TX_STATUS,
574 					   true);
575 
576 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
577 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
578 		if (ret) {
579 			dev_err(&hdev->pdev->dev,
580 				"Query tqp stat fail, status = %d,queue = %d\n",
581 				ret, i);
582 			return ret;
583 		}
584 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
585 			le32_to_cpu(desc[0].data[1]);
586 	}
587 
588 	return 0;
589 }
590 
591 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
592 {
593 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
594 	struct hclge_tqp *tqp;
595 	u64 *buff = data;
596 	int i;
597 
598 	for (i = 0; i < kinfo->num_tqps; i++) {
599 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
600 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
601 	}
602 
603 	for (i = 0; i < kinfo->num_tqps; i++) {
604 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
605 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
606 	}
607 
608 	return buff;
609 }
610 
611 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
612 {
613 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
614 
615 	/* each tqp provides one TX queue and one RX queue */
616 	return kinfo->num_tqps * 2;
617 }
618 
619 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
620 {
621 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622 	u8 *buff = data;
623 	int i = 0;
624 
625 	for (i = 0; i < kinfo->num_tqps; i++) {
626 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
627 			struct hclge_tqp, q);
628 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
629 			 tqp->index);
630 		buff = buff + ETH_GSTRING_LEN;
631 	}
632 
633 	for (i = 0; i < kinfo->num_tqps; i++) {
634 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
635 			struct hclge_tqp, q);
636 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
637 			 tqp->index);
638 		buff = buff + ETH_GSTRING_LEN;
639 	}
640 
641 	return buff;
642 }
643 
644 static u64 *hclge_comm_get_stats(const void *comm_stats,
645 				 const struct hclge_comm_stats_str strs[],
646 				 int size, u64 *data)
647 {
648 	u64 *buf = data;
649 	u32 i;
650 
651 	for (i = 0; i < size; i++)
652 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
653 
654 	return buf + size;
655 }
656 
657 static u8 *hclge_comm_get_strings(u32 stringset,
658 				  const struct hclge_comm_stats_str strs[],
659 				  int size, u8 *data)
660 {
661 	char *buff = (char *)data;
662 	u32 i;
663 
664 	if (stringset != ETH_SS_STATS)
665 		return buff;
666 
667 	for (i = 0; i < size; i++) {
668 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
669 		buff = buff + ETH_GSTRING_LEN;
670 	}
671 
672 	return (u8 *)buff;
673 }
674 
675 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
676 {
677 	struct hnae3_handle *handle;
678 	int status;
679 
680 	handle = &hdev->vport[0].nic;
681 	if (handle->client) {
682 		status = hclge_tqps_update_stats(handle);
683 		if (status) {
684 			dev_err(&hdev->pdev->dev,
685 				"Update TQPS stats fail, status = %d.\n",
686 				status);
687 		}
688 	}
689 
690 	status = hclge_mac_update_stats(hdev);
691 	if (status)
692 		dev_err(&hdev->pdev->dev,
693 			"Update MAC stats fail, status = %d.\n", status);
694 }
695 
696 static void hclge_update_stats(struct hnae3_handle *handle,
697 			       struct net_device_stats *net_stats)
698 {
699 	struct hclge_vport *vport = hclge_get_vport(handle);
700 	struct hclge_dev *hdev = vport->back;
701 	int status;
702 
703 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
704 		return;
705 
706 	status = hclge_mac_update_stats(hdev);
707 	if (status)
708 		dev_err(&hdev->pdev->dev,
709 			"Update MAC stats fail, status = %d.\n",
710 			status);
711 
712 	status = hclge_tqps_update_stats(handle);
713 	if (status)
714 		dev_err(&hdev->pdev->dev,
715 			"Update TQPS stats fail, status = %d.\n",
716 			status);
717 
718 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
719 }
720 
721 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
722 {
723 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
724 		HNAE3_SUPPORT_PHY_LOOPBACK |\
725 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
726 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
727 
728 	struct hclge_vport *vport = hclge_get_vport(handle);
729 	struct hclge_dev *hdev = vport->back;
730 	int count = 0;
731 
732 	/* Loopback test support rules:
733 	 * mac: only supported in GE mode
734 	 * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
735 	 * phy: only supported when a phy device exists on the board
736 	 */
737 	if (stringset == ETH_SS_TEST) {
738 		/* clear loopback bit flags at first */
739 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
740 		if (hdev->pdev->revision >= 0x21 ||
741 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
742 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
743 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
744 			count += 1;
745 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
746 		}
747 
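		/* both serdes loopback modes (serial and parallel) are always
		 * reported as supported
		 */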
748 		count += 2;
749 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
750 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
751 
752 		if (hdev->hw.mac.phydev) {
753 			count += 1;
754 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
755 		}
756 
757 	} else if (stringset == ETH_SS_STATS) {
758 		count = ARRAY_SIZE(g_mac_stats_string) +
759 			hclge_tqps_get_sset_count(handle, stringset);
760 	}
761 
762 	return count;
763 }
764 
765 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
766 			      u8 *data)
767 {
768 	u8 *p = data;
769 	int size;
770 
771 	if (stringset == ETH_SS_STATS) {
772 		size = ARRAY_SIZE(g_mac_stats_string);
773 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
774 					   size, p);
775 		p = hclge_tqps_get_strings(handle, p);
776 	} else if (stringset == ETH_SS_TEST) {
777 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
778 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
779 			       ETH_GSTRING_LEN);
780 			p += ETH_GSTRING_LEN;
781 		}
782 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
783 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
784 			       ETH_GSTRING_LEN);
785 			p += ETH_GSTRING_LEN;
786 		}
787 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
788 			memcpy(p,
789 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
790 			       ETH_GSTRING_LEN);
791 			p += ETH_GSTRING_LEN;
792 		}
793 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
794 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
795 			       ETH_GSTRING_LEN);
796 			p += ETH_GSTRING_LEN;
797 		}
798 	}
799 }
800 
801 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
802 {
803 	struct hclge_vport *vport = hclge_get_vport(handle);
804 	struct hclge_dev *hdev = vport->back;
805 	u64 *p;
806 
807 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
808 				 ARRAY_SIZE(g_mac_stats_string), data);
809 	p = hclge_tqps_get_stats(handle, p);
810 }
811 
812 static void hclge_get_mac_stat(struct hnae3_handle *handle,
813 			       struct hns3_mac_stats *mac_stats)
814 {
815 	struct hclge_vport *vport = hclge_get_vport(handle);
816 	struct hclge_dev *hdev = vport->back;
817 
818 	hclge_update_stats(handle, NULL);
819 
820 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
821 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
822 }
823 
824 static int hclge_parse_func_status(struct hclge_dev *hdev,
825 				   struct hclge_func_status_cmd *status)
826 {
827 #define HCLGE_MAC_ID_MASK	0xF
828 
829 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
830 		return -EINVAL;
831 
832 	/* record whether this pf is the main pf */
833 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
834 		hdev->flag |= HCLGE_FLAG_MAIN;
835 	else
836 		hdev->flag &= ~HCLGE_FLAG_MAIN;
837 
838 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
839 	return 0;
840 }
841 
842 static int hclge_query_function_status(struct hclge_dev *hdev)
843 {
844 #define HCLGE_QUERY_MAX_CNT	5
845 
846 	struct hclge_func_status_cmd *req;
847 	struct hclge_desc desc;
848 	int timeout = 0;
849 	int ret;
850 
851 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
852 	req = (struct hclge_func_status_cmd *)desc.data;
853 
854 	do {
855 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
856 		if (ret) {
857 			dev_err(&hdev->pdev->dev,
858 				"query function status failed %d.\n", ret);
859 			return ret;
860 		}
861 
862 		/* Check if pf reset is done */
863 		if (req->pf_state)
864 			break;
865 		usleep_range(1000, 2000);
866 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
867 
868 	return hclge_parse_func_status(hdev, req);
869 }
870 
871 static int hclge_query_pf_resource(struct hclge_dev *hdev)
872 {
873 	struct hclge_pf_res_cmd *req;
874 	struct hclge_desc desc;
875 	int ret;
876 
877 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
878 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
879 	if (ret) {
880 		dev_err(&hdev->pdev->dev,
881 			"query pf resource failed %d.\n", ret);
882 		return ret;
883 	}
884 
885 	req = (struct hclge_pf_res_cmd *)desc.data;
886 	hdev->num_tqps = le16_to_cpu(req->tqp_num);
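	/* buffer sizes are reported by firmware in units of
	 * (1 << HCLGE_BUF_UNIT_S) bytes, hence the shifts below
	 */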
887 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
888 
889 	if (req->tx_buf_size)
890 		hdev->tx_buf_size =
891 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
892 	else
893 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
894 
895 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
896 
897 	if (req->dv_buf_size)
898 		hdev->dv_buf_size =
899 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
900 	else
901 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
902 
903 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
904 
905 	if (hnae3_dev_roce_supported(hdev)) {
906 		hdev->roce_base_msix_offset =
907 		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
908 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
909 		hdev->num_roce_msi =
910 		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
911 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
912 
913 		/* the nic's msix number always equals the roce's. */
914 		hdev->num_nic_msi = hdev->num_roce_msi;
915 
916 		/* PF should have NIC vectors and Roce vectors,
917 		 * NIC vectors are queued before Roce vectors.
918 		 */
919 		hdev->num_msi = hdev->num_roce_msi +
920 				hdev->roce_base_msix_offset;
921 	} else {
922 		hdev->num_msi =
923 		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
924 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
925 
926 		hdev->num_nic_msi = hdev->num_msi;
927 	}
928 
929 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
930 		dev_err(&hdev->pdev->dev,
931 			"Just %u msi resources, not enough for pf(min:2).\n",
932 			hdev->num_nic_msi);
933 		return -EINVAL;
934 	}
935 
936 	return 0;
937 }
938 
939 static int hclge_parse_speed(int speed_cmd, int *speed)
940 {
941 	switch (speed_cmd) {
942 	case 6:
943 		*speed = HCLGE_MAC_SPEED_10M;
944 		break;
945 	case 7:
946 		*speed = HCLGE_MAC_SPEED_100M;
947 		break;
948 	case 0:
949 		*speed = HCLGE_MAC_SPEED_1G;
950 		break;
951 	case 1:
952 		*speed = HCLGE_MAC_SPEED_10G;
953 		break;
954 	case 2:
955 		*speed = HCLGE_MAC_SPEED_25G;
956 		break;
957 	case 3:
958 		*speed = HCLGE_MAC_SPEED_40G;
959 		break;
960 	case 4:
961 		*speed = HCLGE_MAC_SPEED_50G;
962 		break;
963 	case 5:
964 		*speed = HCLGE_MAC_SPEED_100G;
965 		break;
966 	default:
967 		return -EINVAL;
968 	}
969 
970 	return 0;
971 }
972 
973 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
974 {
975 	struct hclge_vport *vport = hclge_get_vport(handle);
976 	struct hclge_dev *hdev = vport->back;
977 	u32 speed_ability = hdev->hw.mac.speed_ability;
978 	u32 speed_bit = 0;
979 
980 	switch (speed) {
981 	case HCLGE_MAC_SPEED_10M:
982 		speed_bit = HCLGE_SUPPORT_10M_BIT;
983 		break;
984 	case HCLGE_MAC_SPEED_100M:
985 		speed_bit = HCLGE_SUPPORT_100M_BIT;
986 		break;
987 	case HCLGE_MAC_SPEED_1G:
988 		speed_bit = HCLGE_SUPPORT_1G_BIT;
989 		break;
990 	case HCLGE_MAC_SPEED_10G:
991 		speed_bit = HCLGE_SUPPORT_10G_BIT;
992 		break;
993 	case HCLGE_MAC_SPEED_25G:
994 		speed_bit = HCLGE_SUPPORT_25G_BIT;
995 		break;
996 	case HCLGE_MAC_SPEED_40G:
997 		speed_bit = HCLGE_SUPPORT_40G_BIT;
998 		break;
999 	case HCLGE_MAC_SPEED_50G:
1000 		speed_bit = HCLGE_SUPPORT_50G_BIT;
1001 		break;
1002 	case HCLGE_MAC_SPEED_100G:
1003 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1004 		break;
1005 	default:
1006 		return -EINVAL;
1007 	}
1008 
1009 	if (speed_bit & speed_ability)
1010 		return 0;
1011 
1012 	return -EINVAL;
1013 }
1014 
1015 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1016 {
1017 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1018 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1019 				 mac->supported);
1020 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1021 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1022 				 mac->supported);
1023 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1024 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1025 				 mac->supported);
1026 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1027 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1028 				 mac->supported);
1029 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1030 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1031 				 mac->supported);
1032 }
1033 
1034 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1035 {
1036 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1037 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1038 				 mac->supported);
1039 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1040 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1041 				 mac->supported);
1042 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1043 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1044 				 mac->supported);
1045 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1046 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1047 				 mac->supported);
1048 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1049 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1050 				 mac->supported);
1051 }
1052 
1053 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1054 {
1055 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1056 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1057 				 mac->supported);
1058 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1059 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1060 				 mac->supported);
1061 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1062 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1063 				 mac->supported);
1064 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1065 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1066 				 mac->supported);
1067 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1068 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1069 				 mac->supported);
1070 }
1071 
1072 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1073 {
1074 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1075 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1076 				 mac->supported);
1077 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1078 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1079 				 mac->supported);
1080 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1081 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1082 				 mac->supported);
1083 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1084 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1085 				 mac->supported);
1086 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1087 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1088 				 mac->supported);
1089 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1090 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1091 				 mac->supported);
1092 }
1093 
1094 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1095 {
1096 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1097 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1098 
1099 	switch (mac->speed) {
1100 	case HCLGE_MAC_SPEED_10G:
1101 	case HCLGE_MAC_SPEED_40G:
1102 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1103 				 mac->supported);
1104 		mac->fec_ability =
1105 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1106 		break;
1107 	case HCLGE_MAC_SPEED_25G:
1108 	case HCLGE_MAC_SPEED_50G:
1109 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1110 				 mac->supported);
1111 		mac->fec_ability =
1112 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1113 			BIT(HNAE3_FEC_AUTO);
1114 		break;
1115 	case HCLGE_MAC_SPEED_100G:
1116 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1117 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1118 		break;
1119 	default:
1120 		mac->fec_ability = 0;
1121 		break;
1122 	}
1123 }
1124 
1125 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1126 					u8 speed_ability)
1127 {
1128 	struct hclge_mac *mac = &hdev->hw.mac;
1129 
1130 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1131 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1132 				 mac->supported);
1133 
1134 	hclge_convert_setting_sr(mac, speed_ability);
1135 	hclge_convert_setting_lr(mac, speed_ability);
1136 	hclge_convert_setting_cr(mac, speed_ability);
1137 	if (hdev->pdev->revision >= 0x21)
1138 		hclge_convert_setting_fec(mac);
1139 
1140 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1141 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1142 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1143 }
1144 
1145 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1146 					    u8 speed_ability)
1147 {
1148 	struct hclge_mac *mac = &hdev->hw.mac;
1149 
1150 	hclge_convert_setting_kr(mac, speed_ability);
1151 	if (hdev->pdev->revision >= 0x21)
1152 		hclge_convert_setting_fec(mac);
1153 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1154 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1155 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1156 }
1157 
1158 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1159 					 u8 speed_ability)
1160 {
1161 	unsigned long *supported = hdev->hw.mac.supported;
1162 
1163 	/* default to supporting all speeds for a GE port */
1164 	if (!speed_ability)
1165 		speed_ability = HCLGE_SUPPORT_GE;
1166 
1167 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1168 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1169 				 supported);
1170 
1171 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1172 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1173 				 supported);
1174 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1175 				 supported);
1176 	}
1177 
1178 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1179 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1180 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1181 	}
1182 
1183 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1184 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1185 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1186 	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1187 }
1188 
1189 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1190 {
1191 	u8 media_type = hdev->hw.mac.media_type;
1192 
1193 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1194 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1195 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1196 		hclge_parse_copper_link_mode(hdev, speed_ability);
1197 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1198 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1199 }
1200 
1201 static u32 hclge_get_max_speed(u8 speed_ability)
1202 {
1203 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1204 		return HCLGE_MAC_SPEED_100G;
1205 
1206 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1207 		return HCLGE_MAC_SPEED_50G;
1208 
1209 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1210 		return HCLGE_MAC_SPEED_40G;
1211 
1212 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1213 		return HCLGE_MAC_SPEED_25G;
1214 
1215 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1216 		return HCLGE_MAC_SPEED_10G;
1217 
1218 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1219 		return HCLGE_MAC_SPEED_1G;
1220 
1221 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1222 		return HCLGE_MAC_SPEED_100M;
1223 
1224 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1225 		return HCLGE_MAC_SPEED_10M;
1226 
1227 	return HCLGE_MAC_SPEED_1G;
1228 }
1229 
1230 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1231 {
1232 	struct hclge_cfg_param_cmd *req;
1233 	u64 mac_addr_tmp_high;
1234 	u64 mac_addr_tmp;
1235 	unsigned int i;
1236 
1237 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1238 
1239 	/* get the configuration */
1240 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1241 					      HCLGE_CFG_VMDQ_M,
1242 					      HCLGE_CFG_VMDQ_S);
1243 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1244 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1245 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1246 					    HCLGE_CFG_TQP_DESC_N_M,
1247 					    HCLGE_CFG_TQP_DESC_N_S);
1248 
1249 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1250 					HCLGE_CFG_PHY_ADDR_M,
1251 					HCLGE_CFG_PHY_ADDR_S);
1252 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1253 					  HCLGE_CFG_MEDIA_TP_M,
1254 					  HCLGE_CFG_MEDIA_TP_S);
1255 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1256 					  HCLGE_CFG_RX_BUF_LEN_M,
1257 					  HCLGE_CFG_RX_BUF_LEN_S);
1258 	/* get mac_address */
1259 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1260 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1261 					    HCLGE_CFG_MAC_ADDR_H_M,
1262 					    HCLGE_CFG_MAC_ADDR_H_S);
1263 
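	/* param[2] carries the low 32 bits of the MAC address and param[3]
	 * carries the high 16 bits; the two-step shift below is simply << 32
	 */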
1264 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1265 
1266 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1267 					     HCLGE_CFG_DEFAULT_SPEED_M,
1268 					     HCLGE_CFG_DEFAULT_SPEED_S);
1269 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1270 					    HCLGE_CFG_RSS_SIZE_M,
1271 					    HCLGE_CFG_RSS_SIZE_S);
1272 
1273 	for (i = 0; i < ETH_ALEN; i++)
1274 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1275 
1276 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1277 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1278 
1279 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1280 					     HCLGE_CFG_SPEED_ABILITY_M,
1281 					     HCLGE_CFG_SPEED_ABILITY_S);
1282 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1283 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1284 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1285 	if (!cfg->umv_space)
1286 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1287 }
1288 
1289 /* hclge_get_cfg: query the static parameters from flash
1290  * @hdev: pointer to struct hclge_dev
1291  * @hcfg: the config structure to be filled
1292  */
1293 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1294 {
1295 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1296 	struct hclge_cfg_param_cmd *req;
1297 	unsigned int i;
1298 	int ret;
1299 
1300 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1301 		u32 offset = 0;
1302 
1303 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1304 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1305 					   true);
1306 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1307 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1308 		/* the read length is passed to hardware in units of 4 bytes */
1309 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1310 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1311 		req->offset = cpu_to_le32(offset);
1312 	}
1313 
1314 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1315 	if (ret) {
1316 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1317 		return ret;
1318 	}
1319 
1320 	hclge_parse_cfg(hcfg, desc);
1321 
1322 	return 0;
1323 }
1324 
1325 static int hclge_get_cap(struct hclge_dev *hdev)
1326 {
1327 	int ret;
1328 
1329 	ret = hclge_query_function_status(hdev);
1330 	if (ret) {
1331 		dev_err(&hdev->pdev->dev,
1332 			"query function status error %d.\n", ret);
1333 		return ret;
1334 	}
1335 
1336 	/* get pf resource */
1337 	return hclge_query_pf_resource(hdev);
1338 }
1339 
1340 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1341 {
1342 #define HCLGE_MIN_TX_DESC	64
1343 #define HCLGE_MIN_RX_DESC	64
1344 
1345 	if (!is_kdump_kernel())
1346 		return;
1347 
1348 	dev_info(&hdev->pdev->dev,
1349 		 "Running kdump kernel. Using minimal resources\n");
1350 
1351 	/* the minimal number of queue pairs equals the number of vports */
1352 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1353 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1354 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1355 }
1356 
1357 static int hclge_configure(struct hclge_dev *hdev)
1358 {
1359 	struct hclge_cfg cfg;
1360 	unsigned int i;
1361 	int ret;
1362 
1363 	ret = hclge_get_cfg(hdev, &cfg);
1364 	if (ret) {
1365 		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1366 		return ret;
1367 	}
1368 
1369 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1370 	hdev->base_tqp_pid = 0;
1371 	hdev->rss_size_max = cfg.rss_size_max;
1372 	hdev->rx_buf_len = cfg.rx_buf_len;
1373 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1374 	hdev->hw.mac.media_type = cfg.media_type;
1375 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1376 	hdev->num_tx_desc = cfg.tqp_desc_num;
1377 	hdev->num_rx_desc = cfg.tqp_desc_num;
1378 	hdev->tm_info.num_pg = 1;
1379 	hdev->tc_max = cfg.tc_num;
1380 	hdev->tm_info.hw_pfc_map = 0;
1381 	hdev->wanted_umv_size = cfg.umv_space;
1382 
1383 	if (hnae3_dev_fd_supported(hdev)) {
1384 		hdev->fd_en = true;
1385 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1386 	}
1387 
1388 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1389 	if (ret) {
1390 		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1391 		return ret;
1392 	}
1393 
1394 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1395 
1396 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1397 
1398 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1399 	    (hdev->tc_max < 1)) {
1400 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1401 			 hdev->tc_max);
1402 		hdev->tc_max = 1;
1403 	}
1404 
1405 	/* Dev does not support DCB */
1406 	if (!hnae3_dev_dcb_supported(hdev)) {
1407 		hdev->tc_max = 1;
1408 		hdev->pfc_max = 0;
1409 	} else {
1410 		hdev->pfc_max = hdev->tc_max;
1411 	}
1412 
1413 	hdev->tm_info.num_tc = 1;
1414 
1415 	/* non-contiguous TCs are currently not supported */
1416 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1417 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1418 
1419 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1420 
1421 	hclge_init_kdump_kernel_config(hdev);
1422 
1423 	/* Set the init affinity based on pci func number */
1424 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1425 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1426 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1427 			&hdev->affinity_mask);
1428 
1429 	return ret;
1430 }
1431 
1432 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1433 			    unsigned int tso_mss_max)
1434 {
1435 	struct hclge_cfg_tso_status_cmd *req;
1436 	struct hclge_desc desc;
1437 	u16 tso_mss;
1438 
1439 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1440 
1441 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1442 
1443 	tso_mss = 0;
1444 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1445 			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1446 	req->tso_mss_min = cpu_to_le16(tso_mss);
1447 
1448 	tso_mss = 0;
1449 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1450 			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1451 	req->tso_mss_max = cpu_to_le16(tso_mss);
1452 
1453 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1454 }
1455 
1456 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1457 {
1458 	struct hclge_cfg_gro_status_cmd *req;
1459 	struct hclge_desc desc;
1460 	int ret;
1461 
1462 	if (!hnae3_dev_gro_supported(hdev))
1463 		return 0;
1464 
1465 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1466 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1467 
1468 	req->gro_en = cpu_to_le16(en ? 1 : 0);
1469 
1470 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1471 	if (ret)
1472 		dev_err(&hdev->pdev->dev,
1473 			"GRO hardware config cmd failed, ret = %d\n", ret);
1474 
1475 	return ret;
1476 }
1477 
1478 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1479 {
1480 	struct hclge_tqp *tqp;
1481 	int i;
1482 
1483 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1484 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1485 	if (!hdev->htqp)
1486 		return -ENOMEM;
1487 
1488 	tqp = hdev->htqp;
1489 
1490 	for (i = 0; i < hdev->num_tqps; i++) {
1491 		tqp->dev = &hdev->pdev->dev;
1492 		tqp->index = i;
1493 
1494 		tqp->q.ae_algo = &ae_algo;
1495 		tqp->q.buf_size = hdev->rx_buf_len;
1496 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1497 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1498 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1499 			i * HCLGE_TQP_REG_SIZE;
1500 
1501 		tqp++;
1502 	}
1503 
1504 	return 0;
1505 }
1506 
1507 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1508 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1509 {
1510 	struct hclge_tqp_map_cmd *req;
1511 	struct hclge_desc desc;
1512 	int ret;
1513 
1514 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1515 
1516 	req = (struct hclge_tqp_map_cmd *)desc.data;
1517 	req->tqp_id = cpu_to_le16(tqp_pid);
1518 	req->tqp_vf = func_id;
1519 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
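	/* the map-type bit marks the queue as belonging to a VF rather
	 * than the PF
	 */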
1520 	if (!is_pf)
1521 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1522 	req->tqp_vid = cpu_to_le16(tqp_vid);
1523 
1524 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1525 	if (ret)
1526 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1527 
1528 	return ret;
1529 }
1530 
1531 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1532 {
1533 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1534 	struct hclge_dev *hdev = vport->back;
1535 	int i, alloced;
1536 
1537 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1538 	     alloced < num_tqps; i++) {
1539 		if (!hdev->htqp[i].alloced) {
1540 			hdev->htqp[i].q.handle = &vport->nic;
1541 			hdev->htqp[i].q.tqp_index = alloced;
1542 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1543 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1544 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1545 			hdev->htqp[i].alloced = true;
1546 			alloced++;
1547 		}
1548 	}
1549 	vport->alloc_tqps = alloced;
1550 	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1551 				vport->alloc_tqps / hdev->tm_info.num_tc);
1552 
1553 	/* ensure a one-to-one mapping between irq and queue by default */
1554 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1555 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1556 
1557 	return 0;
1558 }
1559 
1560 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1561 			    u16 num_tx_desc, u16 num_rx_desc)
1562 
1563 {
1564 	struct hnae3_handle *nic = &vport->nic;
1565 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1566 	struct hclge_dev *hdev = vport->back;
1567 	int ret;
1568 
1569 	kinfo->num_tx_desc = num_tx_desc;
1570 	kinfo->num_rx_desc = num_rx_desc;
1571 
1572 	kinfo->rx_buf_len = hdev->rx_buf_len;
1573 
1574 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1575 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1576 	if (!kinfo->tqp)
1577 		return -ENOMEM;
1578 
1579 	ret = hclge_assign_tqp(vport, num_tqps);
1580 	if (ret)
1581 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1582 
1583 	return ret;
1584 }
1585 
1586 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1587 				  struct hclge_vport *vport)
1588 {
1589 	struct hnae3_handle *nic = &vport->nic;
1590 	struct hnae3_knic_private_info *kinfo;
1591 	u16 i;
1592 
1593 	kinfo = &nic->kinfo;
1594 	for (i = 0; i < vport->alloc_tqps; i++) {
1595 		struct hclge_tqp *q =
1596 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1597 		bool is_pf;
1598 		int ret;
1599 
1600 		is_pf = !(vport->vport_id);
1601 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1602 					     i, is_pf);
1603 		if (ret)
1604 			return ret;
1605 	}
1606 
1607 	return 0;
1608 }
1609 
1610 static int hclge_map_tqp(struct hclge_dev *hdev)
1611 {
1612 	struct hclge_vport *vport = hdev->vport;
1613 	u16 i, num_vport;
1614 
1615 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1616 	for (i = 0; i < num_vport; i++)	{
1617 		int ret;
1618 
1619 		ret = hclge_map_tqp_to_vport(hdev, vport);
1620 		if (ret)
1621 			return ret;
1622 
1623 		vport++;
1624 	}
1625 
1626 	return 0;
1627 }
1628 
1629 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1630 {
1631 	struct hnae3_handle *nic = &vport->nic;
1632 	struct hclge_dev *hdev = vport->back;
1633 	int ret;
1634 
1635 	nic->pdev = hdev->pdev;
1636 	nic->ae_algo = &ae_algo;
1637 	nic->numa_node_mask = hdev->numa_node_mask;
1638 
1639 	ret = hclge_knic_setup(vport, num_tqps,
1640 			       hdev->num_tx_desc, hdev->num_rx_desc);
1641 	if (ret)
1642 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1643 
1644 	return ret;
1645 }
1646 
1647 static int hclge_alloc_vport(struct hclge_dev *hdev)
1648 {
1649 	struct pci_dev *pdev = hdev->pdev;
1650 	struct hclge_vport *vport;
1651 	u32 tqp_main_vport;
1652 	u32 tqp_per_vport;
1653 	int num_vport, i;
1654 	int ret;
1655 
1656 	/* We need to alloc a vport for the main NIC of the PF */
1657 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1658 
1659 	if (hdev->num_tqps < num_vport) {
1660 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1661 			hdev->num_tqps, num_vport);
1662 		return -EINVAL;
1663 	}
1664 
1665 	/* Alloc the same number of TQPs for every vport */
1666 	tqp_per_vport = hdev->num_tqps / num_vport;
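	/* the PF's main vport takes the remainder so no tqp is left unused */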
1667 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1668 
1669 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1670 			     GFP_KERNEL);
1671 	if (!vport)
1672 		return -ENOMEM;
1673 
1674 	hdev->vport = vport;
1675 	hdev->num_alloc_vport = num_vport;
1676 
1677 	if (IS_ENABLED(CONFIG_PCI_IOV))
1678 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1679 
1680 	for (i = 0; i < num_vport; i++) {
1681 		vport->back = hdev;
1682 		vport->vport_id = i;
1683 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1684 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1685 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1686 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1687 		INIT_LIST_HEAD(&vport->vlan_list);
1688 		INIT_LIST_HEAD(&vport->uc_mac_list);
1689 		INIT_LIST_HEAD(&vport->mc_mac_list);
1690 
1691 		if (i == 0)
1692 			ret = hclge_vport_setup(vport, tqp_main_vport);
1693 		else
1694 			ret = hclge_vport_setup(vport, tqp_per_vport);
1695 		if (ret) {
1696 			dev_err(&pdev->dev,
1697 				"vport setup failed for vport %d, %d\n",
1698 				i, ret);
1699 			return ret;
1700 		}
1701 
1702 		vport++;
1703 	}
1704 
1705 	return 0;
1706 }
1707 
1708 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1709 				    struct hclge_pkt_buf_alloc *buf_alloc)
1710 {
1711 /* TX buffer size is in units of 128 bytes */
1712 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1713 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1714 	struct hclge_tx_buff_alloc_cmd *req;
1715 	struct hclge_desc desc;
1716 	int ret;
1717 	u8 i;
1718 
1719 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1720 
1721 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1722 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1723 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1724 
1725 		req->tx_pkt_buff[i] =
1726 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1727 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1728 	}
1729 
1730 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1731 	if (ret)
1732 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1733 			ret);
1734 
1735 	return ret;
1736 }
1737 
1738 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1739 				 struct hclge_pkt_buf_alloc *buf_alloc)
1740 {
1741 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1742 
1743 	if (ret)
1744 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1745 
1746 	return ret;
1747 }
1748 
1749 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1750 {
1751 	unsigned int i;
1752 	u32 cnt = 0;
1753 
1754 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1755 		if (hdev->hw_tc_map & BIT(i))
1756 			cnt++;
1757 	return cnt;
1758 }
1759 
1760 /* Get the number of PFC-enabled TCs which have a private buffer */
1761 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1762 				  struct hclge_pkt_buf_alloc *buf_alloc)
1763 {
1764 	struct hclge_priv_buf *priv;
1765 	unsigned int i;
1766 	int cnt = 0;
1767 
1768 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1769 		priv = &buf_alloc->priv_buf[i];
1770 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1771 		    priv->enable)
1772 			cnt++;
1773 	}
1774 
1775 	return cnt;
1776 }
1777 
1778 /* Get the number of PFC-disabled TCs which have a private buffer */
1779 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1780 				     struct hclge_pkt_buf_alloc *buf_alloc)
1781 {
1782 	struct hclge_priv_buf *priv;
1783 	unsigned int i;
1784 	int cnt = 0;
1785 
1786 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1787 		priv = &buf_alloc->priv_buf[i];
1788 		if (hdev->hw_tc_map & BIT(i) &&
1789 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1790 		    priv->enable)
1791 			cnt++;
1792 	}
1793 
1794 	return cnt;
1795 }
1796 
1797 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1798 {
1799 	struct hclge_priv_buf *priv;
1800 	u32 rx_priv = 0;
1801 	int i;
1802 
1803 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1804 		priv = &buf_alloc->priv_buf[i];
1805 		if (priv->enable)
1806 			rx_priv += priv->buf_size;
1807 	}
1808 	return rx_priv;
1809 }
1810 
1811 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1812 {
1813 	u32 i, total_tx_size = 0;
1814 
1815 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1816 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1817 
1818 	return total_tx_size;
1819 }
1820 
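/* Check whether the rx buffer left after the private allocation can hold
 * the required shared buffer. If it can, fill in the shared buffer size,
 * its waterlines and the per-TC thresholds.
 */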
1821 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1822 				struct hclge_pkt_buf_alloc *buf_alloc,
1823 				u32 rx_all)
1824 {
1825 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1826 	u32 tc_num = hclge_get_tc_num(hdev);
1827 	u32 shared_buf, aligned_mps;
1828 	u32 rx_priv;
1829 	int i;
1830 
1831 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1832 
1833 	if (hnae3_dev_dcb_supported(hdev))
1834 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1835 					hdev->dv_buf_size;
1836 	else
1837 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1838 					+ hdev->dv_buf_size;
1839 
1840 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1841 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1842 			     HCLGE_BUF_SIZE_UNIT);
1843 
1844 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1845 	if (rx_all < rx_priv + shared_std)
1846 		return false;
1847 
1848 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1849 	buf_alloc->s_buf.buf_size = shared_buf;
1850 	if (hnae3_dev_dcb_supported(hdev)) {
1851 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1852 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1853 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1854 				  HCLGE_BUF_SIZE_UNIT);
1855 	} else {
1856 		buf_alloc->s_buf.self.high = aligned_mps +
1857 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1858 		buf_alloc->s_buf.self.low = aligned_mps;
1859 	}
1860 
1861 	if (hnae3_dev_dcb_supported(hdev)) {
1862 		hi_thrd = shared_buf - hdev->dv_buf_size;
1863 
1864 		if (tc_num <= NEED_RESERVE_TC_NUM)
1865 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1866 					/ BUF_MAX_PERCENT;
1867 
1868 		if (tc_num)
1869 			hi_thrd = hi_thrd / tc_num;
1870 
1871 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1872 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1873 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1874 	} else {
1875 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1876 		lo_thrd = aligned_mps;
1877 	}
1878 
1879 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1880 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1881 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1882 	}
1883 
1884 	return true;
1885 }
1886 
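/* Reserve a tx buffer of hdev->tx_buf_size for every enabled TC out of
 * the total packet buffer, and fail with -ENOMEM if it runs out.
 */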
1887 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1888 				struct hclge_pkt_buf_alloc *buf_alloc)
1889 {
1890 	u32 i, total_size;
1891 
1892 	total_size = hdev->pkt_buf_size;
1893 
1894 	/* alloc tx buffer for all enabled TCs */
1895 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1896 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1897 
1898 		if (hdev->hw_tc_map & BIT(i)) {
1899 			if (total_size < hdev->tx_buf_size)
1900 				return -ENOMEM;
1901 
1902 			priv->tx_buf_size = hdev->tx_buf_size;
1903 		} else {
1904 			priv->tx_buf_size = 0;
1905 		}
1906 
1907 		total_size -= priv->tx_buf_size;
1908 	}
1909 
1910 	return 0;
1911 }
1912 
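/* Calculate the rx private buffer and waterlines for every enabled TC,
 * using the larger waterlines when @max is true, then check whether the
 * remaining buffer is still enough for the shared buffer.
 */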
1913 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1914 				  struct hclge_pkt_buf_alloc *buf_alloc)
1915 {
1916 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1917 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1918 	unsigned int i;
1919 
1920 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1921 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1922 
1923 		priv->enable = 0;
1924 		priv->wl.low = 0;
1925 		priv->wl.high = 0;
1926 		priv->buf_size = 0;
1927 
1928 		if (!(hdev->hw_tc_map & BIT(i)))
1929 			continue;
1930 
1931 		priv->enable = 1;
1932 
1933 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1934 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1935 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
1936 						HCLGE_BUF_SIZE_UNIT);
1937 		} else {
1938 			priv->wl.low = 0;
1939 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1940 					aligned_mps;
1941 		}
1942 
1943 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1944 	}
1945 
1946 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1947 }
1948 
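/* Drop the private buffer of the non-PFC TCs, starting from the last TC,
 * until the shared buffer fits or no such TC is left.
 */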
1949 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1950 					  struct hclge_pkt_buf_alloc *buf_alloc)
1951 {
1952 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1953 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1954 	int i;
1955 
1956 	/* clear the private buffers starting from the last TC */
1957 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1958 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1959 		unsigned int mask = BIT((unsigned int)i);
1960 
1961 		if (hdev->hw_tc_map & mask &&
1962 		    !(hdev->tm_info.hw_pfc_map & mask)) {
1963 			/* Clear the private buffer of this non-PFC TC */
1964 			priv->wl.low = 0;
1965 			priv->wl.high = 0;
1966 			priv->buf_size = 0;
1967 			priv->enable = 0;
1968 			no_pfc_priv_num--;
1969 		}
1970 
1971 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1972 		    no_pfc_priv_num == 0)
1973 			break;
1974 	}
1975 
1976 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1977 }
1978 
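/* Drop the private buffer of the PFC-enabled TCs, starting from the last
 * TC, until the shared buffer fits or no such TC is left.
 */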
1979 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1980 					struct hclge_pkt_buf_alloc *buf_alloc)
1981 {
1982 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1983 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1984 	int i;
1985 
1986 	/* clear the private buffers starting from the last TC */
1987 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1988 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1989 		unsigned int mask = BIT((unsigned int)i);
1990 
1991 		if (hdev->hw_tc_map & mask &&
1992 		    hdev->tm_info.hw_pfc_map & mask) {
1993 			/* Reduce the number of PFC TCs with a private buffer */
1994 			priv->wl.low = 0;
1995 			priv->enable = 0;
1996 			priv->wl.high = 0;
1997 			priv->buf_size = 0;
1998 			pfc_priv_num--;
1999 		}
2000 
2001 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2002 		    pfc_priv_num == 0)
2003 			break;
2004 	}
2005 
2006 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2007 }
2008 
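/* Try to split the whole rx buffer into per-TC private buffers with no
 * shared buffer. This only succeeds when every enabled TC can get at
 * least the minimum private buffer size.
 */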
2009 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2010 				      struct hclge_pkt_buf_alloc *buf_alloc)
2011 {
2012 #define COMPENSATE_BUFFER	0x3C00
2013 #define COMPENSATE_HALF_MPS_NUM	5
2014 #define PRIV_WL_GAP		0x1800
2015 
2016 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2017 	u32 tc_num = hclge_get_tc_num(hdev);
2018 	u32 half_mps = hdev->mps >> 1;
2019 	u32 min_rx_priv;
2020 	unsigned int i;
2021 
2022 	if (tc_num)
2023 		rx_priv = rx_priv / tc_num;
2024 
2025 	if (tc_num <= NEED_RESERVE_TC_NUM)
2026 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2027 
2028 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2029 			COMPENSATE_HALF_MPS_NUM * half_mps;
2030 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2031 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2032 
2033 	if (rx_priv < min_rx_priv)
2034 		return false;
2035 
2036 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2037 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2038 
2039 		priv->enable = 0;
2040 		priv->wl.low = 0;
2041 		priv->wl.high = 0;
2042 		priv->buf_size = 0;
2043 
2044 		if (!(hdev->hw_tc_map & BIT(i)))
2045 			continue;
2046 
2047 		priv->enable = 1;
2048 		priv->buf_size = rx_priv;
2049 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2050 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2051 	}
2052 
2053 	buf_alloc->s_buf.buf_size = 0;
2054 
2055 	return true;
2056 }
2057 
2058 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2059  * @hdev: pointer to struct hclge_dev
2060  * @buf_alloc: pointer to buffer calculation data
2061  * @return: 0: calculation successful, negative: fail
2062  */
2063 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2064 				struct hclge_pkt_buf_alloc *buf_alloc)
2065 {
2066 	/* When DCB is not supported, rx private buffer is not allocated. */
2067 	if (!hnae3_dev_dcb_supported(hdev)) {
2068 		u32 rx_all = hdev->pkt_buf_size;
2069 
2070 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2071 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2072 			return -ENOMEM;
2073 
2074 		return 0;
2075 	}
2076 
2077 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2078 		return 0;
2079 
2080 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2081 		return 0;
2082 
2083 	/* try to decrease the buffer size */
2084 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2085 		return 0;
2086 
2087 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2088 		return 0;
2089 
2090 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2091 		return 0;
2092 
2093 	return -ENOMEM;
2094 }
2095 
2096 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2097 				   struct hclge_pkt_buf_alloc *buf_alloc)
2098 {
2099 	struct hclge_rx_priv_buff_cmd *req;
2100 	struct hclge_desc desc;
2101 	int ret;
2102 	int i;
2103 
2104 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2105 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2106 
2107 	/* Alloc the private buffer for each TC */
2108 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2109 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2110 
2111 		req->buf_num[i] =
2112 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2113 		req->buf_num[i] |=
2114 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2115 	}
2116 
2117 	req->shared_buf =
2118 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2119 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2120 
2121 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2122 	if (ret)
2123 		dev_err(&hdev->pdev->dev,
2124 			"rx private buffer alloc cmd failed %d\n", ret);
2125 
2126 	return ret;
2127 }
2128 
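/* Configure the rx private buffer waterlines of all TCs in hardware. Two
 * descriptors are sent, each carrying the waterlines of
 * HCLGE_TC_NUM_ONE_DESC TCs.
 */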
2129 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2130 				   struct hclge_pkt_buf_alloc *buf_alloc)
2131 {
2132 	struct hclge_rx_priv_wl_buf *req;
2133 	struct hclge_priv_buf *priv;
2134 	struct hclge_desc desc[2];
2135 	int i, j;
2136 	int ret;
2137 
2138 	for (i = 0; i < 2; i++) {
2139 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2140 					   false);
2141 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2142 
2143 		/* The first descriptor sets the NEXT bit to 1 */
2144 		if (i == 0)
2145 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2146 		else
2147 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2148 
2149 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2150 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2151 
2152 			priv = &buf_alloc->priv_buf[idx];
2153 			req->tc_wl[j].high =
2154 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2155 			req->tc_wl[j].high |=
2156 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2157 			req->tc_wl[j].low =
2158 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2159 			req->tc_wl[j].low |=
2160 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2161 		}
2162 	}
2163 
2164 	/* Send 2 descriptors at one time */
2165 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2166 	if (ret)
2167 		dev_err(&hdev->pdev->dev,
2168 			"rx private waterline config cmd failed %d\n",
2169 			ret);
2170 	return ret;
2171 }
2172 
2173 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2174 				    struct hclge_pkt_buf_alloc *buf_alloc)
2175 {
2176 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2177 	struct hclge_rx_com_thrd *req;
2178 	struct hclge_desc desc[2];
2179 	struct hclge_tc_thrd *tc;
2180 	int i, j;
2181 	int ret;
2182 
2183 	for (i = 0; i < 2; i++) {
2184 		hclge_cmd_setup_basic_desc(&desc[i],
2185 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2186 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2187 
2188 		/* The first descriptor sets the NEXT bit to 1 */
2189 		if (i == 0)
2190 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2191 		else
2192 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2193 
2194 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2195 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2196 
2197 			req->com_thrd[j].high =
2198 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2199 			req->com_thrd[j].high |=
2200 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2201 			req->com_thrd[j].low =
2202 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2203 			req->com_thrd[j].low |=
2204 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2205 		}
2206 	}
2207 
2208 	/* Send 2 descriptors at one time */
2209 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2210 	if (ret)
2211 		dev_err(&hdev->pdev->dev,
2212 			"common threshold config cmd failed %d\n", ret);
2213 	return ret;
2214 }
2215 
2216 static int hclge_common_wl_config(struct hclge_dev *hdev,
2217 				  struct hclge_pkt_buf_alloc *buf_alloc)
2218 {
2219 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2220 	struct hclge_rx_com_wl *req;
2221 	struct hclge_desc desc;
2222 	int ret;
2223 
2224 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2225 
2226 	req = (struct hclge_rx_com_wl *)desc.data;
2227 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2228 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2229 
2230 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2231 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2232 
2233 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2234 	if (ret)
2235 		dev_err(&hdev->pdev->dev,
2236 			"common waterline config cmd failed %d\n", ret);
2237 
2238 	return ret;
2239 }
2240 
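/* Calculate and configure the whole packet buffer split: the tx buffers
 * first, then the rx private buffers and, on DCB capable devices, the
 * per-TC waterlines and thresholds, and finally the common waterline.
 */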
2241 int hclge_buffer_alloc(struct hclge_dev *hdev)
2242 {
2243 	struct hclge_pkt_buf_alloc *pkt_buf;
2244 	int ret;
2245 
2246 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2247 	if (!pkt_buf)
2248 		return -ENOMEM;
2249 
2250 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2251 	if (ret) {
2252 		dev_err(&hdev->pdev->dev,
2253 			"could not calc tx buffer size for all TCs %d\n", ret);
2254 		goto out;
2255 	}
2256 
2257 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2258 	if (ret) {
2259 		dev_err(&hdev->pdev->dev,
2260 			"could not alloc tx buffers %d\n", ret);
2261 		goto out;
2262 	}
2263 
2264 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2265 	if (ret) {
2266 		dev_err(&hdev->pdev->dev,
2267 			"could not calc rx priv buffer size for all TCs %d\n",
2268 			ret);
2269 		goto out;
2270 	}
2271 
2272 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2273 	if (ret) {
2274 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2275 			ret);
2276 		goto out;
2277 	}
2278 
2279 	if (hnae3_dev_dcb_supported(hdev)) {
2280 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2281 		if (ret) {
2282 			dev_err(&hdev->pdev->dev,
2283 				"could not configure rx private waterline %d\n",
2284 				ret);
2285 			goto out;
2286 		}
2287 
2288 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2289 		if (ret) {
2290 			dev_err(&hdev->pdev->dev,
2291 				"could not configure common threshold %d\n",
2292 				ret);
2293 			goto out;
2294 		}
2295 	}
2296 
2297 	ret = hclge_common_wl_config(hdev, pkt_buf);
2298 	if (ret)
2299 		dev_err(&hdev->pdev->dev,
2300 			"could not configure common waterline %d\n", ret);
2301 
2302 out:
2303 	kfree(pkt_buf);
2304 	return ret;
2305 }
2306 
2307 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2308 {
2309 	struct hnae3_handle *roce = &vport->roce;
2310 	struct hnae3_handle *nic = &vport->nic;
2311 
2312 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2313 
2314 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2315 	    vport->back->num_msi_left == 0)
2316 		return -EINVAL;
2317 
2318 	roce->rinfo.base_vector = vport->back->roce_base_vector;
2319 
2320 	roce->rinfo.netdev = nic->kinfo.netdev;
2321 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
2322 
2323 	roce->pdev = nic->pdev;
2324 	roce->ae_algo = nic->ae_algo;
2325 	roce->numa_node_mask = nic->numa_node_mask;
2326 
2327 	return 0;
2328 }
2329 
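/* Allocate MSI or MSI-X vectors for the device and set up the arrays
 * used to track which vport owns each vector and its irq number.
 */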
2330 static int hclge_init_msi(struct hclge_dev *hdev)
2331 {
2332 	struct pci_dev *pdev = hdev->pdev;
2333 	int vectors;
2334 	int i;
2335 
2336 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2337 					hdev->num_msi,
2338 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2339 	if (vectors < 0) {
2340 		dev_err(&pdev->dev,
2341 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2342 			vectors);
2343 		return vectors;
2344 	}
2345 	if (vectors < hdev->num_msi)
2346 		dev_warn(&hdev->pdev->dev,
2347 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2348 			 hdev->num_msi, vectors);
2349 
2350 	hdev->num_msi = vectors;
2351 	hdev->num_msi_left = vectors;
2352 
2353 	hdev->base_msi_vector = pdev->irq;
2354 	hdev->roce_base_vector = hdev->base_msi_vector +
2355 				hdev->roce_base_msix_offset;
2356 
2357 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2358 					   sizeof(u16), GFP_KERNEL);
2359 	if (!hdev->vector_status) {
2360 		pci_free_irq_vectors(pdev);
2361 		return -ENOMEM;
2362 	}
2363 
2364 	for (i = 0; i < hdev->num_msi; i++)
2365 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2366 
2367 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2368 					sizeof(int), GFP_KERNEL);
2369 	if (!hdev->vector_irq) {
2370 		pci_free_irq_vectors(pdev);
2371 		return -ENOMEM;
2372 	}
2373 
2374 	return 0;
2375 }
2376 
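/* Half duplex is only kept for 10M and 100M; any other speed is forced
 * to full duplex.
 */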
2377 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2378 {
2379 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2380 		duplex = HCLGE_MAC_FULL;
2381 
2382 	return duplex;
2383 }
2384 
2385 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2386 				      u8 duplex)
2387 {
2388 	struct hclge_config_mac_speed_dup_cmd *req;
2389 	struct hclge_desc desc;
2390 	int ret;
2391 
2392 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2393 
2394 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2395 
2396 	if (duplex)
2397 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2398 
2399 	switch (speed) {
2400 	case HCLGE_MAC_SPEED_10M:
2401 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2402 				HCLGE_CFG_SPEED_S, 6);
2403 		break;
2404 	case HCLGE_MAC_SPEED_100M:
2405 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2406 				HCLGE_CFG_SPEED_S, 7);
2407 		break;
2408 	case HCLGE_MAC_SPEED_1G:
2409 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2410 				HCLGE_CFG_SPEED_S, 0);
2411 		break;
2412 	case HCLGE_MAC_SPEED_10G:
2413 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2414 				HCLGE_CFG_SPEED_S, 1);
2415 		break;
2416 	case HCLGE_MAC_SPEED_25G:
2417 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2418 				HCLGE_CFG_SPEED_S, 2);
2419 		break;
2420 	case HCLGE_MAC_SPEED_40G:
2421 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2422 				HCLGE_CFG_SPEED_S, 3);
2423 		break;
2424 	case HCLGE_MAC_SPEED_50G:
2425 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2426 				HCLGE_CFG_SPEED_S, 4);
2427 		break;
2428 	case HCLGE_MAC_SPEED_100G:
2429 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2430 				HCLGE_CFG_SPEED_S, 5);
2431 		break;
2432 	default:
2433 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2434 		return -EINVAL;
2435 	}
2436 
2437 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2438 		      1);
2439 
2440 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2441 	if (ret) {
2442 		dev_err(&hdev->pdev->dev,
2443 			"mac speed/duplex config cmd failed %d.\n", ret);
2444 		return ret;
2445 	}
2446 
2447 	return 0;
2448 }
2449 
2450 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2451 {
2452 	int ret;
2453 
2454 	duplex = hclge_check_speed_dup(duplex, speed);
2455 	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2456 		return 0;
2457 
2458 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2459 	if (ret)
2460 		return ret;
2461 
2462 	hdev->hw.mac.speed = speed;
2463 	hdev->hw.mac.duplex = duplex;
2464 
2465 	return 0;
2466 }
2467 
2468 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2469 				     u8 duplex)
2470 {
2471 	struct hclge_vport *vport = hclge_get_vport(handle);
2472 	struct hclge_dev *hdev = vport->back;
2473 
2474 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2475 }
2476 
2477 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2478 {
2479 	struct hclge_config_auto_neg_cmd *req;
2480 	struct hclge_desc desc;
2481 	u32 flag = 0;
2482 	int ret;
2483 
2484 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2485 
2486 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2487 	if (enable)
2488 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2489 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2490 
2491 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2492 	if (ret)
2493 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2494 			ret);
2495 
2496 	return ret;
2497 }
2498 
2499 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2500 {
2501 	struct hclge_vport *vport = hclge_get_vport(handle);
2502 	struct hclge_dev *hdev = vport->back;
2503 
2504 	if (!hdev->hw.mac.support_autoneg) {
2505 		if (enable) {
2506 			dev_err(&hdev->pdev->dev,
2507 				"autoneg is not supported by current port\n");
2508 			return -EOPNOTSUPP;
2509 		} else {
2510 			return 0;
2511 		}
2512 	}
2513 
2514 	return hclge_set_autoneg_en(hdev, enable);
2515 }
2516 
2517 static int hclge_get_autoneg(struct hnae3_handle *handle)
2518 {
2519 	struct hclge_vport *vport = hclge_get_vport(handle);
2520 	struct hclge_dev *hdev = vport->back;
2521 	struct phy_device *phydev = hdev->hw.mac.phydev;
2522 
2523 	if (phydev)
2524 		return phydev->autoneg;
2525 
2526 	return hdev->hw.mac.autoneg;
2527 }
2528 
2529 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2530 {
2531 	struct hclge_vport *vport = hclge_get_vport(handle);
2532 	struct hclge_dev *hdev = vport->back;
2533 	int ret;
2534 
2535 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2536 
2537 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2538 	if (ret)
2539 		return ret;
2540 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2541 }
2542 
2543 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2544 {
2545 	struct hclge_vport *vport = hclge_get_vport(handle);
2546 	struct hclge_dev *hdev = vport->back;
2547 
2548 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2549 		return hclge_set_autoneg_en(hdev, !halt);
2550 
2551 	return 0;
2552 }
2553 
2554 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2555 {
2556 	struct hclge_config_fec_cmd *req;
2557 	struct hclge_desc desc;
2558 	int ret;
2559 
2560 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2561 
2562 	req = (struct hclge_config_fec_cmd *)desc.data;
2563 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2564 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2565 	if (fec_mode & BIT(HNAE3_FEC_RS))
2566 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2567 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2568 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2569 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2570 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2571 
2572 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2573 	if (ret)
2574 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2575 
2576 	return ret;
2577 }
2578 
2579 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2580 {
2581 	struct hclge_vport *vport = hclge_get_vport(handle);
2582 	struct hclge_dev *hdev = vport->back;
2583 	struct hclge_mac *mac = &hdev->hw.mac;
2584 	int ret;
2585 
2586 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2587 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2588 		return -EINVAL;
2589 	}
2590 
2591 	ret = hclge_set_fec_hw(hdev, fec_mode);
2592 	if (ret)
2593 		return ret;
2594 
2595 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2596 	return 0;
2597 }
2598 
2599 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2600 			  u8 *fec_mode)
2601 {
2602 	struct hclge_vport *vport = hclge_get_vport(handle);
2603 	struct hclge_dev *hdev = vport->back;
2604 	struct hclge_mac *mac = &hdev->hw.mac;
2605 
2606 	if (fec_ability)
2607 		*fec_ability = mac->fec_ability;
2608 	if (fec_mode)
2609 		*fec_mode = mac->fec_mode;
2610 }
2611 
2612 static int hclge_mac_init(struct hclge_dev *hdev)
2613 {
2614 	struct hclge_mac *mac = &hdev->hw.mac;
2615 	int ret;
2616 
2617 	hdev->support_sfp_query = true;
2618 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2619 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2620 					 hdev->hw.mac.duplex);
2621 	if (ret)
2622 		return ret;
2623 
2624 	if (hdev->hw.mac.support_autoneg) {
2625 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2626 		if (ret)
2627 			return ret;
2628 	}
2629 
2630 	mac->link = 0;
2631 
2632 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2633 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2634 		if (ret)
2635 			return ret;
2636 	}
2637 
2638 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2639 	if (ret) {
2640 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2641 		return ret;
2642 	}
2643 
2644 	ret = hclge_set_default_loopback(hdev);
2645 	if (ret)
2646 		return ret;
2647 
2648 	ret = hclge_buffer_alloc(hdev);
2649 	if (ret)
2650 		dev_err(&hdev->pdev->dev,
2651 			"allocate buffer fail, ret=%d\n", ret);
2652 
2653 	return ret;
2654 }
2655 
2656 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2657 {
2658 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2659 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2660 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2661 				    hclge_wq, &hdev->service_task, 0);
2662 }
2663 
2664 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2665 {
2666 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2667 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2668 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2669 				    hclge_wq, &hdev->service_task, 0);
2670 }
2671 
2672 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2673 {
2674 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2675 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2676 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2677 				    hclge_wq, &hdev->service_task,
2678 				    delay_time);
2679 }
2680 
2681 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2682 {
2683 	struct hclge_link_status_cmd *req;
2684 	struct hclge_desc desc;
2685 	int link_status;
2686 	int ret;
2687 
2688 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2689 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2690 	if (ret) {
2691 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2692 			ret);
2693 		return ret;
2694 	}
2695 
2696 	req = (struct hclge_link_status_cmd *)desc.data;
2697 	link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2698 
2699 	return !!link_status;
2700 }
2701 
2702 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2703 {
2704 	unsigned int mac_state;
2705 	int link_stat;
2706 
2707 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2708 		return 0;
2709 
2710 	mac_state = hclge_get_mac_link_status(hdev);
2711 
2712 	if (hdev->hw.mac.phydev) {
2713 		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2714 			link_stat = mac_state &
2715 				hdev->hw.mac.phydev->link;
2716 		else
2717 			link_stat = 0;
2718 
2719 	} else {
2720 		link_stat = mac_state;
2721 	}
2722 
2723 	return !!link_stat;
2724 }
2725 
2726 static void hclge_update_link_status(struct hclge_dev *hdev)
2727 {
2728 	struct hnae3_client *rclient = hdev->roce_client;
2729 	struct hnae3_client *client = hdev->nic_client;
2730 	struct hnae3_handle *rhandle;
2731 	struct hnae3_handle *handle;
2732 	int state;
2733 	int i;
2734 
2735 	if (!client)
2736 		return;
2737 
2738 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2739 		return;
2740 
2741 	state = hclge_get_mac_phy_link(hdev);
2742 	if (state != hdev->hw.mac.link) {
2743 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2744 			handle = &hdev->vport[i].nic;
2745 			client->ops->link_status_change(handle, state);
2746 			hclge_config_mac_tnl_int(hdev, state);
2747 			rhandle = &hdev->vport[i].roce;
2748 			if (rclient && rclient->ops->link_status_change)
2749 				rclient->ops->link_status_change(rhandle,
2750 								 state);
2751 		}
2752 		hdev->hw.mac.link = state;
2753 	}
2754 
2755 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2756 }
2757 
2758 static void hclge_update_port_capability(struct hclge_mac *mac)
2759 {
2760 	/* update fec ability by speed */
2761 	hclge_convert_setting_fec(mac);
2762 
2763 	/* firmware cannot identify the backplane type, so the media type
2764 	 * read from the configuration can help deal with it
2765 	 */
2766 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2767 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2768 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2769 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2770 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2771 
2772 	if (mac->support_autoneg) {
2773 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2774 		linkmode_copy(mac->advertising, mac->supported);
2775 	} else {
2776 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2777 				   mac->supported);
2778 		linkmode_zero(mac->advertising);
2779 	}
2780 }
2781 
2782 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2783 {
2784 	struct hclge_sfp_info_cmd *resp;
2785 	struct hclge_desc desc;
2786 	int ret;
2787 
2788 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2789 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2790 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2791 	if (ret == -EOPNOTSUPP) {
2792 		dev_warn(&hdev->pdev->dev,
2793 			 "IMP does not support getting SFP speed %d\n", ret);
2794 		return ret;
2795 	} else if (ret) {
2796 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2797 		return ret;
2798 	}
2799 
2800 	*speed = le32_to_cpu(resp->speed);
2801 
2802 	return 0;
2803 }
2804 
2805 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2806 {
2807 	struct hclge_sfp_info_cmd *resp;
2808 	struct hclge_desc desc;
2809 	int ret;
2810 
2811 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2812 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2813 
2814 	resp->query_type = QUERY_ACTIVE_SPEED;
2815 
2816 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2817 	if (ret == -EOPNOTSUPP) {
2818 		dev_warn(&hdev->pdev->dev,
2819 			 "IMP does not support getting SFP info %d\n", ret);
2820 		return ret;
2821 	} else if (ret) {
2822 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2823 		return ret;
2824 	}
2825 
2826 	/* In some cases, the MAC speed read from IMP may be 0; it shouldn't be
2827 	 * set to mac->speed.
2828 	 */
2829 	if (!le32_to_cpu(resp->speed))
2830 		return 0;
2831 
2832 	mac->speed = le32_to_cpu(resp->speed);
2833 	/* if resp->speed_ability is 0, it means it is an old firmware
2834 	 * version, so do not update these params
2835 	 */
2836 	if (resp->speed_ability) {
2837 		mac->module_type = le32_to_cpu(resp->module_type);
2838 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
2839 		mac->autoneg = resp->autoneg;
2840 		mac->support_autoneg = resp->autoneg_ability;
2841 		mac->speed_type = QUERY_ACTIVE_SPEED;
2842 		if (!resp->active_fec)
2843 			mac->fec_mode = 0;
2844 		else
2845 			mac->fec_mode = BIT(resp->active_fec);
2846 	} else {
2847 		mac->speed_type = QUERY_SFP_SPEED;
2848 	}
2849 
2850 	return 0;
2851 }
2852 
2853 static int hclge_update_port_info(struct hclge_dev *hdev)
2854 {
2855 	struct hclge_mac *mac = &hdev->hw.mac;
2856 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
2857 	int ret;
2858 
2859 	/* get the port info from SFP cmd if not copper port */
2860 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2861 		return 0;
2862 
2863 	/* if IMP does not support getting SFP/qSFP info, return directly */
2864 	if (!hdev->support_sfp_query)
2865 		return 0;
2866 
2867 	if (hdev->pdev->revision >= 0x21)
2868 		ret = hclge_get_sfp_info(hdev, mac);
2869 	else
2870 		ret = hclge_get_sfp_speed(hdev, &speed);
2871 
2872 	if (ret == -EOPNOTSUPP) {
2873 		hdev->support_sfp_query = false;
2874 		return ret;
2875 	} else if (ret) {
2876 		return ret;
2877 	}
2878 
2879 	if (hdev->pdev->revision >= 0x21) {
2880 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2881 			hclge_update_port_capability(mac);
2882 			return 0;
2883 		}
2884 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2885 					       HCLGE_MAC_FULL);
2886 	} else {
2887 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2888 			return 0; /* do nothing if no SFP */
2889 
2890 		/* must config full duplex for SFP */
2891 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2892 	}
2893 }
2894 
2895 static int hclge_get_status(struct hnae3_handle *handle)
2896 {
2897 	struct hclge_vport *vport = hclge_get_vport(handle);
2898 	struct hclge_dev *hdev = vport->back;
2899 
2900 	hclge_update_link_status(hdev);
2901 
2902 	return hdev->hw.mac.link;
2903 }
2904 
2905 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2906 {
2907 	if (!pci_num_vf(hdev->pdev)) {
2908 		dev_err(&hdev->pdev->dev,
2909 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
2910 		return NULL;
2911 	}
2912 
2913 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2914 		dev_err(&hdev->pdev->dev,
2915 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
2916 			vf, pci_num_vf(hdev->pdev));
2917 		return NULL;
2918 	}
2919 
2920 	/* VFs start from 1 in vport */
2921 	vf += HCLGE_VF_VPORT_START_NUM;
2922 	return &hdev->vport[vf];
2923 }
2924 
2925 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2926 			       struct ifla_vf_info *ivf)
2927 {
2928 	struct hclge_vport *vport = hclge_get_vport(handle);
2929 	struct hclge_dev *hdev = vport->back;
2930 
2931 	vport = hclge_get_vf_vport(hdev, vf);
2932 	if (!vport)
2933 		return -EINVAL;
2934 
2935 	ivf->vf = vf;
2936 	ivf->linkstate = vport->vf_info.link_state;
2937 	ivf->spoofchk = vport->vf_info.spoofchk;
2938 	ivf->trusted = vport->vf_info.trusted;
2939 	ivf->min_tx_rate = 0;
2940 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2941 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2942 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2943 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2944 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
2945 
2946 	return 0;
2947 }
2948 
2949 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2950 				   int link_state)
2951 {
2952 	struct hclge_vport *vport = hclge_get_vport(handle);
2953 	struct hclge_dev *hdev = vport->back;
2954 
2955 	vport = hclge_get_vf_vport(hdev, vf);
2956 	if (!vport)
2957 		return -EINVAL;
2958 
2959 	vport->vf_info.link_state = link_state;
2960 
2961 	return 0;
2962 }
2963 
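/* Decode the source of a vector0 interrupt. Reset events take priority
 * over MSI-X error events, which in turn take priority over mailbox
 * (CMDQ RX) events; *clearval returns the status bits associated with the
 * reported event.
 */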
2964 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2965 {
2966 	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2967 
2968 	/* fetch the events from their corresponding regs */
2969 	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2970 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2971 	msix_src_reg = hclge_read_dev(&hdev->hw,
2972 				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2973 
2974 	/* Assumption: If by any chance reset and mailbox events are reported
2975 	 * together then we will only process reset event in this go and will
2976 	 * defer the processing of the mailbox events. Since we would not have
2977 	 * cleared the RX CMDQ event this time, we would receive another
2978 	 * interrupt from H/W just for the mailbox.
2979 	 *
2980 	 * check for vector0 reset event sources
2981 	 */
2982 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2983 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2984 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2985 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2986 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2987 		hdev->rst_stats.imp_rst_cnt++;
2988 		return HCLGE_VECTOR0_EVENT_RST;
2989 	}
2990 
2991 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2992 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2993 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2994 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2995 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2996 		hdev->rst_stats.global_rst_cnt++;
2997 		return HCLGE_VECTOR0_EVENT_RST;
2998 	}
2999 
3000 	/* check for vector0 msix event source */
3001 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3002 		*clearval = msix_src_reg;
3003 		return HCLGE_VECTOR0_EVENT_ERR;
3004 	}
3005 
3006 	/* check for vector0 mailbox(=CMDQ RX) event source */
3007 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3008 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3009 		*clearval = cmdq_src_reg;
3010 		return HCLGE_VECTOR0_EVENT_MBX;
3011 	}
3012 
3013 	/* print other vector0 event source */
3014 	dev_info(&hdev->pdev->dev,
3015 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3016 		 cmdq_src_reg, msix_src_reg);
3017 	*clearval = msix_src_reg;
3018 
3019 	return HCLGE_VECTOR0_EVENT_OTHER;
3020 }
3021 
3022 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3023 				    u32 regclr)
3024 {
3025 	switch (event_type) {
3026 	case HCLGE_VECTOR0_EVENT_RST:
3027 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3028 		break;
3029 	case HCLGE_VECTOR0_EVENT_MBX:
3030 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3031 		break;
3032 	default:
3033 		break;
3034 	}
3035 }
3036 
3037 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3038 {
3039 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3040 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3041 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3042 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3043 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3044 }
3045 
3046 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3047 {
3048 	writel(enable ? 1 : 0, vector->addr);
3049 }
3050 
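/* Handler for the misc (vector0) interrupt: disable the vector, decode
 * the event cause, schedule the reset or mailbox task accordingly, then
 * clear the cause and re-enable the vector when it is safe to do so.
 */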
3051 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3052 {
3053 	struct hclge_dev *hdev = data;
3054 	u32 clearval = 0;
3055 	u32 event_cause;
3056 
3057 	hclge_enable_vector(&hdev->misc_vector, false);
3058 	event_cause = hclge_check_event_cause(hdev, &clearval);
3059 
3060 	/* vector 0 interrupt is shared with reset and mailbox source events. */
3061 	switch (event_cause) {
3062 	case HCLGE_VECTOR0_EVENT_ERR:
3063 		/* we do not know what type of reset is required now. This could
3064 		 * only be decided after we fetch the type of errors which
3065 		 * caused this event. Therefore, we will do below for now:
3066 		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3067 		 *    have deferred the type of reset to be used.
3068 		 * 2. Schedule the reset service task.
3069 		 * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3070 		 *    will fetch the correct type of reset. This would be done
3071 		 *    by first decoding the types of errors.
3072 		 */
3073 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3074 		/* fall through */
3075 	case HCLGE_VECTOR0_EVENT_RST:
3076 		hclge_reset_task_schedule(hdev);
3077 		break;
3078 	case HCLGE_VECTOR0_EVENT_MBX:
3079 		/* If we are here then,
3080 		 * 1. Either we are not handling any mbx task and we are not
3081 		 *    scheduled as well
3082 		 *                        OR
3083 		 * 2. We could be handling a mbx task but nothing more is
3084 		 *    scheduled.
3085 		 * In both cases, we should schedule mbx task as there are more
3086 		 * mbx messages reported by this interrupt.
3087 		 */
3088 		hclge_mbx_task_schedule(hdev);
3089 		break;
3090 	default:
3091 		dev_warn(&hdev->pdev->dev,
3092 			 "received unknown or unhandled event of vector0\n");
3093 		break;
3094 	}
3095 
3096 	hclge_clear_event_cause(hdev, event_cause, clearval);
3097 
3098 	/* Enable the interrupt if it is not caused by reset. And when
3099 	 * clearval is equal to 0, it means the interrupt status may be
3100 	 * cleared by hardware before the driver reads the status register.
3101 	 * In this case, the vector0 interrupt should also be enabled.
3102 	 */
3103 	if (!clearval ||
3104 	    event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3105 		hclge_enable_vector(&hdev->misc_vector, true);
3106 	}
3107 
3108 	return IRQ_HANDLED;
3109 }
3110 
3111 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3112 {
3113 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3114 		dev_warn(&hdev->pdev->dev,
3115 			 "vector(vector_id %d) has been freed.\n", vector_id);
3116 		return;
3117 	}
3118 
3119 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3120 	hdev->num_msi_left += 1;
3121 	hdev->num_msi_used -= 1;
3122 }
3123 
3124 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3125 {
3126 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3127 
3128 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3129 
3130 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3131 	hdev->vector_status[0] = 0;
3132 
3133 	hdev->num_msi_left -= 1;
3134 	hdev->num_msi_used += 1;
3135 }
3136 
3137 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3138 				      const cpumask_t *mask)
3139 {
3140 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3141 					      affinity_notify);
3142 
3143 	cpumask_copy(&hdev->affinity_mask, mask);
3144 }
3145 
3146 static void hclge_irq_affinity_release(struct kref *ref)
3147 {
3148 }
3149 
3150 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3151 {
3152 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3153 			      &hdev->affinity_mask);
3154 
3155 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3156 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3157 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3158 				  &hdev->affinity_notify);
3159 }
3160 
3161 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3162 {
3163 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3164 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3165 }
3166 
3167 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3168 {
3169 	int ret;
3170 
3171 	hclge_get_misc_vector(hdev);
3172 
3173 	/* this would be explicitly freed in the end */
3174 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3175 		 HCLGE_NAME, pci_name(hdev->pdev));
3176 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3177 			  0, hdev->misc_vector.name, hdev);
3178 	if (ret) {
3179 		hclge_free_vector(hdev, 0);
3180 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3181 			hdev->misc_vector.vector_irq);
3182 	}
3183 
3184 	return ret;
3185 }
3186 
3187 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3188 {
3189 	free_irq(hdev->misc_vector.vector_irq, hdev);
3190 	hclge_free_vector(hdev, 0);
3191 }
3192 
3193 int hclge_notify_client(struct hclge_dev *hdev,
3194 			enum hnae3_reset_notify_type type)
3195 {
3196 	struct hnae3_client *client = hdev->nic_client;
3197 	u16 i;
3198 
3199 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3200 		return 0;
3201 
3202 	if (!client->ops->reset_notify)
3203 		return -EOPNOTSUPP;
3204 
3205 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3206 		struct hnae3_handle *handle = &hdev->vport[i].nic;
3207 		int ret;
3208 
3209 		ret = client->ops->reset_notify(handle, type);
3210 		if (ret) {
3211 			dev_err(&hdev->pdev->dev,
3212 				"notify nic client failed %d(%d)\n", type, ret);
3213 			return ret;
3214 		}
3215 	}
3216 
3217 	return 0;
3218 }
3219 
3220 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3221 				    enum hnae3_reset_notify_type type)
3222 {
3223 	struct hnae3_client *client = hdev->roce_client;
3224 	int ret = 0;
3225 	u16 i;
3226 
3227 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3228 		return 0;
3229 
3230 	if (!client->ops->reset_notify)
3231 		return -EOPNOTSUPP;
3232 
3233 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3234 		struct hnae3_handle *handle = &hdev->vport[i].roce;
3235 
3236 		ret = client->ops->reset_notify(handle, type);
3237 		if (ret) {
3238 			dev_err(&hdev->pdev->dev,
3239 				"notify roce client failed %d(%d)",
3240 				type, ret);
3241 			return ret;
3242 		}
3243 	}
3244 
3245 	return ret;
3246 }
3247 
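/* Poll the reset status register of the current reset type until the
 * hardware clears the reset bit, or give up after HCLGE_RESET_WAIT_CNT
 * polls.
 */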
3248 static int hclge_reset_wait(struct hclge_dev *hdev)
3249 {
3250 #define HCLGE_RESET_WAIT_MS	100
3251 #define HCLGE_RESET_WAIT_CNT	350
3252 
3253 	u32 val, reg, reg_bit;
3254 	u32 cnt = 0;
3255 
3256 	switch (hdev->reset_type) {
3257 	case HNAE3_IMP_RESET:
3258 		reg = HCLGE_GLOBAL_RESET_REG;
3259 		reg_bit = HCLGE_IMP_RESET_BIT;
3260 		break;
3261 	case HNAE3_GLOBAL_RESET:
3262 		reg = HCLGE_GLOBAL_RESET_REG;
3263 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3264 		break;
3265 	case HNAE3_FUNC_RESET:
3266 		reg = HCLGE_FUN_RST_ING;
3267 		reg_bit = HCLGE_FUN_RST_ING_B;
3268 		break;
3269 	default:
3270 		dev_err(&hdev->pdev->dev,
3271 			"Wait for unsupported reset type: %d\n",
3272 			hdev->reset_type);
3273 		return -EINVAL;
3274 	}
3275 
3276 	val = hclge_read_dev(&hdev->hw, reg);
3277 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3278 		msleep(HCLGE_RESET_WAIT_MS);
3279 		val = hclge_read_dev(&hdev->hw, reg);
3280 		cnt++;
3281 	}
3282 
3283 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3284 		dev_warn(&hdev->pdev->dev,
3285 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3286 		return -EBUSY;
3287 	}
3288 
3289 	return 0;
3290 }
3291 
3292 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3293 {
3294 	struct hclge_vf_rst_cmd *req;
3295 	struct hclge_desc desc;
3296 
3297 	req = (struct hclge_vf_rst_cmd *)desc.data;
3298 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3299 	req->dest_vfid = func_id;
3300 
3301 	if (reset)
3302 		req->vf_rst = 0x1;
3303 
3304 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3305 }
3306 
3307 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3308 {
3309 	int i;
3310 
3311 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3312 		struct hclge_vport *vport = &hdev->vport[i];
3313 		int ret;
3314 
3315 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3316 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3317 		if (ret) {
3318 			dev_err(&hdev->pdev->dev,
3319 				"set vf(%u) rst failed %d!\n",
3320 				vport->vport_id, ret);
3321 			return ret;
3322 		}
3323 
3324 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3325 			continue;
3326 
3327 		/* Inform VF to process the reset.
3328 		 * hclge_inform_reset_assert_to_vf may fail if VF
3329 		 * driver is not loaded.
3330 		 */
3331 		ret = hclge_inform_reset_assert_to_vf(vport);
3332 		if (ret)
3333 			dev_warn(&hdev->pdev->dev,
3334 				 "inform reset to vf(%u) failed %d!\n",
3335 				 vport->vport_id, ret);
3336 	}
3337 
3338 	return 0;
3339 }
3340 
3341 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3342 {
3343 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3344 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3345 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3346 		return;
3347 
3348 	hclge_mbx_handler(hdev);
3349 
3350 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3351 }
3352 
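/* Before a PF or FLR reset, poll the firmware until all VFs report that
 * they have stopped IO, servicing the mailbox in the loop so the VFs can
 * bring their netdevs down; give up after HCLGE_PF_RESET_SYNC_CNT tries.
 */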
3353 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3354 {
3355 	struct hclge_pf_rst_sync_cmd *req;
3356 	struct hclge_desc desc;
3357 	int cnt = 0;
3358 	int ret;
3359 
3360 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3361 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3362 
3363 	do {
3364 		/* the VF needs to bring its netdev down via mbx during PF or FLR reset */
3365 		hclge_mailbox_service_task(hdev);
3366 
3367 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3368 		/* for compatibility with old firmware, wait
3369 		 * 100 ms for the VF to stop IO
3370 		 */
3371 		if (ret == -EOPNOTSUPP) {
3372 			msleep(HCLGE_RESET_SYNC_TIME);
3373 			return;
3374 		} else if (ret) {
3375 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3376 				 ret);
3377 			return;
3378 		} else if (req->all_vf_ready) {
3379 			return;
3380 		}
3381 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3382 		hclge_cmd_reuse_desc(&desc, true);
3383 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3384 
3385 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3386 }
3387 
3388 void hclge_report_hw_error(struct hclge_dev *hdev,
3389 			   enum hnae3_hw_error_type type)
3390 {
3391 	struct hnae3_client *client = hdev->nic_client;
3392 	u16 i;
3393 
3394 	if (!client || !client->ops->process_hw_error ||
3395 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3396 		return;
3397 
3398 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3399 		client->ops->process_hw_error(&hdev->vport[i].nic, type);
3400 }
3401 
3402 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3403 {
3404 	u32 reg_val;
3405 
3406 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3407 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3408 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3409 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3410 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3411 	}
3412 
3413 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3414 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3415 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3416 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3417 	}
3418 }
3419 
3420 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3421 {
3422 	struct hclge_desc desc;
3423 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3424 	int ret;
3425 
3426 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3427 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3428 	req->fun_reset_vfid = func_id;
3429 
3430 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3431 	if (ret)
3432 		dev_err(&hdev->pdev->dev,
3433 			"send function reset cmd fail, status =%d\n", ret);
3434 
3435 	return ret;
3436 }
3437 
3438 static void hclge_do_reset(struct hclge_dev *hdev)
3439 {
3440 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3441 	struct pci_dev *pdev = hdev->pdev;
3442 	u32 val;
3443 
3444 	if (hclge_get_hw_reset_stat(handle)) {
3445 		dev_info(&pdev->dev, "Hardware reset is not finished\n");
3446 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3447 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3448 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3449 		return;
3450 	}
3451 
3452 	switch (hdev->reset_type) {
3453 	case HNAE3_GLOBAL_RESET:
3454 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3455 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3456 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3457 		dev_info(&pdev->dev, "Global Reset requested\n");
3458 		break;
3459 	case HNAE3_FUNC_RESET:
3460 		dev_info(&pdev->dev, "PF Reset requested\n");
3461 		/* schedule again to check later */
3462 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3463 		hclge_reset_task_schedule(hdev);
3464 		break;
3465 	default:
3466 		dev_warn(&pdev->dev,
3467 			 "Unsupported reset type: %d\n", hdev->reset_type);
3468 		break;
3469 	}
3470 }
3471 
3472 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3473 						   unsigned long *addr)
3474 {
3475 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3476 	struct hclge_dev *hdev = ae_dev->priv;
3477 
3478 	/* first, resolve any unknown reset type to the known type(s) */
3479 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3480 		u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3481 					HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
3482 		/* we will intentionally ignore any errors from this function
3483 		 * as we will end up in *some* reset request in any case
3484 		 */
3485 		if (hclge_handle_hw_msix_error(hdev, addr))
3486 			dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3487 				 msix_sts_reg);
3488 
3489 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
3490 		/* We deferred the clearing of the error event which caused the
3491 		 * interrupt since it was not possible to do that in
3492 		 * interrupt context (and this is the reason we introduced the
3493 		 * new UNKNOWN reset type). Now that the errors have been
3494 		 * handled and cleared in hardware, we can safely enable
3495 		 * interrupts. This is an exception to the norm.
3496 		 */
3497 		hclge_enable_vector(&hdev->misc_vector, true);
3498 	}
3499 
3500 	/* return the highest priority reset level amongst all */
3501 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3502 		rst_level = HNAE3_IMP_RESET;
3503 		clear_bit(HNAE3_IMP_RESET, addr);
3504 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3505 		clear_bit(HNAE3_FUNC_RESET, addr);
3506 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3507 		rst_level = HNAE3_GLOBAL_RESET;
3508 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3509 		clear_bit(HNAE3_FUNC_RESET, addr);
3510 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3511 		rst_level = HNAE3_FUNC_RESET;
3512 		clear_bit(HNAE3_FUNC_RESET, addr);
3513 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3514 		rst_level = HNAE3_FLR_RESET;
3515 		clear_bit(HNAE3_FLR_RESET, addr);
3516 	}
3517 
3518 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3519 	    rst_level < hdev->reset_type)
3520 		return HNAE3_NONE_RESET;
3521 
3522 	return rst_level;
3523 }
3524 
3525 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3526 {
3527 	u32 clearval = 0;
3528 
3529 	switch (hdev->reset_type) {
3530 	case HNAE3_IMP_RESET:
3531 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3532 		break;
3533 	case HNAE3_GLOBAL_RESET:
3534 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3535 		break;
3536 	default:
3537 		break;
3538 	}
3539 
3540 	if (!clearval)
3541 		return;
3542 
3543 	/* For revision 0x20, the reset interrupt source
3544 	 * can only be cleared after the hardware reset is done
3545 	 */
3546 	if (hdev->pdev->revision == 0x20)
3547 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3548 				clearval);
3549 
3550 	hclge_enable_vector(&hdev->misc_vector, true);
3551 }
3552 
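/* Set or clear the HCLGE_NIC_SW_RST_RDY bit in the NIC CSQ depth register.
 * The reset paths use this to tell the hardware whether the driver side
 * preparatory work is done (see hclge_reset_prepare_wait() and
 * hclge_reset_prepare_up()).
 */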
3553 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3554 {
3555 	u32 reg_val;
3556 
3557 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3558 	if (enable)
3559 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3560 	else
3561 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3562 
3563 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3564 }
3565 
3566 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3567 {
3568 	int ret;
3569 
3570 	ret = hclge_set_all_vf_rst(hdev, true);
3571 	if (ret)
3572 		return ret;
3573 
3574 	hclge_func_reset_sync_vf(hdev);
3575 
3576 	return 0;
3577 }
3578 
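/* Do the reset-type specific preparation (notifying VFs, asserting the
 * function reset command, handling IMP errors), then report via the reset
 * handshake that the driver side preparatory work is done.
 */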
3579 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3580 {
3581 	u32 reg_val;
3582 	int ret = 0;
3583 
3584 	switch (hdev->reset_type) {
3585 	case HNAE3_FUNC_RESET:
3586 		ret = hclge_func_reset_notify_vf(hdev);
3587 		if (ret)
3588 			return ret;
3589 
3590 		ret = hclge_func_reset_cmd(hdev, 0);
3591 		if (ret) {
3592 			dev_err(&hdev->pdev->dev,
3593 				"asserting function reset fail %d!\n", ret);
3594 			return ret;
3595 		}
3596 
3597 		/* After performing the PF reset, it is not necessary to do any
3598 		 * mailbox handling or send any command to firmware, because
3599 		 * mailbox handling and commands to firmware are only valid
3600 		 * after hclge_cmd_init is called.
3601 		 */
3602 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3603 		hdev->rst_stats.pf_rst_cnt++;
3604 		break;
3605 	case HNAE3_FLR_RESET:
3606 		ret = hclge_func_reset_notify_vf(hdev);
3607 		if (ret)
3608 			return ret;
3609 		break;
3610 	case HNAE3_IMP_RESET:
3611 		hclge_handle_imp_error(hdev);
3612 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3613 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3614 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3615 		break;
3616 	default:
3617 		break;
3618 	}
3619 
3620 	/* inform hardware that preparatory work is done */
3621 	msleep(HCLGE_RESET_SYNC_TIME);
3622 	hclge_reset_handshake(hdev, true);
3623 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3624 
3625 	return ret;
3626 }
3627 
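/* Called when a reset attempt fails. Returns true if the reset should be
 * retried (a reset is still pending or the failure count is below
 * MAX_RESET_FAIL_CNT), false if a new reset interrupt has arrived or the
 * retry limit has been reached.
 */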
3628 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3629 {
3630 #define MAX_RESET_FAIL_CNT 5
3631 
3632 	if (hdev->reset_pending) {
3633 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3634 			 hdev->reset_pending);
3635 		return true;
3636 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3637 		   HCLGE_RESET_INT_M) {
3638 		dev_info(&hdev->pdev->dev,
3639 			 "reset failed because new reset interrupt\n");
3640 		hclge_clear_reset_cause(hdev);
3641 		return false;
3642 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3643 		hdev->rst_stats.reset_fail_cnt++;
3644 		set_bit(hdev->reset_type, &hdev->reset_pending);
3645 		dev_info(&hdev->pdev->dev,
3646 			 "re-schedule reset task(%u)\n",
3647 			 hdev->rst_stats.reset_fail_cnt);
3648 		return true;
3649 	}
3650 
3651 	hclge_clear_reset_cause(hdev);
3652 
3653 	/* recover the handshake status when the reset fails */
3654 	hclge_reset_handshake(hdev, true);
3655 
3656 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3657 
3658 	hclge_dbg_dump_rst_info(hdev);
3659 
3660 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3661 
3662 	return false;
3663 }
3664 
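/* Tell the firmware that the PF reset handling is done using the
 * HCLGE_OPC_PF_RST_DONE command. Old firmware that does not support the
 * command returns -EOPNOTSUPP, which is treated as success.
 */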
3665 static int hclge_set_rst_done(struct hclge_dev *hdev)
3666 {
3667 	struct hclge_pf_rst_done_cmd *req;
3668 	struct hclge_desc desc;
3669 	int ret;
3670 
3671 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
3672 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3673 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3674 
3675 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3676 	/* To be compatible with the old firmware, which does not support
3677 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3678 	 * return success
3679 	 */
3680 	if (ret == -EOPNOTSUPP) {
3681 		dev_warn(&hdev->pdev->dev,
3682 			 "current firmware does not support command(0x%x)!\n",
3683 			 HCLGE_OPC_PF_RST_DONE);
3684 		return 0;
3685 	} else if (ret) {
3686 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3687 			ret);
3688 	}
3689 
3690 	return ret;
3691 }
3692 
3693 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3694 {
3695 	int ret = 0;
3696 
3697 	switch (hdev->reset_type) {
3698 	case HNAE3_FUNC_RESET:
3699 		/* fall through */
3700 	case HNAE3_FLR_RESET:
3701 		ret = hclge_set_all_vf_rst(hdev, false);
3702 		break;
3703 	case HNAE3_GLOBAL_RESET:
3704 		/* fall through */
3705 	case HNAE3_IMP_RESET:
3706 		ret = hclge_set_rst_done(hdev);
3707 		break;
3708 	default:
3709 		break;
3710 	}
3711 
3712 	/* clear the handshake status after re-initialization is done */
3713 	hclge_reset_handshake(hdev, false);
3714 
3715 	return ret;
3716 }
3717 
3718 static int hclge_reset_stack(struct hclge_dev *hdev)
3719 {
3720 	int ret;
3721 
3722 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3723 	if (ret)
3724 		return ret;
3725 
3726 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3727 	if (ret)
3728 		return ret;
3729 
3730 	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3731 	if (ret)
3732 		return ret;
3733 
3734 	return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3735 }
3736 
3737 static int hclge_reset_prepare(struct hclge_dev *hdev)
3738 {
3739 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3740 	int ret;
3741 
3742 	/* Initialize the ae_dev reset status as well, in case the enet layer
3743 	 * wants to know if the device is undergoing reset
3744 	 */
3745 	ae_dev->reset_type = hdev->reset_type;
3746 	hdev->rst_stats.reset_cnt++;
3747 	/* perform reset of the stack & ae device for a client */
3748 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3749 	if (ret)
3750 		return ret;
3751 
3752 	rtnl_lock();
3753 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3754 	rtnl_unlock();
3755 	if (ret)
3756 		return ret;
3757 
3758 	return hclge_reset_prepare_wait(hdev);
3759 }
3760 
3761 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3762 {
3763 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3764 	enum hnae3_reset_type reset_level;
3765 	int ret;
3766 
3767 	hdev->rst_stats.hw_reset_done_cnt++;
3768 
3769 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3770 	if (ret)
3771 		return ret;
3772 
3773 	rtnl_lock();
3774 	ret = hclge_reset_stack(hdev);
3775 	rtnl_unlock();
3776 	if (ret)
3777 		return ret;
3778 
3779 	hclge_clear_reset_cause(hdev);
3780 
3781 	ret = hclge_reset_prepare_up(hdev);
3782 	if (ret)
3783 		return ret;
3784 
3785 
3786 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3787 	/* ignore the RoCE notify error once the reset has already failed
3788 	 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
3789 	 */
3790 	if (ret &&
3791 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3792 		return ret;
3793 
3794 	rtnl_lock();
3795 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3796 	rtnl_unlock();
3797 	if (ret)
3798 		return ret;
3799 
3800 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3801 	if (ret)
3802 		return ret;
3803 
3804 	hdev->last_reset_time = jiffies;
3805 	hdev->rst_stats.reset_fail_cnt = 0;
3806 	hdev->rst_stats.reset_done_cnt++;
3807 	ae_dev->reset_type = HNAE3_NONE_RESET;
3808 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3809 
3810 	/* if default_reset_request has a higher level reset request,
3811 	 * it should be handled as soon as possible, since some errors
3812 	 * need this kind of reset to fix.
3813 	 */
3814 	reset_level = hclge_get_reset_level(ae_dev,
3815 					    &hdev->default_reset_request);
3816 	if (reset_level != HNAE3_NONE_RESET)
3817 		set_bit(reset_level, &hdev->reset_request);
3818 
3819 	return 0;
3820 }
3821 
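/* Top level reset flow: prepare the stack and hardware, wait for the
 * hardware reset to complete, then rebuild the stack. On any failure,
 * hclge_reset_err_handle() decides whether the reset task should be
 * rescheduled.
 */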
3822 static void hclge_reset(struct hclge_dev *hdev)
3823 {
3824 	if (hclge_reset_prepare(hdev))
3825 		goto err_reset;
3826 
3827 	if (hclge_reset_wait(hdev))
3828 		goto err_reset;
3829 
3830 	if (hclge_reset_rebuild(hdev))
3831 		goto err_reset;
3832 
3833 	return;
3834 
3835 err_reset:
3836 	if (hclge_reset_err_handle(hdev))
3837 		hclge_reset_task_schedule(hdev);
3838 }
3839 
3840 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3841 {
3842 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3843 	struct hclge_dev *hdev = ae_dev->priv;
3844 
3845 	/* We might end up getting called broadly because of the two cases below:
3846 	 * 1. A recoverable error was conveyed through APEI and the only way to
3847 	 *    bring back normalcy is to reset.
3848 	 * 2. A new reset request from the stack due to a timeout.
3849 	 *
3850 	 * For the first case, the error event might not have an ae handle
3851 	 * available. Check whether this is a new reset request and we are not
3852 	 * here just because the last reset attempt did not succeed and the
3853 	 * watchdog hit us again. We know it is a new request if the last reset
3854 	 * request did not occur very recently (watchdog timer = 5*HZ, so check
3855 	 * after a sufficiently large time, say 4 * 5 * HZ). For a new request
3856 	 * we reset the "reset level" to PF reset. If it is a repeat of the most
3857 	 * recent reset request, we throttle it and do not allow it again before
3858 	 * HCLGE_RESET_INTERVAL has elapsed.
3859 	 */
3860 	if (!handle)
3861 		handle = &hdev->vport[0].nic;
3862 
3863 	if (time_before(jiffies, (hdev->last_reset_time +
3864 				  HCLGE_RESET_INTERVAL))) {
3865 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3866 		return;
3867 	} else if (hdev->default_reset_request) {
3868 		hdev->reset_level =
3869 			hclge_get_reset_level(ae_dev,
3870 					      &hdev->default_reset_request);
3871 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3872 		hdev->reset_level = HNAE3_FUNC_RESET;
3873 	}
3874 
3875 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3876 		 hdev->reset_level);
3877 
3878 	/* request reset & schedule reset task */
3879 	set_bit(hdev->reset_level, &hdev->reset_request);
3880 	hclge_reset_task_schedule(hdev);
3881 
3882 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3883 		hdev->reset_level++;
3884 }
3885 
3886 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3887 					enum hnae3_reset_type rst_type)
3888 {
3889 	struct hclge_dev *hdev = ae_dev->priv;
3890 
3891 	set_bit(rst_type, &hdev->default_reset_request);
3892 }
3893 
3894 static void hclge_reset_timer(struct timer_list *t)
3895 {
3896 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3897 
3898 	/* if default_reset_request has no value, it means that this reset
3899 	 * request has already been handled, so just return here
3900 	 */
3901 	if (!hdev->default_reset_request)
3902 		return;
3903 
3904 	dev_info(&hdev->pdev->dev,
3905 		 "triggering reset in reset timer\n");
3906 	hclge_reset_event(hdev->pdev, NULL);
3907 }
3908 
3909 static void hclge_reset_subtask(struct hclge_dev *hdev)
3910 {
3911 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3912 
3913 	/* check if there is any ongoing reset in the hardware. This status can
3914 	 * be checked from reset_pending. If there is, we need to wait for the
3915 	 * hardware to complete the reset.
3916 	 *    a. If we are able to figure out in a reasonable time that the
3917 	 *       hardware has fully reset, we can proceed with the driver and
3918 	 *       client reset.
3919 	 *    b. else, we can come back later to check this status, so
3920 	 *       re-schedule now.
3921 	 */
3922 	hdev->last_reset_time = jiffies;
3923 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3924 	if (hdev->reset_type != HNAE3_NONE_RESET)
3925 		hclge_reset(hdev);
3926 
3927 	/* check if we got any *new* reset requests to be honored */
3928 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3929 	if (hdev->reset_type != HNAE3_NONE_RESET)
3930 		hclge_do_reset(hdev);
3931 
3932 	hdev->reset_type = HNAE3_NONE_RESET;
3933 }
3934 
3935 static void hclge_reset_service_task(struct hclge_dev *hdev)
3936 {
3937 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3938 		return;
3939 
3940 	down(&hdev->reset_sem);
3941 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3942 
3943 	hclge_reset_subtask(hdev);
3944 
3945 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3946 	up(&hdev->reset_sem);
3947 }
3948 
3949 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3950 {
3951 	int i;
3952 
3953 	/* start from vport 1, since the PF (vport 0) is always alive */
3954 	for (i = 1; i < hdev->num_alloc_vport; i++) {
3955 		struct hclge_vport *vport = &hdev->vport[i];
3956 
3957 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3958 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3959 
3960 		/* If vf is not alive, set to default value */
3961 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3962 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3963 	}
3964 }
3965 
3966 static void hclge_periodic_service_task(struct hclge_dev *hdev)
3967 {
3968 	unsigned long delta = round_jiffies_relative(HZ);
3969 
3970 	/* Always handle the link updating to make sure link state is
3971 	 * updated when it is triggered by mbx.
3972 	 */
3973 	hclge_update_link_status(hdev);
3974 
3975 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
3976 		delta = jiffies - hdev->last_serv_processed;
3977 
3978 		if (delta < round_jiffies_relative(HZ)) {
3979 			delta = round_jiffies_relative(HZ) - delta;
3980 			goto out;
3981 		}
3982 	}
3983 
3984 	hdev->serv_processed_cnt++;
3985 	hclge_update_vport_alive(hdev);
3986 
3987 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
3988 		hdev->last_serv_processed = jiffies;
3989 		goto out;
3990 	}
3991 
3992 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
3993 		hclge_update_stats_for_all(hdev);
3994 
3995 	hclge_update_port_info(hdev);
3996 	hclge_sync_vlan_filter(hdev);
3997 
3998 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
3999 		hclge_rfs_filter_expire(hdev);
4000 
4001 	hdev->last_serv_processed = jiffies;
4002 
4003 out:
4004 	hclge_task_schedule(hdev, delta);
4005 }
4006 
4007 static void hclge_service_task(struct work_struct *work)
4008 {
4009 	struct hclge_dev *hdev =
4010 		container_of(work, struct hclge_dev, service_task.work);
4011 
4012 	hclge_reset_service_task(hdev);
4013 	hclge_mailbox_service_task(hdev);
4014 	hclge_periodic_service_task(hdev);
4015 
4016 	/* Handle reset and mbx again in case periodical task delays the
4017 	 * handling by calling hclge_task_schedule() in
4018 	 * hclge_periodic_service_task().
4019 	 */
4020 	hclge_reset_service_task(hdev);
4021 	hclge_mailbox_service_task(hdev);
4022 }
4023 
4024 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4025 {
4026 	/* VF handle has no client */
4027 	if (!handle->client)
4028 		return container_of(handle, struct hclge_vport, nic);
4029 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4030 		return container_of(handle, struct hclge_vport, roce);
4031 	else
4032 		return container_of(handle, struct hclge_vport, nic);
4033 }
4034 
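/* Allocate up to vector_num unused MSI-X vectors for this vport and fill
 * vector_info with the irq number and vector register address of each.
 * Returns the number of vectors actually allocated.
 */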
4035 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4036 			    struct hnae3_vector_info *vector_info)
4037 {
4038 	struct hclge_vport *vport = hclge_get_vport(handle);
4039 	struct hnae3_vector_info *vector = vector_info;
4040 	struct hclge_dev *hdev = vport->back;
4041 	int alloc = 0;
4042 	int i, j;
4043 
4044 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4045 	vector_num = min(hdev->num_msi_left, vector_num);
4046 
4047 	for (j = 0; j < vector_num; j++) {
4048 		for (i = 1; i < hdev->num_msi; i++) {
4049 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4050 				vector->vector = pci_irq_vector(hdev->pdev, i);
4051 				vector->io_addr = hdev->hw.io_base +
4052 					HCLGE_VECTOR_REG_BASE +
4053 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
4054 					vport->vport_id *
4055 					HCLGE_VECTOR_VF_OFFSET;
4056 				hdev->vector_status[i] = vport->vport_id;
4057 				hdev->vector_irq[i] = vector->vector;
4058 
4059 				vector++;
4060 				alloc++;
4061 
4062 				break;
4063 			}
4064 		}
4065 	}
4066 	hdev->num_msi_left -= alloc;
4067 	hdev->num_msi_used += alloc;
4068 
4069 	return alloc;
4070 }
4071 
4072 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4073 {
4074 	int i;
4075 
4076 	for (i = 0; i < hdev->num_msi; i++)
4077 		if (vector == hdev->vector_irq[i])
4078 			return i;
4079 
4080 	return -EINVAL;
4081 }
4082 
4083 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4084 {
4085 	struct hclge_vport *vport = hclge_get_vport(handle);
4086 	struct hclge_dev *hdev = vport->back;
4087 	int vector_id;
4088 
4089 	vector_id = hclge_get_vector_index(hdev, vector);
4090 	if (vector_id < 0) {
4091 		dev_err(&hdev->pdev->dev,
4092 			"Get vector index fail. vector = %d\n", vector);
4093 		return vector_id;
4094 	}
4095 
4096 	hclge_free_vector(hdev, vector_id);
4097 
4098 	return 0;
4099 }
4100 
4101 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4102 {
4103 	return HCLGE_RSS_KEY_SIZE;
4104 }
4105 
4106 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4107 {
4108 	return HCLGE_RSS_IND_TBL_SIZE;
4109 }
4110 
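/* Program the RSS hash algorithm and hash key into hardware. The key is
 * written in chunks of HCLGE_RSS_HASH_KEY_NUM bytes, one command per chunk.
 */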
4111 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4112 				  const u8 hfunc, const u8 *key)
4113 {
4114 	struct hclge_rss_config_cmd *req;
4115 	unsigned int key_offset = 0;
4116 	struct hclge_desc desc;
4117 	int key_counts;
4118 	int key_size;
4119 	int ret;
4120 
4121 	key_counts = HCLGE_RSS_KEY_SIZE;
4122 	req = (struct hclge_rss_config_cmd *)desc.data;
4123 
4124 	while (key_counts) {
4125 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4126 					   false);
4127 
4128 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4129 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4130 
4131 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4132 		memcpy(req->hash_key,
4133 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4134 
4135 		key_counts -= key_size;
4136 		key_offset++;
4137 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4138 		if (ret) {
4139 			dev_err(&hdev->pdev->dev,
4140 				"Configure RSS config fail, status = %d\n",
4141 				ret);
4142 			return ret;
4143 		}
4144 	}
4145 	return 0;
4146 }
4147 
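/* Program the RSS indirection table into hardware,
 * HCLGE_RSS_CFG_TBL_SIZE entries per command.
 */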
4148 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4149 {
4150 	struct hclge_rss_indirection_table_cmd *req;
4151 	struct hclge_desc desc;
4152 	int i, j;
4153 	int ret;
4154 
4155 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4156 
4157 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4158 		hclge_cmd_setup_basic_desc
4159 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4160 
4161 		req->start_table_index =
4162 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4163 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4164 
4165 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4166 			req->rss_result[j] =
4167 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4168 
4169 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4170 		if (ret) {
4171 			dev_err(&hdev->pdev->dev,
4172 				"Configure rss indir table fail,status = %d\n",
4173 				ret);
4174 			return ret;
4175 		}
4176 	}
4177 	return 0;
4178 }
4179 
4180 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4181 				 u16 *tc_size, u16 *tc_offset)
4182 {
4183 	struct hclge_rss_tc_mode_cmd *req;
4184 	struct hclge_desc desc;
4185 	int ret;
4186 	int i;
4187 
4188 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4189 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4190 
4191 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4192 		u16 mode = 0;
4193 
4194 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4195 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4196 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4197 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4198 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4199 
4200 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4201 	}
4202 
4203 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4204 	if (ret)
4205 		dev_err(&hdev->pdev->dev,
4206 			"Configure rss tc mode fail, status = %d\n", ret);
4207 
4208 	return ret;
4209 }
4210 
4211 static void hclge_get_rss_type(struct hclge_vport *vport)
4212 {
4213 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4214 	    vport->rss_tuple_sets.ipv4_udp_en ||
4215 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4216 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4217 	    vport->rss_tuple_sets.ipv6_udp_en ||
4218 	    vport->rss_tuple_sets.ipv6_sctp_en)
4219 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4220 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4221 		 vport->rss_tuple_sets.ipv6_fragment_en)
4222 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4223 	else
4224 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4225 }
4226 
4227 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4228 {
4229 	struct hclge_rss_input_tuple_cmd *req;
4230 	struct hclge_desc desc;
4231 	int ret;
4232 
4233 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4234 
4235 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4236 
4237 	/* Get the tuple cfg from pf */
4238 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4239 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4240 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4241 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4242 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4243 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4244 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4245 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4246 	hclge_get_rss_type(&hdev->vport[0]);
4247 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4248 	if (ret)
4249 		dev_err(&hdev->pdev->dev,
4250 			"Configure rss input fail, status = %d\n", ret);
4251 	return ret;
4252 }
4253 
4254 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4255 			 u8 *key, u8 *hfunc)
4256 {
4257 	struct hclge_vport *vport = hclge_get_vport(handle);
4258 	int i;
4259 
4260 	/* Get hash algorithm */
4261 	if (hfunc) {
4262 		switch (vport->rss_algo) {
4263 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4264 			*hfunc = ETH_RSS_HASH_TOP;
4265 			break;
4266 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4267 			*hfunc = ETH_RSS_HASH_XOR;
4268 			break;
4269 		default:
4270 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4271 			break;
4272 		}
4273 	}
4274 
4275 	/* Get the RSS Key required by the user */
4276 	if (key)
4277 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4278 
4279 	/* Get indirect table */
4280 	if (indir)
4281 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4282 			indir[i] = vport->rss_indirection_tbl[i];
4283 
4284 	return 0;
4285 }
4286 
4287 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4288 			 const  u8 *key, const  u8 hfunc)
4289 {
4290 	struct hclge_vport *vport = hclge_get_vport(handle);
4291 	struct hclge_dev *hdev = vport->back;
4292 	u8 hash_algo;
4293 	int ret, i;
4294 
4295 	/* Set the RSS Hash Key if specified by the user */
4296 	if (key) {
4297 		switch (hfunc) {
4298 		case ETH_RSS_HASH_TOP:
4299 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4300 			break;
4301 		case ETH_RSS_HASH_XOR:
4302 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4303 			break;
4304 		case ETH_RSS_HASH_NO_CHANGE:
4305 			hash_algo = vport->rss_algo;
4306 			break;
4307 		default:
4308 			return -EINVAL;
4309 		}
4310 
4311 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4312 		if (ret)
4313 			return ret;
4314 
4315 		/* Update the shadow RSS key with the user specified key */
4316 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4317 		vport->rss_algo = hash_algo;
4318 	}
4319 
4320 	/* Update the shadow RSS table with user specified qids */
4321 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4322 		vport->rss_indirection_tbl[i] = indir[i];
4323 
4324 	/* Update the hardware */
4325 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4326 }
4327 
4328 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4329 {
4330 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4331 
4332 	if (nfc->data & RXH_L4_B_2_3)
4333 		hash_sets |= HCLGE_D_PORT_BIT;
4334 	else
4335 		hash_sets &= ~HCLGE_D_PORT_BIT;
4336 
4337 	if (nfc->data & RXH_IP_SRC)
4338 		hash_sets |= HCLGE_S_IP_BIT;
4339 	else
4340 		hash_sets &= ~HCLGE_S_IP_BIT;
4341 
4342 	if (nfc->data & RXH_IP_DST)
4343 		hash_sets |= HCLGE_D_IP_BIT;
4344 	else
4345 		hash_sets &= ~HCLGE_D_IP_BIT;
4346 
4347 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4348 		hash_sets |= HCLGE_V_TAG_BIT;
4349 
4350 	return hash_sets;
4351 }
4352 
4353 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4354 			       struct ethtool_rxnfc *nfc)
4355 {
4356 	struct hclge_vport *vport = hclge_get_vport(handle);
4357 	struct hclge_dev *hdev = vport->back;
4358 	struct hclge_rss_input_tuple_cmd *req;
4359 	struct hclge_desc desc;
4360 	u8 tuple_sets;
4361 	int ret;
4362 
4363 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4364 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4365 		return -EINVAL;
4366 
4367 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4368 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4369 
4370 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4371 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4372 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4373 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4374 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4375 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4376 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4377 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4378 
4379 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4380 	switch (nfc->flow_type) {
4381 	case TCP_V4_FLOW:
4382 		req->ipv4_tcp_en = tuple_sets;
4383 		break;
4384 	case TCP_V6_FLOW:
4385 		req->ipv6_tcp_en = tuple_sets;
4386 		break;
4387 	case UDP_V4_FLOW:
4388 		req->ipv4_udp_en = tuple_sets;
4389 		break;
4390 	case UDP_V6_FLOW:
4391 		req->ipv6_udp_en = tuple_sets;
4392 		break;
4393 	case SCTP_V4_FLOW:
4394 		req->ipv4_sctp_en = tuple_sets;
4395 		break;
4396 	case SCTP_V6_FLOW:
4397 		if ((nfc->data & RXH_L4_B_0_1) ||
4398 		    (nfc->data & RXH_L4_B_2_3))
4399 			return -EINVAL;
4400 
4401 		req->ipv6_sctp_en = tuple_sets;
4402 		break;
4403 	case IPV4_FLOW:
4404 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4405 		break;
4406 	case IPV6_FLOW:
4407 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4408 		break;
4409 	default:
4410 		return -EINVAL;
4411 	}
4412 
4413 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4414 	if (ret) {
4415 		dev_err(&hdev->pdev->dev,
4416 			"Set rss tuple fail, status = %d\n", ret);
4417 		return ret;
4418 	}
4419 
4420 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4421 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4422 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4423 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4424 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4425 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4426 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4427 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4428 	hclge_get_rss_type(vport);
4429 	return 0;
4430 }
4431 
4432 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4433 			       struct ethtool_rxnfc *nfc)
4434 {
4435 	struct hclge_vport *vport = hclge_get_vport(handle);
4436 	u8 tuple_sets;
4437 
4438 	nfc->data = 0;
4439 
4440 	switch (nfc->flow_type) {
4441 	case TCP_V4_FLOW:
4442 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4443 		break;
4444 	case UDP_V4_FLOW:
4445 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4446 		break;
4447 	case TCP_V6_FLOW:
4448 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4449 		break;
4450 	case UDP_V6_FLOW:
4451 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4452 		break;
4453 	case SCTP_V4_FLOW:
4454 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4455 		break;
4456 	case SCTP_V6_FLOW:
4457 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4458 		break;
4459 	case IPV4_FLOW:
4460 	case IPV6_FLOW:
4461 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4462 		break;
4463 	default:
4464 		return -EINVAL;
4465 	}
4466 
4467 	if (!tuple_sets)
4468 		return 0;
4469 
4470 	if (tuple_sets & HCLGE_D_PORT_BIT)
4471 		nfc->data |= RXH_L4_B_2_3;
4472 	if (tuple_sets & HCLGE_S_PORT_BIT)
4473 		nfc->data |= RXH_L4_B_0_1;
4474 	if (tuple_sets & HCLGE_D_IP_BIT)
4475 		nfc->data |= RXH_IP_DST;
4476 	if (tuple_sets & HCLGE_S_IP_BIT)
4477 		nfc->data |= RXH_IP_SRC;
4478 
4479 	return 0;
4480 }
4481 
4482 static int hclge_get_tc_size(struct hnae3_handle *handle)
4483 {
4484 	struct hclge_vport *vport = hclge_get_vport(handle);
4485 	struct hclge_dev *hdev = vport->back;
4486 
4487 	return hdev->rss_size_max;
4488 }
4489 
4490 int hclge_rss_init_hw(struct hclge_dev *hdev)
4491 {
4492 	struct hclge_vport *vport = hdev->vport;
4493 	u8 *rss_indir = vport[0].rss_indirection_tbl;
4494 	u16 rss_size = vport[0].alloc_rss_size;
4495 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4496 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4497 	u8 *key = vport[0].rss_hash_key;
4498 	u8 hfunc = vport[0].rss_algo;
4499 	u16 tc_valid[HCLGE_MAX_TC_NUM];
4500 	u16 roundup_size;
4501 	unsigned int i;
4502 	int ret;
4503 
4504 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4505 	if (ret)
4506 		return ret;
4507 
4508 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4509 	if (ret)
4510 		return ret;
4511 
4512 	ret = hclge_set_rss_input_tuple(hdev);
4513 	if (ret)
4514 		return ret;
4515 
4516 	/* Each TC has the same queue size, and the tc_size set to hardware is
4517 	 * the log2 of the roundup power of two of rss_size; the actual queue
4518 	 * size is limited by the indirection table.
4519 	 */
4520 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4521 		dev_err(&hdev->pdev->dev,
4522 			"Configure rss tc size failed, invalid TC_SIZE = %u\n",
4523 			rss_size);
4524 		return -EINVAL;
4525 	}
4526 
4527 	roundup_size = roundup_pow_of_two(rss_size);
4528 	roundup_size = ilog2(roundup_size);
4529 
4530 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4531 		tc_valid[i] = 0;
4532 
4533 		if (!(hdev->hw_tc_map & BIT(i)))
4534 			continue;
4535 
4536 		tc_valid[i] = 1;
4537 		tc_size[i] = roundup_size;
4538 		tc_offset[i] = rss_size * i;
4539 	}
4540 
4541 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4542 }
4543 
4544 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4545 {
4546 	struct hclge_vport *vport = hdev->vport;
4547 	int i, j;
4548 
4549 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4550 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4551 			vport[j].rss_indirection_tbl[i] =
4552 				i % vport[j].alloc_rss_size;
4553 	}
4554 }
4555 
4556 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4557 {
4558 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4559 	struct hclge_vport *vport = hdev->vport;
4560 
4561 	if (hdev->pdev->revision >= 0x21)
4562 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4563 
4564 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4565 		vport[i].rss_tuple_sets.ipv4_tcp_en =
4566 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4567 		vport[i].rss_tuple_sets.ipv4_udp_en =
4568 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4569 		vport[i].rss_tuple_sets.ipv4_sctp_en =
4570 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4571 		vport[i].rss_tuple_sets.ipv4_fragment_en =
4572 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4573 		vport[i].rss_tuple_sets.ipv6_tcp_en =
4574 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4575 		vport[i].rss_tuple_sets.ipv6_udp_en =
4576 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4577 		vport[i].rss_tuple_sets.ipv6_sctp_en =
4578 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4579 		vport[i].rss_tuple_sets.ipv6_fragment_en =
4580 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4581 
4582 		vport[i].rss_algo = rss_algo;
4583 
4584 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
4585 		       HCLGE_RSS_KEY_SIZE);
4586 	}
4587 
4588 	hclge_rss_indir_init_cfg(hdev);
4589 }
4590 
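/* Map (en == true) or unmap (en == false) the rings in ring_chain to the
 * interrupt vector vector_id, batching up to HCLGE_VECTOR_ELEMENTS_PER_CMD
 * ring entries per command.
 */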
4591 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4592 				int vector_id, bool en,
4593 				struct hnae3_ring_chain_node *ring_chain)
4594 {
4595 	struct hclge_dev *hdev = vport->back;
4596 	struct hnae3_ring_chain_node *node;
4597 	struct hclge_desc desc;
4598 	struct hclge_ctrl_vector_chain_cmd *req =
4599 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
4600 	enum hclge_cmd_status status;
4601 	enum hclge_opcode_type op;
4602 	u16 tqp_type_and_id;
4603 	int i;
4604 
4605 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4606 	hclge_cmd_setup_basic_desc(&desc, op, false);
4607 	req->int_vector_id = vector_id;
4608 
4609 	i = 0;
4610 	for (node = ring_chain; node; node = node->next) {
4611 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4612 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4613 				HCLGE_INT_TYPE_S,
4614 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4615 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4616 				HCLGE_TQP_ID_S, node->tqp_index);
4617 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4618 				HCLGE_INT_GL_IDX_S,
4619 				hnae3_get_field(node->int_gl_idx,
4620 						HNAE3_RING_GL_IDX_M,
4621 						HNAE3_RING_GL_IDX_S));
4622 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4623 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4624 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4625 			req->vfid = vport->vport_id;
4626 
4627 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
4628 			if (status) {
4629 				dev_err(&hdev->pdev->dev,
4630 					"Map TQP fail, status is %d.\n",
4631 					status);
4632 				return -EIO;
4633 			}
4634 			i = 0;
4635 
4636 			hclge_cmd_setup_basic_desc(&desc,
4637 						   op,
4638 						   false);
4639 			req->int_vector_id = vector_id;
4640 		}
4641 	}
4642 
4643 	if (i > 0) {
4644 		req->int_cause_num = i;
4645 		req->vfid = vport->vport_id;
4646 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
4647 		if (status) {
4648 			dev_err(&hdev->pdev->dev,
4649 				"Map TQP fail, status is %d.\n", status);
4650 			return -EIO;
4651 		}
4652 	}
4653 
4654 	return 0;
4655 }
4656 
4657 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4658 				    struct hnae3_ring_chain_node *ring_chain)
4659 {
4660 	struct hclge_vport *vport = hclge_get_vport(handle);
4661 	struct hclge_dev *hdev = vport->back;
4662 	int vector_id;
4663 
4664 	vector_id = hclge_get_vector_index(hdev, vector);
4665 	if (vector_id < 0) {
4666 		dev_err(&hdev->pdev->dev,
4667 			"failed to get vector index. vector=%d\n", vector);
4668 		return vector_id;
4669 	}
4670 
4671 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4672 }
4673 
4674 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4675 				       struct hnae3_ring_chain_node *ring_chain)
4676 {
4677 	struct hclge_vport *vport = hclge_get_vport(handle);
4678 	struct hclge_dev *hdev = vport->back;
4679 	int vector_id, ret;
4680 
4681 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4682 		return 0;
4683 
4684 	vector_id = hclge_get_vector_index(hdev, vector);
4685 	if (vector_id < 0) {
4686 		dev_err(&handle->pdev->dev,
4687 			"Get vector index fail. ret =%d\n", vector_id);
4688 		return vector_id;
4689 	}
4690 
4691 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4692 	if (ret)
4693 		dev_err(&handle->pdev->dev,
4694 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4695 			vector_id, ret);
4696 
4697 	return ret;
4698 }
4699 
4700 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4701 				      struct hclge_promisc_param *param)
4702 {
4703 	struct hclge_promisc_cfg_cmd *req;
4704 	struct hclge_desc desc;
4705 	int ret;
4706 
4707 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4708 
4709 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
4710 	req->vf_id = param->vf_id;
4711 
4712 	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4713 	 * pdev revision 0x20; newer revisions support them. Setting these
4714 	 * two fields does not return an error when the driver sends the
4715 	 * command to firmware on revision 0x20.
4716 	 */
4717 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4718 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4719 
4720 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4721 	if (ret)
4722 		dev_err(&hdev->pdev->dev,
4723 			"Set promisc mode fail, status is %d.\n", ret);
4724 
4725 	return ret;
4726 }
4727 
4728 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4729 				     bool en_uc, bool en_mc, bool en_bc,
4730 				     int vport_id)
4731 {
4732 	if (!param)
4733 		return;
4734 
4735 	memset(param, 0, sizeof(struct hclge_promisc_param));
4736 	if (en_uc)
4737 		param->enable = HCLGE_PROMISC_EN_UC;
4738 	if (en_mc)
4739 		param->enable |= HCLGE_PROMISC_EN_MC;
4740 	if (en_bc)
4741 		param->enable |= HCLGE_PROMISC_EN_BC;
4742 	param->vf_id = vport_id;
4743 }
4744 
4745 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4746 				 bool en_mc_pmc, bool en_bc_pmc)
4747 {
4748 	struct hclge_dev *hdev = vport->back;
4749 	struct hclge_promisc_param param;
4750 
4751 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4752 				 vport->vport_id);
4753 	return hclge_cmd_set_promisc_mode(hdev, &param);
4754 }
4755 
4756 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4757 				  bool en_mc_pmc)
4758 {
4759 	struct hclge_vport *vport = hclge_get_vport(handle);
4760 	bool en_bc_pmc = true;
4761 
4762 	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4763 	 * is always bypassed. So broadcast promisc should be disabled until
4764 	 * the user enables promisc mode
4765 	 */
4766 	if (handle->pdev->revision == 0x20)
4767 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4768 
4769 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4770 					    en_bc_pmc);
4771 }
4772 
4773 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4774 {
4775 	struct hclge_get_fd_mode_cmd *req;
4776 	struct hclge_desc desc;
4777 	int ret;
4778 
4779 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4780 
4781 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
4782 
4783 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4784 	if (ret) {
4785 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4786 		return ret;
4787 	}
4788 
4789 	*fd_mode = req->mode;
4790 
4791 	return ret;
4792 }
4793 
4794 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4795 				   u32 *stage1_entry_num,
4796 				   u32 *stage2_entry_num,
4797 				   u16 *stage1_counter_num,
4798 				   u16 *stage2_counter_num)
4799 {
4800 	struct hclge_get_fd_allocation_cmd *req;
4801 	struct hclge_desc desc;
4802 	int ret;
4803 
4804 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4805 
4806 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4807 
4808 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4809 	if (ret) {
4810 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4811 			ret);
4812 		return ret;
4813 	}
4814 
4815 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4816 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4817 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4818 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4819 
4820 	return ret;
4821 }
4822 
4823 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4824 {
4825 	struct hclge_set_fd_key_config_cmd *req;
4826 	struct hclge_fd_key_cfg *stage;
4827 	struct hclge_desc desc;
4828 	int ret;
4829 
4830 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4831 
4832 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4833 	stage = &hdev->fd_cfg.key_cfg[stage_num];
4834 	req->stage = stage_num;
4835 	req->key_select = stage->key_sel;
4836 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4837 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4838 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4839 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4840 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4841 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4842 
4843 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4844 	if (ret)
4845 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4846 
4847 	return ret;
4848 }
4849 
4850 static int hclge_init_fd_config(struct hclge_dev *hdev)
4851 {
4852 #define LOW_2_WORDS		0x03
4853 	struct hclge_fd_key_cfg *key_cfg;
4854 	int ret;
4855 
4856 	if (!hnae3_dev_fd_supported(hdev))
4857 		return 0;
4858 
4859 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4860 	if (ret)
4861 		return ret;
4862 
4863 	switch (hdev->fd_cfg.fd_mode) {
4864 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4865 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4866 		break;
4867 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4868 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4869 		break;
4870 	default:
4871 		dev_err(&hdev->pdev->dev,
4872 			"Unsupported flow director mode %u\n",
4873 			hdev->fd_cfg.fd_mode);
4874 		return -EOPNOTSUPP;
4875 	}
4876 
4877 	hdev->fd_cfg.proto_support =
4878 		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4879 		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4880 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4881 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4882 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4883 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4884 	key_cfg->outer_sipv6_word_en = 0;
4885 	key_cfg->outer_dipv6_word_en = 0;
4886 
4887 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4888 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4889 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4890 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4891 
4892 	/* If using the max 400 bit key, we can support tuples for ether type */
4893 	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4894 		hdev->fd_cfg.proto_support |= ETHER_FLOW;
4895 		key_cfg->tuple_active |=
4896 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4897 	}
4898 
4899 	/* roce_type is used to filter roce frames
4900 	 * dst_vport is used to specify the destination vport of the rule
4901 	 */
4902 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4903 
4904 	ret = hclge_get_fd_allocation(hdev,
4905 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4906 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4907 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4908 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4909 	if (ret)
4910 		return ret;
4911 
4912 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4913 }
4914 
4915 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4916 				int loc, u8 *key, bool is_add)
4917 {
4918 	struct hclge_fd_tcam_config_1_cmd *req1;
4919 	struct hclge_fd_tcam_config_2_cmd *req2;
4920 	struct hclge_fd_tcam_config_3_cmd *req3;
4921 	struct hclge_desc desc[3];
4922 	int ret;
4923 
4924 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4925 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4926 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4927 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4928 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4929 
4930 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4931 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4932 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4933 
4934 	req1->stage = stage;
4935 	req1->xy_sel = sel_x ? 1 : 0;
4936 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4937 	req1->index = cpu_to_le32(loc);
4938 	req1->entry_vld = sel_x ? is_add : 0;
4939 
4940 	if (key) {
4941 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4942 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4943 		       sizeof(req2->tcam_data));
4944 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4945 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4946 	}
4947 
4948 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
4949 	if (ret)
4950 		dev_err(&hdev->pdev->dev,
4951 			"config tcam key fail, ret=%d\n",
4952 			ret);
4953 
4954 	return ret;
4955 }
4956 
4957 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4958 			      struct hclge_fd_ad_data *action)
4959 {
4960 	struct hclge_fd_ad_config_cmd *req;
4961 	struct hclge_desc desc;
4962 	u64 ad_data = 0;
4963 	int ret;
4964 
4965 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4966 
4967 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
4968 	req->index = cpu_to_le32(loc);
4969 	req->stage = stage;
4970 
4971 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4972 		      action->write_rule_id_to_bd);
4973 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4974 			action->rule_id);
4975 	ad_data <<= 32;
4976 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4977 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4978 		      action->forward_to_direct_queue);
4979 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4980 			action->queue_id);
4981 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4982 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4983 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4984 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4985 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4986 			action->counter_id);
4987 
4988 	req->ad_data = cpu_to_le64(ad_data);
4989 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4990 	if (ret)
4991 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4992 
4993 	return ret;
4994 }
4995 
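/* Convert one tuple of the rule into the TCAM x/y key format using the
 * calc_x()/calc_y() helpers. Returns true if the tuple occupies key space
 * (even when it is masked out via rule->unused_tuple), so that the caller
 * knows to advance the key pointers.
 */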
4996 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4997 				   struct hclge_fd_rule *rule)
4998 {
4999 	u16 tmp_x_s, tmp_y_s;
5000 	u32 tmp_x_l, tmp_y_l;
5001 	int i;
5002 
5003 	if (rule->unused_tuple & tuple_bit)
5004 		return true;
5005 
5006 	switch (tuple_bit) {
5007 	case 0:
5008 		return false;
5009 	case BIT(INNER_DST_MAC):
5010 		for (i = 0; i < ETH_ALEN; i++) {
5011 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5012 			       rule->tuples_mask.dst_mac[i]);
5013 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5014 			       rule->tuples_mask.dst_mac[i]);
5015 		}
5016 
5017 		return true;
5018 	case BIT(INNER_SRC_MAC):
5019 		for (i = 0; i < ETH_ALEN; i++) {
5020 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5021 			       rule->tuples_mask.src_mac[i]);
5022 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5023 			       rule->tuples_mask.src_mac[i]);
5024 		}
5025 
5026 		return true;
5027 	case BIT(INNER_VLAN_TAG_FST):
5028 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5029 		       rule->tuples_mask.vlan_tag1);
5030 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5031 		       rule->tuples_mask.vlan_tag1);
5032 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5033 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5034 
5035 		return true;
5036 	case BIT(INNER_ETH_TYPE):
5037 		calc_x(tmp_x_s, rule->tuples.ether_proto,
5038 		       rule->tuples_mask.ether_proto);
5039 		calc_y(tmp_y_s, rule->tuples.ether_proto,
5040 		       rule->tuples_mask.ether_proto);
5041 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5042 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5043 
5044 		return true;
5045 	case BIT(INNER_IP_TOS):
5046 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5047 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5048 
5049 		return true;
5050 	case BIT(INNER_IP_PROTO):
5051 		calc_x(*key_x, rule->tuples.ip_proto,
5052 		       rule->tuples_mask.ip_proto);
5053 		calc_y(*key_y, rule->tuples.ip_proto,
5054 		       rule->tuples_mask.ip_proto);
5055 
5056 		return true;
5057 	case BIT(INNER_SRC_IP):
5058 		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5059 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5060 		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5061 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5062 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5063 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5064 
5065 		return true;
5066 	case BIT(INNER_DST_IP):
5067 		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5068 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5069 		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5070 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5071 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5072 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5073 
5074 		return true;
5075 	case BIT(INNER_SRC_PORT):
5076 		calc_x(tmp_x_s, rule->tuples.src_port,
5077 		       rule->tuples_mask.src_port);
5078 		calc_y(tmp_y_s, rule->tuples.src_port,
5079 		       rule->tuples_mask.src_port);
5080 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5081 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5082 
5083 		return true;
5084 	case BIT(INNER_DST_PORT):
5085 		calc_x(tmp_x_s, rule->tuples.dst_port,
5086 		       rule->tuples_mask.dst_port);
5087 		calc_y(tmp_y_s, rule->tuples.dst_port,
5088 		       rule->tuples_mask.dst_port);
5089 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5090 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5091 
5092 		return true;
5093 	default:
5094 		return false;
5095 	}
5096 }
5097 
5098 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5099 				 u8 vf_id, u8 network_port_id)
5100 {
5101 	u32 port_number = 0;
5102 
5103 	if (port_type == HOST_PORT) {
5104 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5105 				pf_id);
5106 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5107 				vf_id);
5108 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5109 	} else {
5110 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5111 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5112 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5113 	}
5114 
5115 	return port_number;
5116 }
5117 
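/* Pack the active meta data fields (packet type for ROCE_TYPE, destination
 * vport number for DST_VPORT) into a 32 bit word, convert it to the TCAM
 * x/y format and shift it so the used bits sit at the MSB end of the meta
 * data region.
 */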
5118 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5119 				       __le32 *key_x, __le32 *key_y,
5120 				       struct hclge_fd_rule *rule)
5121 {
5122 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5123 	u8 cur_pos = 0, tuple_size, shift_bits;
5124 	unsigned int i;
5125 
5126 	for (i = 0; i < MAX_META_DATA; i++) {
5127 		tuple_size = meta_data_key_info[i].key_length;
5128 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5129 
5130 		switch (tuple_bit) {
5131 		case BIT(ROCE_TYPE):
5132 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5133 			cur_pos += tuple_size;
5134 			break;
5135 		case BIT(DST_VPORT):
5136 			port_number = hclge_get_port_number(HOST_PORT, 0,
5137 							    rule->vf_id, 0);
5138 			hnae3_set_field(meta_data,
5139 					GENMASK(cur_pos + tuple_size - 1, cur_pos),
5140 					cur_pos, port_number);
5141 			cur_pos += tuple_size;
5142 			break;
5143 		default:
5144 			break;
5145 		}
5146 	}
5147 
5148 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5149 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5150 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5151 
5152 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5153 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5154 }
5155 
5156 /* A complete key is a combination of the meta data key and the tuple key.
5157  * The meta data key is stored at the MSB region, and the tuple key is stored
5158  * at the LSB region; unused bits are filled with 0.
5159  */
5160 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5161 			    struct hclge_fd_rule *rule)
5162 {
5163 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5164 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5165 	u8 *cur_key_x, *cur_key_y;
5166 	unsigned int i;
5167 	int ret, tuple_size;
5168 	u8 meta_data_region;
5169 
5170 	memset(key_x, 0, sizeof(key_x));
5171 	memset(key_y, 0, sizeof(key_y));
5172 	cur_key_x = key_x;
5173 	cur_key_y = key_y;
5174 
5175 	for (i = 0; i < MAX_TUPLE; i++) {
5176 		bool tuple_valid;
5177 		u32 check_tuple;
5178 
5179 		tuple_size = tuple_key_info[i].key_length / 8;
5180 		check_tuple = key_cfg->tuple_active & BIT(i);
5181 
5182 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5183 						     cur_key_y, rule);
5184 		if (tuple_valid) {
5185 			cur_key_x += tuple_size;
5186 			cur_key_y += tuple_size;
5187 		}
5188 	}
5189 
5190 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5191 			MAX_META_DATA_LENGTH / 8;
5192 
5193 	hclge_fd_convert_meta_data(key_cfg,
5194 				   (__le32 *)(key_x + meta_data_region),
5195 				   (__le32 *)(key_y + meta_data_region),
5196 				   rule);
5197 
5198 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5199 				   true);
5200 	if (ret) {
5201 		dev_err(&hdev->pdev->dev,
5202 			"fd key_y config fail, loc=%u, ret=%d\n",
5203 			rule->location, ret);
5204 		return ret;
5205 	}
5206 
5207 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5208 				   true);
5209 	if (ret)
5210 		dev_err(&hdev->pdev->dev,
5211 			"fd key_x config fail, loc=%u, ret=%d\n",
5212 			rule->location, ret);
5213 	return ret;
5214 }
5215 
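/* Build the action data for the rule (drop the packet or forward it to
 * rule->queue_id, with the rule id written back to the BD) and program it
 * via hclge_fd_ad_config().
 */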
5216 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5217 			       struct hclge_fd_rule *rule)
5218 {
5219 	struct hclge_fd_ad_data ad_data;
5220 
5221 	ad_data.ad_id = rule->location;
5222 
5223 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5224 		ad_data.drop_packet = true;
5225 		ad_data.forward_to_direct_queue = false;
5226 		ad_data.queue_id = 0;
5227 	} else {
5228 		ad_data.drop_packet = false;
5229 		ad_data.forward_to_direct_queue = true;
5230 		ad_data.queue_id = rule->queue_id;
5231 	}
5232 
5233 	ad_data.use_counter = false;
5234 	ad_data.counter_id = 0;
5235 
5236 	ad_data.use_next_stage = false;
5237 	ad_data.next_input_key = 0;
5238 
5239 	ad_data.write_rule_id_to_bd = true;
5240 	ad_data.rule_id = rule->location;
5241 
5242 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5243 }
5244 
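/* Validate the ethtool flow spec for the flow director and work out which
 * tuple bits are unused for this rule; the result is returned in *unused.
 */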
5245 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5246 			       struct ethtool_rx_flow_spec *fs, u32 *unused)
5247 {
5248 	struct ethtool_tcpip4_spec *tcp_ip4_spec;
5249 	struct ethtool_usrip4_spec *usr_ip4_spec;
5250 	struct ethtool_tcpip6_spec *tcp_ip6_spec;
5251 	struct ethtool_usrip6_spec *usr_ip6_spec;
5252 	struct ethhdr *ether_spec;
5253 
5254 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5255 		return -EINVAL;
5256 
5257 	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5258 		return -EOPNOTSUPP;
5259 
5260 	if ((fs->flow_type & FLOW_EXT) &&
5261 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5262 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5263 		return -EOPNOTSUPP;
5264 	}
5265 
5266 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5267 	case SCTP_V4_FLOW:
5268 	case TCP_V4_FLOW:
5269 	case UDP_V4_FLOW:
5270 		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5271 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5272 
5273 		if (!tcp_ip4_spec->ip4src)
5274 			*unused |= BIT(INNER_SRC_IP);
5275 
5276 		if (!tcp_ip4_spec->ip4dst)
5277 			*unused |= BIT(INNER_DST_IP);
5278 
5279 		if (!tcp_ip4_spec->psrc)
5280 			*unused |= BIT(INNER_SRC_PORT);
5281 
5282 		if (!tcp_ip4_spec->pdst)
5283 			*unused |= BIT(INNER_DST_PORT);
5284 
5285 		if (!tcp_ip4_spec->tos)
5286 			*unused |= BIT(INNER_IP_TOS);
5287 
5288 		break;
5289 	case IP_USER_FLOW:
5290 		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5291 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5292 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5293 
5294 		if (!usr_ip4_spec->ip4src)
5295 			*unused |= BIT(INNER_SRC_IP);
5296 
5297 		if (!usr_ip4_spec->ip4dst)
5298 			*unused |= BIT(INNER_DST_IP);
5299 
5300 		if (!usr_ip4_spec->tos)
5301 			*unused |= BIT(INNER_IP_TOS);
5302 
5303 		if (!usr_ip4_spec->proto)
5304 			*unused |= BIT(INNER_IP_PROTO);
5305 
5306 		if (usr_ip4_spec->l4_4_bytes)
5307 			return -EOPNOTSUPP;
5308 
5309 		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5310 			return -EOPNOTSUPP;
5311 
5312 		break;
5313 	case SCTP_V6_FLOW:
5314 	case TCP_V6_FLOW:
5315 	case UDP_V6_FLOW:
5316 		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5317 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5318 			BIT(INNER_IP_TOS);
5319 
5320 		/* check whether src/dst ip address used */
5321 		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5322 		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5323 			*unused |= BIT(INNER_SRC_IP);
5324 
5325 		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5326 		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5327 			*unused |= BIT(INNER_DST_IP);
5328 
5329 		if (!tcp_ip6_spec->psrc)
5330 			*unused |= BIT(INNER_SRC_PORT);
5331 
5332 		if (!tcp_ip6_spec->pdst)
5333 			*unused |= BIT(INNER_DST_PORT);
5334 
5335 		if (tcp_ip6_spec->tclass)
5336 			return -EOPNOTSUPP;
5337 
5338 		break;
5339 	case IPV6_USER_FLOW:
5340 		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5341 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5342 			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5343 			BIT(INNER_DST_PORT);
5344 
5345 		/* check whether src/dst ip address used */
5346 		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5347 		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5348 			*unused |= BIT(INNER_SRC_IP);
5349 
5350 		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5351 		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5352 			*unused |= BIT(INNER_DST_IP);
5353 
5354 		if (!usr_ip6_spec->l4_proto)
5355 			*unused |= BIT(INNER_IP_PROTO);
5356 
5357 		if (usr_ip6_spec->tclass)
5358 			return -EOPNOTSUPP;
5359 
5360 		if (usr_ip6_spec->l4_4_bytes)
5361 			return -EOPNOTSUPP;
5362 
5363 		break;
5364 	case ETHER_FLOW:
5365 		ether_spec = &fs->h_u.ether_spec;
5366 		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5367 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5368 			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5369 
5370 		if (is_zero_ether_addr(ether_spec->h_source))
5371 			*unused |= BIT(INNER_SRC_MAC);
5372 
5373 		if (is_zero_ether_addr(ether_spec->h_dest))
5374 			*unused |= BIT(INNER_DST_MAC);
5375 
5376 		if (!ether_spec->h_proto)
5377 			*unused |= BIT(INNER_ETH_TYPE);
5378 
5379 		break;
5380 	default:
5381 		return -EOPNOTSUPP;
5382 	}
5383 
5384 	if ((fs->flow_type & FLOW_EXT)) {
5385 		if (fs->h_ext.vlan_etype)
5386 			return -EOPNOTSUPP;
5387 		if (!fs->h_ext.vlan_tci)
5388 			*unused |= BIT(INNER_VLAN_TAG_FST);
5389 
5390 		if (fs->m_ext.vlan_tci) {
5391 			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5392 				return -EINVAL;
5393 		}
5394 	} else {
5395 		*unused |= BIT(INNER_VLAN_TAG_FST);
5396 	}
5397 
5398 	if (fs->flow_type & FLOW_MAC_EXT) {
5399 		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5400 			return -EOPNOTSUPP;
5401 
5402 		if (is_zero_ether_addr(fs->h_ext.h_dest))
5403 			*unused |= BIT(INNER_DST_MAC);
5404 		else
5405 			*unused &= ~(BIT(INNER_DST_MAC));
5406 	}
5407 
5408 	return 0;
5409 }
5410 
5411 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5412 {
5413 	struct hclge_fd_rule *rule = NULL;
5414 	struct hlist_node *node2;
5415 
5416 	spin_lock_bh(&hdev->fd_rule_lock);
5417 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5418 		if (rule->location >= location)
5419 			break;
5420 	}
5421 
5422 	spin_unlock_bh(&hdev->fd_rule_lock);
5423 
5424 	return  rule && rule->location == location;
5425 }
5426 
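/* Insert, replace or remove a rule in the software rule list, which is kept
 * sorted by rule location. The fd_bmap bit and the rule counter are updated
 * together with the list.
 */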
5427 /* the caller must hold fd_rule_lock before calling this function */
5428 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5429 				     struct hclge_fd_rule *new_rule,
5430 				     u16 location,
5431 				     bool is_add)
5432 {
5433 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
5434 	struct hlist_node *node2;
5435 
5436 	if (is_add && !new_rule)
5437 		return -EINVAL;
5438 
5439 	hlist_for_each_entry_safe(rule, node2,
5440 				  &hdev->fd_rule_list, rule_node) {
5441 		if (rule->location >= location)
5442 			break;
5443 		parent = rule;
5444 	}
5445 
5446 	if (rule && rule->location == location) {
5447 		hlist_del(&rule->rule_node);
5448 		kfree(rule);
5449 		hdev->hclge_fd_rule_num--;
5450 
5451 		if (!is_add) {
5452 			if (!hdev->hclge_fd_rule_num)
5453 				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5454 			clear_bit(location, hdev->fd_bmap);
5455 
5456 			return 0;
5457 		}
5458 	} else if (!is_add) {
5459 		dev_err(&hdev->pdev->dev,
5460 			"delete fail, rule %u does not exist\n",
5461 			location);
5462 		return -EINVAL;
5463 	}
5464 
5465 	INIT_HLIST_NODE(&new_rule->rule_node);
5466 
5467 	if (parent)
5468 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5469 	else
5470 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5471 
5472 	set_bit(location, hdev->fd_bmap);
5473 	hdev->hclge_fd_rule_num++;
5474 	hdev->fd_active_type = new_rule->rule_type;
5475 
5476 	return 0;
5477 }
5478 
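/* Convert the ethtool flow spec (big endian) into the driver's
 * hclge_fd_rule tuples and tuple masks (host byte order).
 */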
5479 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5480 			      struct ethtool_rx_flow_spec *fs,
5481 			      struct hclge_fd_rule *rule)
5482 {
5483 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5484 
5485 	switch (flow_type) {
5486 	case SCTP_V4_FLOW:
5487 	case TCP_V4_FLOW:
5488 	case UDP_V4_FLOW:
5489 		rule->tuples.src_ip[IPV4_INDEX] =
5490 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5491 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5492 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5493 
5494 		rule->tuples.dst_ip[IPV4_INDEX] =
5495 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5496 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5497 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5498 
5499 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5500 		rule->tuples_mask.src_port =
5501 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5502 
5503 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5504 		rule->tuples_mask.dst_port =
5505 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5506 
5507 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5508 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5509 
5510 		rule->tuples.ether_proto = ETH_P_IP;
5511 		rule->tuples_mask.ether_proto = 0xFFFF;
5512 
5513 		break;
5514 	case IP_USER_FLOW:
5515 		rule->tuples.src_ip[IPV4_INDEX] =
5516 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5517 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5518 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5519 
5520 		rule->tuples.dst_ip[IPV4_INDEX] =
5521 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5522 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5523 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5524 
5525 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5526 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5527 
5528 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5529 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5530 
5531 		rule->tuples.ether_proto = ETH_P_IP;
5532 		rule->tuples_mask.ether_proto = 0xFFFF;
5533 
5534 		break;
5535 	case SCTP_V6_FLOW:
5536 	case TCP_V6_FLOW:
5537 	case UDP_V6_FLOW:
5538 		be32_to_cpu_array(rule->tuples.src_ip,
5539 				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5540 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5541 				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5542 
5543 		be32_to_cpu_array(rule->tuples.dst_ip,
5544 				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5545 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5546 				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5547 
5548 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5549 		rule->tuples_mask.src_port =
5550 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5551 
5552 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5553 		rule->tuples_mask.dst_port =
5554 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5555 
5556 		rule->tuples.ether_proto = ETH_P_IPV6;
5557 		rule->tuples_mask.ether_proto = 0xFFFF;
5558 
5559 		break;
5560 	case IPV6_USER_FLOW:
5561 		be32_to_cpu_array(rule->tuples.src_ip,
5562 				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5563 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5564 				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5565 
5566 		be32_to_cpu_array(rule->tuples.dst_ip,
5567 				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5568 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5569 				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5570 
5571 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5572 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5573 
5574 		rule->tuples.ether_proto = ETH_P_IPV6;
5575 		rule->tuples_mask.ether_proto = 0xFFFF;
5576 
5577 		break;
5578 	case ETHER_FLOW:
5579 		ether_addr_copy(rule->tuples.src_mac,
5580 				fs->h_u.ether_spec.h_source);
5581 		ether_addr_copy(rule->tuples_mask.src_mac,
5582 				fs->m_u.ether_spec.h_source);
5583 
5584 		ether_addr_copy(rule->tuples.dst_mac,
5585 				fs->h_u.ether_spec.h_dest);
5586 		ether_addr_copy(rule->tuples_mask.dst_mac,
5587 				fs->m_u.ether_spec.h_dest);
5588 
5589 		rule->tuples.ether_proto =
5590 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
5591 		rule->tuples_mask.ether_proto =
5592 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
5593 
5594 		break;
5595 	default:
5596 		return -EOPNOTSUPP;
5597 	}
5598 
5599 	switch (flow_type) {
5600 	case SCTP_V4_FLOW:
5601 	case SCTP_V6_FLOW:
5602 		rule->tuples.ip_proto = IPPROTO_SCTP;
5603 		rule->tuples_mask.ip_proto = 0xFF;
5604 		break;
5605 	case TCP_V4_FLOW:
5606 	case TCP_V6_FLOW:
5607 		rule->tuples.ip_proto = IPPROTO_TCP;
5608 		rule->tuples_mask.ip_proto = 0xFF;
5609 		break;
5610 	case UDP_V4_FLOW:
5611 	case UDP_V6_FLOW:
5612 		rule->tuples.ip_proto = IPPROTO_UDP;
5613 		rule->tuples_mask.ip_proto = 0xFF;
5614 		break;
5615 	default:
5616 		break;
5617 	}
5618 
5619 	if ((fs->flow_type & FLOW_EXT)) {
5620 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5621 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5622 	}
5623 
5624 	if (fs->flow_type & FLOW_MAC_EXT) {
5625 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5626 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5627 	}
5628 
5629 	return 0;
5630 }
5631 
5632 /* the caller must hold fd_rule_lock before calling this function */
5633 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5634 				struct hclge_fd_rule *rule)
5635 {
5636 	int ret;
5637 
5638 	if (!rule) {
5639 		dev_err(&hdev->pdev->dev,
5640 			"The flow director rule is NULL\n");
5641 		return -EINVAL;
5642 	}
5643 
5644 	/* this never fails here, so there is no need to check the return value */
5645 	hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5646 
5647 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5648 	if (ret)
5649 		goto clear_rule;
5650 
5651 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5652 	if (ret)
5653 		goto clear_rule;
5654 
5655 	return 0;
5656 
5657 clear_rule:
5658 	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5659 	return ret;
5660 }
5661 
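/* Add a flow director rule configured from user space, e.g. via ethtool
 * n-tuple filtering (device name and values below are only an example):
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.10 dst-port 80 \
 *           action 5 loc 1
 * A ring_cookie of RX_CLS_FLOW_DISC means matched packets are dropped
 * instead of being forwarded to a queue.
 */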
5662 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5663 			      struct ethtool_rxnfc *cmd)
5664 {
5665 	struct hclge_vport *vport = hclge_get_vport(handle);
5666 	struct hclge_dev *hdev = vport->back;
5667 	u16 dst_vport_id = 0, q_index = 0;
5668 	struct ethtool_rx_flow_spec *fs;
5669 	struct hclge_fd_rule *rule;
5670 	u32 unused = 0;
5671 	u8 action;
5672 	int ret;
5673 
5674 	if (!hnae3_dev_fd_supported(hdev))
5675 		return -EOPNOTSUPP;
5676 
5677 	if (!hdev->fd_en) {
5678 		dev_warn(&hdev->pdev->dev,
5679 			 "Please enable flow director first\n");
5680 		return -EOPNOTSUPP;
5681 	}
5682 
5683 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5684 
5685 	ret = hclge_fd_check_spec(hdev, fs, &unused);
5686 	if (ret) {
5687 		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5688 		return ret;
5689 	}
5690 
5691 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5692 		action = HCLGE_FD_ACTION_DROP_PACKET;
5693 	} else {
5694 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5695 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5696 		u16 tqps;
5697 
5698 		if (vf > hdev->num_req_vfs) {
5699 			dev_err(&hdev->pdev->dev,
5700 				"Error: vf id (%u) > max vf num (%u)\n",
5701 				vf, hdev->num_req_vfs);
5702 			return -EINVAL;
5703 		}
5704 
5705 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5706 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5707 
5708 		if (ring >= tqps) {
5709 			dev_err(&hdev->pdev->dev,
5710 				"Error: queue id (%u) > max tqp num (%u)\n",
5711 				ring, tqps - 1);
5712 			return -EINVAL;
5713 		}
5714 
5715 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5716 		q_index = ring;
5717 	}
5718 
5719 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5720 	if (!rule)
5721 		return -ENOMEM;
5722 
5723 	ret = hclge_fd_get_tuple(hdev, fs, rule);
5724 	if (ret) {
5725 		kfree(rule);
5726 		return ret;
5727 	}
5728 
5729 	rule->flow_type = fs->flow_type;
5730 
5731 	rule->location = fs->location;
5732 	rule->unused_tuple = unused;
5733 	rule->vf_id = dst_vport_id;
5734 	rule->queue_id = q_index;
5735 	rule->action = action;
5736 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
5737 
5738 	/* to avoid rule conflicts, all arfs rules need to be cleared
5739 	 * when the user configures a rule via ethtool
5740 	 */
5741 	hclge_clear_arfs_rules(handle);
5742 
5743 	spin_lock_bh(&hdev->fd_rule_lock);
5744 	ret = hclge_fd_config_rule(hdev, rule);
5745 
5746 	spin_unlock_bh(&hdev->fd_rule_lock);
5747 
5748 	return ret;
5749 }
5750 
5751 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5752 			      struct ethtool_rxnfc *cmd)
5753 {
5754 	struct hclge_vport *vport = hclge_get_vport(handle);
5755 	struct hclge_dev *hdev = vport->back;
5756 	struct ethtool_rx_flow_spec *fs;
5757 	int ret;
5758 
5759 	if (!hnae3_dev_fd_supported(hdev))
5760 		return -EOPNOTSUPP;
5761 
5762 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5763 
5764 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5765 		return -EINVAL;
5766 
5767 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
5768 		dev_err(&hdev->pdev->dev,
5769 			"Delete fail, rule %u does not exist\n", fs->location);
5770 		return -ENOENT;
5771 	}
5772 
5773 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5774 				   NULL, false);
5775 	if (ret)
5776 		return ret;
5777 
5778 	spin_lock_bh(&hdev->fd_rule_lock);
5779 	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5780 
5781 	spin_unlock_bh(&hdev->fd_rule_lock);
5782 
5783 	return ret;
5784 }
5785 
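/* Disable every rule currently set in the TCAM; when clear_list is true,
 * also free the software rule list and reset the rule bitmap and counters.
 */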
5786 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5787 				     bool clear_list)
5788 {
5789 	struct hclge_vport *vport = hclge_get_vport(handle);
5790 	struct hclge_dev *hdev = vport->back;
5791 	struct hclge_fd_rule *rule;
5792 	struct hlist_node *node;
5793 	u16 location;
5794 
5795 	if (!hnae3_dev_fd_supported(hdev))
5796 		return;
5797 
5798 	spin_lock_bh(&hdev->fd_rule_lock);
5799 	for_each_set_bit(location, hdev->fd_bmap,
5800 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5801 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5802 				     NULL, false);
5803 
5804 	if (clear_list) {
5805 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5806 					  rule_node) {
5807 			hlist_del(&rule->rule_node);
5808 			kfree(rule);
5809 		}
5810 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5811 		hdev->hclge_fd_rule_num = 0;
5812 		bitmap_zero(hdev->fd_bmap,
5813 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5814 	}
5815 
5816 	spin_unlock_bh(&hdev->fd_rule_lock);
5817 }
5818 
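/* Re-program all rules cached in the software list into the hardware,
 * typically after a reset; rules that cannot be restored are dropped
 * from the list.
 */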
5819 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5820 {
5821 	struct hclge_vport *vport = hclge_get_vport(handle);
5822 	struct hclge_dev *hdev = vport->back;
5823 	struct hclge_fd_rule *rule;
5824 	struct hlist_node *node;
5825 	int ret;
5826 
5827 	/* Return ok here, because reset error handling will check this
5828 	 * return value. If error is returned here, the reset process will
5829 	 * fail.
5830 	 */
5831 	if (!hnae3_dev_fd_supported(hdev))
5832 		return 0;
5833 
5834 	/* if fd is disabled, the rules should not be restored during reset */
5835 	if (!hdev->fd_en)
5836 		return 0;
5837 
5838 	spin_lock_bh(&hdev->fd_rule_lock);
5839 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5840 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5841 		if (!ret)
5842 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5843 
5844 		if (ret) {
5845 			dev_warn(&hdev->pdev->dev,
5846 				 "Restore rule %u failed, remove it\n",
5847 				 rule->location);
5848 			clear_bit(rule->location, hdev->fd_bmap);
5849 			hlist_del(&rule->rule_node);
5850 			kfree(rule);
5851 			hdev->hclge_fd_rule_num--;
5852 		}
5853 	}
5854 
5855 	if (hdev->hclge_fd_rule_num)
5856 		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5857 
5858 	spin_unlock_bh(&hdev->fd_rule_lock);
5859 
5860 	return 0;
5861 }
5862 
5863 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5864 				 struct ethtool_rxnfc *cmd)
5865 {
5866 	struct hclge_vport *vport = hclge_get_vport(handle);
5867 	struct hclge_dev *hdev = vport->back;
5868 
5869 	if (!hnae3_dev_fd_supported(hdev))
5870 		return -EOPNOTSUPP;
5871 
5872 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
5873 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5874 
5875 	return 0;
5876 }
5877 
5878 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5879 				  struct ethtool_rxnfc *cmd)
5880 {
5881 	struct hclge_vport *vport = hclge_get_vport(handle);
5882 	struct hclge_fd_rule *rule = NULL;
5883 	struct hclge_dev *hdev = vport->back;
5884 	struct ethtool_rx_flow_spec *fs;
5885 	struct hlist_node *node2;
5886 
5887 	if (!hnae3_dev_fd_supported(hdev))
5888 		return -EOPNOTSUPP;
5889 
5890 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5891 
5892 	spin_lock_bh(&hdev->fd_rule_lock);
5893 
5894 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5895 		if (rule->location >= fs->location)
5896 			break;
5897 	}
5898 
5899 	if (!rule || fs->location != rule->location) {
5900 		spin_unlock_bh(&hdev->fd_rule_lock);
5901 
5902 		return -ENOENT;
5903 	}
5904 
5905 	fs->flow_type = rule->flow_type;
5906 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5907 	case SCTP_V4_FLOW:
5908 	case TCP_V4_FLOW:
5909 	case UDP_V4_FLOW:
5910 		fs->h_u.tcp_ip4_spec.ip4src =
5911 				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5912 		fs->m_u.tcp_ip4_spec.ip4src =
5913 			rule->unused_tuple & BIT(INNER_SRC_IP) ?
5914 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5915 
5916 		fs->h_u.tcp_ip4_spec.ip4dst =
5917 				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5918 		fs->m_u.tcp_ip4_spec.ip4dst =
5919 			rule->unused_tuple & BIT(INNER_DST_IP) ?
5920 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5921 
5922 		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5923 		fs->m_u.tcp_ip4_spec.psrc =
5924 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5925 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5926 
5927 		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5928 		fs->m_u.tcp_ip4_spec.pdst =
5929 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5930 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5931 
5932 		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5933 		fs->m_u.tcp_ip4_spec.tos =
5934 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5935 				0 : rule->tuples_mask.ip_tos;
5936 
5937 		break;
5938 	case IP_USER_FLOW:
5939 		fs->h_u.usr_ip4_spec.ip4src =
5940 				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5941 		fs->m_u.usr_ip4_spec.ip4src =
5942 			rule->unused_tuple & BIT(INNER_SRC_IP) ?
5943 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5944 
5945 		fs->h_u.usr_ip4_spec.ip4dst =
5946 				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5947 		fs->m_u.usr_ip4_spec.ip4dst =
5948 			rule->unused_tuple & BIT(INNER_DST_IP) ?
5949 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5950 
5951 		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5952 		fs->m_u.usr_ip4_spec.tos =
5953 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5954 				0 : rule->tuples_mask.ip_tos;
5955 
5956 		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5957 		fs->m_u.usr_ip4_spec.proto =
5958 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5959 				0 : rule->tuples_mask.ip_proto;
5960 
5961 		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5962 
5963 		break;
5964 	case SCTP_V6_FLOW:
5965 	case TCP_V6_FLOW:
5966 	case UDP_V6_FLOW:
5967 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5968 				  rule->tuples.src_ip, IPV6_SIZE);
5969 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5970 			memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5971 			       sizeof(int) * IPV6_SIZE);
5972 		else
5973 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5974 					  rule->tuples_mask.src_ip, IPV6_SIZE);
5975 
5976 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5977 				  rule->tuples.dst_ip, IPV6_SIZE);
5978 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5979 			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5980 			       sizeof(int) * IPV6_SIZE);
5981 		else
5982 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5983 					  rule->tuples_mask.dst_ip, IPV6_SIZE);
5984 
5985 		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5986 		fs->m_u.tcp_ip6_spec.psrc =
5987 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5988 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5989 
5990 		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5991 		fs->m_u.tcp_ip6_spec.pdst =
5992 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5993 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5994 
5995 		break;
5996 	case IPV6_USER_FLOW:
5997 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5998 				  rule->tuples.src_ip, IPV6_SIZE);
5999 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
6000 			memset(fs->m_u.usr_ip6_spec.ip6src, 0,
6001 			       sizeof(int) * IPV6_SIZE);
6002 		else
6003 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
6004 					  rule->tuples_mask.src_ip, IPV6_SIZE);
6005 
6006 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
6007 				  rule->tuples.dst_ip, IPV6_SIZE);
6008 		if (rule->unused_tuple & BIT(INNER_DST_IP))
6009 			memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
6010 			       sizeof(int) * IPV6_SIZE);
6011 		else
6012 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
6013 					  rule->tuples_mask.dst_ip, IPV6_SIZE);
6014 
6015 		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
6016 		fs->m_u.usr_ip6_spec.l4_proto =
6017 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6018 				0 : rule->tuples_mask.ip_proto;
6019 
6020 		break;
6021 	case ETHER_FLOW:
6022 		ether_addr_copy(fs->h_u.ether_spec.h_source,
6023 				rule->tuples.src_mac);
6024 		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6025 			eth_zero_addr(fs->m_u.ether_spec.h_source);
6026 		else
6027 			ether_addr_copy(fs->m_u.ether_spec.h_source,
6028 					rule->tuples_mask.src_mac);
6029 
6030 		ether_addr_copy(fs->h_u.ether_spec.h_dest,
6031 				rule->tuples.dst_mac);
6032 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6033 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6034 		else
6035 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
6036 					rule->tuples_mask.dst_mac);
6037 
6038 		fs->h_u.ether_spec.h_proto =
6039 				cpu_to_be16(rule->tuples.ether_proto);
6040 		fs->m_u.ether_spec.h_proto =
6041 				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6042 				0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6043 
6044 		break;
6045 	default:
6046 		spin_unlock_bh(&hdev->fd_rule_lock);
6047 		return -EOPNOTSUPP;
6048 	}
6049 
6050 	if (fs->flow_type & FLOW_EXT) {
6051 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6052 		fs->m_ext.vlan_tci =
6053 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6054 				0 :
6055 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
6056 	}
6057 
6058 	if (fs->flow_type & FLOW_MAC_EXT) {
6059 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6060 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6061 			eth_zero_addr(fs->m_ext.h_dest);
6062 		else
6063 			ether_addr_copy(fs->m_ext.h_dest,
6064 					rule->tuples_mask.dst_mac);
6065 	}
6066 
6067 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6068 		fs->ring_cookie = RX_CLS_FLOW_DISC;
6069 	} else {
6070 		u64 vf_id;
6071 
6072 		fs->ring_cookie = rule->queue_id;
6073 		vf_id = rule->vf_id;
6074 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6075 		fs->ring_cookie |= vf_id;
6076 	}
6077 
6078 	spin_unlock_bh(&hdev->fd_rule_lock);
6079 
6080 	return 0;
6081 }
6082 
6083 static int hclge_get_all_rules(struct hnae3_handle *handle,
6084 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
6085 {
6086 	struct hclge_vport *vport = hclge_get_vport(handle);
6087 	struct hclge_dev *hdev = vport->back;
6088 	struct hclge_fd_rule *rule;
6089 	struct hlist_node *node2;
6090 	int cnt = 0;
6091 
6092 	if (!hnae3_dev_fd_supported(hdev))
6093 		return -EOPNOTSUPP;
6094 
6095 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6096 
6097 	spin_lock_bh(&hdev->fd_rule_lock);
6098 	hlist_for_each_entry_safe(rule, node2,
6099 				  &hdev->fd_rule_list, rule_node) {
6100 		if (cnt == cmd->rule_cnt) {
6101 			spin_unlock_bh(&hdev->fd_rule_lock);
6102 			return -EMSGSIZE;
6103 		}
6104 
6105 		rule_locs[cnt] = rule->location;
6106 		cnt++;
6107 	}
6108 
6109 	spin_unlock_bh(&hdev->fd_rule_lock);
6110 
6111 	cmd->rule_cnt = cnt;
6112 
6113 	return 0;
6114 }
6115 
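/* Extract the tuples used by aRFS (ether proto, ip proto, dst port and ip
 * addresses) from the dissected flow keys. For IPv4, the address is stored
 * in the last word of the IPv6-sized array.
 */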
6116 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6117 				     struct hclge_fd_rule_tuples *tuples)
6118 {
6119 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6120 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6121 
6122 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6123 	tuples->ip_proto = fkeys->basic.ip_proto;
6124 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6125 
6126 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6127 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6128 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6129 	} else {
6130 		int i;
6131 
6132 		for (i = 0; i < IPV6_SIZE; i++) {
6133 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6134 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6135 		}
6136 	}
6137 }
6138 
6139 /* traverse all rules, checking whether an existing rule has the same tuples */
6140 static struct hclge_fd_rule *
6141 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6142 			  const struct hclge_fd_rule_tuples *tuples)
6143 {
6144 	struct hclge_fd_rule *rule = NULL;
6145 	struct hlist_node *node;
6146 
6147 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6148 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6149 			return rule;
6150 	}
6151 
6152 	return NULL;
6153 }
6154 
6155 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6156 				     struct hclge_fd_rule *rule)
6157 {
6158 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6159 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6160 			     BIT(INNER_SRC_PORT);
6161 	rule->action = 0;
6162 	rule->vf_id = 0;
6163 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6164 	if (tuples->ether_proto == ETH_P_IP) {
6165 		if (tuples->ip_proto == IPPROTO_TCP)
6166 			rule->flow_type = TCP_V4_FLOW;
6167 		else
6168 			rule->flow_type = UDP_V4_FLOW;
6169 	} else {
6170 		if (tuples->ip_proto == IPPROTO_TCP)
6171 			rule->flow_type = TCP_V6_FLOW;
6172 		else
6173 			rule->flow_type = UDP_V6_FLOW;
6174 	}
6175 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6176 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6177 }
6178 
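/* Add or update an aRFS rule for the given flow. Returns the rule location
 * on success, which the stack uses later to check whether the flow may
 * expire. aRFS rules are refused while ethtool rules are active.
 */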
6179 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6180 				      u16 flow_id, struct flow_keys *fkeys)
6181 {
6182 	struct hclge_vport *vport = hclge_get_vport(handle);
6183 	struct hclge_fd_rule_tuples new_tuples;
6184 	struct hclge_dev *hdev = vport->back;
6185 	struct hclge_fd_rule *rule;
6186 	u16 tmp_queue_id;
6187 	u16 bit_id;
6188 	int ret;
6189 
6190 	if (!hnae3_dev_fd_supported(hdev))
6191 		return -EOPNOTSUPP;
6192 
6193 	memset(&new_tuples, 0, sizeof(new_tuples));
6194 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6195 
6196 	spin_lock_bh(&hdev->fd_rule_lock);
6197 
6198 	/* when an fd rule added by the user already exists,
6199 	 * arfs should not work
6200 	 */
6201 	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6202 		spin_unlock_bh(&hdev->fd_rule_lock);
6203 
6204 		return -EOPNOTSUPP;
6205 	}
6206 
6207 	/* check whether a flow director filter already exists for this flow:
6208 	 * if not, create a new filter for it;
6209 	 * if a filter exists with a different queue id, modify the filter;
6210 	 * if a filter exists with the same queue id, do nothing
6211 	 */
6212 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6213 	if (!rule) {
6214 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6215 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6216 			spin_unlock_bh(&hdev->fd_rule_lock);
6217 
6218 			return -ENOSPC;
6219 		}
6220 
6221 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6222 		if (!rule) {
6223 			spin_unlock_bh(&hdev->fd_rule_lock);
6224 
6225 			return -ENOMEM;
6226 		}
6227 
6228 		set_bit(bit_id, hdev->fd_bmap);
6229 		rule->location = bit_id;
6230 		rule->flow_id = flow_id;
6231 		rule->queue_id = queue_id;
6232 		hclge_fd_build_arfs_rule(&new_tuples, rule);
6233 		ret = hclge_fd_config_rule(hdev, rule);
6234 
6235 		spin_unlock_bh(&hdev->fd_rule_lock);
6236 
6237 		if (ret)
6238 			return ret;
6239 
6240 		return rule->location;
6241 	}
6242 
6243 	spin_unlock_bh(&hdev->fd_rule_lock);
6244 
6245 	if (rule->queue_id == queue_id)
6246 		return rule->location;
6247 
6248 	tmp_queue_id = rule->queue_id;
6249 	rule->queue_id = queue_id;
6250 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6251 	if (ret) {
6252 		rule->queue_id = tmp_queue_id;
6253 		return ret;
6254 	}
6255 
6256 	return rule->location;
6257 }
6258 
6259 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6260 {
6261 #ifdef CONFIG_RFS_ACCEL
6262 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6263 	struct hclge_fd_rule *rule;
6264 	struct hlist_node *node;
6265 	HLIST_HEAD(del_list);
6266 
6267 	spin_lock_bh(&hdev->fd_rule_lock);
6268 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6269 		spin_unlock_bh(&hdev->fd_rule_lock);
6270 		return;
6271 	}
6272 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6273 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6274 					rule->flow_id, rule->location)) {
6275 			hlist_del_init(&rule->rule_node);
6276 			hlist_add_head(&rule->rule_node, &del_list);
6277 			hdev->hclge_fd_rule_num--;
6278 			clear_bit(rule->location, hdev->fd_bmap);
6279 		}
6280 	}
6281 	spin_unlock_bh(&hdev->fd_rule_lock);
6282 
6283 	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6284 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6285 				     rule->location, NULL, false);
6286 		kfree(rule);
6287 	}
6288 #endif
6289 }
6290 
6291 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6292 {
6293 #ifdef CONFIG_RFS_ACCEL
6294 	struct hclge_vport *vport = hclge_get_vport(handle);
6295 	struct hclge_dev *hdev = vport->back;
6296 
6297 	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6298 		hclge_del_all_fd_entries(handle, true);
6299 #endif
6300 }
6301 
6302 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6303 {
6304 	struct hclge_vport *vport = hclge_get_vport(handle);
6305 	struct hclge_dev *hdev = vport->back;
6306 
6307 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6308 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6309 }
6310 
6311 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6312 {
6313 	struct hclge_vport *vport = hclge_get_vport(handle);
6314 	struct hclge_dev *hdev = vport->back;
6315 
6316 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6317 }
6318 
6319 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6320 {
6321 	struct hclge_vport *vport = hclge_get_vport(handle);
6322 	struct hclge_dev *hdev = vport->back;
6323 
6324 	return hdev->rst_stats.hw_reset_done_cnt;
6325 }
6326 
6327 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6328 {
6329 	struct hclge_vport *vport = hclge_get_vport(handle);
6330 	struct hclge_dev *hdev = vport->back;
6331 	bool clear;
6332 
6333 	hdev->fd_en = enable;
6334 	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6335 	if (!enable)
6336 		hclge_del_all_fd_entries(handle, clear);
6337 	else
6338 		hclge_restore_fd_entries(handle);
6339 }
6340 
6341 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6342 {
6343 	struct hclge_desc desc;
6344 	struct hclge_config_mac_mode_cmd *req =
6345 		(struct hclge_config_mac_mode_cmd *)desc.data;
6346 	u32 loop_en = 0;
6347 	int ret;
6348 
6349 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6350 
6351 	if (enable) {
6352 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6353 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6354 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6355 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6356 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6357 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6358 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6359 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6360 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6361 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6362 	}
6363 
6364 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6365 
6366 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6367 	if (ret)
6368 		dev_err(&hdev->pdev->dev,
6369 			"mac enable fail, ret =%d.\n", ret);
6370 }
6371 
6372 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6373 				     u8 switch_param, u8 param_mask)
6374 {
6375 	struct hclge_mac_vlan_switch_cmd *req;
6376 	struct hclge_desc desc;
6377 	u32 func_id;
6378 	int ret;
6379 
6380 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6381 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6382 
6383 	/* read current config parameter */
6384 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6385 				   true);
6386 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6387 	req->func_id = cpu_to_le32(func_id);
6388 
6389 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6390 	if (ret) {
6391 		dev_err(&hdev->pdev->dev,
6392 			"read mac vlan switch parameter fail, ret = %d\n", ret);
6393 		return ret;
6394 	}
6395 
6396 	/* modify and write new config parameter */
6397 	hclge_cmd_reuse_desc(&desc, false);
6398 	req->switch_param = (req->switch_param & param_mask) | switch_param;
6399 	req->param_mask = param_mask;
6400 
6401 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6402 	if (ret)
6403 		dev_err(&hdev->pdev->dev,
6404 			"set mac vlan switch parameter fail, ret = %d\n", ret);
6405 	return ret;
6406 }
6407 
6408 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6409 				       int link_ret)
6410 {
6411 #define HCLGE_PHY_LINK_STATUS_NUM  200
6412 
6413 	struct phy_device *phydev = hdev->hw.mac.phydev;
6414 	int i = 0;
6415 	int ret;
6416 
6417 	do {
6418 		ret = phy_read_status(phydev);
6419 		if (ret) {
6420 			dev_err(&hdev->pdev->dev,
6421 				"phy update link status fail, ret = %d\n", ret);
6422 			return;
6423 		}
6424 
6425 		if (phydev->link == link_ret)
6426 			break;
6427 
6428 		msleep(HCLGE_LINK_STATUS_MS);
6429 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6430 }
6431 
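/* Poll the MAC link status every HCLGE_LINK_STATUS_MS milliseconds, up to
 * HCLGE_MAC_LINK_STATUS_NUM times, until it matches link_ret. Returns 0 on
 * match and -EBUSY on timeout.
 */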
6432 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6433 {
6434 #define HCLGE_MAC_LINK_STATUS_NUM  100
6435 
6436 	int i = 0;
6437 	int ret;
6438 
6439 	do {
6440 		ret = hclge_get_mac_link_status(hdev);
6441 		if (ret < 0)
6442 			return ret;
6443 		else if (ret == link_ret)
6444 			return 0;
6445 
6446 		msleep(HCLGE_LINK_STATUS_MS);
6447 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6448 	return -EBUSY;
6449 }
6450 
6451 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6452 					  bool is_phy)
6453 {
6454 #define HCLGE_LINK_STATUS_DOWN 0
6455 #define HCLGE_LINK_STATUS_UP   1
6456 
6457 	int link_ret;
6458 
6459 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6460 
6461 	if (is_phy)
6462 		hclge_phy_link_status_wait(hdev, link_ret);
6463 
6464 	return hclge_mac_link_status_wait(hdev, link_ret);
6465 }
6466 
6467 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6468 {
6469 	struct hclge_config_mac_mode_cmd *req;
6470 	struct hclge_desc desc;
6471 	u32 loop_en;
6472 	int ret;
6473 
6474 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6475 	/* 1 Read out the MAC mode config at first */
6476 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6477 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6478 	if (ret) {
6479 		dev_err(&hdev->pdev->dev,
6480 			"mac loopback get fail, ret =%d.\n", ret);
6481 		return ret;
6482 	}
6483 
6484 	/* 2 Then set up the loopback flag */
6485 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6486 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6487 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6488 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6489 
6490 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6491 
6492 	/* 3 Config mac work mode with the loopback flag
6493 	 * and its original configuration parameters
6494 	 */
6495 	hclge_cmd_reuse_desc(&desc, false);
6496 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6497 	if (ret)
6498 		dev_err(&hdev->pdev->dev,
6499 			"mac loopback set fail, ret =%d.\n", ret);
6500 	return ret;
6501 }
6502 
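/* Issue the serdes loopback command for the serial or parallel inner loop,
 * then poll the command result until the done bit is set or the retry limit
 * (100 tries, 10 ms apart) is reached.
 */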
6503 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6504 				     enum hnae3_loop loop_mode)
6505 {
6506 #define HCLGE_SERDES_RETRY_MS	10
6507 #define HCLGE_SERDES_RETRY_NUM	100
6508 
6509 	struct hclge_serdes_lb_cmd *req;
6510 	struct hclge_desc desc;
6511 	int ret, i = 0;
6512 	u8 loop_mode_b;
6513 
6514 	req = (struct hclge_serdes_lb_cmd *)desc.data;
6515 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6516 
6517 	switch (loop_mode) {
6518 	case HNAE3_LOOP_SERIAL_SERDES:
6519 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6520 		break;
6521 	case HNAE3_LOOP_PARALLEL_SERDES:
6522 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6523 		break;
6524 	default:
6525 		dev_err(&hdev->pdev->dev,
6526 			"unsupported serdes loopback mode %d\n", loop_mode);
6527 		return -ENOTSUPP;
6528 	}
6529 
6530 	if (en) {
6531 		req->enable = loop_mode_b;
6532 		req->mask = loop_mode_b;
6533 	} else {
6534 		req->mask = loop_mode_b;
6535 	}
6536 
6537 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6538 	if (ret) {
6539 		dev_err(&hdev->pdev->dev,
6540 			"serdes loopback set fail, ret = %d\n", ret);
6541 		return ret;
6542 	}
6543 
6544 	do {
6545 		msleep(HCLGE_SERDES_RETRY_MS);
6546 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6547 					   true);
6548 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6549 		if (ret) {
6550 			dev_err(&hdev->pdev->dev,
6551 				"serdes loopback get fail, ret = %d\n", ret);
6552 			return ret;
6553 		}
6554 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
6555 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6556 
6557 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6558 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6559 		return -EBUSY;
6560 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6561 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6562 		return -EIO;
6563 	}
6564 	return ret;
6565 }
6566 
6567 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6568 				     enum hnae3_loop loop_mode)
6569 {
6570 	int ret;
6571 
6572 	ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6573 	if (ret)
6574 		return ret;
6575 
6576 	hclge_cfg_mac_mode(hdev, en);
6577 
6578 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6579 	if (ret)
6580 		dev_err(&hdev->pdev->dev,
6581 			"serdes loopback config mac mode timeout\n");
6582 
6583 	return ret;
6584 }
6585 
6586 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6587 				     struct phy_device *phydev)
6588 {
6589 	int ret;
6590 
6591 	if (!phydev->suspended) {
6592 		ret = phy_suspend(phydev);
6593 		if (ret)
6594 			return ret;
6595 	}
6596 
6597 	ret = phy_resume(phydev);
6598 	if (ret)
6599 		return ret;
6600 
6601 	return phy_loopback(phydev, true);
6602 }
6603 
6604 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6605 				      struct phy_device *phydev)
6606 {
6607 	int ret;
6608 
6609 	ret = phy_loopback(phydev, false);
6610 	if (ret)
6611 		return ret;
6612 
6613 	return phy_suspend(phydev);
6614 }
6615 
6616 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6617 {
6618 	struct phy_device *phydev = hdev->hw.mac.phydev;
6619 	int ret;
6620 
6621 	if (!phydev)
6622 		return -ENOTSUPP;
6623 
6624 	if (en)
6625 		ret = hclge_enable_phy_loopback(hdev, phydev);
6626 	else
6627 		ret = hclge_disable_phy_loopback(hdev, phydev);
6628 	if (ret) {
6629 		dev_err(&hdev->pdev->dev,
6630 			"set phy loopback fail, ret = %d\n", ret);
6631 		return ret;
6632 	}
6633 
6634 	hclge_cfg_mac_mode(hdev, en);
6635 
6636 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6637 	if (ret)
6638 		dev_err(&hdev->pdev->dev,
6639 			"phy loopback config mac mode timeout\n");
6640 
6641 	return ret;
6642 }
6643 
6644 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6645 			    int stream_id, bool enable)
6646 {
6647 	struct hclge_desc desc;
6648 	struct hclge_cfg_com_tqp_queue_cmd *req =
6649 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6650 	int ret;
6651 
6652 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6653 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6654 	req->stream_id = cpu_to_le16(stream_id);
6655 	if (enable)
6656 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6657 
6658 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6659 	if (ret)
6660 		dev_err(&hdev->pdev->dev,
6661 			"Tqp enable fail, status =%d.\n", ret);
6662 	return ret;
6663 }
6664 
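/* Enable or disable one of the loopback modes (app/MAC, serdes or PHY) and
 * switch all TQPs of the vport accordingly. This path is typically exercised
 * by the ethtool self-test.
 */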
6665 static int hclge_set_loopback(struct hnae3_handle *handle,
6666 			      enum hnae3_loop loop_mode, bool en)
6667 {
6668 	struct hclge_vport *vport = hclge_get_vport(handle);
6669 	struct hnae3_knic_private_info *kinfo;
6670 	struct hclge_dev *hdev = vport->back;
6671 	int i, ret;
6672 
6673 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6674 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6675 	 * the same, the packets are looped back in the SSU. If SSU loopback
6676 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6677 	 */
6678 	if (hdev->pdev->revision >= 0x21) {
6679 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6680 
6681 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6682 						HCLGE_SWITCH_ALW_LPBK_MASK);
6683 		if (ret)
6684 			return ret;
6685 	}
6686 
6687 	switch (loop_mode) {
6688 	case HNAE3_LOOP_APP:
6689 		ret = hclge_set_app_loopback(hdev, en);
6690 		break;
6691 	case HNAE3_LOOP_SERIAL_SERDES:
6692 	case HNAE3_LOOP_PARALLEL_SERDES:
6693 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6694 		break;
6695 	case HNAE3_LOOP_PHY:
6696 		ret = hclge_set_phy_loopback(hdev, en);
6697 		break;
6698 	default:
6699 		ret = -ENOTSUPP;
6700 		dev_err(&hdev->pdev->dev,
6701 			"loop_mode %d is not supported\n", loop_mode);
6702 		break;
6703 	}
6704 
6705 	if (ret)
6706 		return ret;
6707 
6708 	kinfo = &vport->nic.kinfo;
6709 	for (i = 0; i < kinfo->num_tqps; i++) {
6710 		ret = hclge_tqp_enable(hdev, i, 0, en);
6711 		if (ret)
6712 			return ret;
6713 	}
6714 
6715 	return 0;
6716 }
6717 
6718 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6719 {
6720 	int ret;
6721 
6722 	ret = hclge_set_app_loopback(hdev, false);
6723 	if (ret)
6724 		return ret;
6725 
6726 	ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6727 	if (ret)
6728 		return ret;
6729 
6730 	return hclge_cfg_serdes_loopback(hdev, false,
6731 					 HNAE3_LOOP_PARALLEL_SERDES);
6732 }
6733 
6734 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6735 {
6736 	struct hclge_vport *vport = hclge_get_vport(handle);
6737 	struct hnae3_knic_private_info *kinfo;
6738 	struct hnae3_queue *queue;
6739 	struct hclge_tqp *tqp;
6740 	int i;
6741 
6742 	kinfo = &vport->nic.kinfo;
6743 	for (i = 0; i < kinfo->num_tqps; i++) {
6744 		queue = handle->kinfo.tqp[i];
6745 		tqp = container_of(queue, struct hclge_tqp, q);
6746 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6747 	}
6748 }
6749 
6750 static void hclge_flush_link_update(struct hclge_dev *hdev)
6751 {
6752 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
6753 
6754 	unsigned long last = hdev->serv_processed_cnt;
6755 	int i = 0;
6756 
6757 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6758 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6759 	       last == hdev->serv_processed_cnt)
6760 		usleep_range(1, 1);
6761 }
6762 
6763 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6764 {
6765 	struct hclge_vport *vport = hclge_get_vport(handle);
6766 	struct hclge_dev *hdev = vport->back;
6767 
6768 	if (enable) {
6769 		hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6770 	} else {
6771 		/* Set the DOWN flag here to disable link updating */
6772 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
6773 
6774 		/* flush memory to make sure DOWN is seen by service task */
6775 		smp_mb__before_atomic();
6776 		hclge_flush_link_update(hdev);
6777 	}
6778 }
6779 
6780 static int hclge_ae_start(struct hnae3_handle *handle)
6781 {
6782 	struct hclge_vport *vport = hclge_get_vport(handle);
6783 	struct hclge_dev *hdev = vport->back;
6784 
6785 	/* mac enable */
6786 	hclge_cfg_mac_mode(hdev, true);
6787 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6788 	hdev->hw.mac.link = 0;
6789 
6790 	/* reset tqp stats */
6791 	hclge_reset_tqp_stats(handle);
6792 
6793 	hclge_mac_start_phy(hdev);
6794 
6795 	return 0;
6796 }
6797 
6798 static void hclge_ae_stop(struct hnae3_handle *handle)
6799 {
6800 	struct hclge_vport *vport = hclge_get_vport(handle);
6801 	struct hclge_dev *hdev = vport->back;
6802 	int i;
6803 
6804 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
6805 
6806 	hclge_clear_arfs_rules(handle);
6807 
6808 	/* If it is not PF reset, the firmware will disable the MAC,
6809 	 * so we only need to stop the phy here.
6810 	 */
6811 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6812 	    hdev->reset_type != HNAE3_FUNC_RESET) {
6813 		hclge_mac_stop_phy(hdev);
6814 		hclge_update_link_status(hdev);
6815 		return;
6816 	}
6817 
6818 	for (i = 0; i < handle->kinfo.num_tqps; i++)
6819 		hclge_reset_tqp(handle, i);
6820 
6821 	hclge_config_mac_tnl_int(hdev, false);
6822 
6823 	/* Mac disable */
6824 	hclge_cfg_mac_mode(hdev, false);
6825 
6826 	hclge_mac_stop_phy(hdev);
6827 
6828 	/* reset tqp stats */
6829 	hclge_reset_tqp_stats(handle);
6830 	hclge_update_link_status(hdev);
6831 }
6832 
6833 int hclge_vport_start(struct hclge_vport *vport)
6834 {
6835 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6836 	vport->last_active_jiffies = jiffies;
6837 	return 0;
6838 }
6839 
6840 void hclge_vport_stop(struct hclge_vport *vport)
6841 {
6842 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6843 }
6844 
6845 static int hclge_client_start(struct hnae3_handle *handle)
6846 {
6847 	struct hclge_vport *vport = hclge_get_vport(handle);
6848 
6849 	return hclge_vport_start(vport);
6850 }
6851 
6852 static void hclge_client_stop(struct hnae3_handle *handle)
6853 {
6854 	struct hclge_vport *vport = hclge_get_vport(handle);
6855 
6856 	hclge_vport_stop(vport);
6857 }
6858 
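/* Translate the command queue status and the MAC-VLAN table response code
 * into an errno, depending on whether the operation was an add, remove or
 * lookup.
 */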
6859 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6860 					 u16 cmdq_resp, u8  resp_code,
6861 					 enum hclge_mac_vlan_tbl_opcode op)
6862 {
6863 	struct hclge_dev *hdev = vport->back;
6864 
6865 	if (cmdq_resp) {
6866 		dev_err(&hdev->pdev->dev,
6867 			"cmdq execute failed for get_mac_vlan_cmd_status, status=%u.\n",
6868 			cmdq_resp);
6869 		return -EIO;
6870 	}
6871 
6872 	if (op == HCLGE_MAC_VLAN_ADD) {
6873 		if ((!resp_code) || (resp_code == 1)) {
6874 			return 0;
6875 		} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6876 			dev_err(&hdev->pdev->dev,
6877 				"add mac addr failed for uc_overflow.\n");
6878 			return -ENOSPC;
6879 		} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6880 			dev_err(&hdev->pdev->dev,
6881 				"add mac addr failed for mc_overflow.\n");
6882 			return -ENOSPC;
6883 		}
6884 
6885 		dev_err(&hdev->pdev->dev,
6886 			"add mac addr failed for undefined, code=%u.\n",
6887 			resp_code);
6888 		return -EIO;
6889 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
6890 		if (!resp_code) {
6891 			return 0;
6892 		} else if (resp_code == 1) {
6893 			dev_dbg(&hdev->pdev->dev,
6894 				"remove mac addr failed for miss.\n");
6895 			return -ENOENT;
6896 		}
6897 
6898 		dev_err(&hdev->pdev->dev,
6899 			"remove mac addr failed for undefined, code=%u.\n",
6900 			resp_code);
6901 		return -EIO;
6902 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
6903 		if (!resp_code) {
6904 			return 0;
6905 		} else if (resp_code == 1) {
6906 			dev_dbg(&hdev->pdev->dev,
6907 				"lookup mac addr failed for miss.\n");
6908 			return -ENOENT;
6909 		}
6910 
6911 		dev_err(&hdev->pdev->dev,
6912 			"lookup mac addr failed for undefined, code=%u.\n",
6913 			resp_code);
6914 		return -EIO;
6915 	}
6916 
6917 	dev_err(&hdev->pdev->dev,
6918 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6919 
6920 	return -EINVAL;
6921 }
6922 
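/* Set or clear the bit of a function id in the VF bitmap carried by the
 * descriptors: ids 0..191 live in desc[1] and ids 192..255 in desc[2],
 * 32 ids per data word.
 */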
6923 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6924 {
6925 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6926 
6927 	unsigned int word_num;
6928 	unsigned int bit_num;
6929 
6930 	if (vfid > 255 || vfid < 0)
6931 		return -EIO;
6932 
6933 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6934 		word_num = vfid / 32;
6935 		bit_num  = vfid % 32;
6936 		if (clr)
6937 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6938 		else
6939 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6940 	} else {
6941 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6942 		bit_num  = vfid % 32;
6943 		if (clr)
6944 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6945 		else
6946 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6947 	}
6948 
6949 	return 0;
6950 }
6951 
6952 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6953 {
6954 #define HCLGE_DESC_NUMBER 3
6955 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6956 	int i, j;
6957 
6958 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6959 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6960 			if (desc[i].data[j])
6961 				return false;
6962 
6963 	return true;
6964 }
6965 
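/* Pack a MAC address into the table entry: bytes 0-3 go into the high
 * 32-bit field and bytes 4-5 into the low 16-bit field, and mark the entry
 * as multicast when requested.
 */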
6966 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6967 				   const u8 *addr, bool is_mc)
6968 {
6969 	const unsigned char *mac_addr = addr;
6970 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6971 		       (mac_addr[0]) | (mac_addr[1] << 8);
6972 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6973 
6974 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6975 	if (is_mc) {
6976 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6977 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6978 	}
6979 
6980 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6981 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6982 }
6983 
6984 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6985 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
6986 {
6987 	struct hclge_dev *hdev = vport->back;
6988 	struct hclge_desc desc;
6989 	u8 resp_code;
6990 	u16 retval;
6991 	int ret;
6992 
6993 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6994 
6995 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6996 
6997 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6998 	if (ret) {
6999 		dev_err(&hdev->pdev->dev,
7000 			"del mac addr failed for cmd_send, ret =%d.\n",
7001 			ret);
7002 		return ret;
7003 	}
7004 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7005 	retval = le16_to_cpu(desc.retval);
7006 
7007 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7008 					     HCLGE_MAC_VLAN_REMOVE);
7009 }
7010 
7011 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7012 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
7013 				     struct hclge_desc *desc,
7014 				     bool is_mc)
7015 {
7016 	struct hclge_dev *hdev = vport->back;
7017 	u8 resp_code;
7018 	u16 retval;
7019 	int ret;
7020 
7021 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7022 	if (is_mc) {
7023 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7024 		memcpy(desc[0].data,
7025 		       req,
7026 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7027 		hclge_cmd_setup_basic_desc(&desc[1],
7028 					   HCLGE_OPC_MAC_VLAN_ADD,
7029 					   true);
7030 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7031 		hclge_cmd_setup_basic_desc(&desc[2],
7032 					   HCLGE_OPC_MAC_VLAN_ADD,
7033 					   true);
7034 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
7035 	} else {
7036 		memcpy(desc[0].data,
7037 		       req,
7038 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7039 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
7040 	}
7041 	if (ret) {
7042 		dev_err(&hdev->pdev->dev,
7043 			"lookup mac addr failed for cmd_send, ret =%d.\n",
7044 			ret);
7045 		return ret;
7046 	}
7047 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7048 	retval = le16_to_cpu(desc[0].retval);
7049 
7050 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7051 					     HCLGE_MAC_VLAN_LKUP);
7052 }
7053 
7054 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7055 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
7056 				  struct hclge_desc *mc_desc)
7057 {
7058 	struct hclge_dev *hdev = vport->back;
7059 	int cfg_status;
7060 	u8 resp_code;
7061 	u16 retval;
7062 	int ret;
7063 
7064 	if (!mc_desc) {
7065 		struct hclge_desc desc;
7066 
7067 		hclge_cmd_setup_basic_desc(&desc,
7068 					   HCLGE_OPC_MAC_VLAN_ADD,
7069 					   false);
7070 		memcpy(desc.data, req,
7071 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7072 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7073 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7074 		retval = le16_to_cpu(desc.retval);
7075 
7076 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7077 							   resp_code,
7078 							   HCLGE_MAC_VLAN_ADD);
7079 	} else {
7080 		hclge_cmd_reuse_desc(&mc_desc[0], false);
7081 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7082 		hclge_cmd_reuse_desc(&mc_desc[1], false);
7083 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7084 		hclge_cmd_reuse_desc(&mc_desc[2], false);
7085 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7086 		memcpy(mc_desc[0].data, req,
7087 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7088 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7089 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7090 		retval = le16_to_cpu(mc_desc[0].retval);
7091 
7092 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7093 							   resp_code,
7094 							   HCLGE_MAC_VLAN_ADD);
7095 	}
7096 
7097 	if (ret) {
7098 		dev_err(&hdev->pdev->dev,
7099 			"add mac addr failed for cmd_send, ret =%d.\n",
7100 			ret);
7101 		return ret;
7102 	}
7103 
7104 	return cfg_status;
7105 }
7106 
7107 static int hclge_init_umv_space(struct hclge_dev *hdev)
7108 {
7109 	u16 allocated_size = 0;
7110 	int ret;
7111 
7112 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
7113 				  true);
7114 	if (ret)
7115 		return ret;
7116 
7117 	if (allocated_size < hdev->wanted_umv_size)
7118 		dev_warn(&hdev->pdev->dev,
7119 			 "Alloc umv space failed, want %u, get %u\n",
7120 			 hdev->wanted_umv_size, allocated_size);
7121 
7122 	mutex_init(&hdev->umv_mutex);
7123 	hdev->max_umv_size = allocated_size;
7124 	/* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
7125 	 * preserve some unicast mac vlan table entries shared by pf
7126 	 * and its vfs.
7127 	 */
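	/* For example (hypothetical numbers), with max_umv_size = 256 and
	 * num_req_vfs = 6: priv_umv_size = 256 / 8 = 32 and
	 * share_umv_size = 32 + 256 % 8 = 32.
	 */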
7128 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7129 	hdev->share_umv_size = hdev->priv_umv_size +
7130 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
7131 
7132 	return 0;
7133 }
7134 
7135 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7136 {
7137 	int ret;
7138 
7139 	if (hdev->max_umv_size > 0) {
7140 		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7141 					  false);
7142 		if (ret)
7143 			return ret;
7144 		hdev->max_umv_size = 0;
7145 	}
7146 	mutex_destroy(&hdev->umv_mutex);
7147 
7148 	return 0;
7149 }
7150 
7151 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7152 			       u16 *allocated_size, bool is_alloc)
7153 {
7154 	struct hclge_umv_spc_alc_cmd *req;
7155 	struct hclge_desc desc;
7156 	int ret;
7157 
7158 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7159 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7160 	if (!is_alloc)
7161 		hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7162 
7163 	req->space_size = cpu_to_le32(space_size);
7164 
7165 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7166 	if (ret) {
7167 		dev_err(&hdev->pdev->dev,
7168 			"%s umv space failed for cmd_send, ret =%d\n",
7169 			is_alloc ? "allocate" : "free", ret);
7170 		return ret;
7171 	}
7172 
7173 	if (is_alloc && allocated_size)
7174 		*allocated_size = le32_to_cpu(desc.data[1]);
7175 
7176 	return 0;
7177 }
7178 
7179 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7180 {
7181 	struct hclge_vport *vport;
7182 	int i;
7183 
7184 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7185 		vport = &hdev->vport[i];
7186 		vport->used_umv_num = 0;
7187 	}
7188 
7189 	mutex_lock(&hdev->umv_mutex);
7190 	hdev->share_umv_size = hdev->priv_umv_size +
7191 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
7192 	mutex_unlock(&hdev->umv_mutex);
7193 }
7194 
7195 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7196 {
7197 	struct hclge_dev *hdev = vport->back;
7198 	bool is_full;
7199 
7200 	mutex_lock(&hdev->umv_mutex);
7201 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7202 		   hdev->share_umv_size == 0);
7203 	mutex_unlock(&hdev->umv_mutex);
7204 
7205 	return is_full;
7206 }
7207 
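/* Track unicast MAC table usage for a vport: entries are charged against
 * the vport's private quota (priv_umv_size) first, then against the shared
 * pool once the quota is exhausted; freeing reverses the accounting.
 */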
7208 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7209 {
7210 	struct hclge_dev *hdev = vport->back;
7211 
7212 	mutex_lock(&hdev->umv_mutex);
7213 	if (is_free) {
7214 		if (vport->used_umv_num > hdev->priv_umv_size)
7215 			hdev->share_umv_size++;
7216 
7217 		if (vport->used_umv_num > 0)
7218 			vport->used_umv_num--;
7219 	} else {
7220 		if (vport->used_umv_num >= hdev->priv_umv_size &&
7221 		    hdev->share_umv_size > 0)
7222 			hdev->share_umv_size--;
7223 		vport->used_umv_num++;
7224 	}
7225 	mutex_unlock(&hdev->umv_mutex);
7226 }
7227 
7228 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7229 			     const unsigned char *addr)
7230 {
7231 	struct hclge_vport *vport = hclge_get_vport(handle);
7232 
7233 	return hclge_add_uc_addr_common(vport, addr);
7234 }
7235 
7236 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7237 			     const unsigned char *addr)
7238 {
7239 	struct hclge_dev *hdev = vport->back;
7240 	struct hclge_mac_vlan_tbl_entry_cmd req;
7241 	struct hclge_desc desc;
7242 	u16 egress_port = 0;
7243 	int ret;
7244 
7245 	/* mac addr check */
7246 	if (is_zero_ether_addr(addr) ||
7247 	    is_broadcast_ether_addr(addr) ||
7248 	    is_multicast_ether_addr(addr)) {
7249 		dev_err(&hdev->pdev->dev,
7250 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7251 			 addr, is_zero_ether_addr(addr),
7252 			 is_broadcast_ether_addr(addr),
7253 			 is_multicast_ether_addr(addr));
7254 		return -EINVAL;
7255 	}
7256 
7257 	memset(&req, 0, sizeof(req));
7258 
7259 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7260 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7261 
7262 	req.egress_port = cpu_to_le16(egress_port);
7263 
7264 	hclge_prepare_mac_addr(&req, addr, false);
7265 
7266 	/* Look up the mac address in the mac_vlan table, and add
7267 	 * it if the entry does not exist. Duplicate unicast entries
7268 	 * are not allowed in the mac vlan table.
7269 	 */
7270 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7271 	if (ret == -ENOENT) {
7272 		if (!hclge_is_umv_space_full(vport)) {
7273 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7274 			if (!ret)
7275 				hclge_update_umv_space(vport, false);
7276 			return ret;
7277 		}
7278 
7279 		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7280 			hdev->priv_umv_size);
7281 
7282 		return -ENOSPC;
7283 	}
7284 
7285 	/* check if we just hit a duplicate */
7286 	if (!ret) {
7287 		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7288 			 vport->vport_id, addr);
7289 		return 0;
7290 	}
7291 
7292 	dev_err(&hdev->pdev->dev,
7293 		"PF failed to add unicast entry(%pM) in the MAC table\n",
7294 		addr);
7295 
7296 	return ret;
7297 }
7298 
7299 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7300 			    const unsigned char *addr)
7301 {
7302 	struct hclge_vport *vport = hclge_get_vport(handle);
7303 
7304 	return hclge_rm_uc_addr_common(vport, addr);
7305 }
7306 
7307 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7308 			    const unsigned char *addr)
7309 {
7310 	struct hclge_dev *hdev = vport->back;
7311 	struct hclge_mac_vlan_tbl_entry_cmd req;
7312 	int ret;
7313 
7314 	/* mac addr check */
7315 	if (is_zero_ether_addr(addr) ||
7316 	    is_broadcast_ether_addr(addr) ||
7317 	    is_multicast_ether_addr(addr)) {
7318 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7319 			addr);
7320 		return -EINVAL;
7321 	}
7322 
7323 	memset(&req, 0, sizeof(req));
7324 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7325 	hclge_prepare_mac_addr(&req, addr, false);
7326 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
7327 	if (!ret)
7328 		hclge_update_umv_space(vport, true);
7329 
7330 	return ret;
7331 }
7332 
7333 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7334 			     const unsigned char *addr)
7335 {
7336 	struct hclge_vport *vport = hclge_get_vport(handle);
7337 
7338 	return hclge_add_mc_addr_common(vport, addr);
7339 }
7340 
7341 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7342 			     const unsigned char *addr)
7343 {
7344 	struct hclge_dev *hdev = vport->back;
7345 	struct hclge_mac_vlan_tbl_entry_cmd req;
7346 	struct hclge_desc desc[3];
7347 	int status;
7348 
7349 	/* mac addr check */
7350 	if (!is_multicast_ether_addr(addr)) {
7351 		dev_err(&hdev->pdev->dev,
7352 			"Add mc mac err! invalid mac:%pM.\n",
7353 			 addr);
7354 		return -EINVAL;
7355 	}
7356 	memset(&req, 0, sizeof(req));
7357 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7358 	hclge_prepare_mac_addr(&req, addr, true);
7359 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7360 	if (status) {
7361 		/* This mac addr does not exist, add a new entry for it */
7362 		memset(desc[0].data, 0, sizeof(desc[0].data));
7363 		memset(desc[1].data, 0, sizeof(desc[0].data));
7364 		memset(desc[2].data, 0, sizeof(desc[0].data));
7365 	}
7366 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7367 	if (status)
7368 		return status;
7369 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7370 
7371 	if (status == -ENOSPC)
7372 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7373 
7374 	return status;
7375 }
7376 
7377 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7378 			    const unsigned char *addr)
7379 {
7380 	struct hclge_vport *vport = hclge_get_vport(handle);
7381 
7382 	return hclge_rm_mc_addr_common(vport, addr);
7383 }
7384 
7385 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7386 			    const unsigned char *addr)
7387 {
7388 	struct hclge_dev *hdev = vport->back;
7389 	struct hclge_mac_vlan_tbl_entry_cmd req;
7390 	enum hclge_cmd_status status;
7391 	struct hclge_desc desc[3];
7392 
7393 	/* mac addr check */
7394 	if (!is_multicast_ether_addr(addr)) {
7395 		dev_dbg(&hdev->pdev->dev,
7396 			"Remove mc mac err! invalid mac:%pM.\n",
7397 			 addr);
7398 		return -EINVAL;
7399 	}
7400 
7401 	memset(&req, 0, sizeof(req));
7402 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7403 	hclge_prepare_mac_addr(&req, addr, true);
7404 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7405 	if (!status) {
7406 		/* This mac addr exists, remove this handle's VFID from its entry */
7407 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7408 		if (status)
7409 			return status;
7410 
7411 		if (hclge_is_all_function_id_zero(desc))
7412 			/* All the vfids are zero, so delete this entry */
7413 			status = hclge_remove_mac_vlan_tbl(vport, &req);
7414 		else
7415 			/* Not all the vfids are zero, just update the vfid */
7416 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7417 
7418 	} else {
7419 		/* This mac address may be in the mta table, but it cannot be
7420 		 * deleted here because an mta entry represents an address
7421 		 * range rather than a specific address. The delete action for
7422 		 * all entries will take effect in update_mta_status, called by
7423 		 * hns3_nic_set_rx_mode.
7424 		 */
7425 		status = 0;
7426 	}
7427 
7428 	return status;
7429 }
7430 
7431 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7432 			       enum HCLGE_MAC_ADDR_TYPE mac_type)
7433 {
7434 	struct hclge_vport_mac_addr_cfg *mac_cfg;
7435 	struct list_head *list;
7436 
7437 	if (!vport->vport_id)
7438 		return;
7439 
7440 	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7441 	if (!mac_cfg)
7442 		return;
7443 
7444 	mac_cfg->hd_tbl_status = true;
7445 	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7446 
7447 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7448 	       &vport->uc_mac_list : &vport->mc_mac_list;
7449 
7450 	list_add_tail(&mac_cfg->node, list);
7451 }
7452 
7453 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7454 			      bool is_write_tbl,
7455 			      enum HCLGE_MAC_ADDR_TYPE mac_type)
7456 {
7457 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7458 	struct list_head *list;
7459 	bool uc_flag, mc_flag;
7460 
7461 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7462 	       &vport->uc_mac_list : &vport->mc_mac_list;
7463 
7464 	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7465 	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7466 
7467 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7468 		if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7469 			if (uc_flag && mac_cfg->hd_tbl_status)
7470 				hclge_rm_uc_addr_common(vport, mac_addr);
7471 
7472 			if (mc_flag && mac_cfg->hd_tbl_status)
7473 				hclge_rm_mc_addr_common(vport, mac_addr);
7474 
7475 			list_del(&mac_cfg->node);
7476 			kfree(mac_cfg);
7477 			break;
7478 		}
7479 	}
7480 }
7481 
7482 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7483 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
7484 {
7485 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7486 	struct list_head *list;
7487 
7488 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7489 	       &vport->uc_mac_list : &vport->mc_mac_list;
7490 
7491 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7492 		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7493 			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7494 
7495 		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7496 			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7497 
7498 		mac_cfg->hd_tbl_status = false;
7499 		if (is_del_list) {
7500 			list_del(&mac_cfg->node);
7501 			kfree(mac_cfg);
7502 		}
7503 	}
7504 }
7505 
7506 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7507 {
7508 	struct hclge_vport_mac_addr_cfg *mac, *tmp;
7509 	struct hclge_vport *vport;
7510 	int i;
7511 
7512 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7513 		vport = &hdev->vport[i];
7514 		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7515 			list_del(&mac->node);
7516 			kfree(mac);
7517 		}
7518 
7519 		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7520 			list_del(&mac->node);
7521 			kfree(mac);
7522 		}
7523 	}
7524 }
7525 
7526 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7527 					      u16 cmdq_resp, u8 resp_code)
7528 {
7529 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
7530 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
7531 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
7532 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
7533 
7534 	int return_status;
7535 
7536 	if (cmdq_resp) {
7537 		dev_err(&hdev->pdev->dev,
7538 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7539 			cmdq_resp);
7540 		return -EIO;
7541 	}
7542 
7543 	switch (resp_code) {
7544 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
7545 	case HCLGE_ETHERTYPE_ALREADY_ADD:
7546 		return_status = 0;
7547 		break;
7548 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7549 		dev_err(&hdev->pdev->dev,
7550 			"add mac ethertype failed for manager table overflow.\n");
7551 		return_status = -EIO;
7552 		break;
7553 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
7554 		dev_err(&hdev->pdev->dev,
7555 			"add mac ethertype failed for key conflict.\n");
7556 		return_status = -EIO;
7557 		break;
7558 	default:
7559 		dev_err(&hdev->pdev->dev,
7560 			"add mac ethertype failed for undefined, code=%u.\n",
7561 			resp_code);
7562 		return_status = -EIO;
7563 	}
7564 
7565 	return return_status;
7566 }
7567 
7568 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7569 				     u8 *mac_addr)
7570 {
7571 	struct hclge_mac_vlan_tbl_entry_cmd req;
7572 	struct hclge_dev *hdev = vport->back;
7573 	struct hclge_desc desc;
7574 	u16 egress_port = 0;
7575 	int i;
7576 
7577 	if (is_zero_ether_addr(mac_addr))
7578 		return false;
7579 
7580 	memset(&req, 0, sizeof(req));
7581 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7582 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7583 	req.egress_port = cpu_to_le16(egress_port);
7584 	hclge_prepare_mac_addr(&req, mac_addr, false);
7585 
7586 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7587 		return true;
7588 
7589 	vf_idx += HCLGE_VF_VPORT_START_NUM;
7590 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7591 		if (i != vf_idx &&
7592 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
7593 			return true;
7594 
7595 	return false;
7596 }
7597 
7598 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7599 			    u8 *mac_addr)
7600 {
7601 	struct hclge_vport *vport = hclge_get_vport(handle);
7602 	struct hclge_dev *hdev = vport->back;
7603 
7604 	vport = hclge_get_vf_vport(hdev, vf);
7605 	if (!vport)
7606 		return -EINVAL;
7607 
7608 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7609 		dev_info(&hdev->pdev->dev,
7610 			 "Specified MAC(=%pM) is same as before, no change committed!\n",
7611 			 mac_addr);
7612 		return 0;
7613 	}
7614 
7615 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7616 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7617 			mac_addr);
7618 		return -EEXIST;
7619 	}
7620 
7621 	ether_addr_copy(vport->vf_info.mac, mac_addr);
7622 	dev_info(&hdev->pdev->dev,
7623 		 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
7624 		 vf, mac_addr);
7625 
7626 	return hclge_inform_reset_assert_to_vf(vport);
7627 }
7628 
7629 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7630 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
7631 {
7632 	struct hclge_desc desc;
7633 	u8 resp_code;
7634 	u16 retval;
7635 	int ret;
7636 
7637 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7638 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7639 
7640 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7641 	if (ret) {
7642 		dev_err(&hdev->pdev->dev,
7643 			"add mac ethertype failed for cmd_send, ret =%d.\n",
7644 			ret);
7645 		return ret;
7646 	}
7647 
7648 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7649 	retval = le16_to_cpu(desc.retval);
7650 
7651 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7652 }
7653 
7654 static int init_mgr_tbl(struct hclge_dev *hdev)
7655 {
7656 	int ret;
7657 	int i;
7658 
7659 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7660 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7661 		if (ret) {
7662 			dev_err(&hdev->pdev->dev,
7663 				"add mac ethertype failed, ret =%d.\n",
7664 				ret);
7665 			return ret;
7666 		}
7667 	}
7668 
7669 	return 0;
7670 }
7671 
7672 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7673 {
7674 	struct hclge_vport *vport = hclge_get_vport(handle);
7675 	struct hclge_dev *hdev = vport->back;
7676 
7677 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
7678 }
7679 
7680 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7681 			      bool is_first)
7682 {
7683 	const unsigned char *new_addr = (const unsigned char *)p;
7684 	struct hclge_vport *vport = hclge_get_vport(handle);
7685 	struct hclge_dev *hdev = vport->back;
7686 	int ret;
7687 
7688 	/* mac addr check */
7689 	if (is_zero_ether_addr(new_addr) ||
7690 	    is_broadcast_ether_addr(new_addr) ||
7691 	    is_multicast_ether_addr(new_addr)) {
7692 		dev_err(&hdev->pdev->dev,
7693 			"Change uc mac err! invalid mac:%pM.\n",
7694 			 new_addr);
7695 		return -EINVAL;
7696 	}
7697 
7698 	if ((!is_first || is_kdump_kernel()) &&
7699 	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7700 		dev_warn(&hdev->pdev->dev,
7701 			 "remove old uc mac address fail.\n");
7702 
7703 	ret = hclge_add_uc_addr(handle, new_addr);
7704 	if (ret) {
7705 		dev_err(&hdev->pdev->dev,
7706 			"add uc mac address fail, ret =%d.\n",
7707 			ret);
7708 
7709 		if (!is_first &&
7710 		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7711 			dev_err(&hdev->pdev->dev,
7712 				"restore uc mac address fail.\n");
7713 
7714 		return -EIO;
7715 	}
7716 
7717 	ret = hclge_pause_addr_cfg(hdev, new_addr);
7718 	if (ret) {
7719 		dev_err(&hdev->pdev->dev,
7720 			"configure mac pause address fail, ret =%d.\n",
7721 			ret);
7722 		return -EIO;
7723 	}
7724 
7725 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7726 
7727 	return 0;
7728 }
7729 
7730 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7731 			  int cmd)
7732 {
7733 	struct hclge_vport *vport = hclge_get_vport(handle);
7734 	struct hclge_dev *hdev = vport->back;
7735 
7736 	if (!hdev->hw.mac.phydev)
7737 		return -EOPNOTSUPP;
7738 
7739 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7740 }
7741 
7742 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7743 				      u8 fe_type, bool filter_en, u8 vf_id)
7744 {
7745 	struct hclge_vlan_filter_ctrl_cmd *req;
7746 	struct hclge_desc desc;
7747 	int ret;
7748 
7749 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7750 
7751 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7752 	req->vlan_type = vlan_type;
7753 	req->vlan_fe = filter_en ? fe_type : 0;
7754 	req->vf_id = vf_id;
7755 
7756 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7757 	if (ret)
7758 		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7759 			ret);
7760 
7761 	return ret;
7762 }
7763 
7764 #define HCLGE_FILTER_TYPE_VF		0
7765 #define HCLGE_FILTER_TYPE_PORT		1
7766 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
7767 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
7768 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
7769 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
7770 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
7771 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
7772 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
7773 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
7774 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
7775 
7776 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7777 {
7778 	struct hclge_vport *vport = hclge_get_vport(handle);
7779 	struct hclge_dev *hdev = vport->back;
7780 
7781 	if (hdev->pdev->revision >= 0x21) {
7782 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7783 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
7784 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7785 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
7786 	} else {
7787 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7788 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7789 					   0);
7790 	}
7791 	if (enable)
7792 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
7793 	else
7794 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7795 }
7796 
7797 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7798 				    bool is_kill, u16 vlan,
7799 				    __be16 proto)
7800 {
7801 	struct hclge_vport *vport = &hdev->vport[vfid];
7802 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
7803 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
7804 	struct hclge_desc desc[2];
7805 	u8 vf_byte_val;
7806 	u8 vf_byte_off;
7807 	int ret;
7808 
7809 	/* If the vf vlan table is full, firmware disables the vf vlan filter,
7810 	 * so it is neither possible nor necessary to add a new vlan id to it.
7811 	 * If spoof check is enabled and the table is full, the new vlan must
7812 	 * not be added, because tx packets with this vlan id would be dropped.
7813 	 */
7814 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
7815 		if (vport->vf_info.spoofchk && vlan) {
7816 			dev_err(&hdev->pdev->dev,
7817 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
7818 			return -EPERM;
7819 		}
7820 		return 0;
7821 	}
7822 
7823 	hclge_cmd_setup_basic_desc(&desc[0],
7824 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7825 	hclge_cmd_setup_basic_desc(&desc[1],
7826 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7827 
7828 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7829 
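	/* The VF bitmap spans both descriptors (HCLGE_MAX_VF_BYTES bytes
	 * each); set the bit for this vfid in whichever descriptor holds
	 * its byte.
	 */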
7830 	vf_byte_off = vfid / 8;
7831 	vf_byte_val = 1 << (vfid % 8);
7832 
7833 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7834 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7835 
7836 	req0->vlan_id  = cpu_to_le16(vlan);
7837 	req0->vlan_cfg = is_kill;
7838 
7839 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7840 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7841 	else
7842 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7843 
7844 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
7845 	if (ret) {
7846 		dev_err(&hdev->pdev->dev,
7847 			"Send vf vlan command fail, ret =%d.\n",
7848 			ret);
7849 		return ret;
7850 	}
7851 
7852 	if (!is_kill) {
7853 #define HCLGE_VF_VLAN_NO_ENTRY	2
7854 		if (!req0->resp_code || req0->resp_code == 1)
7855 			return 0;
7856 
7857 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7858 			set_bit(vfid, hdev->vf_vlan_full);
7859 			dev_warn(&hdev->pdev->dev,
7860 				 "vf vlan table is full, vf vlan filter is disabled\n");
7861 			return 0;
7862 		}
7863 
7864 		dev_err(&hdev->pdev->dev,
7865 			"Add vf vlan filter fail, ret =%u.\n",
7866 			req0->resp_code);
7867 	} else {
7868 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
7869 		if (!req0->resp_code)
7870 			return 0;
7871 
7872 		/* The vf vlan filter is disabled once the vf vlan table is
7873 		 * full, so new vlan ids are never added to the vf vlan table.
7874 		 * Just return 0 without a warning, to avoid massive verbose
7875 		 * print logs when unloading.
7876 		 */
7877 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7878 			return 0;
7879 
7880 		dev_err(&hdev->pdev->dev,
7881 			"Kill vf vlan filter fail, ret =%u.\n",
7882 			req0->resp_code);
7883 	}
7884 
7885 	return -EIO;
7886 }
7887 
7888 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7889 				      u16 vlan_id, bool is_kill)
7890 {
7891 	struct hclge_vlan_filter_pf_cfg_cmd *req;
7892 	struct hclge_desc desc;
7893 	u8 vlan_offset_byte_val;
7894 	u8 vlan_offset_byte;
7895 	u8 vlan_offset_160;
7896 	int ret;
7897 
7898 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7899 
7900 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
7901 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
7902 			   HCLGE_VLAN_BYTE_SIZE;
7903 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
7904 
7905 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7906 	req->vlan_offset = vlan_offset_160;
7907 	req->vlan_cfg = is_kill;
7908 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7909 
7910 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7911 	if (ret)
7912 		dev_err(&hdev->pdev->dev,
7913 			"port vlan command, send fail, ret =%d.\n", ret);
7914 	return ret;
7915 }
7916 
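/* Program the per-function VF vlan filter first, then touch the port level
 * vlan filter only when the first vport joins the vlan or the last vport
 * leaves it, as tracked by the hdev->vlan_table bitmaps.
 */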
7917 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7918 				    u16 vport_id, u16 vlan_id,
7919 				    bool is_kill)
7920 {
7921 	u16 vport_idx, vport_num = 0;
7922 	int ret;
7923 
7924 	if (is_kill && !vlan_id)
7925 		return 0;
7926 
7927 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7928 				       proto);
7929 	if (ret) {
7930 		dev_err(&hdev->pdev->dev,
7931 			"Set %u vport vlan filter config fail, ret =%d.\n",
7932 			vport_id, ret);
7933 		return ret;
7934 	}
7935 
7936 	/* vlan 0 may be added twice when 8021q module is enabled */
7937 	if (!is_kill && !vlan_id &&
7938 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
7939 		return 0;
7940 
7941 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7942 		dev_err(&hdev->pdev->dev,
7943 			"Add port vlan failed, vport %u is already in vlan %u\n",
7944 			vport_id, vlan_id);
7945 		return -EINVAL;
7946 	}
7947 
7948 	if (is_kill &&
7949 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7950 		dev_err(&hdev->pdev->dev,
7951 			"Delete port vlan failed, vport %u is not in vlan %u\n",
7952 			vport_id, vlan_id);
7953 		return -EINVAL;
7954 	}
7955 
7956 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7957 		vport_num++;
7958 
7959 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7960 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7961 						 is_kill);
7962 
7963 	return ret;
7964 }
7965 
7966 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7967 {
7968 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7969 	struct hclge_vport_vtag_tx_cfg_cmd *req;
7970 	struct hclge_dev *hdev = vport->back;
7971 	struct hclge_desc desc;
7972 	u16 bmap_index;
7973 	int status;
7974 
7975 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7976 
7977 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7978 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7979 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7980 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7981 		      vcfg->accept_tag1 ? 1 : 0);
7982 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7983 		      vcfg->accept_untag1 ? 1 : 0);
7984 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7985 		      vcfg->accept_tag2 ? 1 : 0);
7986 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7987 		      vcfg->accept_untag2 ? 1 : 0);
7988 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7989 		      vcfg->insert_tag1_en ? 1 : 0);
7990 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7991 		      vcfg->insert_tag2_en ? 1 : 0);
7992 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7993 
7994 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7995 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7996 			HCLGE_VF_NUM_PER_BYTE;
7997 	req->vf_bitmap[bmap_index] =
7998 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7999 
8000 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8001 	if (status)
8002 		dev_err(&hdev->pdev->dev,
8003 			"Send port txvlan cfg command fail, ret =%d\n",
8004 			status);
8005 
8006 	return status;
8007 }
8008 
8009 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8010 {
8011 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8012 	struct hclge_vport_vtag_rx_cfg_cmd *req;
8013 	struct hclge_dev *hdev = vport->back;
8014 	struct hclge_desc desc;
8015 	u16 bmap_index;
8016 	int status;
8017 
8018 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8019 
8020 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8021 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8022 		      vcfg->strip_tag1_en ? 1 : 0);
8023 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8024 		      vcfg->strip_tag2_en ? 1 : 0);
8025 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8026 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
8027 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8028 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
8029 
8030 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8031 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8032 			HCLGE_VF_NUM_PER_BYTE;
8033 	req->vf_bitmap[bmap_index] =
8034 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8035 
8036 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8037 	if (status)
8038 		dev_err(&hdev->pdev->dev,
8039 			"Send port rxvlan cfg command fail, ret =%d\n",
8040 			status);
8041 
8042 	return status;
8043 }
8044 
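/* Configure tx/rx vlan tag handling according to the port based vlan state:
 * when a port based vlan is active, tag1 from the stack is not accepted and
 * the default tag is inserted on tx instead; when it is disabled, tag1 from
 * the stack is accepted and no default tag is inserted.
 */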
8045 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8046 				  u16 port_base_vlan_state,
8047 				  u16 vlan_tag)
8048 {
8049 	int ret;
8050 
8051 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8052 		vport->txvlan_cfg.accept_tag1 = true;
8053 		vport->txvlan_cfg.insert_tag1_en = false;
8054 		vport->txvlan_cfg.default_tag1 = 0;
8055 	} else {
8056 		vport->txvlan_cfg.accept_tag1 = false;
8057 		vport->txvlan_cfg.insert_tag1_en = true;
8058 		vport->txvlan_cfg.default_tag1 = vlan_tag;
8059 	}
8060 
8061 	vport->txvlan_cfg.accept_untag1 = true;
8062 
8063 	/* accept_tag2 and accept_untag2 are not supported on
8064 	 * pdev revision 0x20; newer revisions support them, but
8065 	 * these two fields cannot be configured by the user.
8066 	 */
8067 	vport->txvlan_cfg.accept_tag2 = true;
8068 	vport->txvlan_cfg.accept_untag2 = true;
8069 	vport->txvlan_cfg.insert_tag2_en = false;
8070 	vport->txvlan_cfg.default_tag2 = 0;
8071 
8072 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8073 		vport->rxvlan_cfg.strip_tag1_en = false;
8074 		vport->rxvlan_cfg.strip_tag2_en =
8075 				vport->rxvlan_cfg.rx_vlan_offload_en;
8076 	} else {
8077 		vport->rxvlan_cfg.strip_tag1_en =
8078 				vport->rxvlan_cfg.rx_vlan_offload_en;
8079 		vport->rxvlan_cfg.strip_tag2_en = true;
8080 	}
8081 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8082 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8083 
8084 	ret = hclge_set_vlan_tx_offload_cfg(vport);
8085 	if (ret)
8086 		return ret;
8087 
8088 	return hclge_set_vlan_rx_offload_cfg(vport);
8089 }
8090 
8091 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8092 {
8093 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8094 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8095 	struct hclge_desc desc;
8096 	int status;
8097 
8098 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8099 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8100 	rx_req->ot_fst_vlan_type =
8101 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8102 	rx_req->ot_sec_vlan_type =
8103 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8104 	rx_req->in_fst_vlan_type =
8105 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8106 	rx_req->in_sec_vlan_type =
8107 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8108 
8109 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8110 	if (status) {
8111 		dev_err(&hdev->pdev->dev,
8112 			"Send rxvlan protocol type command fail, ret =%d\n",
8113 			status);
8114 		return status;
8115 	}
8116 
8117 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8118 
8119 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8120 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8121 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8122 
8123 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8124 	if (status)
8125 		dev_err(&hdev->pdev->dev,
8126 			"Send txvlan protocol type command fail, ret =%d\n",
8127 			status);
8128 
8129 	return status;
8130 }
8131 
8132 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8133 {
8134 #define HCLGE_DEF_VLAN_TYPE		0x8100
8135 
8136 	struct hnae3_handle *handle = &hdev->vport[0].nic;
8137 	struct hclge_vport *vport;
8138 	int ret;
8139 	int i;
8140 
8141 	if (hdev->pdev->revision >= 0x21) {
8142 		/* for revision 0x21, vf vlan filter is per function */
8143 		for (i = 0; i < hdev->num_alloc_vport; i++) {
8144 			vport = &hdev->vport[i];
8145 			ret = hclge_set_vlan_filter_ctrl(hdev,
8146 							 HCLGE_FILTER_TYPE_VF,
8147 							 HCLGE_FILTER_FE_EGRESS,
8148 							 true,
8149 							 vport->vport_id);
8150 			if (ret)
8151 				return ret;
8152 		}
8153 
8154 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8155 						 HCLGE_FILTER_FE_INGRESS, true,
8156 						 0);
8157 		if (ret)
8158 			return ret;
8159 	} else {
8160 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8161 						 HCLGE_FILTER_FE_EGRESS_V1_B,
8162 						 true, 0);
8163 		if (ret)
8164 			return ret;
8165 	}
8166 
8167 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
8168 
8169 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8170 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8171 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8172 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8173 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8174 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8175 
8176 	ret = hclge_set_vlan_protocol_type(hdev);
8177 	if (ret)
8178 		return ret;
8179 
8180 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8181 		u16 vlan_tag;
8182 
8183 		vport = &hdev->vport[i];
8184 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8185 
8186 		ret = hclge_vlan_offload_cfg(vport,
8187 					     vport->port_base_vlan_cfg.state,
8188 					     vlan_tag);
8189 		if (ret)
8190 			return ret;
8191 	}
8192 
8193 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8194 }
8195 
8196 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8197 				       bool writen_to_tbl)
8198 {
8199 	struct hclge_vport_vlan_cfg *vlan;
8200 
8201 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8202 	if (!vlan)
8203 		return;
8204 
8205 	vlan->hd_tbl_status = writen_to_tbl;
8206 	vlan->vlan_id = vlan_id;
8207 
8208 	list_add_tail(&vlan->node, &vport->vlan_list);
8209 }
8210 
8211 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8212 {
8213 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8214 	struct hclge_dev *hdev = vport->back;
8215 	int ret;
8216 
8217 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8218 		if (!vlan->hd_tbl_status) {
8219 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8220 						       vport->vport_id,
8221 						       vlan->vlan_id, false);
8222 			if (ret) {
8223 				dev_err(&hdev->pdev->dev,
8224 					"restore vport vlan list failed, ret=%d\n",
8225 					ret);
8226 				return ret;
8227 			}
8228 		}
8229 		vlan->hd_tbl_status = true;
8230 	}
8231 
8232 	return 0;
8233 }
8234 
8235 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8236 				      bool is_write_tbl)
8237 {
8238 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8239 	struct hclge_dev *hdev = vport->back;
8240 
8241 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8242 		if (vlan->vlan_id == vlan_id) {
8243 			if (is_write_tbl && vlan->hd_tbl_status)
8244 				hclge_set_vlan_filter_hw(hdev,
8245 							 htons(ETH_P_8021Q),
8246 							 vport->vport_id,
8247 							 vlan_id,
8248 							 true);
8249 
8250 			list_del(&vlan->node);
8251 			kfree(vlan);
8252 			break;
8253 		}
8254 	}
8255 }
8256 
8257 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8258 {
8259 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8260 	struct hclge_dev *hdev = vport->back;
8261 
8262 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8263 		if (vlan->hd_tbl_status)
8264 			hclge_set_vlan_filter_hw(hdev,
8265 						 htons(ETH_P_8021Q),
8266 						 vport->vport_id,
8267 						 vlan->vlan_id,
8268 						 true);
8269 
8270 		vlan->hd_tbl_status = false;
8271 		if (is_del_list) {
8272 			list_del(&vlan->node);
8273 			kfree(vlan);
8274 		}
8275 	}
8276 }
8277 
8278 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8279 {
8280 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8281 	struct hclge_vport *vport;
8282 	int i;
8283 
8284 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8285 		vport = &hdev->vport[i];
8286 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8287 			list_del(&vlan->node);
8288 			kfree(vlan);
8289 		}
8290 	}
8291 }
8292 
8293 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8294 {
8295 	struct hclge_vport *vport = hclge_get_vport(handle);
8296 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8297 	struct hclge_dev *hdev = vport->back;
8298 	u16 vlan_proto;
8299 	u16 state, vlan_id;
8300 	int i;
8301 
8302 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8303 		vport = &hdev->vport[i];
8304 		vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8305 		vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8306 		state = vport->port_base_vlan_cfg.state;
8307 
8308 		if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8309 			hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8310 						 vport->vport_id, vlan_id,
8311 						 false);
8312 			continue;
8313 		}
8314 
8315 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8316 			int ret;
8317 
8318 			if (!vlan->hd_tbl_status)
8319 				continue;
8320 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8321 						       vport->vport_id,
8322 						       vlan->vlan_id, false);
8323 			if (ret)
8324 				break;
8325 		}
8326 	}
8327 }
8328 
8329 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8330 {
8331 	struct hclge_vport *vport = hclge_get_vport(handle);
8332 
8333 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8334 		vport->rxvlan_cfg.strip_tag1_en = false;
8335 		vport->rxvlan_cfg.strip_tag2_en = enable;
8336 	} else {
8337 		vport->rxvlan_cfg.strip_tag1_en = enable;
8338 		vport->rxvlan_cfg.strip_tag2_en = true;
8339 	}
8340 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8341 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8342 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8343 
8344 	return hclge_set_vlan_rx_offload_cfg(vport);
8345 }
8346 
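/* When enabling port based vlan, drop all per-vport vlan entries from hw and
 * program only the new port vlan; when disabling it, delete the old port vlan
 * from hw and restore the entries recorded in the vport vlan list.
 */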
8347 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8348 					    u16 port_base_vlan_state,
8349 					    struct hclge_vlan_info *new_info,
8350 					    struct hclge_vlan_info *old_info)
8351 {
8352 	struct hclge_dev *hdev = vport->back;
8353 	int ret;
8354 
8355 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8356 		hclge_rm_vport_all_vlan_table(vport, false);
8357 		return hclge_set_vlan_filter_hw(hdev,
8358 						 htons(new_info->vlan_proto),
8359 						 vport->vport_id,
8360 						 new_info->vlan_tag,
8361 						 false);
8362 	}
8363 
8364 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8365 				       vport->vport_id, old_info->vlan_tag,
8366 				       true);
8367 	if (ret)
8368 		return ret;
8369 
8370 	return hclge_add_vport_all_vlan_table(vport);
8371 }
8372 
8373 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8374 				    struct hclge_vlan_info *vlan_info)
8375 {
8376 	struct hnae3_handle *nic = &vport->nic;
8377 	struct hclge_vlan_info *old_vlan_info;
8378 	struct hclge_dev *hdev = vport->back;
8379 	int ret;
8380 
8381 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8382 
8383 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8384 	if (ret)
8385 		return ret;
8386 
8387 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8388 		/* add new VLAN tag */
8389 		ret = hclge_set_vlan_filter_hw(hdev,
8390 					       htons(vlan_info->vlan_proto),
8391 					       vport->vport_id,
8392 					       vlan_info->vlan_tag,
8393 					       false);
8394 		if (ret)
8395 			return ret;
8396 
8397 		/* remove old VLAN tag */
8398 		ret = hclge_set_vlan_filter_hw(hdev,
8399 					       htons(old_vlan_info->vlan_proto),
8400 					       vport->vport_id,
8401 					       old_vlan_info->vlan_tag,
8402 					       true);
8403 		if (ret)
8404 			return ret;
8405 
8406 		goto update;
8407 	}
8408 
8409 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8410 					       old_vlan_info);
8411 	if (ret)
8412 		return ret;
8413 
8414 	/* update state only when disabling/enabling port based VLAN */
8415 	vport->port_base_vlan_cfg.state = state;
8416 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8417 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8418 	else
8419 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8420 
8421 update:
8422 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8423 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8424 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8425 
8426 	return 0;
8427 }
8428 
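/* Map the requested vlan against the current port based vlan state:
 * disabled + vlan 0        -> no change
 * disabled + non-zero vlan -> enable
 * enabled  + vlan 0        -> disable
 * enabled  + same vlan     -> no change
 * enabled  + other vlan    -> modify
 */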
8429 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8430 					  enum hnae3_port_base_vlan_state state,
8431 					  u16 vlan)
8432 {
8433 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8434 		if (!vlan)
8435 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8436 		else
8437 			return HNAE3_PORT_BASE_VLAN_ENABLE;
8438 	} else {
8439 		if (!vlan)
8440 			return HNAE3_PORT_BASE_VLAN_DISABLE;
8441 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8442 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8443 		else
8444 			return HNAE3_PORT_BASE_VLAN_MODIFY;
8445 	}
8446 }
8447 
8448 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8449 				    u16 vlan, u8 qos, __be16 proto)
8450 {
8451 	struct hclge_vport *vport = hclge_get_vport(handle);
8452 	struct hclge_dev *hdev = vport->back;
8453 	struct hclge_vlan_info vlan_info;
8454 	u16 state;
8455 	int ret;
8456 
8457 	if (hdev->pdev->revision == 0x20)
8458 		return -EOPNOTSUPP;
8459 
8460 	vport = hclge_get_vf_vport(hdev, vfid);
8461 	if (!vport)
8462 		return -EINVAL;
8463 
8464 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
8465 	if (vlan > VLAN_N_VID - 1 || qos > 7)
8466 		return -EINVAL;
8467 	if (proto != htons(ETH_P_8021Q))
8468 		return -EPROTONOSUPPORT;
8469 
8470 	state = hclge_get_port_base_vlan_state(vport,
8471 					       vport->port_base_vlan_cfg.state,
8472 					       vlan);
8473 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8474 		return 0;
8475 
8476 	vlan_info.vlan_tag = vlan;
8477 	vlan_info.qos = qos;
8478 	vlan_info.vlan_proto = ntohs(proto);
8479 
8480 	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8481 		return hclge_update_port_base_vlan_cfg(vport, state,
8482 						       &vlan_info);
8483 	} else {
8484 		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8485 							vport->vport_id, state,
8486 							vlan, qos,
8487 							ntohs(proto));
8488 		return ret;
8489 	}
8490 }
8491 
8492 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8493 			  u16 vlan_id, bool is_kill)
8494 {
8495 	struct hclge_vport *vport = hclge_get_vport(handle);
8496 	struct hclge_dev *hdev = vport->back;
8497 	bool writen_to_tbl = false;
8498 	int ret = 0;
8499 
8500 	/* While the device is resetting, the firmware is unable to handle
8501 	 * the mailbox. Just record the vlan id, and remove it after the
8502 	 * reset has finished.
8503 	 */
8504 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8505 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
8506 		return -EBUSY;
8507 	}
8508 
8509 	/* When port based vlan is enabled, we use it as the vlan filter
8510 	 * entry. In this case, we don't update the vlan filter table when
8511 	 * the user adds a new vlan or removes an existing one, we just
8512 	 * update the vport vlan list. The vlan ids in the vlan list are not
8513 	 * written to the vlan filter table until port based vlan is disabled.
8514 	 */
8515 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8516 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8517 					       vlan_id, is_kill);
8518 		writen_to_tbl = true;
8519 	}
8520 
8521 	if (!ret) {
8522 		if (is_kill)
8523 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
8524 		else
8525 			hclge_add_vport_vlan_table(vport, vlan_id,
8526 						   writen_to_tbl);
8527 	} else if (is_kill) {
8528 		/* When removing the hw vlan filter failed, record the vlan
8529 		 * id and try to remove it from hw later, to stay consistent
8530 		 * with the stack
8531 		 */
8532 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
8533 	}
8534 	return ret;
8535 }
8536 
8537 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8538 {
8539 #define HCLGE_MAX_SYNC_COUNT	60
8540 
8541 	int i, ret, sync_cnt = 0;
8542 	u16 vlan_id;
8543 
8544 	/* start from vport 1 for PF is always alive */
8545 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8546 		struct hclge_vport *vport = &hdev->vport[i];
8547 
8548 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8549 					 VLAN_N_VID);
8550 		while (vlan_id != VLAN_N_VID) {
8551 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8552 						       vport->vport_id, vlan_id,
8553 						       true);
8554 			if (ret && ret != -EINVAL)
8555 				return;
8556 
8557 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8558 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
8559 
8560 			sync_cnt++;
8561 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8562 				return;
8563 
8564 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8565 						 VLAN_N_VID);
8566 		}
8567 	}
8568 }
8569 
8570 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8571 {
8572 	struct hclge_config_max_frm_size_cmd *req;
8573 	struct hclge_desc desc;
8574 
8575 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8576 
8577 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8578 	req->max_frm_size = cpu_to_le16(new_mps);
8579 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8580 
8581 	return hclge_cmd_send(&hdev->hw, &desc, 1);
8582 }
8583 
8584 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8585 {
8586 	struct hclge_vport *vport = hclge_get_vport(handle);
8587 
8588 	return hclge_set_vport_mtu(vport, new_mtu);
8589 }
8590 
8591 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8592 {
8593 	struct hclge_dev *hdev = vport->back;
8594 	int i, max_frm_size, ret;
8595 
8596 	/* HW supports 2 layers of vlan tags */
8597 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
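	/* e.g. an MTU of 1500 gives 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) +
	 * 8 (2 * VLAN_HLEN) = 1526 bytes
	 */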
8598 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8599 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
8600 		return -EINVAL;
8601 
8602 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8603 	mutex_lock(&hdev->vport_lock);
8604 	/* VF's mps must fit within hdev->mps */
8605 	if (vport->vport_id && max_frm_size > hdev->mps) {
8606 		mutex_unlock(&hdev->vport_lock);
8607 		return -EINVAL;
8608 	} else if (vport->vport_id) {
8609 		vport->mps = max_frm_size;
8610 		mutex_unlock(&hdev->vport_lock);
8611 		return 0;
8612 	}
8613 
8614 	/* PF's mps must be no less than each VF's mps */
8615 	for (i = 1; i < hdev->num_alloc_vport; i++)
8616 		if (max_frm_size < hdev->vport[i].mps) {
8617 			mutex_unlock(&hdev->vport_lock);
8618 			return -EINVAL;
8619 		}
8620 
8621 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8622 
8623 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
8624 	if (ret) {
8625 		dev_err(&hdev->pdev->dev,
8626 			"Change mtu fail, ret =%d\n", ret);
8627 		goto out;
8628 	}
8629 
8630 	hdev->mps = max_frm_size;
8631 	vport->mps = max_frm_size;
8632 
8633 	ret = hclge_buffer_alloc(hdev);
8634 	if (ret)
8635 		dev_err(&hdev->pdev->dev,
8636 			"Allocate buffer fail, ret =%d\n", ret);
8637 
8638 out:
8639 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8640 	mutex_unlock(&hdev->vport_lock);
8641 	return ret;
8642 }
8643 
8644 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8645 				    bool enable)
8646 {
8647 	struct hclge_reset_tqp_queue_cmd *req;
8648 	struct hclge_desc desc;
8649 	int ret;
8650 
8651 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8652 
8653 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8654 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8655 	if (enable)
8656 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8657 
8658 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8659 	if (ret) {
8660 		dev_err(&hdev->pdev->dev,
8661 			"Send tqp reset cmd error, status =%d\n", ret);
8662 		return ret;
8663 	}
8664 
8665 	return 0;
8666 }
8667 
8668 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8669 {
8670 	struct hclge_reset_tqp_queue_cmd *req;
8671 	struct hclge_desc desc;
8672 	int ret;
8673 
8674 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8675 
8676 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8677 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8678 
8679 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8680 	if (ret) {
8681 		dev_err(&hdev->pdev->dev,
8682 			"Get reset status error, status =%d\n", ret);
8683 		return ret;
8684 	}
8685 
8686 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8687 }
8688 
8689 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8690 {
8691 	struct hnae3_queue *queue;
8692 	struct hclge_tqp *tqp;
8693 
8694 	queue = handle->kinfo.tqp[queue_id];
8695 	tqp = container_of(queue, struct hclge_tqp, q);
8696 
8697 	return tqp->index;
8698 }
8699 
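/* Reset a single TQP: disable the queue, assert the per-queue reset, poll
 * the reset status up to HCLGE_TQP_RESET_TRY_TIMES times (sleeping about
 * 1ms per try), then deassert the reset.
 */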
8700 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8701 {
8702 	struct hclge_vport *vport = hclge_get_vport(handle);
8703 	struct hclge_dev *hdev = vport->back;
8704 	int reset_try_times = 0;
8705 	int reset_status;
8706 	u16 queue_gid;
8707 	int ret;
8708 
8709 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8710 
8711 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8712 	if (ret) {
8713 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8714 		return ret;
8715 	}
8716 
8717 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8718 	if (ret) {
8719 		dev_err(&hdev->pdev->dev,
8720 			"Send reset tqp cmd fail, ret = %d\n", ret);
8721 		return ret;
8722 	}
8723 
8724 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8725 		reset_status = hclge_get_reset_status(hdev, queue_gid);
8726 		if (reset_status)
8727 			break;
8728 
8729 		/* Wait for tqp hw reset */
8730 		usleep_range(1000, 1200);
8731 	}
8732 
8733 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8734 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8735 		return ret;
8736 	}
8737 
8738 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8739 	if (ret)
8740 		dev_err(&hdev->pdev->dev,
8741 			"Deassert the soft reset fail, ret = %d\n", ret);
8742 
8743 	return ret;
8744 }
8745 
8746 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8747 {
8748 	struct hclge_dev *hdev = vport->back;
8749 	int reset_try_times = 0;
8750 	int reset_status;
8751 	u16 queue_gid;
8752 	int ret;
8753 
8754 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8755 
8756 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8757 	if (ret) {
8758 		dev_warn(&hdev->pdev->dev,
8759 			 "Send reset tqp cmd fail, ret = %d\n", ret);
8760 		return;
8761 	}
8762 
8763 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8764 		reset_status = hclge_get_reset_status(hdev, queue_gid);
8765 		if (reset_status)
8766 			break;
8767 
8768 		/* Wait for tqp hw reset */
8769 		usleep_range(1000, 1200);
8770 	}
8771 
8772 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8773 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8774 		return;
8775 	}
8776 
8777 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8778 	if (ret)
8779 		dev_warn(&hdev->pdev->dev,
8780 			 "Deassert the soft reset fail, ret = %d\n", ret);
8781 }
8782 
8783 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8784 {
8785 	struct hclge_vport *vport = hclge_get_vport(handle);
8786 	struct hclge_dev *hdev = vport->back;
8787 
8788 	return hdev->fw_version;
8789 }
8790 
8791 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8792 {
8793 	struct phy_device *phydev = hdev->hw.mac.phydev;
8794 
8795 	if (!phydev)
8796 		return;
8797 
8798 	phy_set_asym_pause(phydev, rx_en, tx_en);
8799 }
8800 
8801 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8802 {
8803 	int ret;
8804 
8805 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8806 		return 0;
8807 
8808 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8809 	if (ret)
8810 		dev_err(&hdev->pdev->dev,
8811 			"configure pauseparam error, ret = %d.\n", ret);
8812 
8813 	return ret;
8814 }
8815 
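/* Resolve the pause configuration from the local and link partner autoneg
 * advertisements via mii_resolve_flowctrl_fdx(), force pause off on half
 * duplex links, and apply the result to the MAC.
 */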
8816 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8817 {
8818 	struct phy_device *phydev = hdev->hw.mac.phydev;
8819 	u16 remote_advertising = 0;
8820 	u16 local_advertising;
8821 	u32 rx_pause, tx_pause;
8822 	u8 flowctl;
8823 
8824 	if (!phydev->link || !phydev->autoneg)
8825 		return 0;
8826 
8827 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8828 
8829 	if (phydev->pause)
8830 		remote_advertising = LPA_PAUSE_CAP;
8831 
8832 	if (phydev->asym_pause)
8833 		remote_advertising |= LPA_PAUSE_ASYM;
8834 
8835 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8836 					   remote_advertising);
8837 	tx_pause = flowctl & FLOW_CTRL_TX;
8838 	rx_pause = flowctl & FLOW_CTRL_RX;
8839 
8840 	if (phydev->duplex == HCLGE_MAC_HALF) {
8841 		tx_pause = 0;
8842 		rx_pause = 0;
8843 	}
8844 
8845 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8846 }
8847 
8848 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8849 				 u32 *rx_en, u32 *tx_en)
8850 {
8851 	struct hclge_vport *vport = hclge_get_vport(handle);
8852 	struct hclge_dev *hdev = vport->back;
8853 	struct phy_device *phydev = hdev->hw.mac.phydev;
8854 
8855 	*auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8856 
8857 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8858 		*rx_en = 0;
8859 		*tx_en = 0;
8860 		return;
8861 	}
8862 
8863 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8864 		*rx_en = 1;
8865 		*tx_en = 0;
8866 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8867 		*tx_en = 1;
8868 		*rx_en = 0;
8869 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8870 		*rx_en = 1;
8871 		*tx_en = 1;
8872 	} else {
8873 		*rx_en = 0;
8874 		*tx_en = 0;
8875 	}
8876 }
8877 
8878 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8879 					 u32 rx_en, u32 tx_en)
8880 {
8881 	if (rx_en && tx_en)
8882 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
8883 	else if (rx_en && !tx_en)
8884 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8885 	else if (!rx_en && tx_en)
8886 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8887 	else
8888 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
8889 
8890 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8891 }
8892 
8893 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8894 				u32 rx_en, u32 tx_en)
8895 {
8896 	struct hclge_vport *vport = hclge_get_vport(handle);
8897 	struct hclge_dev *hdev = vport->back;
8898 	struct phy_device *phydev = hdev->hw.mac.phydev;
8899 	u32 fc_autoneg;
8900 
8901 	if (phydev) {
8902 		fc_autoneg = hclge_get_autoneg(handle);
8903 		if (auto_neg != fc_autoneg) {
8904 			dev_info(&hdev->pdev->dev,
8905 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8906 			return -EOPNOTSUPP;
8907 		}
8908 	}
8909 
8910 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8911 		dev_info(&hdev->pdev->dev,
8912 			 "Priority flow control enabled. Cannot set link flow control.\n");
8913 		return -EOPNOTSUPP;
8914 	}
8915 
8916 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8917 
8918 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8919 
8920 	if (!auto_neg)
8921 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8922 
8923 	if (phydev)
8924 		return phy_start_aneg(phydev);
8925 
8926 	return -EOPNOTSUPP;
8927 }
8928 
8929 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8930 					  u8 *auto_neg, u32 *speed, u8 *duplex)
8931 {
8932 	struct hclge_vport *vport = hclge_get_vport(handle);
8933 	struct hclge_dev *hdev = vport->back;
8934 
8935 	if (speed)
8936 		*speed = hdev->hw.mac.speed;
8937 	if (duplex)
8938 		*duplex = hdev->hw.mac.duplex;
8939 	if (auto_neg)
8940 		*auto_neg = hdev->hw.mac.autoneg;
8941 }
8942 
8943 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8944 				 u8 *module_type)
8945 {
8946 	struct hclge_vport *vport = hclge_get_vport(handle);
8947 	struct hclge_dev *hdev = vport->back;
8948 
8949 	if (media_type)
8950 		*media_type = hdev->hw.mac.media_type;
8951 
8952 	if (module_type)
8953 		*module_type = hdev->hw.mac.module_type;
8954 }
8955 
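/* Read the MDI/MDI-X state from the PHY: switch to the MDIX page, read the
 * control and status registers, then restore the copper page before
 * decoding the result.
 */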
8956 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8957 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
8958 {
8959 	struct hclge_vport *vport = hclge_get_vport(handle);
8960 	struct hclge_dev *hdev = vport->back;
8961 	struct phy_device *phydev = hdev->hw.mac.phydev;
8962 	int mdix_ctrl, mdix, is_resolved;
8963 	unsigned int retval;
8964 
8965 	if (!phydev) {
8966 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8967 		*tp_mdix = ETH_TP_MDI_INVALID;
8968 		return;
8969 	}
8970 
8971 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8972 
8973 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8974 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8975 				    HCLGE_PHY_MDIX_CTRL_S);
8976 
8977 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8978 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8979 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8980 
8981 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8982 
8983 	switch (mdix_ctrl) {
8984 	case 0x0:
8985 		*tp_mdix_ctrl = ETH_TP_MDI;
8986 		break;
8987 	case 0x1:
8988 		*tp_mdix_ctrl = ETH_TP_MDI_X;
8989 		break;
8990 	case 0x3:
8991 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8992 		break;
8993 	default:
8994 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8995 		break;
8996 	}
8997 
8998 	if (!is_resolved)
8999 		*tp_mdix = ETH_TP_MDI_INVALID;
9000 	else if (mdix)
9001 		*tp_mdix = ETH_TP_MDI_X;
9002 	else
9003 		*tp_mdix = ETH_TP_MDI;
9004 }
9005 
9006 static void hclge_info_show(struct hclge_dev *hdev)
9007 {
9008 	struct device *dev = &hdev->pdev->dev;
9009 
9010 	dev_info(dev, "PF info begin:\n");
9011 
9012 	dev_info(dev, "Number of task queue pairs: %u\n", hdev->num_tqps);
9013 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9014 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9015 	dev_info(dev, "Number of vports: %u\n", hdev->num_alloc_vport);
9016 	dev_info(dev, "Number of vmdq vports: %u\n", hdev->num_vmdq_vport);
9017 	dev_info(dev, "Number of VFs for this PF: %u\n", hdev->num_req_vfs);
9018 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9019 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9020 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9021 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9022 	dev_info(dev, "This is %s PF\n",
9023 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9024 	dev_info(dev, "DCB %s\n",
9025 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9026 	dev_info(dev, "MQPRIO %s\n",
9027 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9028 
9029 	dev_info(dev, "PF info end.\n");
9030 }
9031 
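/* Initialize the NIC client instance for a vport. The reset counter is
 * sampled before the init so that, if a reset starts or completes while the
 * client is initializing, the instance is rolled back and -EBUSY returned.
 * On success the NIC hw error interrupts are enabled and, when driver
 * messages are on, the PF information is printed.
 */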
9032 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9033 					  struct hclge_vport *vport)
9034 {
9035 	struct hnae3_client *client = vport->nic.client;
9036 	struct hclge_dev *hdev = ae_dev->priv;
9037 	int rst_cnt = hdev->rst_stats.reset_cnt;
9038 	int ret;
9039 
9040 	ret = client->ops->init_instance(&vport->nic);
9041 	if (ret)
9042 		return ret;
9043 
9044 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9045 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9046 	    rst_cnt != hdev->rst_stats.reset_cnt) {
9047 		ret = -EBUSY;
9048 		goto init_nic_err;
9049 	}
9050 
9051 	/* Enable nic hw error interrupts */
9052 	ret = hclge_config_nic_hw_error(hdev, true);
9053 	if (ret) {
9054 		dev_err(&ae_dev->pdev->dev,
9055 			"fail(%d) to enable hw error interrupts\n", ret);
9056 		goto init_nic_err;
9057 	}
9058 
9059 	hnae3_set_client_init_flag(client, ae_dev, 1);
9060 
9061 	if (netif_msg_drv(&hdev->vport->nic))
9062 		hclge_info_show(hdev);
9063 
9064 	return ret;
9065 
9066 init_nic_err:
9067 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9068 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9069 		msleep(HCLGE_WAIT_RESET_DONE);
9070 
9071 	client->ops->uninit_instance(&vport->nic, 0);
9072 
9073 	return ret;
9074 }
9075 
9076 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9077 					   struct hclge_vport *vport)
9078 {
9079 	struct hclge_dev *hdev = ae_dev->priv;
9080 	struct hnae3_client *client;
9081 	int rst_cnt;
9082 	int ret;
9083 
9084 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9085 	    !hdev->nic_client)
9086 		return 0;
9087 
9088 	client = hdev->roce_client;
9089 	ret = hclge_init_roce_base_info(vport);
9090 	if (ret)
9091 		return ret;
9092 
9093 	rst_cnt = hdev->rst_stats.reset_cnt;
9094 	ret = client->ops->init_instance(&vport->roce);
9095 	if (ret)
9096 		return ret;
9097 
9098 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9099 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9100 	    rst_cnt != hdev->rst_stats.reset_cnt) {
9101 		ret = -EBUSY;
9102 		goto init_roce_err;
9103 	}
9104 
9105 	/* Enable roce ras interrupts */
9106 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
9107 	if (ret) {
9108 		dev_err(&ae_dev->pdev->dev,
9109 			"fail(%d) to enable roce ras interrupts\n", ret);
9110 		goto init_roce_err;
9111 	}
9112 
9113 	hnae3_set_client_init_flag(client, ae_dev, 1);
9114 
9115 	return 0;
9116 
9117 init_roce_err:
9118 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9119 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9120 		msleep(HCLGE_WAIT_RESET_DONE);
9121 
9122 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9123 
9124 	return ret;
9125 }
9126 
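/* Bind a client (KNIC or RoCE) to the PF vport and any VMDq vports. The NIC
 * instance is initialized first and the RoCE instance is layered on top when
 * the device and both clients support it; on failure the client pointers are
 * cleared again.
 */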
9127 static int hclge_init_client_instance(struct hnae3_client *client,
9128 				      struct hnae3_ae_dev *ae_dev)
9129 {
9130 	struct hclge_dev *hdev = ae_dev->priv;
9131 	struct hclge_vport *vport;
9132 	int i, ret;
9133 
9134 	for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
9135 		vport = &hdev->vport[i];
9136 
9137 		switch (client->type) {
9138 		case HNAE3_CLIENT_KNIC:
9139 			hdev->nic_client = client;
9140 			vport->nic.client = client;
9141 			ret = hclge_init_nic_client_instance(ae_dev, vport);
9142 			if (ret)
9143 				goto clear_nic;
9144 
9145 			ret = hclge_init_roce_client_instance(ae_dev, vport);
9146 			if (ret)
9147 				goto clear_roce;
9148 
9149 			break;
9150 		case HNAE3_CLIENT_ROCE:
9151 			if (hnae3_dev_roce_supported(hdev)) {
9152 				hdev->roce_client = client;
9153 				vport->roce.client = client;
9154 			}
9155 
9156 			ret = hclge_init_roce_client_instance(ae_dev, vport);
9157 			if (ret)
9158 				goto clear_roce;
9159 
9160 			break;
9161 		default:
9162 			return -EINVAL;
9163 		}
9164 	}
9165 
9166 	return 0;
9167 
9168 clear_nic:
9169 	hdev->nic_client = NULL;
9170 	vport->nic.client = NULL;
9171 	return ret;
9172 clear_roce:
9173 	hdev->roce_client = NULL;
9174 	vport->roce.client = NULL;
9175 	return ret;
9176 }
9177 
9178 static void hclge_uninit_client_instance(struct hnae3_client *client,
9179 					 struct hnae3_ae_dev *ae_dev)
9180 {
9181 	struct hclge_dev *hdev = ae_dev->priv;
9182 	struct hclge_vport *vport;
9183 	int i;
9184 
9185 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9186 		vport = &hdev->vport[i];
9187 		if (hdev->roce_client) {
9188 			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9189 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9190 				msleep(HCLGE_WAIT_RESET_DONE);
9191 
9192 			hdev->roce_client->ops->uninit_instance(&vport->roce,
9193 								0);
9194 			hdev->roce_client = NULL;
9195 			vport->roce.client = NULL;
9196 		}
9197 		if (client->type == HNAE3_CLIENT_ROCE)
9198 			return;
9199 		if (hdev->nic_client && client->ops->uninit_instance) {
9200 			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9201 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9202 				msleep(HCLGE_WAIT_RESET_DONE);
9203 
9204 			client->ops->uninit_instance(&vport->nic, 0);
9205 			hdev->nic_client = NULL;
9206 			vport->nic.client = NULL;
9207 		}
9208 	}
9209 }
9210 
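/* Bring up the PCI side of the device: enable it, pick a 64-bit DMA mask
 * with a 32-bit fallback, claim the regions, map BAR2 as the register space
 * and cache how many VFs the device can expose.
 */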
9211 static int hclge_pci_init(struct hclge_dev *hdev)
9212 {
9213 	struct pci_dev *pdev = hdev->pdev;
9214 	struct hclge_hw *hw;
9215 	int ret;
9216 
9217 	ret = pci_enable_device(pdev);
9218 	if (ret) {
9219 		dev_err(&pdev->dev, "failed to enable PCI device\n");
9220 		return ret;
9221 	}
9222 
9223 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9224 	if (ret) {
9225 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9226 		if (ret) {
9227 			dev_err(&pdev->dev,
9228 				"can't set consistent PCI DMA\n");
9229 			goto err_disable_device;
9230 		}
9231 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9232 	}
9233 
9234 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9235 	if (ret) {
9236 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9237 		goto err_disable_device;
9238 	}
9239 
9240 	pci_set_master(pdev);
9241 	hw = &hdev->hw;
9242 	hw->io_base = pcim_iomap(pdev, 2, 0);
9243 	if (!hw->io_base) {
9244 		dev_err(&pdev->dev, "Can't map configuration register space\n");
9245 		ret = -ENOMEM;
9246 		goto err_clr_master;
9247 	}
9248 
9249 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9250 
9251 	return 0;
9252 err_clr_master:
9253 	pci_clear_master(pdev);
9254 	pci_release_regions(pdev);
9255 err_disable_device:
9256 	pci_disable_device(pdev);
9257 
9258 	return ret;
9259 }
9260 
9261 static void hclge_pci_uninit(struct hclge_dev *hdev)
9262 {
9263 	struct pci_dev *pdev = hdev->pdev;
9264 
9265 	pcim_iounmap(pdev, hdev->hw.io_base);
9266 	pci_free_irq_vectors(pdev);
9267 	pci_clear_master(pdev);
9268 	pci_release_mem_regions(pdev);
9269 	pci_disable_device(pdev);
9270 }
9271 
9272 static void hclge_state_init(struct hclge_dev *hdev)
9273 {
9274 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9275 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9276 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9277 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9278 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9279 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9280 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9281 }
9282 
9283 static void hclge_state_uninit(struct hclge_dev *hdev)
9284 {
9285 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9286 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9287 
9288 	if (hdev->reset_timer.function)
9289 		del_timer_sync(&hdev->reset_timer);
9290 	if (hdev->service_task.work.func)
9291 		cancel_delayed_work_sync(&hdev->service_task);
9292 }
9293 
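/* Prepare the hardware for an FLR: take the reset semaphore and run the
 * reset preparation, retrying up to HCLGE_FLR_RETRY_CNT times while it fails
 * or another reset is pending, then mask the misc vector and the command
 * queue until the FLR completes.
 */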
9294 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9295 {
9296 #define HCLGE_FLR_RETRY_WAIT_MS	500
9297 #define HCLGE_FLR_RETRY_CNT	5
9298 
9299 	struct hclge_dev *hdev = ae_dev->priv;
9300 	int retry_cnt = 0;
9301 	int ret;
9302 
9303 retry:
9304 	down(&hdev->reset_sem);
9305 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9306 	hdev->reset_type = HNAE3_FLR_RESET;
9307 	ret = hclge_reset_prepare(hdev);
9308 	if (ret) {
9309 		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9310 			ret);
9311 		if (hdev->reset_pending ||
9312 		    retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9313 			dev_err(&hdev->pdev->dev,
9314 				"reset_pending:0x%lx, retry_cnt:%d\n",
9315 				hdev->reset_pending, retry_cnt);
9316 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9317 			up(&hdev->reset_sem);
9318 			msleep(HCLGE_FLR_RETRY_WAIT_MS);
9319 			goto retry;
9320 		}
9321 	}
9322 
9323 	/* disable misc vector before FLR done */
9324 	hclge_enable_vector(&hdev->misc_vector, false);
9325 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
9326 	hdev->rst_stats.flr_rst_cnt++;
9327 }
9328 
9329 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9330 {
9331 	struct hclge_dev *hdev = ae_dev->priv;
9332 	int ret;
9333 
9334 	hclge_enable_vector(&hdev->misc_vector, true);
9335 
9336 	ret = hclge_reset_rebuild(hdev);
9337 	if (ret)
9338 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
9339 
9340 	hdev->reset_type = HNAE3_NONE_RESET;
9341 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9342 	up(&hdev->reset_sem);
9343 }
9344 
9345 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9346 {
9347 	u16 i;
9348 
9349 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9350 		struct hclge_vport *vport = &hdev->vport[i];
9351 		int ret;
9352 
9353 		/* Send cmd to clear VF's FUNC_RST_ING */
9354 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9355 		if (ret)
9356 			dev_warn(&hdev->pdev->dev,
9357 				 "clear vf(%u) rst failed %d!\n",
9358 				 vport->vport_id, ret);
9359 	}
9360 }
9361 
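/* One-time PF initialization: set up PCI and the firmware command queue,
 * query capabilities and configuration, allocate MSI-X, TQPs and vports,
 * then initialize MDIO, MAC, VLAN, TM, RSS, the manager table and the flow
 * director before arming the misc vector and the periodic service task.
 */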
9362 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9363 {
9364 	struct pci_dev *pdev = ae_dev->pdev;
9365 	struct hclge_dev *hdev;
9366 	int ret;
9367 
9368 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9369 	if (!hdev) {
9370 		ret = -ENOMEM;
9371 		goto out;
9372 	}
9373 
9374 	hdev->pdev = pdev;
9375 	hdev->ae_dev = ae_dev;
9376 	hdev->reset_type = HNAE3_NONE_RESET;
9377 	hdev->reset_level = HNAE3_FUNC_RESET;
9378 	ae_dev->priv = hdev;
9379 
9380 	/* HW supports 2 layers of VLAN tags */
9381 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9382 
9383 	mutex_init(&hdev->vport_lock);
9384 	spin_lock_init(&hdev->fd_rule_lock);
9385 	sema_init(&hdev->reset_sem, 1);
9386 
9387 	ret = hclge_pci_init(hdev);
9388 	if (ret)
9389 		goto out;
9390 
9391 	/* Initialize the firmware command queue */
9392 	ret = hclge_cmd_queue_init(hdev);
9393 	if (ret)
9394 		goto err_pci_uninit;
9395 
9396 	/* Initialize the firmware command interface */
9397 	ret = hclge_cmd_init(hdev);
9398 	if (ret)
9399 		goto err_cmd_uninit;
9400 
9401 	ret = hclge_get_cap(hdev);
9402 	if (ret)
9403 		goto err_cmd_uninit;
9404 
9405 	ret = hclge_configure(hdev);
9406 	if (ret) {
9407 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9408 		goto err_cmd_uninit;
9409 	}
9410 
9411 	ret = hclge_init_msi(hdev);
9412 	if (ret) {
9413 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9414 		goto err_cmd_uninit;
9415 	}
9416 
9417 	ret = hclge_misc_irq_init(hdev);
9418 	if (ret)
9419 		goto err_msi_uninit;
9420 
9421 	ret = hclge_alloc_tqps(hdev);
9422 	if (ret) {
9423 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9424 		goto err_msi_irq_uninit;
9425 	}
9426 
9427 	ret = hclge_alloc_vport(hdev);
9428 	if (ret)
9429 		goto err_msi_irq_uninit;
9430 
9431 	ret = hclge_map_tqp(hdev);
9432 	if (ret)
9433 		goto err_msi_irq_uninit;
9434 
9435 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9436 		ret = hclge_mac_mdio_config(hdev);
9437 		if (ret)
9438 			goto err_msi_irq_uninit;
9439 	}
9440 
9441 	ret = hclge_init_umv_space(hdev);
9442 	if (ret)
9443 		goto err_mdiobus_unreg;
9444 
9445 	ret = hclge_mac_init(hdev);
9446 	if (ret) {
9447 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9448 		goto err_mdiobus_unreg;
9449 	}
9450 
9451 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9452 	if (ret) {
9453 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9454 		goto err_mdiobus_unreg;
9455 	}
9456 
9457 	ret = hclge_config_gro(hdev, true);
9458 	if (ret)
9459 		goto err_mdiobus_unreg;
9460 
9461 	ret = hclge_init_vlan_config(hdev);
9462 	if (ret) {
9463 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9464 		goto err_mdiobus_unreg;
9465 	}
9466 
9467 	ret = hclge_tm_schd_init(hdev);
9468 	if (ret) {
9469 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
9470 		goto err_mdiobus_unreg;
9471 	}
9472 
9473 	hclge_rss_init_cfg(hdev);
9474 	ret = hclge_rss_init_hw(hdev);
9475 	if (ret) {
9476 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9477 		goto err_mdiobus_unreg;
9478 	}
9479 
9480 	ret = init_mgr_tbl(hdev);
9481 	if (ret) {
9482 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
9483 		goto err_mdiobus_unreg;
9484 	}
9485 
9486 	ret = hclge_init_fd_config(hdev);
9487 	if (ret) {
9488 		dev_err(&pdev->dev,
9489 			"fd table init fail, ret=%d\n", ret);
9490 		goto err_mdiobus_unreg;
9491 	}
9492 
9493 	INIT_KFIFO(hdev->mac_tnl_log);
9494 
9495 	hclge_dcb_ops_set(hdev);
9496 
9497 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9498 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9499 
9500 	/* Set up affinity after the service timer setup because add_timer_on
9501 	 * is called in the affinity notify callback.
9502 	 */
9503 	hclge_misc_affinity_setup(hdev);
9504 
9505 	hclge_clear_all_event_cause(hdev);
9506 	hclge_clear_resetting_state(hdev);
9507 
9508 	/* Log and clear the hw errors those already occurred */
9509 	/* Log and clear the hw errors that have already occurred */
9510 
9511 	/* request a delayed reset for error recovery, because an immediate
9512 	 * global reset on a PF may affect the pending initialization of other PFs
9513 	 */
9514 	if (ae_dev->hw_err_reset_req) {
9515 		enum hnae3_reset_type reset_level;
9516 
9517 		reset_level = hclge_get_reset_level(ae_dev,
9518 						    &ae_dev->hw_err_reset_req);
9519 		hclge_set_def_reset_request(ae_dev, reset_level);
9520 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9521 	}
9522 
9523 	/* Enable MISC vector (vector0) */
9524 	hclge_enable_vector(&hdev->misc_vector, true);
9525 
9526 	hclge_state_init(hdev);
9527 	hdev->last_reset_time = jiffies;
9528 
9529 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9530 		 HCLGE_DRIVER_NAME);
9531 
9532 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
9533 
9534 	return 0;
9535 
9536 err_mdiobus_unreg:
9537 	if (hdev->hw.mac.phydev)
9538 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
9539 err_msi_irq_uninit:
9540 	hclge_misc_irq_uninit(hdev);
9541 err_msi_uninit:
9542 	pci_free_irq_vectors(pdev);
9543 err_cmd_uninit:
9544 	hclge_cmd_uninit(hdev);
9545 err_pci_uninit:
9546 	pcim_iounmap(pdev, hdev->hw.io_base);
9547 	pci_clear_master(pdev);
9548 	pci_release_regions(pdev);
9549 	pci_disable_device(pdev);
9550 out:
9551 	return ret;
9552 }
9553 
9554 static void hclge_stats_clear(struct hclge_dev *hdev)
9555 {
9556 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
9557 }
9558 
9559 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9560 {
9561 	return hclge_config_switch_param(hdev, vf, enable,
9562 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
9563 }
9564 
9565 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9566 {
9567 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9568 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
9569 					  enable, vf);
9570 }
9571 
9572 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
9573 {
9574 	int ret;
9575 
9576 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
9577 	if (ret) {
9578 		dev_err(&hdev->pdev->dev,
9579 			"Set vf %d mac spoof check %s failed, ret=%d\n",
9580 			vf, enable ? "on" : "off", ret);
9581 		return ret;
9582 	}
9583 
9584 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
9585 	if (ret)
9586 		dev_err(&hdev->pdev->dev,
9587 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
9588 			vf, enable ? "on" : "off", ret);
9589 
9590 	return ret;
9591 }
9592 
9593 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
9594 				 bool enable)
9595 {
9596 	struct hclge_vport *vport = hclge_get_vport(handle);
9597 	struct hclge_dev *hdev = vport->back;
9598 	u32 new_spoofchk = enable ? 1 : 0;
9599 	int ret;
9600 
9601 	if (hdev->pdev->revision == 0x20)
9602 		return -EOPNOTSUPP;
9603 
9604 	vport = hclge_get_vf_vport(hdev, vf);
9605 	if (!vport)
9606 		return -EINVAL;
9607 
9608 	if (vport->vf_info.spoofchk == new_spoofchk)
9609 		return 0;
9610 
9611 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
9612 		dev_warn(&hdev->pdev->dev,
9613 			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
9614 			 vf);
9615 	else if (enable && hclge_is_umv_space_full(vport))
9616 		dev_warn(&hdev->pdev->dev,
9617 			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
9618 			 vf);
9619 
9620 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
9621 	if (ret)
9622 		return ret;
9623 
9624 	vport->vf_info.spoofchk = new_spoofchk;
9625 	return 0;
9626 }
9627 
9628 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
9629 {
9630 	struct hclge_vport *vport = hdev->vport;
9631 	int ret;
9632 	int i;
9633 
9634 	if (hdev->pdev->revision == 0x20)
9635 		return 0;
9636 
9637 	/* resume the vf spoof check state after reset */
9638 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9639 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
9640 					       vport->vf_info.spoofchk);
9641 		if (ret)
9642 			return ret;
9643 
9644 		vport++;
9645 	}
9646 
9647 	return 0;
9648 }
9649 
9650 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
9651 {
9652 	struct hclge_vport *vport = hclge_get_vport(handle);
9653 	struct hclge_dev *hdev = vport->back;
9654 	u32 new_trusted = enable ? 1 : 0;
9655 	bool en_bc_pmc;
9656 	int ret;
9657 
9658 	vport = hclge_get_vf_vport(hdev, vf);
9659 	if (!vport)
9660 		return -EINVAL;
9661 
9662 	if (vport->vf_info.trusted == new_trusted)
9663 		return 0;
9664 
9665 	/* Disable promisc mode for VF if it is not trusted any more. */
9666 	if (!enable && vport->vf_info.promisc_enable) {
9667 		en_bc_pmc = hdev->pdev->revision != 0x20;
9668 		ret = hclge_set_vport_promisc_mode(vport, false, false,
9669 						   en_bc_pmc);
9670 		if (ret)
9671 			return ret;
9672 		vport->vf_info.promisc_enable = 0;
9673 		hclge_inform_vf_promisc_info(vport);
9674 	}
9675 
9676 	vport->vf_info.trusted = new_trusted;
9677 
9678 	return 0;
9679 }
9680 
9681 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
9682 {
9683 	int ret;
9684 	int vf;
9685 
9686 	/* reset vf rate to default value */
9687 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9688 		struct hclge_vport *vport = &hdev->vport[vf];
9689 
9690 		vport->vf_info.max_tx_rate = 0;
9691 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
9692 		if (ret)
9693 			dev_err(&hdev->pdev->dev,
9694 				"vf%d failed to reset to default, ret=%d\n",
9695 				vf - HCLGE_VF_VPORT_START_NUM, ret);
9696 	}
9697 }
9698 
9699 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
9700 				     int min_tx_rate, int max_tx_rate)
9701 {
9702 	if (min_tx_rate != 0 ||
9703 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
9704 		dev_err(&hdev->pdev->dev,
9705 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
9706 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
9707 		return -EINVAL;
9708 	}
9709 
9710 	return 0;
9711 }
9712 
9713 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
9714 			     int min_tx_rate, int max_tx_rate, bool force)
9715 {
9716 	struct hclge_vport *vport = hclge_get_vport(handle);
9717 	struct hclge_dev *hdev = vport->back;
9718 	int ret;
9719 
9720 	ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
9721 	if (ret)
9722 		return ret;
9723 
9724 	vport = hclge_get_vf_vport(hdev, vf);
9725 	if (!vport)
9726 		return -EINVAL;
9727 
9728 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
9729 		return 0;
9730 
9731 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
9732 	if (ret)
9733 		return ret;
9734 
9735 	vport->vf_info.max_tx_rate = max_tx_rate;
9736 
9737 	return 0;
9738 }
9739 
9740 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
9741 {
9742 	struct hnae3_handle *handle = &hdev->vport->nic;
9743 	struct hclge_vport *vport;
9744 	int ret;
9745 	int vf;
9746 
9747 	/* resume the vf max_tx_rate after reset */
9748 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
9749 		vport = hclge_get_vf_vport(hdev, vf);
9750 		if (!vport)
9751 			return -EINVAL;
9752 
9753 		/* zero means max rate; after reset, the firmware has already set
9754 		 * it to max rate, so just continue.
9755 		 */
9756 		if (!vport->vf_info.max_tx_rate)
9757 			continue;
9758 
9759 		ret = hclge_set_vf_rate(handle, vf, 0,
9760 					vport->vf_info.max_tx_rate, true);
9761 		if (ret) {
9762 			dev_err(&hdev->pdev->dev,
9763 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
9764 				vf, vport->vf_info.max_tx_rate, ret);
9765 			return ret;
9766 		}
9767 	}
9768 
9769 	return 0;
9770 }
9771 
9772 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9773 {
9774 	struct hclge_vport *vport = hdev->vport;
9775 	int i;
9776 
9777 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9778 		hclge_vport_stop(vport);
9779 		vport++;
9780 	}
9781 }
9782 
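/* Re-initialization path run after a reset: clear cached statistics and
 * VLAN tables, then redo the command queue, TQP mapping, MAC, TSO/GRO,
 * VLAN, TM, RSS, manager table and flow director setup, re-enable the hw
 * error interrupts and restore the per-VF spoof-check and rate settings.
 */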
9783 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9784 {
9785 	struct hclge_dev *hdev = ae_dev->priv;
9786 	struct pci_dev *pdev = ae_dev->pdev;
9787 	int ret;
9788 
9789 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9790 
9791 	hclge_stats_clear(hdev);
9792 	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9793 	memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9794 
9795 	ret = hclge_cmd_init(hdev);
9796 	if (ret) {
9797 		dev_err(&pdev->dev, "Cmd queue init failed\n");
9798 		return ret;
9799 	}
9800 
9801 	ret = hclge_map_tqp(hdev);
9802 	if (ret) {
9803 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9804 		return ret;
9805 	}
9806 
9807 	hclge_reset_umv_space(hdev);
9808 
9809 	ret = hclge_mac_init(hdev);
9810 	if (ret) {
9811 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9812 		return ret;
9813 	}
9814 
9815 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9816 	if (ret) {
9817 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9818 		return ret;
9819 	}
9820 
9821 	ret = hclge_config_gro(hdev, true);
9822 	if (ret)
9823 		return ret;
9824 
9825 	ret = hclge_init_vlan_config(hdev);
9826 	if (ret) {
9827 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9828 		return ret;
9829 	}
9830 
9831 	ret = hclge_tm_init_hw(hdev, true);
9832 	if (ret) {
9833 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9834 		return ret;
9835 	}
9836 
9837 	ret = hclge_rss_init_hw(hdev);
9838 	if (ret) {
9839 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9840 		return ret;
9841 	}
9842 
9843 	ret = init_mgr_tbl(hdev);
9844 	if (ret) {
9845 		dev_err(&pdev->dev,
9846 			"failed to reinit manager table, ret = %d\n", ret);
9847 		return ret;
9848 	}
9849 
9850 	ret = hclge_init_fd_config(hdev);
9851 	if (ret) {
9852 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9853 		return ret;
9854 	}
9855 
9856 	/* Log and clear the hw errors that have already occurred */
9857 	hclge_handle_all_hns_hw_errors(ae_dev);
9858 
9859 	/* Re-enable the hw error interrupts because
9860 	 * the interrupts get disabled on global reset.
9861 	 */
9862 	ret = hclge_config_nic_hw_error(hdev, true);
9863 	if (ret) {
9864 		dev_err(&pdev->dev,
9865 			"fail(%d) to re-enable NIC hw error interrupts\n",
9866 			ret);
9867 		return ret;
9868 	}
9869 
9870 	if (hdev->roce_client) {
9871 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
9872 		if (ret) {
9873 			dev_err(&pdev->dev,
9874 				"fail(%d) to re-enable roce ras interrupts\n",
9875 				ret);
9876 			return ret;
9877 		}
9878 	}
9879 
9880 	hclge_reset_vport_state(hdev);
9881 	ret = hclge_reset_vport_spoofchk(hdev);
9882 	if (ret)
9883 		return ret;
9884 
9885 	ret = hclge_resume_vf_rate(hdev);
9886 	if (ret)
9887 		return ret;
9888 
9889 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9890 		 HCLGE_DRIVER_NAME);
9891 
9892 	return 0;
9893 }
9894 
9895 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9896 {
9897 	struct hclge_dev *hdev = ae_dev->priv;
9898 	struct hclge_mac *mac = &hdev->hw.mac;
9899 
9900 	hclge_reset_vf_rate(hdev);
9901 	hclge_misc_affinity_teardown(hdev);
9902 	hclge_state_uninit(hdev);
9903 
9904 	if (mac->phydev)
9905 		mdiobus_unregister(mac->mdio_bus);
9906 
9907 	hclge_uninit_umv_space(hdev);
9908 
9909 	/* Disable MISC vector (vector0) */
9910 	hclge_enable_vector(&hdev->misc_vector, false);
9911 	synchronize_irq(hdev->misc_vector.vector_irq);
9912 
9913 	/* Disable all hw interrupts */
9914 	hclge_config_mac_tnl_int(hdev, false);
9915 	hclge_config_nic_hw_error(hdev, false);
9916 	hclge_config_rocee_ras_interrupt(hdev, false);
9917 
9918 	hclge_cmd_uninit(hdev);
9919 	hclge_misc_irq_uninit(hdev);
9920 	hclge_pci_uninit(hdev);
9921 	mutex_destroy(&hdev->vport_lock);
9922 	hclge_uninit_vport_mac_table(hdev);
9923 	hclge_uninit_vport_vlan_table(hdev);
9924 	ae_dev->priv = NULL;
9925 }
9926 
9927 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9928 {
9929 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9930 	struct hclge_vport *vport = hclge_get_vport(handle);
9931 	struct hclge_dev *hdev = vport->back;
9932 
9933 	return min_t(u32, hdev->rss_size_max,
9934 		     vport->alloc_tqps / kinfo->num_tc);
9935 }
9936 
9937 static void hclge_get_channels(struct hnae3_handle *handle,
9938 			       struct ethtool_channels *ch)
9939 {
9940 	ch->max_combined = hclge_get_max_channels(handle);
9941 	ch->other_count = 1;
9942 	ch->max_other = 1;
9943 	ch->combined_count = handle->kinfo.rss_size;
9944 }
9945 
9946 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9947 					u16 *alloc_tqps, u16 *max_rss_size)
9948 {
9949 	struct hclge_vport *vport = hclge_get_vport(handle);
9950 	struct hclge_dev *hdev = vport->back;
9951 
9952 	*alloc_tqps = vport->alloc_tqps;
9953 	*max_rss_size = hdev->rss_size_max;
9954 }
9955 
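/* Change the number of channels: record the requested queue count, let the
 * TM code remap the vport TQPs, reprogram the RSS TC mode for the new
 * rss_size and, unless the user has configured a custom RSS indirection
 * table, rebuild it with a simple round-robin mapping.
 */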
9956 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9957 			      bool rxfh_configured)
9958 {
9959 	struct hclge_vport *vport = hclge_get_vport(handle);
9960 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9961 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9962 	struct hclge_dev *hdev = vport->back;
9963 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9964 	u16 cur_rss_size = kinfo->rss_size;
9965 	u16 cur_tqps = kinfo->num_tqps;
9966 	u16 tc_valid[HCLGE_MAX_TC_NUM];
9967 	u16 roundup_size;
9968 	u32 *rss_indir;
9969 	unsigned int i;
9970 	int ret;
9971 
9972 	kinfo->req_rss_size = new_tqps_num;
9973 
9974 	ret = hclge_tm_vport_map_update(hdev);
9975 	if (ret) {
9976 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
9977 		return ret;
9978 	}
9979 
9980 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
9981 	roundup_size = ilog2(roundup_size);
9982 	/* Set the RSS TC mode according to the new RSS size */
9983 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
9984 		tc_valid[i] = 0;
9985 
9986 		if (!(hdev->hw_tc_map & BIT(i)))
9987 			continue;
9988 
9989 		tc_valid[i] = 1;
9990 		tc_size[i] = roundup_size;
9991 		tc_offset[i] = kinfo->rss_size * i;
9992 	}
9993 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
9994 	if (ret)
9995 		return ret;
9996 
9997 	/* RSS indirection table has been configured by the user */
9998 	if (rxfh_configured)
9999 		goto out;
10000 
10001 	/* Reinitialize the RSS indirection table according to the new RSS size */
10002 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10003 	if (!rss_indir)
10004 		return -ENOMEM;
10005 
10006 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10007 		rss_indir[i] = i % kinfo->rss_size;
10008 
10009 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10010 	if (ret)
10011 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10012 			ret);
10013 
10014 	kfree(rss_indir);
10015 
10016 out:
10017 	if (!ret)
10018 		dev_info(&hdev->pdev->dev,
10019 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
10020 			 cur_rss_size, kinfo->rss_size,
10021 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
10022 
10023 	return ret;
10024 }
10025 
10026 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10027 			      u32 *regs_num_64_bit)
10028 {
10029 	struct hclge_desc desc;
10030 	u32 total_num;
10031 	int ret;
10032 
10033 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10034 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10035 	if (ret) {
10036 		dev_err(&hdev->pdev->dev,
10037 			"Query register number cmd failed, ret = %d.\n", ret);
10038 		return ret;
10039 	}
10040 
10041 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
10042 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
10043 
10044 	total_num = *regs_num_32_bit + *regs_num_64_bit;
10045 	if (!total_num)
10046 		return -EINVAL;
10047 
10048 	return 0;
10049 }
10050 
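/* Read regs_num 32-bit registers with one chained command. Each descriptor
 * returns HCLGE_32_BIT_REG_RTN_DATANUM words, and the first descriptor loses
 * HCLGE_32_BIT_DESC_NODATA_LEN of them to the command header.
 */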
10051 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10052 				 void *data)
10053 {
10054 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10055 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10056 
10057 	struct hclge_desc *desc;
10058 	u32 *reg_val = data;
10059 	__le32 *desc_data;
10060 	int nodata_num;
10061 	int cmd_num;
10062 	int i, k, n;
10063 	int ret;
10064 
10065 	if (regs_num == 0)
10066 		return 0;
10067 
10068 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10069 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10070 			       HCLGE_32_BIT_REG_RTN_DATANUM);
10071 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10072 	if (!desc)
10073 		return -ENOMEM;
10074 
10075 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10076 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10077 	if (ret) {
10078 		dev_err(&hdev->pdev->dev,
10079 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
10080 		kfree(desc);
10081 		return ret;
10082 	}
10083 
10084 	for (i = 0; i < cmd_num; i++) {
10085 		if (i == 0) {
10086 			desc_data = (__le32 *)(&desc[i].data[0]);
10087 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10088 		} else {
10089 			desc_data = (__le32 *)(&desc[i]);
10090 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
10091 		}
10092 		for (k = 0; k < n; k++) {
10093 			*reg_val++ = le32_to_cpu(*desc_data++);
10094 
10095 			regs_num--;
10096 			if (!regs_num)
10097 				break;
10098 		}
10099 	}
10100 
10101 	kfree(desc);
10102 	return 0;
10103 }
10104 
10105 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10106 				 void *data)
10107 {
10108 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10109 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10110 
10111 	struct hclge_desc *desc;
10112 	u64 *reg_val = data;
10113 	__le64 *desc_data;
10114 	int nodata_len;
10115 	int cmd_num;
10116 	int i, k, n;
10117 	int ret;
10118 
10119 	if (regs_num == 0)
10120 		return 0;
10121 
10122 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10123 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10124 			       HCLGE_64_BIT_REG_RTN_DATANUM);
10125 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10126 	if (!desc)
10127 		return -ENOMEM;
10128 
10129 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10130 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10131 	if (ret) {
10132 		dev_err(&hdev->pdev->dev,
10133 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
10134 		kfree(desc);
10135 		return ret;
10136 	}
10137 
10138 	for (i = 0; i < cmd_num; i++) {
10139 		if (i == 0) {
10140 			desc_data = (__le64 *)(&desc[i].data[0]);
10141 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10142 		} else {
10143 			desc_data = (__le64 *)(&desc[i]);
10144 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
10145 		}
10146 		for (k = 0; k < n; k++) {
10147 			*reg_val++ = le64_to_cpu(*desc_data++);
10148 
10149 			regs_num--;
10150 			if (!regs_num)
10151 				break;
10152 		}
10153 	}
10154 
10155 	kfree(desc);
10156 	return 0;
10157 }
10158 
10159 #define MAX_SEPARATE_NUM	4
10160 #define SEPARATOR_VALUE		0xFDFCFBFA
10161 #define REG_NUM_PER_LINE	4
10162 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
10163 #define REG_SEPARATOR_LINE	1
10164 #define REG_NUM_REMAIN_MASK	3
10165 #define BD_LIST_MAX_NUM		30
10166 
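/* Query the DFX BD counts: send four chained HCLGE_OPC_DFX_BD_NUM
 * descriptors so the firmware returns the buffer descriptor number for
 * every DFX register group in a single command.
 */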
10167 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10168 {
10169 	/* prepare 4 commands to query DFX BD number */
10170 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10171 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10172 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10173 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10174 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10175 	desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10176 	hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10177 
10178 	return hclge_cmd_send(&hdev->hw, desc, 4);
10179 }
10180 
10181 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10182 				    int *bd_num_list,
10183 				    u32 type_num)
10184 {
10185 	u32 entries_per_desc, desc_index, index, offset, i;
10186 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10187 	int ret;
10188 
10189 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
10190 	if (ret) {
10191 		dev_err(&hdev->pdev->dev,
10192 			"Get dfx bd num fail, status is %d.\n", ret);
10193 		return ret;
10194 	}
10195 
10196 	entries_per_desc = ARRAY_SIZE(desc[0].data);
10197 	for (i = 0; i < type_num; i++) {
10198 		offset = hclge_dfx_bd_offset_list[i];
10199 		index = offset % entries_per_desc;
10200 		desc_index = offset / entries_per_desc;
10201 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10202 	}
10203 
10204 	return ret;
10205 }
10206 
10207 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10208 				  struct hclge_desc *desc_src, int bd_num,
10209 				  enum hclge_opcode_type cmd)
10210 {
10211 	struct hclge_desc *desc = desc_src;
10212 	int i, ret;
10213 
10214 	hclge_cmd_setup_basic_desc(desc, cmd, true);
10215 	for (i = 0; i < bd_num - 1; i++) {
10216 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10217 		desc++;
10218 		hclge_cmd_setup_basic_desc(desc, cmd, true);
10219 	}
10220 
10221 	desc = desc_src;
10222 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10223 	if (ret)
10224 		dev_err(&hdev->pdev->dev,
10225 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10226 			cmd, ret);
10227 
10228 	return ret;
10229 }
10230 
10231 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10232 				    void *data)
10233 {
10234 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10235 	struct hclge_desc *desc = desc_src;
10236 	u32 *reg = data;
10237 
10238 	entries_per_desc = ARRAY_SIZE(desc->data);
10239 	reg_num = entries_per_desc * bd_num;
10240 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10241 	for (i = 0; i < reg_num; i++) {
10242 		index = i % entries_per_desc;
10243 		desc_index = i / entries_per_desc;
10244 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
10245 	}
10246 	for (i = 0; i < separator_num; i++)
10247 		*reg++ = SEPARATOR_VALUE;
10248 
10249 	return reg_num + separator_num;
10250 }
10251 
10252 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10253 {
10254 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10255 	int data_len_per_desc, data_len, bd_num, i;
10256 	int bd_num_list[BD_LIST_MAX_NUM];
10257 	int ret;
10258 
10259 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10260 	if (ret) {
10261 		dev_err(&hdev->pdev->dev,
10262 			"Get dfx reg bd num fail, status is %d.\n", ret);
10263 		return ret;
10264 	}
10265 
10266 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
10267 	*len = 0;
10268 	for (i = 0; i < dfx_reg_type_num; i++) {
10269 		bd_num = bd_num_list[i];
10270 		data_len = data_len_per_desc * bd_num;
10271 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10272 	}
10273 
10274 	return ret;
10275 }
10276 
10277 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10278 {
10279 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10280 	int bd_num, bd_num_max, buf_len, i;
10281 	int bd_num_list[BD_LIST_MAX_NUM];
10282 	struct hclge_desc *desc_src;
10283 	u32 *reg = data;
10284 	int ret;
10285 
10286 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10287 	if (ret) {
10288 		dev_err(&hdev->pdev->dev,
10289 			"Get dfx reg bd num fail, status is %d.\n", ret);
10290 		return ret;
10291 	}
10292 
10293 	bd_num_max = bd_num_list[0];
10294 	for (i = 1; i < dfx_reg_type_num; i++)
10295 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10296 
10297 	buf_len = sizeof(*desc_src) * bd_num_max;
10298 	desc_src = kzalloc(buf_len, GFP_KERNEL);
10299 	if (!desc_src)
10300 		return -ENOMEM;
10301 
10302 	for (i = 0; i < dfx_reg_type_num; i++) {
10303 		bd_num = bd_num_list[i];
10304 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10305 					     hclge_dfx_reg_opcode_list[i]);
10306 		if (ret) {
10307 			dev_err(&hdev->pdev->dev,
10308 				"Get dfx reg fail, status is %d.\n", ret);
10309 			break;
10310 		}
10311 
10312 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10313 	}
10314 
10315 	kfree(desc_src);
10316 	return ret;
10317 }
10318 
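/* Dump the directly readable PF registers: the cmdq and common registers,
 * the per-ring registers for every TQP and the interrupt registers for every
 * TQP vector, padding each group with SEPARATOR_VALUE words. Returns the
 * number of u32 words written to the buffer.
 */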
10319 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10320 			      struct hnae3_knic_private_info *kinfo)
10321 {
10322 #define HCLGE_RING_REG_OFFSET		0x200
10323 #define HCLGE_RING_INT_REG_OFFSET	0x4
10324 
10325 	int i, j, reg_num, separator_num;
10326 	int data_num_sum;
10327 	u32 *reg = data;
10328 
10329 	/* fetch per-PF register values from the PF PCIe register space */
10330 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10331 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10332 	for (i = 0; i < reg_num; i++)
10333 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10334 	for (i = 0; i < separator_num; i++)
10335 		*reg++ = SEPARATOR_VALUE;
10336 	data_num_sum = reg_num + separator_num;
10337 
10338 	reg_num = ARRAY_SIZE(common_reg_addr_list);
10339 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10340 	for (i = 0; i < reg_num; i++)
10341 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10342 	for (i = 0; i < separator_num; i++)
10343 		*reg++ = SEPARATOR_VALUE;
10344 	data_num_sum += reg_num + separator_num;
10345 
10346 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
10347 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10348 	for (j = 0; j < kinfo->num_tqps; j++) {
10349 		for (i = 0; i < reg_num; i++)
10350 			*reg++ = hclge_read_dev(&hdev->hw,
10351 						ring_reg_addr_list[i] +
10352 						HCLGE_RING_REG_OFFSET * j);
10353 		for (i = 0; i < separator_num; i++)
10354 			*reg++ = SEPARATOR_VALUE;
10355 	}
10356 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10357 
10358 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10359 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10360 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
10361 		for (i = 0; i < reg_num; i++)
10362 			*reg++ = hclge_read_dev(&hdev->hw,
10363 						tqp_intr_reg_addr_list[i] +
10364 						HCLGE_RING_INT_REG_OFFSET * j);
10365 		for (i = 0; i < separator_num; i++)
10366 			*reg++ = SEPARATOR_VALUE;
10367 	}
10368 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10369 
10370 	return data_num_sum;
10371 }
10372 
10373 static int hclge_get_regs_len(struct hnae3_handle *handle)
10374 {
10375 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10376 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10377 	struct hclge_vport *vport = hclge_get_vport(handle);
10378 	struct hclge_dev *hdev = vport->back;
10379 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10380 	int regs_lines_32_bit, regs_lines_64_bit;
10381 	int ret;
10382 
10383 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10384 	if (ret) {
10385 		dev_err(&hdev->pdev->dev,
10386 			"Get register number failed, ret = %d.\n", ret);
10387 		return ret;
10388 	}
10389 
10390 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10391 	if (ret) {
10392 		dev_err(&hdev->pdev->dev,
10393 			"Get dfx reg len failed, ret = %d.\n", ret);
10394 		return ret;
10395 	}
10396 
10397 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10398 		REG_SEPARATOR_LINE;
10399 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10400 		REG_SEPARATOR_LINE;
10401 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10402 		REG_SEPARATOR_LINE;
10403 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10404 		REG_SEPARATOR_LINE;
10405 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10406 		REG_SEPARATOR_LINE;
10407 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10408 		REG_SEPARATOR_LINE;
10409 
10410 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10411 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10412 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10413 }
10414 
10415 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10416 			   void *data)
10417 {
10418 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10419 	struct hclge_vport *vport = hclge_get_vport(handle);
10420 	struct hclge_dev *hdev = vport->back;
10421 	u32 regs_num_32_bit, regs_num_64_bit;
10422 	int i, reg_num, separator_num, ret;
10423 	u32 *reg = data;
10424 
10425 	*version = hdev->fw_version;
10426 
10427 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10428 	if (ret) {
10429 		dev_err(&hdev->pdev->dev,
10430 			"Get register number failed, ret = %d.\n", ret);
10431 		return;
10432 	}
10433 
10434 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10435 
10436 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10437 	if (ret) {
10438 		dev_err(&hdev->pdev->dev,
10439 			"Get 32 bit register failed, ret = %d.\n", ret);
10440 		return;
10441 	}
10442 	reg_num = regs_num_32_bit;
10443 	reg += reg_num;
10444 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10445 	for (i = 0; i < separator_num; i++)
10446 		*reg++ = SEPARATOR_VALUE;
10447 
10448 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
10449 	if (ret) {
10450 		dev_err(&hdev->pdev->dev,
10451 			"Get 64 bit register failed, ret = %d.\n", ret);
10452 		return;
10453 	}
10454 	reg_num = regs_num_64_bit * 2;
10455 	reg += reg_num;
10456 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10457 	for (i = 0; i < separator_num; i++)
10458 		*reg++ = SEPARATOR_VALUE;
10459 
10460 	ret = hclge_get_dfx_reg(hdev, reg);
10461 	if (ret)
10462 		dev_err(&hdev->pdev->dev,
10463 			"Get dfx register failed, ret = %d.\n", ret);
10464 }
10465 
10466 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
10467 {
10468 	struct hclge_set_led_state_cmd *req;
10469 	struct hclge_desc desc;
10470 	int ret;
10471 
10472 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10473 
10474 	req = (struct hclge_set_led_state_cmd *)desc.data;
10475 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10476 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
10477 
10478 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10479 	if (ret)
10480 		dev_err(&hdev->pdev->dev,
10481 			"Send set led state cmd error, ret =%d\n", ret);
10482 
10483 	return ret;
10484 }
10485 
10486 enum hclge_led_status {
10487 	HCLGE_LED_OFF,
10488 	HCLGE_LED_ON,
10489 	HCLGE_LED_NO_CHANGE = 0xFF,
10490 };
10491 
10492 static int hclge_set_led_id(struct hnae3_handle *handle,
10493 			    enum ethtool_phys_id_state status)
10494 {
10495 	struct hclge_vport *vport = hclge_get_vport(handle);
10496 	struct hclge_dev *hdev = vport->back;
10497 
10498 	switch (status) {
10499 	case ETHTOOL_ID_ACTIVE:
10500 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
10501 	case ETHTOOL_ID_INACTIVE:
10502 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10503 	default:
10504 		return -EINVAL;
10505 	}
10506 }
10507 
10508 static void hclge_get_link_mode(struct hnae3_handle *handle,
10509 				unsigned long *supported,
10510 				unsigned long *advertising)
10511 {
10512 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10513 	struct hclge_vport *vport = hclge_get_vport(handle);
10514 	struct hclge_dev *hdev = vport->back;
10515 	unsigned int idx = 0;
10516 
10517 	for (; idx < size; idx++) {
10518 		supported[idx] = hdev->hw.mac.supported[idx];
10519 		advertising[idx] = hdev->hw.mac.advertising[idx];
10520 	}
10521 }
10522 
10523 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10524 {
10525 	struct hclge_vport *vport = hclge_get_vport(handle);
10526 	struct hclge_dev *hdev = vport->back;
10527 
10528 	return hclge_config_gro(hdev, enable);
10529 }
10530 
10531 static const struct hnae3_ae_ops hclge_ops = {
10532 	.init_ae_dev = hclge_init_ae_dev,
10533 	.uninit_ae_dev = hclge_uninit_ae_dev,
10534 	.flr_prepare = hclge_flr_prepare,
10535 	.flr_done = hclge_flr_done,
10536 	.init_client_instance = hclge_init_client_instance,
10537 	.uninit_client_instance = hclge_uninit_client_instance,
10538 	.map_ring_to_vector = hclge_map_ring_to_vector,
10539 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10540 	.get_vector = hclge_get_vector,
10541 	.put_vector = hclge_put_vector,
10542 	.set_promisc_mode = hclge_set_promisc_mode,
10543 	.set_loopback = hclge_set_loopback,
10544 	.start = hclge_ae_start,
10545 	.stop = hclge_ae_stop,
10546 	.client_start = hclge_client_start,
10547 	.client_stop = hclge_client_stop,
10548 	.get_status = hclge_get_status,
10549 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
10550 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10551 	.get_media_type = hclge_get_media_type,
10552 	.check_port_speed = hclge_check_port_speed,
10553 	.get_fec = hclge_get_fec,
10554 	.set_fec = hclge_set_fec,
10555 	.get_rss_key_size = hclge_get_rss_key_size,
10556 	.get_rss_indir_size = hclge_get_rss_indir_size,
10557 	.get_rss = hclge_get_rss,
10558 	.set_rss = hclge_set_rss,
10559 	.set_rss_tuple = hclge_set_rss_tuple,
10560 	.get_rss_tuple = hclge_get_rss_tuple,
10561 	.get_tc_size = hclge_get_tc_size,
10562 	.get_mac_addr = hclge_get_mac_addr,
10563 	.set_mac_addr = hclge_set_mac_addr,
10564 	.do_ioctl = hclge_do_ioctl,
10565 	.add_uc_addr = hclge_add_uc_addr,
10566 	.rm_uc_addr = hclge_rm_uc_addr,
10567 	.add_mc_addr = hclge_add_mc_addr,
10568 	.rm_mc_addr = hclge_rm_mc_addr,
10569 	.set_autoneg = hclge_set_autoneg,
10570 	.get_autoneg = hclge_get_autoneg,
10571 	.restart_autoneg = hclge_restart_autoneg,
10572 	.halt_autoneg = hclge_halt_autoneg,
10573 	.get_pauseparam = hclge_get_pauseparam,
10574 	.set_pauseparam = hclge_set_pauseparam,
10575 	.set_mtu = hclge_set_mtu,
10576 	.reset_queue = hclge_reset_tqp,
10577 	.get_stats = hclge_get_stats,
10578 	.get_mac_stats = hclge_get_mac_stat,
10579 	.update_stats = hclge_update_stats,
10580 	.get_strings = hclge_get_strings,
10581 	.get_sset_count = hclge_get_sset_count,
10582 	.get_fw_version = hclge_get_fw_version,
10583 	.get_mdix_mode = hclge_get_mdix_mode,
10584 	.enable_vlan_filter = hclge_enable_vlan_filter,
10585 	.set_vlan_filter = hclge_set_vlan_filter,
10586 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10587 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10588 	.reset_event = hclge_reset_event,
10589 	.get_reset_level = hclge_get_reset_level,
10590 	.set_default_reset_request = hclge_set_def_reset_request,
10591 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10592 	.set_channels = hclge_set_channels,
10593 	.get_channels = hclge_get_channels,
10594 	.get_regs_len = hclge_get_regs_len,
10595 	.get_regs = hclge_get_regs,
10596 	.set_led_id = hclge_set_led_id,
10597 	.get_link_mode = hclge_get_link_mode,
10598 	.add_fd_entry = hclge_add_fd_entry,
10599 	.del_fd_entry = hclge_del_fd_entry,
10600 	.del_all_fd_entries = hclge_del_all_fd_entries,
10601 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10602 	.get_fd_rule_info = hclge_get_fd_rule_info,
10603 	.get_fd_all_rules = hclge_get_all_rules,
10604 	.restore_fd_rules = hclge_restore_fd_entries,
10605 	.enable_fd = hclge_enable_fd,
10606 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
10607 	.dbg_run_cmd = hclge_dbg_run_cmd,
10608 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
10609 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
10610 	.ae_dev_resetting = hclge_ae_dev_resetting,
10611 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10612 	.set_gro_en = hclge_gro_en,
10613 	.get_global_queue_id = hclge_covert_handle_qid_global,
10614 	.set_timer_task = hclge_set_timer_task,
10615 	.mac_connect_phy = hclge_mac_connect_phy,
10616 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
10617 	.restore_vlan_table = hclge_restore_vlan_table,
10618 	.get_vf_config = hclge_get_vf_config,
10619 	.set_vf_link_state = hclge_set_vf_link_state,
10620 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
10621 	.set_vf_trust = hclge_set_vf_trust,
10622 	.set_vf_rate = hclge_set_vf_rate,
10623 	.set_vf_mac = hclge_set_vf_mac,
10624 };
10625 
10626 static struct hnae3_ae_algo ae_algo = {
10627 	.ops = &hclge_ops,
10628 	.pdev_id_table = ae_algo_pci_tbl,
10629 };
10630 
10631 static int hclge_init(void)
10632 {
10633 	pr_info("%s is initializing\n", HCLGE_NAME);
10634 
10635 	hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
10636 	if (!hclge_wq) {
10637 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
10638 		return -ENOMEM;
10639 	}
10640 
10641 	hnae3_register_ae_algo(&ae_algo);
10642 
10643 	return 0;
10644 }
10645 
10646 static void hclge_exit(void)
10647 {
10648 	hnae3_unregister_ae_algo(&ae_algo);
10649 	destroy_workqueue(hclge_wq);
10650 }
10651 module_init(hclge_init);
10652 module_exit(hclge_exit);
10653 
10654 MODULE_LICENSE("GPL");
10655 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
10656 MODULE_DESCRIPTION("HCLGE Driver");
10657 MODULE_VERSION(HCLGE_MOD_VERSION);
10658