xref: /linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c (revision 10a708c24a31ae1be1ea23d1c38da2691d1fd65c)
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25 
26 #define HCLGE_NAME			"hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
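/* HCLGE_MAC_STATS_FIELD_OFF() yields the byte offset of a field within
 * struct hclge_mac_stats, and HCLGE_STATS_READ() reads the u64 counter at
 * such an offset; the two are used together via g_mac_stats_string below.
 */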
29 
30 #define HCLGE_BUF_SIZE_UNIT	256U
31 #define HCLGE_BUF_MUL_BY	2
32 #define HCLGE_BUF_DIV_BY	2
33 #define NEED_RESERVE_TC_NUM	2
34 #define BUF_MAX_PERCENT		100
35 #define BUF_RESERVE_PERCENT	90
36 
37 #define HCLGE_RESET_MAX_FAIL_CNT	5
38 #define HCLGE_RESET_SYNC_TIME		100
39 #define HCLGE_PF_RESET_SYNC_TIME	20
40 #define HCLGE_PF_RESET_SYNC_CNT		1500
41 
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55 
56 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
57 static int hclge_init_vlan_config(struct hclge_dev *hdev);
58 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
59 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
60 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
61 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
62 			       u16 *allocated_size, bool is_alloc);
63 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
64 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
65 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
66 						   unsigned long *addr);
67 
68 static struct hnae3_ae_algo ae_algo;
69 
70 static const struct pci_device_id ae_algo_pci_tbl[] = {
71 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
72 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
73 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
74 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
75 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
76 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
77 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
78 	/* required last entry */
79 	{0, }
80 };
81 
82 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
83 
84 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
85 					 HCLGE_CMDQ_TX_ADDR_H_REG,
86 					 HCLGE_CMDQ_TX_DEPTH_REG,
87 					 HCLGE_CMDQ_TX_TAIL_REG,
88 					 HCLGE_CMDQ_TX_HEAD_REG,
89 					 HCLGE_CMDQ_RX_ADDR_L_REG,
90 					 HCLGE_CMDQ_RX_ADDR_H_REG,
91 					 HCLGE_CMDQ_RX_DEPTH_REG,
92 					 HCLGE_CMDQ_RX_TAIL_REG,
93 					 HCLGE_CMDQ_RX_HEAD_REG,
94 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
95 					 HCLGE_CMDQ_INTR_STS_REG,
96 					 HCLGE_CMDQ_INTR_EN_REG,
97 					 HCLGE_CMDQ_INTR_GEN_REG};
98 
99 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
100 					   HCLGE_VECTOR0_OTER_EN_REG,
101 					   HCLGE_MISC_RESET_STS_REG,
102 					   HCLGE_MISC_VECTOR_INT_STS,
103 					   HCLGE_GLOBAL_RESET_REG,
104 					   HCLGE_FUN_RST_ING,
105 					   HCLGE_GRO_EN_REG};
106 
107 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
108 					 HCLGE_RING_RX_ADDR_H_REG,
109 					 HCLGE_RING_RX_BD_NUM_REG,
110 					 HCLGE_RING_RX_BD_LENGTH_REG,
111 					 HCLGE_RING_RX_MERGE_EN_REG,
112 					 HCLGE_RING_RX_TAIL_REG,
113 					 HCLGE_RING_RX_HEAD_REG,
114 					 HCLGE_RING_RX_FBD_NUM_REG,
115 					 HCLGE_RING_RX_OFFSET_REG,
116 					 HCLGE_RING_RX_FBD_OFFSET_REG,
117 					 HCLGE_RING_RX_STASH_REG,
118 					 HCLGE_RING_RX_BD_ERR_REG,
119 					 HCLGE_RING_TX_ADDR_L_REG,
120 					 HCLGE_RING_TX_ADDR_H_REG,
121 					 HCLGE_RING_TX_BD_NUM_REG,
122 					 HCLGE_RING_TX_PRIORITY_REG,
123 					 HCLGE_RING_TX_TC_REG,
124 					 HCLGE_RING_TX_MERGE_EN_REG,
125 					 HCLGE_RING_TX_TAIL_REG,
126 					 HCLGE_RING_TX_HEAD_REG,
127 					 HCLGE_RING_TX_FBD_NUM_REG,
128 					 HCLGE_RING_TX_OFFSET_REG,
129 					 HCLGE_RING_TX_EBD_NUM_REG,
130 					 HCLGE_RING_TX_EBD_OFFSET_REG,
131 					 HCLGE_RING_TX_BD_ERR_REG,
132 					 HCLGE_RING_EN_REG};
133 
134 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
135 					     HCLGE_TQP_INTR_GL0_REG,
136 					     HCLGE_TQP_INTR_GL1_REG,
137 					     HCLGE_TQP_INTR_GL2_REG,
138 					     HCLGE_TQP_INTR_RL_REG};
139 
140 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
141 	"App    Loopback test",
142 	"Serdes serial Loopback test",
143 	"Serdes parallel Loopback test",
144 	"Phy    Loopback test"
145 };
146 
147 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
148 	{"mac_tx_mac_pause_num",
149 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
150 	{"mac_rx_mac_pause_num",
151 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
152 	{"mac_tx_control_pkt_num",
153 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
154 	{"mac_rx_control_pkt_num",
155 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
156 	{"mac_tx_pfc_pkt_num",
157 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
158 	{"mac_tx_pfc_pri0_pkt_num",
159 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
160 	{"mac_tx_pfc_pri1_pkt_num",
161 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
162 	{"mac_tx_pfc_pri2_pkt_num",
163 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
164 	{"mac_tx_pfc_pri3_pkt_num",
165 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
166 	{"mac_tx_pfc_pri4_pkt_num",
167 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
168 	{"mac_tx_pfc_pri5_pkt_num",
169 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
170 	{"mac_tx_pfc_pri6_pkt_num",
171 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
172 	{"mac_tx_pfc_pri7_pkt_num",
173 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
174 	{"mac_rx_pfc_pkt_num",
175 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
176 	{"mac_rx_pfc_pri0_pkt_num",
177 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
178 	{"mac_rx_pfc_pri1_pkt_num",
179 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
180 	{"mac_rx_pfc_pri2_pkt_num",
181 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
182 	{"mac_rx_pfc_pri3_pkt_num",
183 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
184 	{"mac_rx_pfc_pri4_pkt_num",
185 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
186 	{"mac_rx_pfc_pri5_pkt_num",
187 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
188 	{"mac_rx_pfc_pri6_pkt_num",
189 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
190 	{"mac_rx_pfc_pri7_pkt_num",
191 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
192 	{"mac_tx_total_pkt_num",
193 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
194 	{"mac_tx_total_oct_num",
195 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
196 	{"mac_tx_good_pkt_num",
197 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
198 	{"mac_tx_bad_pkt_num",
199 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
200 	{"mac_tx_good_oct_num",
201 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
202 	{"mac_tx_bad_oct_num",
203 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
204 	{"mac_tx_uni_pkt_num",
205 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
206 	{"mac_tx_multi_pkt_num",
207 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
208 	{"mac_tx_broad_pkt_num",
209 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
210 	{"mac_tx_undersize_pkt_num",
211 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
212 	{"mac_tx_oversize_pkt_num",
213 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
214 	{"mac_tx_64_oct_pkt_num",
215 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
216 	{"mac_tx_65_127_oct_pkt_num",
217 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
218 	{"mac_tx_128_255_oct_pkt_num",
219 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
220 	{"mac_tx_256_511_oct_pkt_num",
221 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
222 	{"mac_tx_512_1023_oct_pkt_num",
223 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
224 	{"mac_tx_1024_1518_oct_pkt_num",
225 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
226 	{"mac_tx_1519_2047_oct_pkt_num",
227 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
228 	{"mac_tx_2048_4095_oct_pkt_num",
229 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
230 	{"mac_tx_4096_8191_oct_pkt_num",
231 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
232 	{"mac_tx_8192_9216_oct_pkt_num",
233 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
234 	{"mac_tx_9217_12287_oct_pkt_num",
235 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
236 	{"mac_tx_12288_16383_oct_pkt_num",
237 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
238 	{"mac_tx_1519_max_good_pkt_num",
239 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
240 	{"mac_tx_1519_max_bad_pkt_num",
241 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
242 	{"mac_rx_total_pkt_num",
243 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
244 	{"mac_rx_total_oct_num",
245 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
246 	{"mac_rx_good_pkt_num",
247 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
248 	{"mac_rx_bad_pkt_num",
249 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
250 	{"mac_rx_good_oct_num",
251 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
252 	{"mac_rx_bad_oct_num",
253 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
254 	{"mac_rx_uni_pkt_num",
255 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
256 	{"mac_rx_multi_pkt_num",
257 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
258 	{"mac_rx_broad_pkt_num",
259 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
260 	{"mac_rx_undersize_pkt_num",
261 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
262 	{"mac_rx_oversize_pkt_num",
263 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
264 	{"mac_rx_64_oct_pkt_num",
265 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
266 	{"mac_rx_65_127_oct_pkt_num",
267 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
268 	{"mac_rx_128_255_oct_pkt_num",
269 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
270 	{"mac_rx_256_511_oct_pkt_num",
271 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
272 	{"mac_rx_512_1023_oct_pkt_num",
273 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
274 	{"mac_rx_1024_1518_oct_pkt_num",
275 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
276 	{"mac_rx_1519_2047_oct_pkt_num",
277 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
278 	{"mac_rx_2048_4095_oct_pkt_num",
279 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
280 	{"mac_rx_4096_8191_oct_pkt_num",
281 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
282 	{"mac_rx_8192_9216_oct_pkt_num",
283 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
284 	{"mac_rx_9217_12287_oct_pkt_num",
285 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
286 	{"mac_rx_12288_16383_oct_pkt_num",
287 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
288 	{"mac_rx_1519_max_good_pkt_num",
289 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
290 	{"mac_rx_1519_max_bad_pkt_num",
291 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
292 
293 	{"mac_tx_fragment_pkt_num",
294 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
295 	{"mac_tx_undermin_pkt_num",
296 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
297 	{"mac_tx_jabber_pkt_num",
298 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
299 	{"mac_tx_err_all_pkt_num",
300 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
301 	{"mac_tx_from_app_good_pkt_num",
302 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
303 	{"mac_tx_from_app_bad_pkt_num",
304 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
305 	{"mac_rx_fragment_pkt_num",
306 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
307 	{"mac_rx_undermin_pkt_num",
308 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
309 	{"mac_rx_jabber_pkt_num",
310 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
311 	{"mac_rx_fcs_err_pkt_num",
312 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
313 	{"mac_rx_send_app_good_pkt_num",
314 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
315 	{"mac_rx_send_app_bad_pkt_num",
316 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
317 };
318 
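/* the entry below matches LLDP frames: destination MAC 01:80:c2:00:00:0e
 * (0x0180C200 + 0x000E) with ethertype ETH_P_LLDP
 */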
319 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
320 	{
321 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
322 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
323 		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
324 		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
325 		.i_port_bitmap = 0x1,
326 	},
327 };
328 
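/* default RSS hash key; this is the well-known default Toeplitz key also
 * used by many other NIC drivers
 */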
329 static const u8 hclge_hash_key[] = {
330 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
331 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
332 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
333 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
334 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
335 };
336 
337 static const u32 hclge_dfx_bd_offset_list[] = {
338 	HCLGE_DFX_BIOS_BD_OFFSET,
339 	HCLGE_DFX_SSU_0_BD_OFFSET,
340 	HCLGE_DFX_SSU_1_BD_OFFSET,
341 	HCLGE_DFX_IGU_BD_OFFSET,
342 	HCLGE_DFX_RPU_0_BD_OFFSET,
343 	HCLGE_DFX_RPU_1_BD_OFFSET,
344 	HCLGE_DFX_NCSI_BD_OFFSET,
345 	HCLGE_DFX_RTC_BD_OFFSET,
346 	HCLGE_DFX_PPP_BD_OFFSET,
347 	HCLGE_DFX_RCB_BD_OFFSET,
348 	HCLGE_DFX_TQP_BD_OFFSET,
349 	HCLGE_DFX_SSU_2_BD_OFFSET
350 };
351 
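/* apparently kept in the same order as hclge_dfx_bd_offset_list above, so
 * the two arrays can be indexed in parallel
 */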
352 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
353 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
354 	HCLGE_OPC_DFX_SSU_REG_0,
355 	HCLGE_OPC_DFX_SSU_REG_1,
356 	HCLGE_OPC_DFX_IGU_EGU_REG,
357 	HCLGE_OPC_DFX_RPU_REG_0,
358 	HCLGE_OPC_DFX_RPU_REG_1,
359 	HCLGE_OPC_DFX_NCSI_REG,
360 	HCLGE_OPC_DFX_RTC_REG,
361 	HCLGE_OPC_DFX_PPP_REG,
362 	HCLGE_OPC_DFX_RCB_REG,
363 	HCLGE_OPC_DFX_TQP_REG,
364 	HCLGE_OPC_DFX_SSU_REG_2
365 };
366 
367 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
368 {
369 #define HCLGE_MAC_CMD_NUM 21
370 
371 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
372 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
373 	__le64 *desc_data;
374 	int i, k, n;
375 	int ret;
376 
377 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
378 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
379 	if (ret) {
380 		dev_err(&hdev->pdev->dev,
381 			"Get MAC pkt stats fail, status = %d.\n", ret);
382 
383 		return ret;
384 	}
385 
386 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
387 		/* for special opcode 0032, only the first desc has the head */
388 		if (unlikely(i == 0)) {
389 			desc_data = (__le64 *)(&desc[i].data[0]);
390 			n = HCLGE_RD_FIRST_STATS_NUM;
391 		} else {
392 			desc_data = (__le64 *)(&desc[i]);
393 			n = HCLGE_RD_OTHER_STATS_NUM;
394 		}
395 
396 		for (k = 0; k < n; k++) {
397 			*data += le64_to_cpu(*desc_data);
398 			data++;
399 			desc_data++;
400 		}
401 	}
402 
403 	return 0;
404 }
405 
406 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
407 {
408 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
409 	struct hclge_desc *desc;
410 	__le64 *desc_data;
411 	u16 i, k, n;
412 	int ret;
413 
414 	/* This may be called inside atomic sections,
415 	 * so GFP_ATOMIC is more suitable here
416 	 */
417 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
418 	if (!desc)
419 		return -ENOMEM;
420 
421 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
422 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
423 	if (ret) {
424 		kfree(desc);
425 		return ret;
426 	}
427 
428 	for (i = 0; i < desc_num; i++) {
429 		/* for special opcode 0034, only the first desc has the head */
430 		if (i == 0) {
431 			desc_data = (__le64 *)(&desc[i].data[0]);
432 			n = HCLGE_RD_FIRST_STATS_NUM;
433 		} else {
434 			desc_data = (__le64 *)(&desc[i]);
435 			n = HCLGE_RD_OTHER_STATS_NUM;
436 		}
437 
438 		for (k = 0; k < n; k++) {
439 			*data += le64_to_cpu(*desc_data);
440 			data++;
441 			desc_data++;
442 		}
443 	}
444 
445 	kfree(desc);
446 
447 	return 0;
448 }
449 
450 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
451 {
452 	struct hclge_desc desc;
453 	__le32 *desc_data;
454 	u32 reg_num;
455 	int ret;
456 
457 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
458 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
459 	if (ret)
460 		return ret;
461 
462 	desc_data = (__le32 *)(&desc.data[0]);
463 	reg_num = le32_to_cpu(*desc_data);
464 
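	/* derive the descriptor count from the register count: the first
	 * desc carries 3 registers and each further desc carries up to 4,
	 * rounded up (layout inferred from the arithmetic below)
	 */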
465 	*desc_num = 1 + ((reg_num - 3) >> 2) +
466 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
467 
468 	return 0;
469 }
470 
471 static int hclge_mac_update_stats(struct hclge_dev *hdev)
472 {
473 	u32 desc_num;
474 	int ret;
475 
476 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
477 
478 	/* ret == 0 means the firmware supports the new statistics method */
479 	if (!ret)
480 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
481 	else if (ret == -EOPNOTSUPP)
482 		ret = hclge_mac_update_stats_defective(hdev);
483 	else
484 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
485 
486 	return ret;
487 }
488 
489 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
490 {
491 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
492 	struct hclge_vport *vport = hclge_get_vport(handle);
493 	struct hclge_dev *hdev = vport->back;
494 	struct hnae3_queue *queue;
495 	struct hclge_desc desc[1];
496 	struct hclge_tqp *tqp;
497 	int ret, i;
498 
499 	for (i = 0; i < kinfo->num_tqps; i++) {
500 		queue = handle->kinfo.tqp[i];
501 		tqp = container_of(queue, struct hclge_tqp, q);
502 		/* command : HCLGE_OPC_QUERY_RX_STATUS */
503 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
504 					   true);
505 
506 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
507 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
508 		if (ret) {
509 			dev_err(&hdev->pdev->dev,
510 				"Query tqp stat fail, status = %d,queue = %d\n",
511 				ret, i);
512 			return ret;
513 		}
514 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
515 			le32_to_cpu(desc[0].data[1]);
516 	}
517 
518 	for (i = 0; i < kinfo->num_tqps; i++) {
519 		queue = handle->kinfo.tqp[i];
520 		tqp = container_of(queue, struct hclge_tqp, q);
521 		/* command : HCLGE_OPC_QUERY_TX_STATUS */
522 		hclge_cmd_setup_basic_desc(&desc[0],
523 					   HCLGE_OPC_QUERY_TX_STATUS,
524 					   true);
525 
526 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
527 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
528 		if (ret) {
529 			dev_err(&hdev->pdev->dev,
530 				"Query tqp stat fail, status = %d,queue = %d\n",
531 				ret, i);
532 			return ret;
533 		}
534 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
535 			le32_to_cpu(desc[0].data[1]);
536 	}
537 
538 	return 0;
539 }
540 
541 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
542 {
543 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
544 	struct hclge_tqp *tqp;
545 	u64 *buff = data;
546 	int i;
547 
548 	for (i = 0; i < kinfo->num_tqps; i++) {
549 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
550 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
551 	}
552 
553 	for (i = 0; i < kinfo->num_tqps; i++) {
554 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
555 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
556 	}
557 
558 	return buff;
559 }
560 
561 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
562 {
563 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
564 
565 	/* each tqp has both a TX queue and an RX queue */
566 	return kinfo->num_tqps * (2);
567 }
568 
569 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
570 {
571 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
572 	u8 *buff = data;
573 	int i = 0;
574 
575 	for (i = 0; i < kinfo->num_tqps; i++) {
576 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
577 			struct hclge_tqp, q);
578 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
579 			 tqp->index);
580 		buff = buff + ETH_GSTRING_LEN;
581 	}
582 
583 	for (i = 0; i < kinfo->num_tqps; i++) {
584 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
585 			struct hclge_tqp, q);
586 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
587 			 tqp->index);
588 		buff = buff + ETH_GSTRING_LEN;
589 	}
590 
591 	return buff;
592 }
593 
594 static u64 *hclge_comm_get_stats(const void *comm_stats,
595 				 const struct hclge_comm_stats_str strs[],
596 				 int size, u64 *data)
597 {
598 	u64 *buf = data;
599 	u32 i;
600 
601 	for (i = 0; i < size; i++)
602 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
603 
604 	return buf + size;
605 }
606 
607 static u8 *hclge_comm_get_strings(u32 stringset,
608 				  const struct hclge_comm_stats_str strs[],
609 				  int size, u8 *data)
610 {
611 	char *buff = (char *)data;
612 	u32 i;
613 
614 	if (stringset != ETH_SS_STATS)
615 		return buff;
616 
617 	for (i = 0; i < size; i++) {
618 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
619 		buff = buff + ETH_GSTRING_LEN;
620 	}
621 
622 	return (u8 *)buff;
623 }
624 
625 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
626 {
627 	struct hnae3_handle *handle;
628 	int status;
629 
630 	handle = &hdev->vport[0].nic;
631 	if (handle->client) {
632 		status = hclge_tqps_update_stats(handle);
633 		if (status) {
634 			dev_err(&hdev->pdev->dev,
635 				"Update TQPS stats fail, status = %d.\n",
636 				status);
637 		}
638 	}
639 
640 	status = hclge_mac_update_stats(hdev);
641 	if (status)
642 		dev_err(&hdev->pdev->dev,
643 			"Update MAC stats fail, status = %d.\n", status);
644 }
645 
646 static void hclge_update_stats(struct hnae3_handle *handle,
647 			       struct net_device_stats *net_stats)
648 {
649 	struct hclge_vport *vport = hclge_get_vport(handle);
650 	struct hclge_dev *hdev = vport->back;
651 	int status;
652 
653 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
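	/* skip if another statistics update is already in progress */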
654 		return;
655 
656 	status = hclge_mac_update_stats(hdev);
657 	if (status)
658 		dev_err(&hdev->pdev->dev,
659 			"Update MAC stats fail, status = %d.\n",
660 			status);
661 
662 	status = hclge_tqps_update_stats(handle);
663 	if (status)
664 		dev_err(&hdev->pdev->dev,
665 			"Update TQPS stats fail, status = %d.\n",
666 			status);
667 
668 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
669 }
670 
671 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
672 {
673 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
674 		HNAE3_SUPPORT_PHY_LOOPBACK |\
675 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
676 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
677 
678 	struct hclge_vport *vport = hclge_get_vport(handle);
679 	struct hclge_dev *hdev = vport->back;
680 	int count = 0;
681 
682 	/* Loopback test support rules:
683 	 * mac: supported only in GE mode
684 	 * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
685 	 * phy: supported only when a phy device exists on the board
686 	 */
687 	if (stringset == ETH_SS_TEST) {
688 		/* clear the loopback bit flags first */
689 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
690 		if (hdev->pdev->revision >= 0x21 ||
691 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
692 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
693 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
694 			count += 1;
695 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
696 		}
697 
698 		count += 2;
699 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
700 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
701 	} else if (stringset == ETH_SS_STATS) {
702 		count = ARRAY_SIZE(g_mac_stats_string) +
703 			hclge_tqps_get_sset_count(handle, stringset);
704 	}
705 
706 	return count;
707 }
708 
709 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
710 			      u8 *data)
711 {
712 	u8 *p = (char *)data;
713 	int size;
714 
715 	if (stringset == ETH_SS_STATS) {
716 		size = ARRAY_SIZE(g_mac_stats_string);
717 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
718 					   size, p);
719 		p = hclge_tqps_get_strings(handle, p);
720 	} else if (stringset == ETH_SS_TEST) {
721 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
722 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
723 			       ETH_GSTRING_LEN);
724 			p += ETH_GSTRING_LEN;
725 		}
726 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
727 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
728 			       ETH_GSTRING_LEN);
729 			p += ETH_GSTRING_LEN;
730 		}
731 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
732 			memcpy(p,
733 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
734 			       ETH_GSTRING_LEN);
735 			p += ETH_GSTRING_LEN;
736 		}
737 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
738 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
739 			       ETH_GSTRING_LEN);
740 			p += ETH_GSTRING_LEN;
741 		}
742 	}
743 }
744 
745 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
746 {
747 	struct hclge_vport *vport = hclge_get_vport(handle);
748 	struct hclge_dev *hdev = vport->back;
749 	u64 *p;
750 
751 	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
752 				 ARRAY_SIZE(g_mac_stats_string), data);
753 	p = hclge_tqps_get_stats(handle, p);
754 }
755 
756 static void hclge_get_mac_stat(struct hnae3_handle *handle,
757 			       struct hns3_mac_stats *mac_stats)
758 {
759 	struct hclge_vport *vport = hclge_get_vport(handle);
760 	struct hclge_dev *hdev = vport->back;
761 
762 	hclge_update_stats(handle, NULL);
763 
764 	mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
765 	mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
766 }
767 
768 static int hclge_parse_func_status(struct hclge_dev *hdev,
769 				   struct hclge_func_status_cmd *status)
770 {
771 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
772 		return -EINVAL;
773 
774 	/* Record whether this pf is the main pf */
775 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
776 		hdev->flag |= HCLGE_FLAG_MAIN;
777 	else
778 		hdev->flag &= ~HCLGE_FLAG_MAIN;
779 
780 	return 0;
781 }
782 
783 static int hclge_query_function_status(struct hclge_dev *hdev)
784 {
785 #define HCLGE_QUERY_MAX_CNT	5
786 
787 	struct hclge_func_status_cmd *req;
788 	struct hclge_desc desc;
789 	int timeout = 0;
790 	int ret;
791 
792 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
793 	req = (struct hclge_func_status_cmd *)desc.data;
794 
795 	do {
796 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
797 		if (ret) {
798 			dev_err(&hdev->pdev->dev,
799 				"query function status failed %d.\n", ret);
800 			return ret;
801 		}
802 
803 		/* Check whether pf reset is done */
804 		if (req->pf_state)
805 			break;
806 		usleep_range(1000, 2000);
807 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
808 
809 	ret = hclge_parse_func_status(hdev, req);
810 
811 	return ret;
812 }
813 
814 static int hclge_query_pf_resource(struct hclge_dev *hdev)
815 {
816 	struct hclge_pf_res_cmd *req;
817 	struct hclge_desc desc;
818 	int ret;
819 
820 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
821 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
822 	if (ret) {
823 		dev_err(&hdev->pdev->dev,
824 			"query pf resource failed %d.\n", ret);
825 		return ret;
826 	}
827 
828 	req = (struct hclge_pf_res_cmd *)desc.data;
829 	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
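	/* buffer sizes are reported by the firmware in units of
	 * (1 << HCLGE_BUF_UNIT_S) bytes, hence the shifts below
	 */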
830 	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
831 
832 	if (req->tx_buf_size)
833 		hdev->tx_buf_size =
834 			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
835 	else
836 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
837 
838 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
839 
840 	if (req->dv_buf_size)
841 		hdev->dv_buf_size =
842 			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
843 	else
844 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
845 
846 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
847 
848 	if (hnae3_dev_roce_supported(hdev)) {
849 		hdev->roce_base_msix_offset =
850 		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
851 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
852 		hdev->num_roce_msi =
853 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
854 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
855 
856 		/* PF should have NIC vectors and Roce vectors;
857 		 * NIC vectors are queued before Roce vectors.
858 		 */
859 		hdev->num_msi = hdev->num_roce_msi +
860 				hdev->roce_base_msix_offset;
861 	} else {
862 		hdev->num_msi =
863 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
864 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
865 	}
866 
867 	return 0;
868 }
869 
870 static int hclge_parse_speed(int speed_cmd, int *speed)
871 {
872 	switch (speed_cmd) {
873 	case 6:
874 		*speed = HCLGE_MAC_SPEED_10M;
875 		break;
876 	case 7:
877 		*speed = HCLGE_MAC_SPEED_100M;
878 		break;
879 	case 0:
880 		*speed = HCLGE_MAC_SPEED_1G;
881 		break;
882 	case 1:
883 		*speed = HCLGE_MAC_SPEED_10G;
884 		break;
885 	case 2:
886 		*speed = HCLGE_MAC_SPEED_25G;
887 		break;
888 	case 3:
889 		*speed = HCLGE_MAC_SPEED_40G;
890 		break;
891 	case 4:
892 		*speed = HCLGE_MAC_SPEED_50G;
893 		break;
894 	case 5:
895 		*speed = HCLGE_MAC_SPEED_100G;
896 		break;
897 	default:
898 		return -EINVAL;
899 	}
900 
901 	return 0;
902 }
903 
904 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
905 {
906 	struct hclge_vport *vport = hclge_get_vport(handle);
907 	struct hclge_dev *hdev = vport->back;
908 	u32 speed_ability = hdev->hw.mac.speed_ability;
909 	u32 speed_bit = 0;
910 
911 	switch (speed) {
912 	case HCLGE_MAC_SPEED_10M:
913 		speed_bit = HCLGE_SUPPORT_10M_BIT;
914 		break;
915 	case HCLGE_MAC_SPEED_100M:
916 		speed_bit = HCLGE_SUPPORT_100M_BIT;
917 		break;
918 	case HCLGE_MAC_SPEED_1G:
919 		speed_bit = HCLGE_SUPPORT_1G_BIT;
920 		break;
921 	case HCLGE_MAC_SPEED_10G:
922 		speed_bit = HCLGE_SUPPORT_10G_BIT;
923 		break;
924 	case HCLGE_MAC_SPEED_25G:
925 		speed_bit = HCLGE_SUPPORT_25G_BIT;
926 		break;
927 	case HCLGE_MAC_SPEED_40G:
928 		speed_bit = HCLGE_SUPPORT_40G_BIT;
929 		break;
930 	case HCLGE_MAC_SPEED_50G:
931 		speed_bit = HCLGE_SUPPORT_50G_BIT;
932 		break;
933 	case HCLGE_MAC_SPEED_100G:
934 		speed_bit = HCLGE_SUPPORT_100G_BIT;
935 		break;
936 	default:
937 		return -EINVAL;
938 	}
939 
940 	if (speed_bit & speed_ability)
941 		return 0;
942 
943 	return -EINVAL;
944 }
945 
946 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
947 {
948 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
949 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
950 				 mac->supported);
951 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
952 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
953 				 mac->supported);
954 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
955 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
956 				 mac->supported);
957 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
958 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
959 				 mac->supported);
960 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
961 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
962 				 mac->supported);
963 }
964 
965 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
966 {
967 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
968 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
969 				 mac->supported);
970 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
971 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
972 				 mac->supported);
973 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
974 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
975 				 mac->supported);
976 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
977 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
978 				 mac->supported);
979 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
980 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
981 				 mac->supported);
982 }
983 
984 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
985 {
986 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
987 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
988 				 mac->supported);
989 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
990 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
991 				 mac->supported);
992 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
993 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
994 				 mac->supported);
995 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
996 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
997 				 mac->supported);
998 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
999 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1000 				 mac->supported);
1001 }
1002 
1003 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1004 {
1005 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1006 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1007 				 mac->supported);
1008 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1009 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1010 				 mac->supported);
1011 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1012 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1013 				 mac->supported);
1014 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1015 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1016 				 mac->supported);
1017 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1018 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1019 				 mac->supported);
1020 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1021 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1022 				 mac->supported);
1023 }
1024 
1025 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1026 {
1027 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1028 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1029 
1030 	switch (mac->speed) {
1031 	case HCLGE_MAC_SPEED_10G:
1032 	case HCLGE_MAC_SPEED_40G:
1033 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1034 				 mac->supported);
1035 		mac->fec_ability =
1036 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1037 		break;
1038 	case HCLGE_MAC_SPEED_25G:
1039 	case HCLGE_MAC_SPEED_50G:
1040 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1041 				 mac->supported);
1042 		mac->fec_ability =
1043 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1044 			BIT(HNAE3_FEC_AUTO);
1045 		break;
1046 	case HCLGE_MAC_SPEED_100G:
1047 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1048 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1049 		break;
1050 	default:
1051 		mac->fec_ability = 0;
1052 		break;
1053 	}
1054 }
1055 
1056 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1057 					u8 speed_ability)
1058 {
1059 	struct hclge_mac *mac = &hdev->hw.mac;
1060 
1061 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1062 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1063 				 mac->supported);
1064 
1065 	hclge_convert_setting_sr(mac, speed_ability);
1066 	hclge_convert_setting_lr(mac, speed_ability);
1067 	hclge_convert_setting_cr(mac, speed_ability);
1068 	if (hdev->pdev->revision >= 0x21)
1069 		hclge_convert_setting_fec(mac);
1070 
1071 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1072 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1073 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1074 }
1075 
1076 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1077 					    u8 speed_ability)
1078 {
1079 	struct hclge_mac *mac = &hdev->hw.mac;
1080 
1081 	hclge_convert_setting_kr(mac, speed_ability);
1082 	if (hdev->pdev->revision >= 0x21)
1083 		hclge_convert_setting_fec(mac);
1084 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1085 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1086 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1087 }
1088 
1089 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1090 					 u8 speed_ability)
1091 {
1092 	unsigned long *supported = hdev->hw.mac.supported;
1093 
1094 	/* default to supporting all speeds for a GE port */
1095 	if (!speed_ability)
1096 		speed_ability = HCLGE_SUPPORT_GE;
1097 
1098 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1099 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1100 				 supported);
1101 
1102 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1103 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1104 				 supported);
1105 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1106 				 supported);
1107 	}
1108 
1109 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1110 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1111 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1112 	}
1113 
1114 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1115 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1116 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1117 	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1118 }
1119 
1120 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1121 {
1122 	u8 media_type = hdev->hw.mac.media_type;
1123 
1124 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1125 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1126 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1127 		hclge_parse_copper_link_mode(hdev, speed_ability);
1128 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1129 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1130 }
1131 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1132 {
1133 	struct hclge_cfg_param_cmd *req;
1134 	u64 mac_addr_tmp_high;
1135 	u64 mac_addr_tmp;
1136 	unsigned int i;
1137 
1138 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1139 
1140 	/* get the configuration */
1141 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1142 					      HCLGE_CFG_VMDQ_M,
1143 					      HCLGE_CFG_VMDQ_S);
1144 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1145 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1146 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1147 					    HCLGE_CFG_TQP_DESC_N_M,
1148 					    HCLGE_CFG_TQP_DESC_N_S);
1149 
1150 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1151 					HCLGE_CFG_PHY_ADDR_M,
1152 					HCLGE_CFG_PHY_ADDR_S);
1153 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1154 					  HCLGE_CFG_MEDIA_TP_M,
1155 					  HCLGE_CFG_MEDIA_TP_S);
1156 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1157 					  HCLGE_CFG_RX_BUF_LEN_M,
1158 					  HCLGE_CFG_RX_BUF_LEN_S);
1159 	/* get mac_address */
1160 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1161 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1162 					    HCLGE_CFG_MAC_ADDR_H_M,
1163 					    HCLGE_CFG_MAC_ADDR_H_S);
1164 
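	/* param[2] holds the low 32 bits of the MAC address and param[3]
	 * holds the high bits; the two-step shift below is simply << 32
	 */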
1165 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1166 
1167 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1168 					     HCLGE_CFG_DEFAULT_SPEED_M,
1169 					     HCLGE_CFG_DEFAULT_SPEED_S);
1170 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1171 					    HCLGE_CFG_RSS_SIZE_M,
1172 					    HCLGE_CFG_RSS_SIZE_S);
1173 
1174 	for (i = 0; i < ETH_ALEN; i++)
1175 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1176 
1177 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1178 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1179 
1180 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1181 					     HCLGE_CFG_SPEED_ABILITY_M,
1182 					     HCLGE_CFG_SPEED_ABILITY_S);
1183 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1184 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1185 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1186 	if (!cfg->umv_space)
1187 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1188 }
1189 
1190 /* hclge_get_cfg: query the static parameters from flash
1191  * @hdev: pointer to struct hclge_dev
1192  * @hcfg: the config structure to be filled out
1193  */
1194 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1195 {
1196 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1197 	struct hclge_cfg_param_cmd *req;
1198 	unsigned int i;
1199 	int ret;
1200 
1201 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1202 		u32 offset = 0;
1203 
1204 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1205 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1206 					   true);
1207 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1208 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1209 		/* Len is expressed in units of 4 bytes when sent to hardware */
1210 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1211 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1212 		req->offset = cpu_to_le32(offset);
1213 	}
1214 
1215 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1216 	if (ret) {
1217 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1218 		return ret;
1219 	}
1220 
1221 	hclge_parse_cfg(hcfg, desc);
1222 
1223 	return 0;
1224 }
1225 
1226 static int hclge_get_cap(struct hclge_dev *hdev)
1227 {
1228 	int ret;
1229 
1230 	ret = hclge_query_function_status(hdev);
1231 	if (ret) {
1232 		dev_err(&hdev->pdev->dev,
1233 			"query function status error %d.\n", ret);
1234 		return ret;
1235 	}
1236 
1237 	/* get pf resource */
1238 	ret = hclge_query_pf_resource(hdev);
1239 	if (ret)
1240 		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1241 
1242 	return ret;
1243 }
1244 
1245 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1246 {
1247 #define HCLGE_MIN_TX_DESC	64
1248 #define HCLGE_MIN_RX_DESC	64
1249 
1250 	if (!is_kdump_kernel())
1251 		return;
1252 
1253 	dev_info(&hdev->pdev->dev,
1254 		 "Running kdump kernel. Using minimal resources\n");
1255 
1256 	/* the minimal number of queue pairs equals the number of vports */
1257 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1258 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1259 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1260 }
1261 
1262 static int hclge_configure(struct hclge_dev *hdev)
1263 {
1264 	struct hclge_cfg cfg;
1265 	unsigned int i;
1266 	int ret;
1267 
1268 	ret = hclge_get_cfg(hdev, &cfg);
1269 	if (ret) {
1270 		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1271 		return ret;
1272 	}
1273 
1274 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1275 	hdev->base_tqp_pid = 0;
1276 	hdev->rss_size_max = cfg.rss_size_max;
1277 	hdev->rx_buf_len = cfg.rx_buf_len;
1278 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1279 	hdev->hw.mac.media_type = cfg.media_type;
1280 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1281 	hdev->num_tx_desc = cfg.tqp_desc_num;
1282 	hdev->num_rx_desc = cfg.tqp_desc_num;
1283 	hdev->tm_info.num_pg = 1;
1284 	hdev->tc_max = cfg.tc_num;
1285 	hdev->tm_info.hw_pfc_map = 0;
1286 	hdev->wanted_umv_size = cfg.umv_space;
1287 
1288 	if (hnae3_dev_fd_supported(hdev)) {
1289 		hdev->fd_en = true;
1290 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1291 	}
1292 
1293 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1294 	if (ret) {
1295 		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1296 		return ret;
1297 	}
1298 
1299 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1300 
1301 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1302 	    (hdev->tc_max < 1)) {
1303 		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1304 			 hdev->tc_max);
1305 		hdev->tc_max = 1;
1306 	}
1307 
1308 	/* Dev does not support DCB */
1309 	if (!hnae3_dev_dcb_supported(hdev)) {
1310 		hdev->tc_max = 1;
1311 		hdev->pfc_max = 0;
1312 	} else {
1313 		hdev->pfc_max = hdev->tc_max;
1314 	}
1315 
1316 	hdev->tm_info.num_tc = 1;
1317 
1318 	/* Non-contiguous TCs are currently not supported */
1319 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1320 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1321 
1322 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1323 
1324 	hclge_init_kdump_kernel_config(hdev);
1325 
1326 	/* Set the initial affinity based on the pci function number */
1327 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1328 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1329 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1330 			&hdev->affinity_mask);
1331 
1332 	return ret;
1333 }
1334 
1335 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1336 			    unsigned int tso_mss_max)
1337 {
1338 	struct hclge_cfg_tso_status_cmd *req;
1339 	struct hclge_desc desc;
1340 	u16 tso_mss;
1341 
1342 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1343 
1344 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1345 
1346 	tso_mss = 0;
1347 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1348 			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1349 	req->tso_mss_min = cpu_to_le16(tso_mss);
1350 
1351 	tso_mss = 0;
1352 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1353 			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1354 	req->tso_mss_max = cpu_to_le16(tso_mss);
1355 
1356 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1357 }
1358 
1359 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1360 {
1361 	struct hclge_cfg_gro_status_cmd *req;
1362 	struct hclge_desc desc;
1363 	int ret;
1364 
1365 	if (!hnae3_dev_gro_supported(hdev))
1366 		return 0;
1367 
1368 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1369 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1370 
1371 	req->gro_en = cpu_to_le16(en ? 1 : 0);
1372 
1373 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1374 	if (ret)
1375 		dev_err(&hdev->pdev->dev,
1376 			"GRO hardware config cmd failed, ret = %d\n", ret);
1377 
1378 	return ret;
1379 }
1380 
1381 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1382 {
1383 	struct hclge_tqp *tqp;
1384 	int i;
1385 
1386 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1387 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1388 	if (!hdev->htqp)
1389 		return -ENOMEM;
1390 
1391 	tqp = hdev->htqp;
1392 
1393 	for (i = 0; i < hdev->num_tqps; i++) {
1394 		tqp->dev = &hdev->pdev->dev;
1395 		tqp->index = i;
1396 
1397 		tqp->q.ae_algo = &ae_algo;
1398 		tqp->q.buf_size = hdev->rx_buf_len;
1399 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1400 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1401 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1402 			i * HCLGE_TQP_REG_SIZE;
1403 
1404 		tqp++;
1405 	}
1406 
1407 	return 0;
1408 }
1409 
1410 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1411 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1412 {
1413 	struct hclge_tqp_map_cmd *req;
1414 	struct hclge_desc desc;
1415 	int ret;
1416 
1417 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1418 
1419 	req = (struct hclge_tqp_map_cmd *)desc.data;
1420 	req->tqp_id = cpu_to_le16(tqp_pid);
1421 	req->tqp_vf = func_id;
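	/* enable the mapping; the TYPE bit below marks a VF-owned queue */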
1422 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1423 	if (!is_pf)
1424 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1425 	req->tqp_vid = cpu_to_le16(tqp_vid);
1426 
1427 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1428 	if (ret)
1429 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1430 
1431 	return ret;
1432 }
1433 
1434 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1435 {
1436 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1437 	struct hclge_dev *hdev = vport->back;
1438 	int i, alloced;
1439 
1440 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1441 	     alloced < num_tqps; i++) {
1442 		if (!hdev->htqp[i].alloced) {
1443 			hdev->htqp[i].q.handle = &vport->nic;
1444 			hdev->htqp[i].q.tqp_index = alloced;
1445 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1446 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1447 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1448 			hdev->htqp[i].alloced = true;
1449 			alloced++;
1450 		}
1451 	}
1452 	vport->alloc_tqps = alloced;
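	/* rss_size is bounded by the hardware maximum and by the TQPs
	 * allocated to this vport divided across its TCs
	 */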
1453 	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1454 				vport->alloc_tqps / hdev->tm_info.num_tc);
1455 
1456 	return 0;
1457 }
1458 
1459 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1460 			    u16 num_tx_desc, u16 num_rx_desc)
1461 
1462 {
1463 	struct hnae3_handle *nic = &vport->nic;
1464 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1465 	struct hclge_dev *hdev = vport->back;
1466 	int ret;
1467 
1468 	kinfo->num_tx_desc = num_tx_desc;
1469 	kinfo->num_rx_desc = num_rx_desc;
1470 
1471 	kinfo->rx_buf_len = hdev->rx_buf_len;
1472 
1473 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1474 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1475 	if (!kinfo->tqp)
1476 		return -ENOMEM;
1477 
1478 	ret = hclge_assign_tqp(vport, num_tqps);
1479 	if (ret)
1480 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1481 
1482 	return ret;
1483 }
1484 
1485 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1486 				  struct hclge_vport *vport)
1487 {
1488 	struct hnae3_handle *nic = &vport->nic;
1489 	struct hnae3_knic_private_info *kinfo;
1490 	u16 i;
1491 
1492 	kinfo = &nic->kinfo;
1493 	for (i = 0; i < vport->alloc_tqps; i++) {
1494 		struct hclge_tqp *q =
1495 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1496 		bool is_pf;
1497 		int ret;
1498 
1499 		is_pf = !(vport->vport_id);
1500 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1501 					     i, is_pf);
1502 		if (ret)
1503 			return ret;
1504 	}
1505 
1506 	return 0;
1507 }
1508 
1509 static int hclge_map_tqp(struct hclge_dev *hdev)
1510 {
1511 	struct hclge_vport *vport = hdev->vport;
1512 	u16 i, num_vport;
1513 
1514 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1515 	for (i = 0; i < num_vport; i++)	{
1516 		int ret;
1517 
1518 		ret = hclge_map_tqp_to_vport(hdev, vport);
1519 		if (ret)
1520 			return ret;
1521 
1522 		vport++;
1523 	}
1524 
1525 	return 0;
1526 }
1527 
1528 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1529 {
1530 	struct hnae3_handle *nic = &vport->nic;
1531 	struct hclge_dev *hdev = vport->back;
1532 	int ret;
1533 
1534 	nic->pdev = hdev->pdev;
1535 	nic->ae_algo = &ae_algo;
1536 	nic->numa_node_mask = hdev->numa_node_mask;
1537 
1538 	ret = hclge_knic_setup(vport, num_tqps,
1539 			       hdev->num_tx_desc, hdev->num_rx_desc);
1540 	if (ret)
1541 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1542 
1543 	return ret;
1544 }
1545 
1546 static int hclge_alloc_vport(struct hclge_dev *hdev)
1547 {
1548 	struct pci_dev *pdev = hdev->pdev;
1549 	struct hclge_vport *vport;
1550 	u32 tqp_main_vport;
1551 	u32 tqp_per_vport;
1552 	int num_vport, i;
1553 	int ret;
1554 
1555 	/* We need to alloc a vport for the main NIC of the PF */
1556 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1557 
1558 	if (hdev->num_tqps < num_vport) {
1559 		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1560 			hdev->num_tqps, num_vport);
1561 		return -EINVAL;
1562 	}
1563 
1564 	/* Split the TQPs evenly; the main vport also takes the remainder */
1565 	tqp_per_vport = hdev->num_tqps / num_vport;
1566 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1567 
1568 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1569 			     GFP_KERNEL);
1570 	if (!vport)
1571 		return -ENOMEM;
1572 
1573 	hdev->vport = vport;
1574 	hdev->num_alloc_vport = num_vport;
1575 
1576 	if (IS_ENABLED(CONFIG_PCI_IOV))
1577 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1578 
1579 	for (i = 0; i < num_vport; i++) {
1580 		vport->back = hdev;
1581 		vport->vport_id = i;
1582 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1583 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1584 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1585 		INIT_LIST_HEAD(&vport->vlan_list);
1586 		INIT_LIST_HEAD(&vport->uc_mac_list);
1587 		INIT_LIST_HEAD(&vport->mc_mac_list);
1588 
1589 		if (i == 0)
1590 			ret = hclge_vport_setup(vport, tqp_main_vport);
1591 		else
1592 			ret = hclge_vport_setup(vport, tqp_per_vport);
1593 		if (ret) {
1594 			dev_err(&pdev->dev,
1595 				"vport setup failed for vport %d, %d\n",
1596 				i, ret);
1597 			return ret;
1598 		}
1599 
1600 		vport++;
1601 	}
1602 
1603 	return 0;
1604 }
1605 
1606 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1607 				    struct hclge_pkt_buf_alloc *buf_alloc)
1608 {
1609 /* TX buffer size is in units of 128 bytes */
1610 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1611 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1612 	struct hclge_tx_buff_alloc_cmd *req;
1613 	struct hclge_desc desc;
1614 	int ret;
1615 	u8 i;
1616 
1617 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1618 
1619 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1620 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1621 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1622 
1623 		req->tx_pkt_buff[i] =
1624 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1625 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1626 	}
1627 
1628 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1629 	if (ret)
1630 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1631 			ret);
1632 
1633 	return ret;
1634 }
1635 
1636 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1637 				 struct hclge_pkt_buf_alloc *buf_alloc)
1638 {
1639 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1640 
1641 	if (ret)
1642 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1643 
1644 	return ret;
1645 }
1646 
1647 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1648 {
1649 	unsigned int i;
1650 	u32 cnt = 0;
1651 
1652 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1653 		if (hdev->hw_tc_map & BIT(i))
1654 			cnt++;
1655 	return cnt;
1656 }
1657 
1658 /* Get the number of pfc-enabled TCs, which have a private buffer */
1659 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1660 				  struct hclge_pkt_buf_alloc *buf_alloc)
1661 {
1662 	struct hclge_priv_buf *priv;
1663 	unsigned int i;
1664 	int cnt = 0;
1665 
1666 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1667 		priv = &buf_alloc->priv_buf[i];
1668 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1669 		    priv->enable)
1670 			cnt++;
1671 	}
1672 
1673 	return cnt;
1674 }
1675 
1676 /* Get the number of pfc-disabled TCs, which have a private buffer */
1677 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1678 				     struct hclge_pkt_buf_alloc *buf_alloc)
1679 {
1680 	struct hclge_priv_buf *priv;
1681 	unsigned int i;
1682 	int cnt = 0;
1683 
1684 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1685 		priv = &buf_alloc->priv_buf[i];
1686 		if (hdev->hw_tc_map & BIT(i) &&
1687 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1688 		    priv->enable)
1689 			cnt++;
1690 	}
1691 
1692 	return cnt;
1693 }
1694 
1695 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1696 {
1697 	struct hclge_priv_buf *priv;
1698 	u32 rx_priv = 0;
1699 	int i;
1700 
1701 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1702 		priv = &buf_alloc->priv_buf[i];
1703 		if (priv->enable)
1704 			rx_priv += priv->buf_size;
1705 	}
1706 	return rx_priv;
1707 }
1708 
1709 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1710 {
1711 	u32 i, total_tx_size = 0;
1712 
1713 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1714 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1715 
1716 	return total_tx_size;
1717 }
1718 
1719 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1720 				struct hclge_pkt_buf_alloc *buf_alloc,
1721 				u32 rx_all)
1722 {
1723 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1724 	u32 tc_num = hclge_get_tc_num(hdev);
1725 	u32 shared_buf, aligned_mps;
1726 	u32 rx_priv;
1727 	int i;
1728 
1729 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1730 
1731 	if (hnae3_dev_dcb_supported(hdev))
1732 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1733 					hdev->dv_buf_size;
1734 	else
1735 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1736 					+ hdev->dv_buf_size;
1737 
1738 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1739 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1740 			     HCLGE_BUF_SIZE_UNIT);
1741 
1742 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1743 	if (rx_all < rx_priv + shared_std)
1744 		return false;
1745 
1746 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1747 	buf_alloc->s_buf.buf_size = shared_buf;
1748 	if (hnae3_dev_dcb_supported(hdev)) {
1749 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1750 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1751 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1752 				  HCLGE_BUF_SIZE_UNIT);
1753 	} else {
1754 		buf_alloc->s_buf.self.high = aligned_mps +
1755 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1756 		buf_alloc->s_buf.self.low = aligned_mps;
1757 	}
1758 
1759 	if (hnae3_dev_dcb_supported(hdev)) {
1760 		hi_thrd = shared_buf - hdev->dv_buf_size;
1761 
1762 		if (tc_num <= NEED_RESERVE_TC_NUM)
1763 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1764 					/ BUF_MAX_PERCENT;
1765 
1766 		if (tc_num)
1767 			hi_thrd = hi_thrd / tc_num;
1768 
1769 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1770 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1771 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1772 	} else {
1773 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1774 		lo_thrd = aligned_mps;
1775 	}
1776 
1777 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1778 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1779 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1780 	}
1781 
1782 	return true;
1783 }
1784 
1785 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1786 				struct hclge_pkt_buf_alloc *buf_alloc)
1787 {
1788 	u32 i, total_size;
1789 
1790 	total_size = hdev->pkt_buf_size;
1791 
1792 	/* alloc tx buffer for all enabled tc */
1793 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1794 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1795 
1796 		if (hdev->hw_tc_map & BIT(i)) {
1797 			if (total_size < hdev->tx_buf_size)
1798 				return -ENOMEM;
1799 
1800 			priv->tx_buf_size = hdev->tx_buf_size;
1801 		} else {
1802 			priv->tx_buf_size = 0;
1803 		}
1804 
1805 		total_size -= priv->tx_buf_size;
1806 	}
1807 
1808 	return 0;
1809 }
1810 
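/* hclge_rx_buf_calc_all: assign a private rx buffer and waterlines to every
 * enabled TC, using the larger waterlines when @max is true, then check
 * whether the remaining buffer is still enough for the shared buffer.
 */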
1811 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1812 				  struct hclge_pkt_buf_alloc *buf_alloc)
1813 {
1814 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1815 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1816 	unsigned int i;
1817 
1818 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1819 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1820 
1821 		priv->enable = 0;
1822 		priv->wl.low = 0;
1823 		priv->wl.high = 0;
1824 		priv->buf_size = 0;
1825 
1826 		if (!(hdev->hw_tc_map & BIT(i)))
1827 			continue;
1828 
1829 		priv->enable = 1;
1830 
1831 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1832 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1833 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
1834 						HCLGE_BUF_SIZE_UNIT);
1835 		} else {
1836 			priv->wl.low = 0;
1837 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1838 					aligned_mps;
1839 		}
1840 
1841 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1842 	}
1843 
1844 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1845 }
1846 
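/* hclge_drop_nopfc_buf_till_fit: release the private buffers of the TCs
 * without PFC, starting from the last TC, until the shared buffer fits or
 * no such TC is left.
 */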
1847 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1848 					  struct hclge_pkt_buf_alloc *buf_alloc)
1849 {
1850 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1851 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1852 	int i;
1853 
1854 	/* let the last TC be cleared first */
1855 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1856 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1857 		unsigned int mask = BIT((unsigned int)i);
1858 
1859 		if (hdev->hw_tc_map & mask &&
1860 		    !(hdev->tm_info.hw_pfc_map & mask)) {
1861 			/* Clear the no pfc TC private buffer */
1862 			priv->wl.low = 0;
1863 			priv->wl.high = 0;
1864 			priv->buf_size = 0;
1865 			priv->enable = 0;
1866 			no_pfc_priv_num--;
1867 		}
1868 
1869 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1870 		    no_pfc_priv_num == 0)
1871 			break;
1872 	}
1873 
1874 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1875 }
1876 
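/* hclge_drop_pfc_buf_till_fit: release the private buffers of the PFC
 * enabled TCs, starting from the last TC, until the shared buffer fits or
 * no such TC is left.
 */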
1877 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1878 					struct hclge_pkt_buf_alloc *buf_alloc)
1879 {
1880 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1881 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1882 	int i;
1883 
1884 	/* let the last TC be cleared first */
1885 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1886 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1887 		unsigned int mask = BIT((unsigned int)i);
1888 
1889 		if (hdev->hw_tc_map & mask &&
1890 		    hdev->tm_info.hw_pfc_map & mask) {
1891 			/* Reduce the number of pfc TC with private buffer */
1892 			priv->wl.low = 0;
1893 			priv->enable = 0;
1894 			priv->wl.high = 0;
1895 			priv->buf_size = 0;
1896 			pfc_priv_num--;
1897 		}
1898 
1899 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1900 		    pfc_priv_num == 0)
1901 			break;
1902 	}
1903 
1904 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1905 }
1906 
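/* hclge_only_alloc_priv_buff: try to split the whole rx buffer evenly among
 * the enabled TCs as private buffers, leaving no shared buffer. Returns
 * false when the per-TC share would be below the required minimum.
 */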
1907 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
1908 				      struct hclge_pkt_buf_alloc *buf_alloc)
1909 {
1910 #define COMPENSATE_BUFFER	0x3C00
1911 #define COMPENSATE_HALF_MPS_NUM	5
1912 #define PRIV_WL_GAP		0x1800
1913 
1914 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1915 	u32 tc_num = hclge_get_tc_num(hdev);
1916 	u32 half_mps = hdev->mps >> 1;
1917 	u32 min_rx_priv;
1918 	unsigned int i;
1919 
1920 	if (tc_num)
1921 		rx_priv = rx_priv / tc_num;
1922 
1923 	if (tc_num <= NEED_RESERVE_TC_NUM)
1924 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
1925 
1926 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
1927 			COMPENSATE_HALF_MPS_NUM * half_mps;
1928 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
1929 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
1930 
1931 	if (rx_priv < min_rx_priv)
1932 		return false;
1933 
1934 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1935 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1936 
1937 		priv->enable = 0;
1938 		priv->wl.low = 0;
1939 		priv->wl.high = 0;
1940 		priv->buf_size = 0;
1941 
1942 		if (!(hdev->hw_tc_map & BIT(i)))
1943 			continue;
1944 
1945 		priv->enable = 1;
1946 		priv->buf_size = rx_priv;
1947 		priv->wl.high = rx_priv - hdev->dv_buf_size;
1948 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
1949 	}
1950 
1951 	buf_alloc->s_buf.buf_size = 0;
1952 
1953 	return true;
1954 }
1955 
1956 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1957  * @hdev: pointer to struct hclge_dev
1958  * @buf_alloc: pointer to buffer calculation data
1959  * @return: 0: calculation successful, negative: fail
1960  */
1961 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1962 				struct hclge_pkt_buf_alloc *buf_alloc)
1963 {
1964 	/* When DCB is not supported, rx private buffer is not allocated. */
1965 	if (!hnae3_dev_dcb_supported(hdev)) {
1966 		u32 rx_all = hdev->pkt_buf_size;
1967 
1968 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1969 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1970 			return -ENOMEM;
1971 
1972 		return 0;
1973 	}
1974 
1975 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
1976 		return 0;
1977 
1978 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1979 		return 0;
1980 
1981 	/* try to decrease the buffer size */
1982 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1983 		return 0;
1984 
1985 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1986 		return 0;
1987 
1988 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1989 		return 0;
1990 
1991 	return -ENOMEM;
1992 }
1993 
1994 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1995 				   struct hclge_pkt_buf_alloc *buf_alloc)
1996 {
1997 	struct hclge_rx_priv_buff_cmd *req;
1998 	struct hclge_desc desc;
1999 	int ret;
2000 	int i;
2001 
2002 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2003 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2004 
2005 	/* Alloc private buffer TCs */
2006 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2007 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2008 
2009 		req->buf_num[i] =
2010 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2011 		req->buf_num[i] |=
2012 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2013 	}
2014 
2015 	req->shared_buf =
2016 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2017 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2018 
2019 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2020 	if (ret)
2021 		dev_err(&hdev->pdev->dev,
2022 			"rx private buffer alloc cmd failed %d\n", ret);
2023 
2024 	return ret;
2025 }
2026 
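/* hclge_rx_priv_wl_config: program the high/low waterlines of the per-TC
 * private buffers. The TCs are split across two linked descriptors which
 * are sent in one command.
 */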
2027 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2028 				   struct hclge_pkt_buf_alloc *buf_alloc)
2029 {
2030 	struct hclge_rx_priv_wl_buf *req;
2031 	struct hclge_priv_buf *priv;
2032 	struct hclge_desc desc[2];
2033 	int i, j;
2034 	int ret;
2035 
2036 	for (i = 0; i < 2; i++) {
2037 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2038 					   false);
2039 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2040 
2041 		/* The first descriptor sets the NEXT bit to 1 */
2042 		if (i == 0)
2043 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2044 		else
2045 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2046 
2047 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2048 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2049 
2050 			priv = &buf_alloc->priv_buf[idx];
2051 			req->tc_wl[j].high =
2052 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2053 			req->tc_wl[j].high |=
2054 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2055 			req->tc_wl[j].low =
2056 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2057 			req->tc_wl[j].low |=
2058 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2059 		}
2060 	}
2061 
2062 	/* Send 2 descriptors at one time */
2063 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2064 	if (ret)
2065 		dev_err(&hdev->pdev->dev,
2066 			"rx private waterline config cmd failed %d\n",
2067 			ret);
2068 	return ret;
2069 }
2070 
2071 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2072 				    struct hclge_pkt_buf_alloc *buf_alloc)
2073 {
2074 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2075 	struct hclge_rx_com_thrd *req;
2076 	struct hclge_desc desc[2];
2077 	struct hclge_tc_thrd *tc;
2078 	int i, j;
2079 	int ret;
2080 
2081 	for (i = 0; i < 2; i++) {
2082 		hclge_cmd_setup_basic_desc(&desc[i],
2083 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2084 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2085 
2086 		/* The first descriptor sets the NEXT bit to 1 */
2087 		if (i == 0)
2088 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2089 		else
2090 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2091 
2092 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2093 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2094 
2095 			req->com_thrd[j].high =
2096 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2097 			req->com_thrd[j].high |=
2098 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2099 			req->com_thrd[j].low =
2100 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2101 			req->com_thrd[j].low |=
2102 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2103 		}
2104 	}
2105 
2106 	/* Send 2 descriptors at one time */
2107 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2108 	if (ret)
2109 		dev_err(&hdev->pdev->dev,
2110 			"common threshold config cmd failed %d\n", ret);
2111 	return ret;
2112 }
2113 
2114 static int hclge_common_wl_config(struct hclge_dev *hdev,
2115 				  struct hclge_pkt_buf_alloc *buf_alloc)
2116 {
2117 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2118 	struct hclge_rx_com_wl *req;
2119 	struct hclge_desc desc;
2120 	int ret;
2121 
2122 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2123 
2124 	req = (struct hclge_rx_com_wl *)desc.data;
2125 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2126 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2127 
2128 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2129 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2130 
2131 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2132 	if (ret)
2133 		dev_err(&hdev->pdev->dev,
2134 			"common waterline config cmd failed %d\n", ret);
2135 
2136 	return ret;
2137 }
2138 
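/* hclge_buffer_alloc: calculate the tx/rx packet buffer layout and program
 * it into hardware. The rx private waterlines and the common thresholds are
 * only configured when the device supports DCB.
 */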
2139 int hclge_buffer_alloc(struct hclge_dev *hdev)
2140 {
2141 	struct hclge_pkt_buf_alloc *pkt_buf;
2142 	int ret;
2143 
2144 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2145 	if (!pkt_buf)
2146 		return -ENOMEM;
2147 
2148 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2149 	if (ret) {
2150 		dev_err(&hdev->pdev->dev,
2151 			"could not calc tx buffer size for all TCs %d\n", ret);
2152 		goto out;
2153 	}
2154 
2155 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2156 	if (ret) {
2157 		dev_err(&hdev->pdev->dev,
2158 			"could not alloc tx buffers %d\n", ret);
2159 		goto out;
2160 	}
2161 
2162 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2163 	if (ret) {
2164 		dev_err(&hdev->pdev->dev,
2165 			"could not calc rx priv buffer size for all TCs %d\n",
2166 			ret);
2167 		goto out;
2168 	}
2169 
2170 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2171 	if (ret) {
2172 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2173 			ret);
2174 		goto out;
2175 	}
2176 
2177 	if (hnae3_dev_dcb_supported(hdev)) {
2178 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2179 		if (ret) {
2180 			dev_err(&hdev->pdev->dev,
2181 				"could not configure rx private waterline %d\n",
2182 				ret);
2183 			goto out;
2184 		}
2185 
2186 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2187 		if (ret) {
2188 			dev_err(&hdev->pdev->dev,
2189 				"could not configure common threshold %d\n",
2190 				ret);
2191 			goto out;
2192 		}
2193 	}
2194 
2195 	ret = hclge_common_wl_config(hdev, pkt_buf);
2196 	if (ret)
2197 		dev_err(&hdev->pdev->dev,
2198 			"could not configure common waterline %d\n", ret);
2199 
2200 out:
2201 	kfree(pkt_buf);
2202 	return ret;
2203 }
2204 
2205 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2206 {
2207 	struct hnae3_handle *roce = &vport->roce;
2208 	struct hnae3_handle *nic = &vport->nic;
2209 
2210 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2211 
2212 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2213 	    vport->back->num_msi_left == 0)
2214 		return -EINVAL;
2215 
2216 	roce->rinfo.base_vector = vport->back->roce_base_vector;
2217 
2218 	roce->rinfo.netdev = nic->kinfo.netdev;
2219 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
2220 
2221 	roce->pdev = nic->pdev;
2222 	roce->ae_algo = nic->ae_algo;
2223 	roce->numa_node_mask = nic->numa_node_mask;
2224 
2225 	return 0;
2226 }
2227 
2228 static int hclge_init_msi(struct hclge_dev *hdev)
2229 {
2230 	struct pci_dev *pdev = hdev->pdev;
2231 	int vectors;
2232 	int i;
2233 
2234 	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2235 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2236 	if (vectors < 0) {
2237 		dev_err(&pdev->dev,
2238 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2239 			vectors);
2240 		return vectors;
2241 	}
2242 	if (vectors < hdev->num_msi)
2243 		dev_warn(&hdev->pdev->dev,
2244 			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2245 			 hdev->num_msi, vectors);
2246 
2247 	hdev->num_msi = vectors;
2248 	hdev->num_msi_left = vectors;
2249 	hdev->base_msi_vector = pdev->irq;
2250 	hdev->roce_base_vector = hdev->base_msi_vector +
2251 				hdev->roce_base_msix_offset;
2252 
2253 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2254 					   sizeof(u16), GFP_KERNEL);
2255 	if (!hdev->vector_status) {
2256 		pci_free_irq_vectors(pdev);
2257 		return -ENOMEM;
2258 	}
2259 
2260 	for (i = 0; i < hdev->num_msi; i++)
2261 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2262 
2263 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2264 					sizeof(int), GFP_KERNEL);
2265 	if (!hdev->vector_irq) {
2266 		pci_free_irq_vectors(pdev);
2267 		return -ENOMEM;
2268 	}
2269 
2270 	return 0;
2271 }
2272 
2273 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2274 {
2275 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2276 		duplex = HCLGE_MAC_FULL;
2277 
2278 	return duplex;
2279 }
2280 
2281 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2282 				      u8 duplex)
2283 {
2284 	struct hclge_config_mac_speed_dup_cmd *req;
2285 	struct hclge_desc desc;
2286 	int ret;
2287 
2288 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2289 
2290 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2291 
2292 	if (duplex)
2293 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2294 
2295 	switch (speed) {
2296 	case HCLGE_MAC_SPEED_10M:
2297 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2298 				HCLGE_CFG_SPEED_S, 6);
2299 		break;
2300 	case HCLGE_MAC_SPEED_100M:
2301 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2302 				HCLGE_CFG_SPEED_S, 7);
2303 		break;
2304 	case HCLGE_MAC_SPEED_1G:
2305 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2306 				HCLGE_CFG_SPEED_S, 0);
2307 		break;
2308 	case HCLGE_MAC_SPEED_10G:
2309 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2310 				HCLGE_CFG_SPEED_S, 1);
2311 		break;
2312 	case HCLGE_MAC_SPEED_25G:
2313 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2314 				HCLGE_CFG_SPEED_S, 2);
2315 		break;
2316 	case HCLGE_MAC_SPEED_40G:
2317 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2318 				HCLGE_CFG_SPEED_S, 3);
2319 		break;
2320 	case HCLGE_MAC_SPEED_50G:
2321 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2322 				HCLGE_CFG_SPEED_S, 4);
2323 		break;
2324 	case HCLGE_MAC_SPEED_100G:
2325 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2326 				HCLGE_CFG_SPEED_S, 5);
2327 		break;
2328 	default:
2329 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2330 		return -EINVAL;
2331 	}
2332 
2333 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2334 		      1);
2335 
2336 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2337 	if (ret) {
2338 		dev_err(&hdev->pdev->dev,
2339 			"mac speed/duplex config cmd failed %d.\n", ret);
2340 		return ret;
2341 	}
2342 
2343 	return 0;
2344 }
2345 
2346 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2347 {
2348 	int ret;
2349 
2350 	duplex = hclge_check_speed_dup(duplex, speed);
2351 	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2352 		return 0;
2353 
2354 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2355 	if (ret)
2356 		return ret;
2357 
2358 	hdev->hw.mac.speed = speed;
2359 	hdev->hw.mac.duplex = duplex;
2360 
2361 	return 0;
2362 }
2363 
2364 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2365 				     u8 duplex)
2366 {
2367 	struct hclge_vport *vport = hclge_get_vport(handle);
2368 	struct hclge_dev *hdev = vport->back;
2369 
2370 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2371 }
2372 
2373 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2374 {
2375 	struct hclge_config_auto_neg_cmd *req;
2376 	struct hclge_desc desc;
2377 	u32 flag = 0;
2378 	int ret;
2379 
2380 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2381 
2382 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2383 	if (enable)
2384 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2385 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2386 
2387 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2388 	if (ret)
2389 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2390 			ret);
2391 
2392 	return ret;
2393 }
2394 
2395 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2396 {
2397 	struct hclge_vport *vport = hclge_get_vport(handle);
2398 	struct hclge_dev *hdev = vport->back;
2399 
2400 	if (!hdev->hw.mac.support_autoneg) {
2401 		if (enable) {
2402 			dev_err(&hdev->pdev->dev,
2403 				"autoneg is not supported by current port\n");
2404 			return -EOPNOTSUPP;
2405 		} else {
2406 			return 0;
2407 		}
2408 	}
2409 
2410 	return hclge_set_autoneg_en(hdev, enable);
2411 }
2412 
2413 static int hclge_get_autoneg(struct hnae3_handle *handle)
2414 {
2415 	struct hclge_vport *vport = hclge_get_vport(handle);
2416 	struct hclge_dev *hdev = vport->back;
2417 	struct phy_device *phydev = hdev->hw.mac.phydev;
2418 
2419 	if (phydev)
2420 		return phydev->autoneg;
2421 
2422 	return hdev->hw.mac.autoneg;
2423 }
2424 
2425 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2426 {
2427 	struct hclge_vport *vport = hclge_get_vport(handle);
2428 	struct hclge_dev *hdev = vport->back;
2429 	int ret;
2430 
2431 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2432 
2433 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2434 	if (ret)
2435 		return ret;
2436 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2437 }
2438 
2439 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2440 {
2441 	struct hclge_vport *vport = hclge_get_vport(handle);
2442 	struct hclge_dev *hdev = vport->back;
2443 
2444 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2445 		return hclge_set_autoneg_en(hdev, !halt);
2446 
2447 	return 0;
2448 }
2449 
2450 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2451 {
2452 	struct hclge_config_fec_cmd *req;
2453 	struct hclge_desc desc;
2454 	int ret;
2455 
2456 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2457 
2458 	req = (struct hclge_config_fec_cmd *)desc.data;
2459 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2460 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2461 	if (fec_mode & BIT(HNAE3_FEC_RS))
2462 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2463 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2464 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2465 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2466 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2467 
2468 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2469 	if (ret)
2470 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2471 
2472 	return ret;
2473 }
2474 
2475 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2476 {
2477 	struct hclge_vport *vport = hclge_get_vport(handle);
2478 	struct hclge_dev *hdev = vport->back;
2479 	struct hclge_mac *mac = &hdev->hw.mac;
2480 	int ret;
2481 
2482 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2483 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2484 		return -EINVAL;
2485 	}
2486 
2487 	ret = hclge_set_fec_hw(hdev, fec_mode);
2488 	if (ret)
2489 		return ret;
2490 
2491 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2492 	return 0;
2493 }
2494 
2495 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2496 			  u8 *fec_mode)
2497 {
2498 	struct hclge_vport *vport = hclge_get_vport(handle);
2499 	struct hclge_dev *hdev = vport->back;
2500 	struct hclge_mac *mac = &hdev->hw.mac;
2501 
2502 	if (fec_ability)
2503 		*fec_ability = mac->fec_ability;
2504 	if (fec_mode)
2505 		*fec_mode = mac->fec_mode;
2506 }
2507 
2508 static int hclge_mac_init(struct hclge_dev *hdev)
2509 {
2510 	struct hclge_mac *mac = &hdev->hw.mac;
2511 	int ret;
2512 
2513 	hdev->support_sfp_query = true;
2514 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2515 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2516 					 hdev->hw.mac.duplex);
2517 	if (ret) {
2518 		dev_err(&hdev->pdev->dev,
2519 			"Config mac speed dup fail ret=%d\n", ret);
2520 		return ret;
2521 	}
2522 
2523 	if (hdev->hw.mac.support_autoneg) {
2524 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2525 		if (ret) {
2526 			dev_err(&hdev->pdev->dev,
2527 				"Config mac autoneg fail ret=%d\n", ret);
2528 			return ret;
2529 		}
2530 	}
2531 
2532 	mac->link = 0;
2533 
2534 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2535 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2536 		if (ret) {
2537 			dev_err(&hdev->pdev->dev,
2538 				"Fec mode init fail, ret = %d\n", ret);
2539 			return ret;
2540 		}
2541 	}
2542 
2543 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2544 	if (ret) {
2545 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2546 		return ret;
2547 	}
2548 
2549 	ret = hclge_buffer_alloc(hdev);
2550 	if (ret)
2551 		dev_err(&hdev->pdev->dev,
2552 			"allocate buffer fail, ret=%d\n", ret);
2553 
2554 	return ret;
2555 }
2556 
2557 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2558 {
2559 	if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2560 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2561 		queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2562 			      &hdev->mbx_service_task);
2563 }
2564 
2565 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2566 {
2567 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2568 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2569 		queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2570 			      &hdev->rst_service_task);
2571 }
2572 
2573 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2574 {
2575 	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2576 	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2577 	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
2578 		hdev->hw_stats.stats_timer++;
2579 		hdev->fd_arfs_expire_timer++;
2580 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2581 				    system_wq, &hdev->service_task,
2582 				    delay_time);
2583 	}
2584 }
2585 
2586 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2587 {
2588 	struct hclge_link_status_cmd *req;
2589 	struct hclge_desc desc;
2590 	int link_status;
2591 	int ret;
2592 
2593 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2594 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2595 	if (ret) {
2596 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2597 			ret);
2598 		return ret;
2599 	}
2600 
2601 	req = (struct hclge_link_status_cmd *)desc.data;
2602 	link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2603 
2604 	return !!link_status;
2605 }
2606 
2607 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2608 {
2609 	unsigned int mac_state;
2610 	int link_stat;
2611 
2612 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2613 		return 0;
2614 
2615 	mac_state = hclge_get_mac_link_status(hdev);
2616 
2617 	if (hdev->hw.mac.phydev) {
2618 		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2619 			link_stat = mac_state &
2620 				hdev->hw.mac.phydev->link;
2621 		else
2622 			link_stat = 0;
2623 
2624 	} else {
2625 		link_stat = mac_state;
2626 	}
2627 
2628 	return !!link_stat;
2629 }
2630 
2631 static void hclge_update_link_status(struct hclge_dev *hdev)
2632 {
2633 	struct hnae3_client *rclient = hdev->roce_client;
2634 	struct hnae3_client *client = hdev->nic_client;
2635 	struct hnae3_handle *rhandle;
2636 	struct hnae3_handle *handle;
2637 	int state;
2638 	int i;
2639 
2640 	if (!client)
2641 		return;
2642 	state = hclge_get_mac_phy_link(hdev);
2643 	if (state != hdev->hw.mac.link) {
2644 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2645 			handle = &hdev->vport[i].nic;
2646 			client->ops->link_status_change(handle, state);
2647 			hclge_config_mac_tnl_int(hdev, state);
2648 			rhandle = &hdev->vport[i].roce;
2649 			if (rclient && rclient->ops->link_status_change)
2650 				rclient->ops->link_status_change(rhandle,
2651 								 state);
2652 		}
2653 		hdev->hw.mac.link = state;
2654 	}
2655 }
2656 
2657 static void hclge_update_port_capability(struct hclge_mac *mac)
2658 {
2659 	/* update fec ability by speed */
2660 	hclge_convert_setting_fec(mac);
2661 
2662 	/* firmware cannot identify the backplane type, so the media type
2663 	 * read from the configuration can help to deal with it
2664 	 */
2665 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2666 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2667 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2668 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2669 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2670 
2671 	if (mac->support_autoneg) {
2672 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2673 		linkmode_copy(mac->advertising, mac->supported);
2674 	} else {
2675 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2676 				   mac->supported);
2677 		linkmode_zero(mac->advertising);
2678 	}
2679 }
2680 
2681 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2682 {
2683 	struct hclge_sfp_info_cmd *resp;
2684 	struct hclge_desc desc;
2685 	int ret;
2686 
2687 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2688 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2689 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2690 	if (ret == -EOPNOTSUPP) {
2691 		dev_warn(&hdev->pdev->dev,
2692 			 "IMP does not support getting SFP speed %d\n", ret);
2693 		return ret;
2694 	} else if (ret) {
2695 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2696 		return ret;
2697 	}
2698 
2699 	*speed = le32_to_cpu(resp->speed);
2700 
2701 	return 0;
2702 }
2703 
2704 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2705 {
2706 	struct hclge_sfp_info_cmd *resp;
2707 	struct hclge_desc desc;
2708 	int ret;
2709 
2710 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2711 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2712 
2713 	resp->query_type = QUERY_ACTIVE_SPEED;
2714 
2715 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2716 	if (ret == -EOPNOTSUPP) {
2717 		dev_warn(&hdev->pdev->dev,
2718 			 "IMP does not support getting SFP info %d\n", ret);
2719 		return ret;
2720 	} else if (ret) {
2721 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2722 		return ret;
2723 	}
2724 
2725 	mac->speed = le32_to_cpu(resp->speed);
2726 	/* if resp->speed_ability is 0, it means the firmware is an old
2727 	 * version, so do not update these params
2728 	 */
2729 	if (resp->speed_ability) {
2730 		mac->module_type = le32_to_cpu(resp->module_type);
2731 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
2732 		mac->autoneg = resp->autoneg;
2733 		mac->support_autoneg = resp->autoneg_ability;
2734 		mac->speed_type = QUERY_ACTIVE_SPEED;
2735 		if (!resp->active_fec)
2736 			mac->fec_mode = 0;
2737 		else
2738 			mac->fec_mode = BIT(resp->active_fec);
2739 	} else {
2740 		mac->speed_type = QUERY_SFP_SPEED;
2741 	}
2742 
2743 	return 0;
2744 }
2745 
2746 static int hclge_update_port_info(struct hclge_dev *hdev)
2747 {
2748 	struct hclge_mac *mac = &hdev->hw.mac;
2749 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
2750 	int ret;
2751 
2752 	/* get the port info from SFP cmd if not copper port */
2753 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2754 		return 0;
2755 
2756 	/* if IMP does not support getting SFP/qSFP info, return directly */
2757 	if (!hdev->support_sfp_query)
2758 		return 0;
2759 
2760 	if (hdev->pdev->revision >= 0x21)
2761 		ret = hclge_get_sfp_info(hdev, mac);
2762 	else
2763 		ret = hclge_get_sfp_speed(hdev, &speed);
2764 
2765 	if (ret == -EOPNOTSUPP) {
2766 		hdev->support_sfp_query = false;
2767 		return ret;
2768 	} else if (ret) {
2769 		return ret;
2770 	}
2771 
2772 	if (hdev->pdev->revision >= 0x21) {
2773 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2774 			hclge_update_port_capability(mac);
2775 			return 0;
2776 		}
2777 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2778 					       HCLGE_MAC_FULL);
2779 	} else {
2780 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2781 			return 0; /* do nothing if no SFP */
2782 
2783 		/* must config full duplex for SFP */
2784 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2785 	}
2786 }
2787 
2788 static int hclge_get_status(struct hnae3_handle *handle)
2789 {
2790 	struct hclge_vport *vport = hclge_get_vport(handle);
2791 	struct hclge_dev *hdev = vport->back;
2792 
2793 	hclge_update_link_status(hdev);
2794 
2795 	return hdev->hw.mac.link;
2796 }
2797 
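/* hclge_check_event_cause: decode the vector0 interrupt source. Reset events
 * take priority over MSI-X errors, which take priority over mailbox events.
 * The value used to clear the source register is returned in @clearval.
 */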
2798 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2799 {
2800 	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2801 
2802 	/* fetch the events from their corresponding regs */
2803 	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2804 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2805 	msix_src_reg = hclge_read_dev(&hdev->hw,
2806 				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2807 
2808 	/* Assumption: if by any chance reset and mailbox events are reported
2809 	 * together, then we will only process the reset event in this go and
2810 	 * will defer the processing of the mailbox events. Since we would not
2811 	 * have cleared the RX CMDQ event this time, we would receive another
2812 	 * interrupt from H/W just for the mailbox.
2813 	 */
2814 
2815 	/* check for vector0 reset event sources */
2816 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2817 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2818 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2819 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2820 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2821 		hdev->rst_stats.imp_rst_cnt++;
2822 		return HCLGE_VECTOR0_EVENT_RST;
2823 	}
2824 
2825 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2826 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2827 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2828 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2829 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2830 		hdev->rst_stats.global_rst_cnt++;
2831 		return HCLGE_VECTOR0_EVENT_RST;
2832 	}
2833 
2834 	/* check for vector0 msix event source */
2835 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2836 		dev_info(&hdev->pdev->dev, "received event 0x%x\n",
2837 			 msix_src_reg);
2838 		*clearval = msix_src_reg;
2839 		return HCLGE_VECTOR0_EVENT_ERR;
2840 	}
2841 
2842 	/* check for vector0 mailbox(=CMDQ RX) event source */
2843 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2844 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2845 		*clearval = cmdq_src_reg;
2846 		return HCLGE_VECTOR0_EVENT_MBX;
2847 	}
2848 
2849 	/* print other vector0 event source */
2850 	dev_info(&hdev->pdev->dev,
2851 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
2852 		 cmdq_src_reg, msix_src_reg);
2853 	*clearval = msix_src_reg;
2854 
2855 	return HCLGE_VECTOR0_EVENT_OTHER;
2856 }
2857 
2858 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2859 				    u32 regclr)
2860 {
2861 	switch (event_type) {
2862 	case HCLGE_VECTOR0_EVENT_RST:
2863 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2864 		break;
2865 	case HCLGE_VECTOR0_EVENT_MBX:
2866 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2867 		break;
2868 	default:
2869 		break;
2870 	}
2871 }
2872 
2873 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2874 {
2875 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2876 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2877 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2878 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2879 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2880 }
2881 
2882 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2883 {
2884 	writel(enable ? 1 : 0, vector->addr);
2885 }
2886 
2887 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2888 {
2889 	struct hclge_dev *hdev = data;
2890 	u32 clearval = 0;
2891 	u32 event_cause;
2892 
2893 	hclge_enable_vector(&hdev->misc_vector, false);
2894 	event_cause = hclge_check_event_cause(hdev, &clearval);
2895 
2896 	/* vector 0 interrupt is shared with reset and mailbox source events */
2897 	switch (event_cause) {
2898 	case HCLGE_VECTOR0_EVENT_ERR:
2899 		/* we do not know what type of reset is required now. This could
2900 		 * only be decided after we fetch the type of errors which
2901 		 * caused this event. Therefore, we will do the below for now:
2902 		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2903 		 *    have deferred the type of reset to be used.
2904 		 * 2. Schedule the reset service task.
2905 		 * 3. When the service task receives HNAE3_UNKNOWN_RESET type
2906 		 *    it will fetch the correct type of reset. This would be
2907 		 *    done by first decoding the types of errors.
2908 		 */
2909 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2910 		/* fall through */
2911 	case HCLGE_VECTOR0_EVENT_RST:
2912 		hclge_reset_task_schedule(hdev);
2913 		break;
2914 	case HCLGE_VECTOR0_EVENT_MBX:
2915 		/* If we are here then,
2916 		 * 1. Either we are not handling any mbx task and we are not
2917 		 *    scheduled as well
2918 		 *                        OR
2919 		 * 2. We could be handling a mbx task but nothing more is
2920 		 *    scheduled.
2921 		 * In both cases, we should schedule mbx task as there are more
2922 		 * mbx messages reported by this interrupt.
2923 		 */
2924 		hclge_mbx_task_schedule(hdev);
2925 		break;
2926 	default:
2927 		dev_warn(&hdev->pdev->dev,
2928 			 "received unknown or unhandled event of vector0\n");
2929 		break;
2930 	}
2931 
2932 	hclge_clear_event_cause(hdev, event_cause, clearval);
2933 
2934 	/* Enable the interrupt if it is not caused by reset. And when
2935 	 * clearval equals 0, it means the interrupt status may have been
2936 	 * cleared by hardware before the driver read the status register.
2937 	 * In this case, the vector0 interrupt should also be enabled.
2938 	 */
2939 	if (!clearval || event_cause == HCLGE_VECTOR0_EVENT_MBX)
2941 		hclge_enable_vector(&hdev->misc_vector, true);
2943 
2944 	return IRQ_HANDLED;
2945 }
2946 
2947 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2948 {
2949 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2950 		dev_warn(&hdev->pdev->dev,
2951 			 "vector(vector_id %d) has been freed.\n", vector_id);
2952 		return;
2953 	}
2954 
2955 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2956 	hdev->num_msi_left += 1;
2957 	hdev->num_msi_used -= 1;
2958 }
2959 
2960 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2961 {
2962 	struct hclge_misc_vector *vector = &hdev->misc_vector;
2963 
2964 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2965 
2966 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2967 	hdev->vector_status[0] = 0;
2968 
2969 	hdev->num_msi_left -= 1;
2970 	hdev->num_msi_used += 1;
2971 }
2972 
2973 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
2974 				      const cpumask_t *mask)
2975 {
2976 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
2977 					      affinity_notify);
2978 
2979 	cpumask_copy(&hdev->affinity_mask, mask);
2980 }
2981 
2982 static void hclge_irq_affinity_release(struct kref *ref)
2983 {
2984 }
2985 
2986 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
2987 {
2988 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
2989 			      &hdev->affinity_mask);
2990 
2991 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
2992 	hdev->affinity_notify.release = hclge_irq_affinity_release;
2993 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
2994 				  &hdev->affinity_notify);
2995 }
2996 
2997 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
2998 {
2999 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3000 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3001 }
3002 
3003 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3004 {
3005 	int ret;
3006 
3007 	hclge_get_misc_vector(hdev);
3008 
3009 	/* this would be explicitly freed in the end */
3010 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3011 			  0, "hclge_misc", hdev);
3012 	if (ret) {
3013 		hclge_free_vector(hdev, 0);
3014 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3015 			hdev->misc_vector.vector_irq);
3016 	}
3017 
3018 	return ret;
3019 }
3020 
3021 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3022 {
3023 	free_irq(hdev->misc_vector.vector_irq, hdev);
3024 	hclge_free_vector(hdev, 0);
3025 }
3026 
3027 int hclge_notify_client(struct hclge_dev *hdev,
3028 			enum hnae3_reset_notify_type type)
3029 {
3030 	struct hnae3_client *client = hdev->nic_client;
3031 	u16 i;
3032 
3033 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3034 		return 0;
3035 
3036 	if (!client->ops->reset_notify)
3037 		return -EOPNOTSUPP;
3038 
3039 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3040 		struct hnae3_handle *handle = &hdev->vport[i].nic;
3041 		int ret;
3042 
3043 		ret = client->ops->reset_notify(handle, type);
3044 		if (ret) {
3045 			dev_err(&hdev->pdev->dev,
3046 				"notify nic client failed %d(%d)\n", type, ret);
3047 			return ret;
3048 		}
3049 	}
3050 
3051 	return 0;
3052 }
3053 
3054 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3055 				    enum hnae3_reset_notify_type type)
3056 {
3057 	struct hnae3_client *client = hdev->roce_client;
3058 	int ret = 0;
3059 	u16 i;
3060 
3061 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3062 		return 0;
3063 
3064 	if (!client->ops->reset_notify)
3065 		return -EOPNOTSUPP;
3066 
3067 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3068 		struct hnae3_handle *handle = &hdev->vport[i].roce;
3069 
3070 		ret = client->ops->reset_notify(handle, type);
3071 		if (ret) {
3072 			dev_err(&hdev->pdev->dev,
3073 				"notify roce client failed %d(%d)",
3074 				type, ret);
3075 			return ret;
3076 		}
3077 	}
3078 
3079 	return ret;
3080 }
3081 
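/* hclge_reset_wait: poll the reset status register until the reset bit is
 * cleared (or, for FLR, until the FLR_DONE flag is set), using a bounded
 * number of 100 ms retries.
 */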
3082 static int hclge_reset_wait(struct hclge_dev *hdev)
3083 {
3084 #define HCLGE_RESET_WATI_MS	100
3085 #define HCLGE_RESET_WAIT_CNT	200
3086 	u32 val, reg, reg_bit;
3087 	u32 cnt = 0;
3088 
3089 	switch (hdev->reset_type) {
3090 	case HNAE3_IMP_RESET:
3091 		reg = HCLGE_GLOBAL_RESET_REG;
3092 		reg_bit = HCLGE_IMP_RESET_BIT;
3093 		break;
3094 	case HNAE3_GLOBAL_RESET:
3095 		reg = HCLGE_GLOBAL_RESET_REG;
3096 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3097 		break;
3098 	case HNAE3_FUNC_RESET:
3099 		reg = HCLGE_FUN_RST_ING;
3100 		reg_bit = HCLGE_FUN_RST_ING_B;
3101 		break;
3102 	case HNAE3_FLR_RESET:
3103 		break;
3104 	default:
3105 		dev_err(&hdev->pdev->dev,
3106 			"Wait for unsupported reset type: %d\n",
3107 			hdev->reset_type);
3108 		return -EINVAL;
3109 	}
3110 
3111 	if (hdev->reset_type == HNAE3_FLR_RESET) {
3112 		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3113 		       cnt++ < HCLGE_RESET_WAIT_CNT)
3114 			msleep(HCLGE_RESET_WATI_MS);
3115 
3116 		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3117 			dev_err(&hdev->pdev->dev,
3118 				"flr wait timeout: %d\n", cnt);
3119 			return -EBUSY;
3120 		}
3121 
3122 		return 0;
3123 	}
3124 
3125 	val = hclge_read_dev(&hdev->hw, reg);
3126 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3127 		msleep(HCLGE_RESET_WATI_MS);
3128 		val = hclge_read_dev(&hdev->hw, reg);
3129 		cnt++;
3130 	}
3131 
3132 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3133 		dev_warn(&hdev->pdev->dev,
3134 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3135 		return -EBUSY;
3136 	}
3137 
3138 	return 0;
3139 }
3140 
3141 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3142 {
3143 	struct hclge_vf_rst_cmd *req;
3144 	struct hclge_desc desc;
3145 
3146 	req = (struct hclge_vf_rst_cmd *)desc.data;
3147 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3148 	req->dest_vfid = func_id;
3149 
3150 	if (reset)
3151 		req->vf_rst = 0x1;
3152 
3153 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3154 }
3155 
3156 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3157 {
3158 	int i;
3159 
3160 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3161 		struct hclge_vport *vport = &hdev->vport[i];
3162 		int ret;
3163 
3164 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3165 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3166 		if (ret) {
3167 			dev_err(&hdev->pdev->dev,
3168 				"set vf(%d) rst failed %d!\n",
3169 				vport->vport_id, ret);
3170 			return ret;
3171 		}
3172 
3173 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3174 			continue;
3175 
3176 		/* Inform VF to process the reset.
3177 		 * hclge_inform_reset_assert_to_vf may fail if VF
3178 		 * driver is not loaded.
3179 		 */
3180 		ret = hclge_inform_reset_assert_to_vf(vport);
3181 		if (ret)
3182 			dev_warn(&hdev->pdev->dev,
3183 				 "inform reset to vf(%d) failed %d!\n",
3184 				 vport->vport_id, ret);
3185 	}
3186 
3187 	return 0;
3188 }
3189 
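/* hclge_func_reset_sync_vf: before asserting a PF reset, poll the firmware
 * until all VFs report that they are ready to be reset, or the retry limit
 * is reached.
 */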
3190 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3191 {
3192 	struct hclge_pf_rst_sync_cmd *req;
3193 	struct hclge_desc desc;
3194 	int cnt = 0;
3195 	int ret;
3196 
3197 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3198 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3199 
3200 	do {
3201 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3202 		/* for compatibility with old firmware, wait
3203 		 * 100 ms for the VF to stop IO
3204 		 */
3205 		if (ret == -EOPNOTSUPP) {
3206 			msleep(HCLGE_RESET_SYNC_TIME);
3207 			return 0;
3208 		} else if (ret) {
3209 			dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
3210 				ret);
3211 			return ret;
3212 		} else if (req->all_vf_ready) {
3213 			return 0;
3214 		}
3215 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3216 		hclge_cmd_reuse_desc(&desc, true);
3217 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3218 
3219 	dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
3220 	return -ETIME;
3221 }
3222 
3223 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3224 {
3225 	struct hclge_desc desc;
3226 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3227 	int ret;
3228 
3229 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3230 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3231 	req->fun_reset_vfid = func_id;
3232 
3233 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3234 	if (ret)
3235 		dev_err(&hdev->pdev->dev,
3236 			"send function reset cmd fail, status = %d\n", ret);
3237 
3238 	return ret;
3239 }
3240 
3241 static void hclge_do_reset(struct hclge_dev *hdev)
3242 {
3243 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3244 	struct pci_dev *pdev = hdev->pdev;
3245 	u32 val;
3246 
3247 	if (hclge_get_hw_reset_stat(handle)) {
3248 		dev_info(&pdev->dev, "Hardware reset not finished\n");
3249 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3250 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3251 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3252 		return;
3253 	}
3254 
3255 	switch (hdev->reset_type) {
3256 	case HNAE3_GLOBAL_RESET:
3257 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3258 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3259 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3260 		dev_info(&pdev->dev, "Global Reset requested\n");
3261 		break;
3262 	case HNAE3_FUNC_RESET:
3263 		dev_info(&pdev->dev, "PF Reset requested\n");
3264 		/* schedule again to check later */
3265 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3266 		hclge_reset_task_schedule(hdev);
3267 		break;
3268 	case HNAE3_FLR_RESET:
3269 		dev_info(&pdev->dev, "FLR requested\n");
3270 		/* schedule again to check later */
3271 		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3272 		hclge_reset_task_schedule(hdev);
3273 		break;
3274 	default:
3275 		dev_warn(&pdev->dev,
3276 			 "Unsupported reset type: %d\n", hdev->reset_type);
3277 		break;
3278 	}
3279 }
3280 
3281 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3282 						   unsigned long *addr)
3283 {
3284 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3285 	struct hclge_dev *hdev = ae_dev->priv;
3286 
3287 	/* first, resolve any unknown reset type to the known type(s) */
3288 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3289 		/* we will intentionally ignore any errors from this function
3290 		 *  as we will end up in *some* reset request in any case
3291 		 */
3292 		hclge_handle_hw_msix_error(hdev, addr);
3293 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
3294 		/* We defered the clearing of the error event which caused
3295 		 * interrupt since it was not posssible to do that in
3296 		 * interrupt context (and this is the reason we introduced
3297 		 * new UNKNOWN reset type). Now, the errors have been
3298 		 * handled and cleared in hardware we can safely enable
3299 		 * interrupts. This is an exception to the norm.
3300 		 */
3301 		hclge_enable_vector(&hdev->misc_vector, true);
3302 	}
3303 
3304 	/* return the highest priority reset level amongst all */
3305 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3306 		rst_level = HNAE3_IMP_RESET;
3307 		clear_bit(HNAE3_IMP_RESET, addr);
3308 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3309 		clear_bit(HNAE3_FUNC_RESET, addr);
3310 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3311 		rst_level = HNAE3_GLOBAL_RESET;
3312 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3313 		clear_bit(HNAE3_FUNC_RESET, addr);
3314 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3315 		rst_level = HNAE3_FUNC_RESET;
3316 		clear_bit(HNAE3_FUNC_RESET, addr);
3317 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3318 		rst_level = HNAE3_FLR_RESET;
3319 		clear_bit(HNAE3_FLR_RESET, addr);
3320 	}
3321 
3322 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3323 	    rst_level < hdev->reset_type)
3324 		return HNAE3_NONE_RESET;
3325 
3326 	return rst_level;
3327 }
3328 
3329 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3330 {
3331 	u32 clearval = 0;
3332 
3333 	switch (hdev->reset_type) {
3334 	case HNAE3_IMP_RESET:
3335 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3336 		break;
3337 	case HNAE3_GLOBAL_RESET:
3338 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3339 		break;
3340 	default:
3341 		break;
3342 	}
3343 
3344 	if (!clearval)
3345 		return;
3346 
3347 	/* For revision 0x20, the reset interrupt source
3348 	 * can only be cleared after the hardware reset is done
3349 	 */
3350 	if (hdev->pdev->revision == 0x20)
3351 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3352 				clearval);
3353 
3354 	hclge_enable_vector(&hdev->misc_vector, true);
3355 }
3356 
3357 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3358 {
3359 	int ret = 0;
3360 
3361 	switch (hdev->reset_type) {
3362 	case HNAE3_FUNC_RESET:
3363 		/* fall through */
3364 	case HNAE3_FLR_RESET:
3365 		ret = hclge_set_all_vf_rst(hdev, true);
3366 		break;
3367 	default:
3368 		break;
3369 	}
3370 
3371 	return ret;
3372 }
3373 
3374 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3375 {
3376 	u32 reg_val;
3377 
3378 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3379 	if (enable)
3380 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3381 	else
3382 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3383 
3384 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3385 }
3386 
3387 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3388 {
3389 	u32 reg_val;
3390 	int ret = 0;
3391 
3392 	switch (hdev->reset_type) {
3393 	case HNAE3_FUNC_RESET:
3394 		/* to confirm whether all running VFs are ready
3395 		 * before requesting PF reset
3396 		 */
3397 		ret = hclge_func_reset_sync_vf(hdev);
3398 		if (ret)
3399 			return ret;
3400 
3401 		ret = hclge_func_reset_cmd(hdev, 0);
3402 		if (ret) {
3403 			dev_err(&hdev->pdev->dev,
3404 				"asserting function reset fail %d!\n", ret);
3405 			return ret;
3406 		}
3407 
3408 		/* After performing PF reset, it is not necessary to do the
3409 		 * mailbox handling or send any command to firmware, because
3410 		 * any mailbox handling or command to firmware is only valid
3411 		 * after hclge_cmd_init is called.
3412 		 */
3413 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3414 		hdev->rst_stats.pf_rst_cnt++;
3415 		break;
3416 	case HNAE3_FLR_RESET:
3417 		/* to confirm whether all running VFs are ready
3418 		 * before requesting PF reset
3419 		 */
3420 		ret = hclge_func_reset_sync_vf(hdev);
3421 		if (ret)
3422 			return ret;
3423 
3424 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3425 		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3426 		hdev->rst_stats.flr_rst_cnt++;
3427 		break;
3428 	case HNAE3_IMP_RESET:
3429 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3430 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3431 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3432 		break;
3433 	default:
3434 		break;
3435 	}
3436 
3437 	/* inform hardware that preparatory work is done */
3438 	msleep(HCLGE_RESET_SYNC_TIME);
3439 	hclge_reset_handshake(hdev, true);
3440 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3441 
3442 	return ret;
3443 }
3444 
3445 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3446 {
3447 #define MAX_RESET_FAIL_CNT 5
3448 
3449 	if (hdev->reset_pending) {
3450 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3451 			 hdev->reset_pending);
3452 		return true;
3453 	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3454 		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3455 		    BIT(HCLGE_IMP_RESET_BIT))) {
3456 		dev_info(&hdev->pdev->dev,
3457 			 "reset failed because IMP Reset is pending\n");
3458 		hclge_clear_reset_cause(hdev);
3459 		return false;
3460 	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3461 		hdev->reset_fail_cnt++;
3462 		set_bit(hdev->reset_type, &hdev->reset_pending);
3463 		dev_info(&hdev->pdev->dev,
3464 			 "re-schedule reset task(%d)\n",
3465 			 hdev->reset_fail_cnt);
3466 		return true;
3467 	}
3468 
3469 	hclge_clear_reset_cause(hdev);
3470 
3471 	/* recover the handshake status when reset fails */
3472 	hclge_reset_handshake(hdev, true);
3473 
3474 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3475 	return false;
3476 }
3477 
3478 static int hclge_set_rst_done(struct hclge_dev *hdev)
3479 {
3480 	struct hclge_pf_rst_done_cmd *req;
3481 	struct hclge_desc desc;
3482 
3483 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
3484 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3485 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3486 
3487 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3488 }
3489 
3490 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3491 {
3492 	int ret = 0;
3493 
3494 	switch (hdev->reset_type) {
3495 	case HNAE3_FUNC_RESET:
3496 		/* fall through */
3497 	case HNAE3_FLR_RESET:
3498 		ret = hclge_set_all_vf_rst(hdev, false);
3499 		break;
3500 	case HNAE3_GLOBAL_RESET:
3501 		/* fall through */
3502 	case HNAE3_IMP_RESET:
3503 		ret = hclge_set_rst_done(hdev);
3504 		break;
3505 	default:
3506 		break;
3507 	}
3508 
3509 	/* clear up the handshake status after re-initialization is done */
3510 	hclge_reset_handshake(hdev, false);
3511 
3512 	return ret;
3513 }
3514 
3515 static int hclge_reset_stack(struct hclge_dev *hdev)
3516 {
3517 	int ret;
3518 
3519 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3520 	if (ret)
3521 		return ret;
3522 
3523 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3524 	if (ret)
3525 		return ret;
3526 
3527 	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3528 	if (ret)
3529 		return ret;
3530 
3531 	return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3532 }
3533 
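/* hclge_reset: perform the currently requested reset. The flow is: notify
 * the clients to go down, prepare and assert the reset, wait for hardware
 * to finish, re-initialize the ae device and the stack, then bring the
 * clients back up. On failure the reset task may be rescheduled by
 * hclge_reset_err_handle().
 */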
3534 static void hclge_reset(struct hclge_dev *hdev)
3535 {
3536 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3537 	int ret;
3538 
3539 	/* Initialize ae_dev reset status as well, in case enet layer wants to
3540 	 * know if device is undergoing reset
3541 	 */
3542 	ae_dev->reset_type = hdev->reset_type;
3543 	hdev->rst_stats.reset_cnt++;
3544 	/* perform reset of the stack & ae device for a client */
3545 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3546 	if (ret)
3547 		goto err_reset;
3548 
3549 	ret = hclge_reset_prepare_down(hdev);
3550 	if (ret)
3551 		goto err_reset;
3552 
3553 	rtnl_lock();
3554 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3555 	if (ret)
3556 		goto err_reset_lock;
3557 
3558 	rtnl_unlock();
3559 
3560 	ret = hclge_reset_prepare_wait(hdev);
3561 	if (ret)
3562 		goto err_reset;
3563 
3564 	if (hclge_reset_wait(hdev))
3565 		goto err_reset;
3566 
3567 	hdev->rst_stats.hw_reset_done_cnt++;
3568 
3569 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3570 	if (ret)
3571 		goto err_reset;
3572 
3573 	rtnl_lock();
3574 
3575 	ret = hclge_reset_stack(hdev);
3576 	if (ret)
3577 		goto err_reset_lock;
3578 
3579 	hclge_clear_reset_cause(hdev);
3580 
3581 	ret = hclge_reset_prepare_up(hdev);
3582 	if (ret)
3583 		goto err_reset_lock;
3584 
3585 	rtnl_unlock();
3586 
3587 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3588 	/* ignore the RoCE notify error only when the reset has already
3589 	 * failed (HCLGE_RESET_MAX_FAIL_CNT - 1) times
3590 	 */
3591 	if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3592 		goto err_reset;
3593 
3594 	rtnl_lock();
3595 
3596 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3597 	if (ret)
3598 		goto err_reset_lock;
3599 
3600 	rtnl_unlock();
3601 
3602 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3603 	if (ret)
3604 		goto err_reset;
3605 
3606 	hdev->last_reset_time = jiffies;
3607 	hdev->reset_fail_cnt = 0;
3608 	hdev->rst_stats.reset_done_cnt++;
3609 	ae_dev->reset_type = HNAE3_NONE_RESET;
3610 
3611 	/* if default_reset_request has a higher level reset request,
3612 	 * it should be handled as soon as possible, since some errors
3613 	 * need this kind of reset to be fixed.
3614 	 */
3615 	hdev->reset_level = hclge_get_reset_level(ae_dev,
3616 						  &hdev->default_reset_request);
3617 	if (hdev->reset_level != HNAE3_NONE_RESET)
3618 		set_bit(hdev->reset_level, &hdev->reset_request);
3619 
3620 	return;
3621 
3622 err_reset_lock:
3623 	rtnl_unlock();
3624 err_reset:
3625 	if (hclge_reset_err_handle(hdev))
3626 		hclge_reset_task_schedule(hdev);
3627 }
3628 
3629 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3630 {
3631 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3632 	struct hclge_dev *hdev = ae_dev->priv;
3633 
3634 	/* We might end up getting called broadly because of the 2 cases below:
3635 	 * 1. A recoverable error was conveyed through APEI and the only way
3636 	 *    to bring back normalcy is to reset.
3637 	 * 2. A new reset request from the stack due to a timeout.
3638 	 *
3639 	 * For the first case, the error event might not have an ae handle
3640 	 * available. Check if this is a new reset request and we are not here
3641 	 * just because the last reset attempt did not succeed and the
3642 	 * watchdog hit us again. We will know this if the last reset request
3643 	 * did not occur very recently (watchdog timer = 5*HZ, so check after
3644 	 * a sufficiently large time, say 4*5*HZ). For a new request the
3645 	 * "reset level" is reset to PF reset. And if it is a repeat of the
3646 	 * most recent reset request, we want to throttle it, so it is not
3647 	 * allowed again before HCLGE_RESET_INTERVAL has elapsed.
3648 	 */
3649 	if (!handle)
3650 		handle = &hdev->vport[0].nic;
3651 
3652 	if (time_before(jiffies, (hdev->last_reset_time +
3653 				  HCLGE_RESET_INTERVAL))) {
3654 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3655 		return;
3656 	} else if (hdev->default_reset_request)
3657 		hdev->reset_level =
3658 			hclge_get_reset_level(ae_dev,
3659 					      &hdev->default_reset_request);
3660 	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3661 		hdev->reset_level = HNAE3_FUNC_RESET;
3662 
3663 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3664 		 hdev->reset_level);
3665 
3666 	/* request reset & schedule reset task */
3667 	set_bit(hdev->reset_level, &hdev->reset_request);
3668 	hclge_reset_task_schedule(hdev);
3669 
3670 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3671 		hdev->reset_level++;
3672 }
3673 
3674 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3675 					enum hnae3_reset_type rst_type)
3676 {
3677 	struct hclge_dev *hdev = ae_dev->priv;
3678 
3679 	set_bit(rst_type, &hdev->default_reset_request);
3680 }
3681 
3682 static void hclge_reset_timer(struct timer_list *t)
3683 {
3684 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3685 
3686 	/* if default_reset_request has no value, it means that this reset
3687 	 * request has already been handled, so just return here
3688 	 */
3689 	if (!hdev->default_reset_request)
3690 		return;
3691 
3692 	dev_info(&hdev->pdev->dev,
3693 		 "triggering reset in reset timer\n");
3694 	hclge_reset_event(hdev->pdev, NULL);
3695 }
3696 
3697 static void hclge_reset_subtask(struct hclge_dev *hdev)
3698 {
3699 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3700 
3701 	/* check if there is any ongoing reset in the hardware. This status can
3702 	 * be checked from reset_pending. If there is, we need to wait for the
3703 	 * hardware to complete the reset.
3704 	 *    a. If we are able to figure out in reasonable time that the
3705 	 *       hardware has fully reset, we can proceed with the driver and
3706 	 *       client reset.
3707 	 *    b. else, we can come back later to check this status, so
3708 	 *       re-schedule now.
3709 	 */
3710 	hdev->last_reset_time = jiffies;
3711 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3712 	if (hdev->reset_type != HNAE3_NONE_RESET)
3713 		hclge_reset(hdev);
3714 
3715 	/* check if we got any *new* reset requests to be honored */
3716 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3717 	if (hdev->reset_type != HNAE3_NONE_RESET)
3718 		hclge_do_reset(hdev);
3719 
3720 	hdev->reset_type = HNAE3_NONE_RESET;
3721 }
3722 
3723 static void hclge_reset_service_task(struct work_struct *work)
3724 {
3725 	struct hclge_dev *hdev =
3726 		container_of(work, struct hclge_dev, rst_service_task);
3727 
3728 	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3729 		return;
3730 
3731 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3732 
3733 	hclge_reset_subtask(hdev);
3734 
3735 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3736 }
3737 
3738 static void hclge_mailbox_service_task(struct work_struct *work)
3739 {
3740 	struct hclge_dev *hdev =
3741 		container_of(work, struct hclge_dev, mbx_service_task);
3742 
3743 	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3744 		return;
3745 
3746 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3747 
3748 	hclge_mbx_handler(hdev);
3749 
3750 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3751 }
3752 
3753 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3754 {
3755 	int i;
3756 
3757 	/* start from vport 1, because the PF (vport 0) is always alive */
3758 	for (i = 1; i < hdev->num_alloc_vport; i++) {
3759 		struct hclge_vport *vport = &hdev->vport[i];
3760 
3761 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3762 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3763 
3764 		/* If the VF is not alive, reset its MPS to the default value */
3765 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3766 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3767 	}
3768 }
3769 
3770 static void hclge_service_task(struct work_struct *work)
3771 {
3772 	struct hclge_dev *hdev =
3773 		container_of(work, struct hclge_dev, service_task.work);
3774 
3775 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
3776 
3777 	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3778 		hclge_update_stats_for_all(hdev);
3779 		hdev->hw_stats.stats_timer = 0;
3780 	}
3781 
3782 	hclge_update_port_info(hdev);
3783 	hclge_update_link_status(hdev);
3784 	hclge_update_vport_alive(hdev);
3785 	hclge_sync_vlan_filter(hdev);
3786 	if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3787 		hclge_rfs_filter_expire(hdev);
3788 		hdev->fd_arfs_expire_timer = 0;
3789 	}
3790 
3791 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
3792 }
3793 
3794 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3795 {
3796 	/* VF handle has no client */
3797 	if (!handle->client)
3798 		return container_of(handle, struct hclge_vport, nic);
3799 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
3800 		return container_of(handle, struct hclge_vport, roce);
3801 	else
3802 		return container_of(handle, struct hclge_vport, nic);
3803 }
3804 
3805 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3806 			    struct hnae3_vector_info *vector_info)
3807 {
3808 	struct hclge_vport *vport = hclge_get_vport(handle);
3809 	struct hnae3_vector_info *vector = vector_info;
3810 	struct hclge_dev *hdev = vport->back;
3811 	int alloc = 0;
3812 	int i, j;
3813 
3814 	vector_num = min(hdev->num_msi_left, vector_num);
3815 
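	/* Vector 0 is skipped below: it stays reserved for the driver's own
	 * use (e.g. the misc/mailbox interrupt), so free vectors are searched
	 * from index 1. The io_addr computed for vector i of this vport,
	 * derived from the expression below, is:
	 *   io_base + HCLGE_VECTOR_REG_BASE
	 *           + (i - 1) * HCLGE_VECTOR_REG_OFFSET
	 *           + vport_id * HCLGE_VECTOR_VF_OFFSET
	 */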
3816 	for (j = 0; j < vector_num; j++) {
3817 		for (i = 1; i < hdev->num_msi; i++) {
3818 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3819 				vector->vector = pci_irq_vector(hdev->pdev, i);
3820 				vector->io_addr = hdev->hw.io_base +
3821 					HCLGE_VECTOR_REG_BASE +
3822 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
3823 					vport->vport_id *
3824 					HCLGE_VECTOR_VF_OFFSET;
3825 				hdev->vector_status[i] = vport->vport_id;
3826 				hdev->vector_irq[i] = vector->vector;
3827 
3828 				vector++;
3829 				alloc++;
3830 
3831 				break;
3832 			}
3833 		}
3834 	}
3835 	hdev->num_msi_left -= alloc;
3836 	hdev->num_msi_used += alloc;
3837 
3838 	return alloc;
3839 }
3840 
3841 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3842 {
3843 	int i;
3844 
3845 	for (i = 0; i < hdev->num_msi; i++)
3846 		if (vector == hdev->vector_irq[i])
3847 			return i;
3848 
3849 	return -EINVAL;
3850 }
3851 
3852 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3853 {
3854 	struct hclge_vport *vport = hclge_get_vport(handle);
3855 	struct hclge_dev *hdev = vport->back;
3856 	int vector_id;
3857 
3858 	vector_id = hclge_get_vector_index(hdev, vector);
3859 	if (vector_id < 0) {
3860 		dev_err(&hdev->pdev->dev,
3861 			"Get vector index fail. vector_id = %d\n", vector_id);
3862 		return vector_id;
3863 	}
3864 
3865 	hclge_free_vector(hdev, vector_id);
3866 
3867 	return 0;
3868 }
3869 
3870 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3871 {
3872 	return HCLGE_RSS_KEY_SIZE;
3873 }
3874 
3875 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3876 {
3877 	return HCLGE_RSS_IND_TBL_SIZE;
3878 }
3879 
3880 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3881 				  const u8 hfunc, const u8 *key)
3882 {
3883 	struct hclge_rss_config_cmd *req;
3884 	unsigned int key_offset = 0;
3885 	struct hclge_desc desc;
3886 	int key_counts;
3887 	int key_size;
3888 	int ret;
3889 
3890 	key_counts = HCLGE_RSS_KEY_SIZE;
3891 	req = (struct hclge_rss_config_cmd *)desc.data;
3892 
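	/* The key is programmed in chunks of HCLGE_RSS_HASH_KEY_NUM bytes,
	 * one command descriptor per chunk, with hash_config carrying the
	 * hash algorithm and the chunk offset. Illustrative example, assuming
	 * a 40-byte key and 16-byte chunks: three descriptors are sent with
	 * key_offset 0, 1 and 2, copying 16, 16 and 8 bytes respectively.
	 */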
3893 	while (key_counts) {
3894 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3895 					   false);
3896 
3897 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3898 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3899 
3900 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
3901 		memcpy(req->hash_key,
3902 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3903 
3904 		key_counts -= key_size;
3905 		key_offset++;
3906 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3907 		if (ret) {
3908 			dev_err(&hdev->pdev->dev,
3909 				"Configure RSS key fail, status = %d\n",
3910 				ret);
3911 			return ret;
3912 		}
3913 	}
3914 	return 0;
3915 }
3916 
3917 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3918 {
3919 	struct hclge_rss_indirection_table_cmd *req;
3920 	struct hclge_desc desc;
3921 	int i, j;
3922 	int ret;
3923 
3924 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3925 
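	/* The indirection table is programmed HCLGE_RSS_CFG_TBL_SIZE entries
	 * at a time, and start_table_index tells the hardware where each
	 * chunk begins. Illustrative example, assuming a 512-entry table and
	 * 16 entries per descriptor: HCLGE_RSS_CFG_TBL_NUM = 32 descriptors
	 * are sent, covering entries 0-15, 16-31, ..., 496-511.
	 */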
3926 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3927 		hclge_cmd_setup_basic_desc
3928 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3929 
3930 		req->start_table_index =
3931 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3932 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3933 
3934 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3935 			req->rss_result[j] =
3936 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3937 
3938 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3939 		if (ret) {
3940 			dev_err(&hdev->pdev->dev,
3941 				"Configure rss indir table fail, status = %d\n",
3942 				ret);
3943 			return ret;
3944 		}
3945 	}
3946 	return 0;
3947 }
3948 
3949 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3950 				 u16 *tc_size, u16 *tc_offset)
3951 {
3952 	struct hclge_rss_tc_mode_cmd *req;
3953 	struct hclge_desc desc;
3954 	int ret;
3955 	int i;
3956 
3957 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3958 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3959 
3960 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3961 		u16 mode = 0;
3962 
3963 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3964 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3965 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3966 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3967 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3968 
3969 		req->rss_tc_mode[i] = cpu_to_le16(mode);
3970 	}
3971 
3972 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3973 	if (ret)
3974 		dev_err(&hdev->pdev->dev,
3975 			"Configure rss tc mode fail, status = %d\n", ret);
3976 
3977 	return ret;
3978 }
3979 
3980 static void hclge_get_rss_type(struct hclge_vport *vport)
3981 {
3982 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
3983 	    vport->rss_tuple_sets.ipv4_udp_en ||
3984 	    vport->rss_tuple_sets.ipv4_sctp_en ||
3985 	    vport->rss_tuple_sets.ipv6_tcp_en ||
3986 	    vport->rss_tuple_sets.ipv6_udp_en ||
3987 	    vport->rss_tuple_sets.ipv6_sctp_en)
3988 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3989 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3990 		 vport->rss_tuple_sets.ipv6_fragment_en)
3991 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3992 	else
3993 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3994 }
3995 
3996 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3997 {
3998 	struct hclge_rss_input_tuple_cmd *req;
3999 	struct hclge_desc desc;
4000 	int ret;
4001 
4002 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4003 
4004 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4005 
4006 	/* Get the tuple cfg from pf */
4007 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4008 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4009 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4010 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4011 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4012 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4013 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4014 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4015 	hclge_get_rss_type(&hdev->vport[0]);
4016 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4017 	if (ret)
4018 		dev_err(&hdev->pdev->dev,
4019 			"Configure rss input fail, status = %d\n", ret);
4020 	return ret;
4021 }
4022 
4023 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4024 			 u8 *key, u8 *hfunc)
4025 {
4026 	struct hclge_vport *vport = hclge_get_vport(handle);
4027 	int i;
4028 
4029 	/* Get hash algorithm */
4030 	if (hfunc) {
4031 		switch (vport->rss_algo) {
4032 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4033 			*hfunc = ETH_RSS_HASH_TOP;
4034 			break;
4035 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4036 			*hfunc = ETH_RSS_HASH_XOR;
4037 			break;
4038 		default:
4039 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4040 			break;
4041 		}
4042 	}
4043 
4044 	/* Get the RSS Key required by the user */
4045 	if (key)
4046 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4047 
4048 	/* Get indirect table */
4049 	if (indir)
4050 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4051 			indir[i] = vport->rss_indirection_tbl[i];
4052 
4053 	return 0;
4054 }
4055 
4056 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4057 			 const  u8 *key, const  u8 hfunc)
4058 {
4059 	struct hclge_vport *vport = hclge_get_vport(handle);
4060 	struct hclge_dev *hdev = vport->back;
4061 	u8 hash_algo;
4062 	int ret, i;
4063 
4064 	/* Set the RSS Hash Key if specified by the user */
4065 	if (key) {
4066 		switch (hfunc) {
4067 		case ETH_RSS_HASH_TOP:
4068 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4069 			break;
4070 		case ETH_RSS_HASH_XOR:
4071 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4072 			break;
4073 		case ETH_RSS_HASH_NO_CHANGE:
4074 			hash_algo = vport->rss_algo;
4075 			break;
4076 		default:
4077 			return -EINVAL;
4078 		}
4079 
4080 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4081 		if (ret)
4082 			return ret;
4083 
4084 		/* Update the shadow RSS key with the user specified key */
4085 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4086 		vport->rss_algo = hash_algo;
4087 	}
4088 
4089 	/* Update the shadow RSS table with user specified qids */
4090 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4091 		vport->rss_indirection_tbl[i] = indir[i];
4092 
4093 	/* Update the hardware */
4094 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4095 }
4096 
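/* Illustrative mapping done by hclge_get_rss_hash_bits(): for TCP_V4_FLOW
 * with nfc->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3,
 * the returned tuple set is HCLGE_S_IP_BIT | HCLGE_D_IP_BIT |
 * HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT; SCTP flows additionally get
 * HCLGE_V_TAG_BIT set.
 */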
4097 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4098 {
4099 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4100 
4101 	if (nfc->data & RXH_L4_B_2_3)
4102 		hash_sets |= HCLGE_D_PORT_BIT;
4103 	else
4104 		hash_sets &= ~HCLGE_D_PORT_BIT;
4105 
4106 	if (nfc->data & RXH_IP_SRC)
4107 		hash_sets |= HCLGE_S_IP_BIT;
4108 	else
4109 		hash_sets &= ~HCLGE_S_IP_BIT;
4110 
4111 	if (nfc->data & RXH_IP_DST)
4112 		hash_sets |= HCLGE_D_IP_BIT;
4113 	else
4114 		hash_sets &= ~HCLGE_D_IP_BIT;
4115 
4116 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4117 		hash_sets |= HCLGE_V_TAG_BIT;
4118 
4119 	return hash_sets;
4120 }
4121 
4122 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4123 			       struct ethtool_rxnfc *nfc)
4124 {
4125 	struct hclge_vport *vport = hclge_get_vport(handle);
4126 	struct hclge_dev *hdev = vport->back;
4127 	struct hclge_rss_input_tuple_cmd *req;
4128 	struct hclge_desc desc;
4129 	u8 tuple_sets;
4130 	int ret;
4131 
4132 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4133 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4134 		return -EINVAL;
4135 
4136 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4137 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4138 
4139 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4140 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4141 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4142 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4143 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4144 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4145 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4146 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4147 
4148 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4149 	switch (nfc->flow_type) {
4150 	case TCP_V4_FLOW:
4151 		req->ipv4_tcp_en = tuple_sets;
4152 		break;
4153 	case TCP_V6_FLOW:
4154 		req->ipv6_tcp_en = tuple_sets;
4155 		break;
4156 	case UDP_V4_FLOW:
4157 		req->ipv4_udp_en = tuple_sets;
4158 		break;
4159 	case UDP_V6_FLOW:
4160 		req->ipv6_udp_en = tuple_sets;
4161 		break;
4162 	case SCTP_V4_FLOW:
4163 		req->ipv4_sctp_en = tuple_sets;
4164 		break;
4165 	case SCTP_V6_FLOW:
4166 		if ((nfc->data & RXH_L4_B_0_1) ||
4167 		    (nfc->data & RXH_L4_B_2_3))
4168 			return -EINVAL;
4169 
4170 		req->ipv6_sctp_en = tuple_sets;
4171 		break;
4172 	case IPV4_FLOW:
4173 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4174 		break;
4175 	case IPV6_FLOW:
4176 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4177 		break;
4178 	default:
4179 		return -EINVAL;
4180 	}
4181 
4182 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4183 	if (ret) {
4184 		dev_err(&hdev->pdev->dev,
4185 			"Set rss tuple fail, status = %d\n", ret);
4186 		return ret;
4187 	}
4188 
4189 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4190 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4191 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4192 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4193 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4194 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4195 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4196 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4197 	hclge_get_rss_type(vport);
4198 	return 0;
4199 }
4200 
4201 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4202 			       struct ethtool_rxnfc *nfc)
4203 {
4204 	struct hclge_vport *vport = hclge_get_vport(handle);
4205 	u8 tuple_sets;
4206 
4207 	nfc->data = 0;
4208 
4209 	switch (nfc->flow_type) {
4210 	case TCP_V4_FLOW:
4211 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4212 		break;
4213 	case UDP_V4_FLOW:
4214 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4215 		break;
4216 	case TCP_V6_FLOW:
4217 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4218 		break;
4219 	case UDP_V6_FLOW:
4220 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4221 		break;
4222 	case SCTP_V4_FLOW:
4223 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4224 		break;
4225 	case SCTP_V6_FLOW:
4226 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4227 		break;
4228 	case IPV4_FLOW:
4229 	case IPV6_FLOW:
4230 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4231 		break;
4232 	default:
4233 		return -EINVAL;
4234 	}
4235 
4236 	if (!tuple_sets)
4237 		return 0;
4238 
4239 	if (tuple_sets & HCLGE_D_PORT_BIT)
4240 		nfc->data |= RXH_L4_B_2_3;
4241 	if (tuple_sets & HCLGE_S_PORT_BIT)
4242 		nfc->data |= RXH_L4_B_0_1;
4243 	if (tuple_sets & HCLGE_D_IP_BIT)
4244 		nfc->data |= RXH_IP_DST;
4245 	if (tuple_sets & HCLGE_S_IP_BIT)
4246 		nfc->data |= RXH_IP_SRC;
4247 
4248 	return 0;
4249 }
4250 
4251 static int hclge_get_tc_size(struct hnae3_handle *handle)
4252 {
4253 	struct hclge_vport *vport = hclge_get_vport(handle);
4254 	struct hclge_dev *hdev = vport->back;
4255 
4256 	return hdev->rss_size_max;
4257 }
4258 
4259 int hclge_rss_init_hw(struct hclge_dev *hdev)
4260 {
4261 	struct hclge_vport *vport = hdev->vport;
4262 	u8 *rss_indir = vport[0].rss_indirection_tbl;
4263 	u16 rss_size = vport[0].alloc_rss_size;
4264 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4265 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4266 	u8 *key = vport[0].rss_hash_key;
4267 	u8 hfunc = vport[0].rss_algo;
4268 	u16 tc_valid[HCLGE_MAX_TC_NUM];
4269 	u16 roundup_size;
4270 	unsigned int i;
4271 	int ret;
4272 
4273 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4274 	if (ret)
4275 		return ret;
4276 
4277 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4278 	if (ret)
4279 		return ret;
4280 
4281 	ret = hclge_set_rss_input_tuple(hdev);
4282 	if (ret)
4283 		return ret;
4284 
4285 	/* Each TC has the same queue size, and the tc_size set to hardware is
4286 	 * the log2 of the roundup power of two of rss_size; the actual queue
4287 	 * size is limited by the indirection table.
4288 	 */
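	/* Worked example: with rss_size = 24, roundup_pow_of_two(24) = 32 and
	 * ilog2(32) = 5, so tc_size is programmed as 5 and tc_offset of TC i
	 * is 24 * i; the indirection table then limits how many of those 32
	 * queue slots are actually used.
	 */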
4289 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4290 		dev_err(&hdev->pdev->dev,
4291 			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
4292 			rss_size);
4293 		return -EINVAL;
4294 	}
4295 
4296 	roundup_size = roundup_pow_of_two(rss_size);
4297 	roundup_size = ilog2(roundup_size);
4298 
4299 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4300 		tc_valid[i] = 0;
4301 
4302 		if (!(hdev->hw_tc_map & BIT(i)))
4303 			continue;
4304 
4305 		tc_valid[i] = 1;
4306 		tc_size[i] = roundup_size;
4307 		tc_offset[i] = rss_size * i;
4308 	}
4309 
4310 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4311 }
4312 
4313 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4314 {
4315 	struct hclge_vport *vport = hdev->vport;
4316 	int i, j;
4317 
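	/* Default mapping example: with alloc_rss_size = 16, entry i of the
	 * indirection table becomes i % 16, so the table cycles through
	 * queues 0..15 and spreads traffic evenly over the allocated RSS
	 * queues of each vport.
	 */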
4318 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4319 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4320 			vport[j].rss_indirection_tbl[i] =
4321 				i % vport[j].alloc_rss_size;
4322 	}
4323 }
4324 
4325 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4326 {
4327 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4328 	struct hclge_vport *vport = hdev->vport;
4329 
4330 	if (hdev->pdev->revision >= 0x21)
4331 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4332 
4333 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4334 		vport[i].rss_tuple_sets.ipv4_tcp_en =
4335 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4336 		vport[i].rss_tuple_sets.ipv4_udp_en =
4337 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4338 		vport[i].rss_tuple_sets.ipv4_sctp_en =
4339 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4340 		vport[i].rss_tuple_sets.ipv4_fragment_en =
4341 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4342 		vport[i].rss_tuple_sets.ipv6_tcp_en =
4343 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4344 		vport[i].rss_tuple_sets.ipv6_udp_en =
4345 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4346 		vport[i].rss_tuple_sets.ipv6_sctp_en =
4347 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4348 		vport[i].rss_tuple_sets.ipv6_fragment_en =
4349 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4350 
4351 		vport[i].rss_algo = rss_algo;
4352 
4353 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
4354 		       HCLGE_RSS_KEY_SIZE);
4355 	}
4356 
4357 	hclge_rss_indir_init_cfg(hdev);
4358 }
4359 
4360 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4361 				int vector_id, bool en,
4362 				struct hnae3_ring_chain_node *ring_chain)
4363 {
4364 	struct hclge_dev *hdev = vport->back;
4365 	struct hnae3_ring_chain_node *node;
4366 	struct hclge_desc desc;
4367 	struct hclge_ctrl_vector_chain_cmd *req
4368 		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4369 	enum hclge_cmd_status status;
4370 	enum hclge_opcode_type op;
4371 	u16 tqp_type_and_id;
4372 	int i;
4373 
4374 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4375 	hclge_cmd_setup_basic_desc(&desc, op, false);
4376 	req->int_vector_id = vector_id;
4377 
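	/* Ring-to-vector mappings are packed into the command descriptor in
	 * batches of up to HCLGE_VECTOR_ELEMENTS_PER_CMD entries. When a
	 * descriptor fills up it is sent and a fresh one is prepared for the
	 * remaining rings; a final, partially filled descriptor (i > 0) is
	 * sent after the loop.
	 */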
4378 	i = 0;
4379 	for (node = ring_chain; node; node = node->next) {
4380 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4381 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4382 				HCLGE_INT_TYPE_S,
4383 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4384 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4385 				HCLGE_TQP_ID_S, node->tqp_index);
4386 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4387 				HCLGE_INT_GL_IDX_S,
4388 				hnae3_get_field(node->int_gl_idx,
4389 						HNAE3_RING_GL_IDX_M,
4390 						HNAE3_RING_GL_IDX_S));
4391 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4392 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4393 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4394 			req->vfid = vport->vport_id;
4395 
4396 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
4397 			if (status) {
4398 				dev_err(&hdev->pdev->dev,
4399 					"Map TQP fail, status is %d.\n",
4400 					status);
4401 				return -EIO;
4402 			}
4403 			i = 0;
4404 
4405 			hclge_cmd_setup_basic_desc(&desc, op, false);
4408 			req->int_vector_id = vector_id;
4409 		}
4410 	}
4411 
4412 	if (i > 0) {
4413 		req->int_cause_num = i;
4414 		req->vfid = vport->vport_id;
4415 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
4416 		if (status) {
4417 			dev_err(&hdev->pdev->dev,
4418 				"Map TQP fail, status is %d.\n", status);
4419 			return -EIO;
4420 		}
4421 	}
4422 
4423 	return 0;
4424 }
4425 
4426 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4427 				    struct hnae3_ring_chain_node *ring_chain)
4428 {
4429 	struct hclge_vport *vport = hclge_get_vport(handle);
4430 	struct hclge_dev *hdev = vport->back;
4431 	int vector_id;
4432 
4433 	vector_id = hclge_get_vector_index(hdev, vector);
4434 	if (vector_id < 0) {
4435 		dev_err(&hdev->pdev->dev,
4436 			"Get vector index fail. vector_id = %d\n", vector_id);
4437 		return vector_id;
4438 	}
4439 
4440 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4441 }
4442 
4443 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4444 				       struct hnae3_ring_chain_node *ring_chain)
4445 {
4446 	struct hclge_vport *vport = hclge_get_vport(handle);
4447 	struct hclge_dev *hdev = vport->back;
4448 	int vector_id, ret;
4449 
4450 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4451 		return 0;
4452 
4453 	vector_id = hclge_get_vector_index(hdev, vector);
4454 	if (vector_id < 0) {
4455 		dev_err(&handle->pdev->dev,
4456 			"Get vector index fail. ret = %d\n", vector_id);
4457 		return vector_id;
4458 	}
4459 
4460 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4461 	if (ret)
4462 		dev_err(&handle->pdev->dev,
4463 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4464 			vector_id, ret);
4465 
4466 	return ret;
4467 }
4468 
4469 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4470 			       struct hclge_promisc_param *param)
4471 {
4472 	struct hclge_promisc_cfg_cmd *req;
4473 	struct hclge_desc desc;
4474 	int ret;
4475 
4476 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4477 
4478 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
4479 	req->vf_id = param->vf_id;
4480 
4481 	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4482 	 * pdev revision 0x20, newer revisions support them. Setting these
4483 	 * two fields does not return an error when the driver sends the
4484 	 * command to the firmware on revision 0x20.
4485 	 */
4486 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4487 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4488 
4489 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4490 	if (ret)
4491 		dev_err(&hdev->pdev->dev,
4492 			"Set promisc mode fail, status is %d.\n", ret);
4493 
4494 	return ret;
4495 }
4496 
4497 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4498 			      bool en_mc, bool en_bc, int vport_id)
4499 {
4500 	if (!param)
4501 		return;
4502 
4503 	memset(param, 0, sizeof(struct hclge_promisc_param));
4504 	if (en_uc)
4505 		param->enable = HCLGE_PROMISC_EN_UC;
4506 	if (en_mc)
4507 		param->enable |= HCLGE_PROMISC_EN_MC;
4508 	if (en_bc)
4509 		param->enable |= HCLGE_PROMISC_EN_BC;
4510 	param->vf_id = vport_id;
4511 }
4512 
4513 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4514 				  bool en_mc_pmc)
4515 {
4516 	struct hclge_vport *vport = hclge_get_vport(handle);
4517 	struct hclge_dev *hdev = vport->back;
4518 	struct hclge_promisc_param param;
4519 	bool en_bc_pmc = true;
4520 
4521 	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4522 	 * is always bypassed. So broadcast promisc should be disabled until
4523 	 * the user enables promisc mode.
4524 	 */
4525 	if (handle->pdev->revision == 0x20)
4526 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4527 
4528 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4529 				 vport->vport_id);
4530 	return hclge_cmd_set_promisc_mode(hdev, &param);
4531 }
4532 
4533 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4534 {
4535 	struct hclge_get_fd_mode_cmd *req;
4536 	struct hclge_desc desc;
4537 	int ret;
4538 
4539 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4540 
4541 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
4542 
4543 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4544 	if (ret) {
4545 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4546 		return ret;
4547 	}
4548 
4549 	*fd_mode = req->mode;
4550 
4551 	return ret;
4552 }
4553 
4554 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4555 				   u32 *stage1_entry_num,
4556 				   u32 *stage2_entry_num,
4557 				   u16 *stage1_counter_num,
4558 				   u16 *stage2_counter_num)
4559 {
4560 	struct hclge_get_fd_allocation_cmd *req;
4561 	struct hclge_desc desc;
4562 	int ret;
4563 
4564 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4565 
4566 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4567 
4568 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4569 	if (ret) {
4570 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4571 			ret);
4572 		return ret;
4573 	}
4574 
4575 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4576 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4577 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4578 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4579 
4580 	return ret;
4581 }
4582 
4583 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4584 {
4585 	struct hclge_set_fd_key_config_cmd *req;
4586 	struct hclge_fd_key_cfg *stage;
4587 	struct hclge_desc desc;
4588 	int ret;
4589 
4590 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4591 
4592 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4593 	stage = &hdev->fd_cfg.key_cfg[stage_num];
4594 	req->stage = stage_num;
4595 	req->key_select = stage->key_sel;
4596 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4597 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4598 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4599 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4600 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4601 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4602 
4603 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4604 	if (ret)
4605 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4606 
4607 	return ret;
4608 }
4609 
4610 static int hclge_init_fd_config(struct hclge_dev *hdev)
4611 {
4612 #define LOW_2_WORDS		0x03
4613 	struct hclge_fd_key_cfg *key_cfg;
4614 	int ret;
4615 
4616 	if (!hnae3_dev_fd_supported(hdev))
4617 		return 0;
4618 
4619 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4620 	if (ret)
4621 		return ret;
4622 
4623 	switch (hdev->fd_cfg.fd_mode) {
4624 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4625 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4626 		break;
4627 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4628 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4629 		break;
4630 	default:
4631 		dev_err(&hdev->pdev->dev,
4632 			"Unsupported flow director mode %d\n",
4633 			hdev->fd_cfg.fd_mode);
4634 		return -EOPNOTSUPP;
4635 	}
4636 
4637 	hdev->fd_cfg.proto_support =
4638 		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4639 		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4640 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4641 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4642 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4643 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4644 	key_cfg->outer_sipv6_word_en = 0;
4645 	key_cfg->outer_dipv6_word_en = 0;
4646 
4647 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4648 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4649 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4650 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4651 
4652 	/* If the max 400-bit key is used, ether type tuples are also supported */
4653 	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4654 		hdev->fd_cfg.proto_support |= ETHER_FLOW;
4655 		key_cfg->tuple_active |=
4656 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4657 	}
4658 
4659 	/* roce_type is used to filter roce frames
4660 	 * dst_vport is used to specify the rule
4661 	 */
4662 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4663 
4664 	ret = hclge_get_fd_allocation(hdev,
4665 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4666 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4667 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4668 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4669 	if (ret)
4670 		return ret;
4671 
4672 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4673 }
4674 
4675 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4676 				int loc, u8 *key, bool is_add)
4677 {
4678 	struct hclge_fd_tcam_config_1_cmd *req1;
4679 	struct hclge_fd_tcam_config_2_cmd *req2;
4680 	struct hclge_fd_tcam_config_3_cmd *req3;
4681 	struct hclge_desc desc[3];
4682 	int ret;
4683 
4684 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4685 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4686 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4687 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4688 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4689 
4690 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4691 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4692 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4693 
4694 	req1->stage = stage;
4695 	req1->xy_sel = sel_x ? 1 : 0;
4696 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4697 	req1->index = cpu_to_le32(loc);
4698 	req1->entry_vld = sel_x ? is_add : 0;
4699 
4700 	if (key) {
4701 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4702 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4703 		       sizeof(req2->tcam_data));
4704 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4705 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4706 	}
4707 
4708 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
4709 	if (ret)
4710 		dev_err(&hdev->pdev->dev,
4711 			"config tcam key fail, ret=%d\n",
4712 			ret);
4713 
4714 	return ret;
4715 }
4716 
4717 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4718 			      struct hclge_fd_ad_data *action)
4719 {
4720 	struct hclge_fd_ad_config_cmd *req;
4721 	struct hclge_desc desc;
4722 	u64 ad_data = 0;
4723 	int ret;
4724 
4725 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4726 
4727 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
4728 	req->index = cpu_to_le32(loc);
4729 	req->stage = stage;
4730 
4731 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
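	/* Build the 64-bit action data: the rule id fields are written first
	 * and then shifted into the upper 32 bits, while the forwarding
	 * action (drop or direct queue, counter and next-stage info) occupies
	 * the lower 32 bits.
	 */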
4732 		      action->write_rule_id_to_bd);
4733 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4734 			action->rule_id);
4735 	ad_data <<= 32;
4736 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4737 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4738 		      action->forward_to_direct_queue);
4739 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4740 			action->queue_id);
4741 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4742 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4743 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4744 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4745 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4746 			action->counter_id);
4747 
4748 	req->ad_data = cpu_to_le64(ad_data);
4749 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4750 	if (ret)
4751 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4752 
4753 	return ret;
4754 }
4755 
4756 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4757 				   struct hclge_fd_rule *rule)
4758 {
4759 	u16 tmp_x_s, tmp_y_s;
4760 	u32 tmp_x_l, tmp_y_l;
4761 	int i;
4762 
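	/* Each tuple is converted into a TCAM x/y key pair via the calc_x()
	 * and calc_y() helpers, so that bits cleared in the rule mask are
	 * treated as "don't care" by the hardware (the exact encoding is
	 * defined by those macros). Tuples listed in unused_tuple are not
	 * written but still return true, so the caller advances the key
	 * cursor past them.
	 */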
4763 	if (rule->unused_tuple & tuple_bit)
4764 		return true;
4765 
4766 	switch (tuple_bit) {
4767 	case 0:
4768 		return false;
4769 	case BIT(INNER_DST_MAC):
4770 		for (i = 0; i < ETH_ALEN; i++) {
4771 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4772 			       rule->tuples_mask.dst_mac[i]);
4773 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4774 			       rule->tuples_mask.dst_mac[i]);
4775 		}
4776 
4777 		return true;
4778 	case BIT(INNER_SRC_MAC):
4779 		for (i = 0; i < ETH_ALEN; i++) {
4780 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4781 			       rule->tuples_mask.src_mac[i]);
4782 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4783 			       rule->tuples_mask.src_mac[i]);
4784 		}
4785 
4786 		return true;
4787 	case BIT(INNER_VLAN_TAG_FST):
4788 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4789 		       rule->tuples_mask.vlan_tag1);
4790 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4791 		       rule->tuples_mask.vlan_tag1);
4792 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4793 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4794 
4795 		return true;
4796 	case BIT(INNER_ETH_TYPE):
4797 		calc_x(tmp_x_s, rule->tuples.ether_proto,
4798 		       rule->tuples_mask.ether_proto);
4799 		calc_y(tmp_y_s, rule->tuples.ether_proto,
4800 		       rule->tuples_mask.ether_proto);
4801 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4802 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4803 
4804 		return true;
4805 	case BIT(INNER_IP_TOS):
4806 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4807 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4808 
4809 		return true;
4810 	case BIT(INNER_IP_PROTO):
4811 		calc_x(*key_x, rule->tuples.ip_proto,
4812 		       rule->tuples_mask.ip_proto);
4813 		calc_y(*key_y, rule->tuples.ip_proto,
4814 		       rule->tuples_mask.ip_proto);
4815 
4816 		return true;
4817 	case BIT(INNER_SRC_IP):
4818 		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
4819 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
4820 		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
4821 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
4822 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4823 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4824 
4825 		return true;
4826 	case BIT(INNER_DST_IP):
4827 		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
4828 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
4829 		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
4830 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
4831 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4832 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4833 
4834 		return true;
4835 	case BIT(INNER_SRC_PORT):
4836 		calc_x(tmp_x_s, rule->tuples.src_port,
4837 		       rule->tuples_mask.src_port);
4838 		calc_y(tmp_y_s, rule->tuples.src_port,
4839 		       rule->tuples_mask.src_port);
4840 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4841 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4842 
4843 		return true;
4844 	case BIT(INNER_DST_PORT):
4845 		calc_x(tmp_x_s, rule->tuples.dst_port,
4846 		       rule->tuples_mask.dst_port);
4847 		calc_y(tmp_y_s, rule->tuples.dst_port,
4848 		       rule->tuples_mask.dst_port);
4849 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4850 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4851 
4852 		return true;
4853 	default:
4854 		return false;
4855 	}
4856 }
4857 
4858 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4859 				 u8 vf_id, u8 network_port_id)
4860 {
4861 	u32 port_number = 0;
4862 
4863 	if (port_type == HOST_PORT) {
4864 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4865 				pf_id);
4866 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4867 				vf_id);
4868 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4869 	} else {
4870 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4871 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
4872 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4873 	}
4874 
4875 	return port_number;
4876 }
4877 
4878 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4879 				       __le32 *key_x, __le32 *key_y,
4880 				       struct hclge_fd_rule *rule)
4881 {
4882 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4883 	u8 cur_pos = 0, tuple_size, shift_bits;
4884 	unsigned int i;
4885 
4886 	for (i = 0; i < MAX_META_DATA; i++) {
4887 		tuple_size = meta_data_key_info[i].key_length;
4888 		tuple_bit = key_cfg->meta_data_active & BIT(i);
4889 
4890 		switch (tuple_bit) {
4891 		case BIT(ROCE_TYPE):
4892 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4893 			cur_pos += tuple_size;
4894 			break;
4895 		case BIT(DST_VPORT):
4896 			port_number = hclge_get_port_number(HOST_PORT, 0,
4897 							    rule->vf_id, 0);
4898 			hnae3_set_field(meta_data,
4899 					GENMASK(cur_pos + tuple_size - 1,
4900 						cur_pos),
4900 					cur_pos, port_number);
4901 			cur_pos += tuple_size;
4902 			break;
4903 		default:
4904 			break;
4905 		}
4906 	}
4907 
4908 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4909 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4910 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
4911 
4912 	*key_x = cpu_to_le32(tmp_x << shift_bits);
4913 	*key_y = cpu_to_le32(tmp_y << shift_bits);
4914 }
4915 
4916 /* A complete key is a combination of the meta data key and the tuple key.
4917  * The meta data key is stored at the MSB region, the tuple key is stored at
4918  * the LSB region, and unused bits are filled with 0.
4919  */
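/* Illustrative layout, assuming the 400-bit (50-byte) max key length and
 * MAX_META_DATA_LENGTH bits of meta data: the tuple key is built up from
 * byte 0 of key_x/key_y, and the meta data key is placed at offset
 * max_key_length / 8 - MAX_META_DATA_LENGTH / 8, i.e. at the MSB end of
 * the key buffer.
 */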
4920 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4921 			    struct hclge_fd_rule *rule)
4922 {
4923 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4924 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4925 	u8 *cur_key_x, *cur_key_y;
4926 	unsigned int i;
4927 	int ret, tuple_size;
4928 	u8 meta_data_region;
4929 
4930 	memset(key_x, 0, sizeof(key_x));
4931 	memset(key_y, 0, sizeof(key_y));
4932 	cur_key_x = key_x;
4933 	cur_key_y = key_y;
4934 
4935 	for (i = 0; i < MAX_TUPLE; i++) {
4936 		bool tuple_valid;
4937 		u32 check_tuple;
4938 
4939 		tuple_size = tuple_key_info[i].key_length / 8;
4940 		check_tuple = key_cfg->tuple_active & BIT(i);
4941 
4942 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4943 						     cur_key_y, rule);
4944 		if (tuple_valid) {
4945 			cur_key_x += tuple_size;
4946 			cur_key_y += tuple_size;
4947 		}
4948 	}
4949 
4950 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4951 			MAX_META_DATA_LENGTH / 8;
4952 
4953 	hclge_fd_convert_meta_data(key_cfg,
4954 				   (__le32 *)(key_x + meta_data_region),
4955 				   (__le32 *)(key_y + meta_data_region),
4956 				   rule);
4957 
4958 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4959 				   true);
4960 	if (ret) {
4961 		dev_err(&hdev->pdev->dev,
4962 			"fd key_y config fail, loc=%d, ret=%d\n",
4963 			rule->location, ret);
4964 		return ret;
4965 	}
4966 
4967 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4968 				   true);
4969 	if (ret)
4970 		dev_err(&hdev->pdev->dev,
4971 			"fd key_x config fail, loc=%d, ret=%d\n",
4972 			rule->location, ret);
4973 	return ret;
4974 }
4975 
4976 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4977 			       struct hclge_fd_rule *rule)
4978 {
4979 	struct hclge_fd_ad_data ad_data;
4980 
4981 	ad_data.ad_id = rule->location;
4982 
4983 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4984 		ad_data.drop_packet = true;
4985 		ad_data.forward_to_direct_queue = false;
4986 		ad_data.queue_id = 0;
4987 	} else {
4988 		ad_data.drop_packet = false;
4989 		ad_data.forward_to_direct_queue = true;
4990 		ad_data.queue_id = rule->queue_id;
4991 	}
4992 
4993 	ad_data.use_counter = false;
4994 	ad_data.counter_id = 0;
4995 
4996 	ad_data.use_next_stage = false;
4997 	ad_data.next_input_key = 0;
4998 
4999 	ad_data.write_rule_id_to_bd = true;
5000 	ad_data.rule_id = rule->location;
5001 
5002 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5003 }
5004 
5005 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5006 			       struct ethtool_rx_flow_spec *fs, u32 *unused)
5007 {
5008 	struct ethtool_tcpip4_spec *tcp_ip4_spec;
5009 	struct ethtool_usrip4_spec *usr_ip4_spec;
5010 	struct ethtool_tcpip6_spec *tcp_ip6_spec;
5011 	struct ethtool_usrip6_spec *usr_ip6_spec;
5012 	struct ethhdr *ether_spec;
5013 
5014 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5015 		return -EINVAL;
5016 
5017 	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5018 		return -EOPNOTSUPP;
5019 
5020 	if ((fs->flow_type & FLOW_EXT) &&
5021 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5022 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5023 		return -EOPNOTSUPP;
5024 	}
5025 
5026 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5027 	case SCTP_V4_FLOW:
5028 	case TCP_V4_FLOW:
5029 	case UDP_V4_FLOW:
5030 		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5031 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5032 
5033 		if (!tcp_ip4_spec->ip4src)
5034 			*unused |= BIT(INNER_SRC_IP);
5035 
5036 		if (!tcp_ip4_spec->ip4dst)
5037 			*unused |= BIT(INNER_DST_IP);
5038 
5039 		if (!tcp_ip4_spec->psrc)
5040 			*unused |= BIT(INNER_SRC_PORT);
5041 
5042 		if (!tcp_ip4_spec->pdst)
5043 			*unused |= BIT(INNER_DST_PORT);
5044 
5045 		if (!tcp_ip4_spec->tos)
5046 			*unused |= BIT(INNER_IP_TOS);
5047 
5048 		break;
5049 	case IP_USER_FLOW:
5050 		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5051 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5052 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5053 
5054 		if (!usr_ip4_spec->ip4src)
5055 			*unused |= BIT(INNER_SRC_IP);
5056 
5057 		if (!usr_ip4_spec->ip4dst)
5058 			*unused |= BIT(INNER_DST_IP);
5059 
5060 		if (!usr_ip4_spec->tos)
5061 			*unused |= BIT(INNER_IP_TOS);
5062 
5063 		if (!usr_ip4_spec->proto)
5064 			*unused |= BIT(INNER_IP_PROTO);
5065 
5066 		if (usr_ip4_spec->l4_4_bytes)
5067 			return -EOPNOTSUPP;
5068 
5069 		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5070 			return -EOPNOTSUPP;
5071 
5072 		break;
5073 	case SCTP_V6_FLOW:
5074 	case TCP_V6_FLOW:
5075 	case UDP_V6_FLOW:
5076 		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5077 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5078 			BIT(INNER_IP_TOS);
5079 
5080 		/* check whether the src/dst ip addresses are used */
5081 		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5082 		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5083 			*unused |= BIT(INNER_SRC_IP);
5084 
5085 		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5086 		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5087 			*unused |= BIT(INNER_DST_IP);
5088 
5089 		if (!tcp_ip6_spec->psrc)
5090 			*unused |= BIT(INNER_SRC_PORT);
5091 
5092 		if (!tcp_ip6_spec->pdst)
5093 			*unused |= BIT(INNER_DST_PORT);
5094 
5095 		if (tcp_ip6_spec->tclass)
5096 			return -EOPNOTSUPP;
5097 
5098 		break;
5099 	case IPV6_USER_FLOW:
5100 		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5101 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5102 			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5103 			BIT(INNER_DST_PORT);
5104 
5105 		/* check whether the src/dst ip addresses are used */
5106 		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5107 		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5108 			*unused |= BIT(INNER_SRC_IP);
5109 
5110 		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5111 		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5112 			*unused |= BIT(INNER_DST_IP);
5113 
5114 		if (!usr_ip6_spec->l4_proto)
5115 			*unused |= BIT(INNER_IP_PROTO);
5116 
5117 		if (usr_ip6_spec->tclass)
5118 			return -EOPNOTSUPP;
5119 
5120 		if (usr_ip6_spec->l4_4_bytes)
5121 			return -EOPNOTSUPP;
5122 
5123 		break;
5124 	case ETHER_FLOW:
5125 		ether_spec = &fs->h_u.ether_spec;
5126 		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5127 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5128 			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5129 
5130 		if (is_zero_ether_addr(ether_spec->h_source))
5131 			*unused |= BIT(INNER_SRC_MAC);
5132 
5133 		if (is_zero_ether_addr(ether_spec->h_dest))
5134 			*unused |= BIT(INNER_DST_MAC);
5135 
5136 		if (!ether_spec->h_proto)
5137 			*unused |= BIT(INNER_ETH_TYPE);
5138 
5139 		break;
5140 	default:
5141 		return -EOPNOTSUPP;
5142 	}
5143 
5144 	if ((fs->flow_type & FLOW_EXT)) {
5145 		if (fs->h_ext.vlan_etype)
5146 			return -EOPNOTSUPP;
5147 		if (!fs->h_ext.vlan_tci)
5148 			*unused |= BIT(INNER_VLAN_TAG_FST);
5149 
5150 		if (fs->m_ext.vlan_tci) {
5151 			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5152 				return -EINVAL;
5153 		}
5154 	} else {
5155 		*unused |= BIT(INNER_VLAN_TAG_FST);
5156 	}
5157 
5158 	if (fs->flow_type & FLOW_MAC_EXT) {
5159 		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5160 			return -EOPNOTSUPP;
5161 
5162 		if (is_zero_ether_addr(fs->h_ext.h_dest))
5163 			*unused |= BIT(INNER_DST_MAC);
5164 		else
5165 			*unused &= ~(BIT(INNER_DST_MAC));
5166 	}
5167 
5168 	return 0;
5169 }
5170 
5171 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5172 {
5173 	struct hclge_fd_rule *rule = NULL;
5174 	struct hlist_node *node2;
5175 
5176 	spin_lock_bh(&hdev->fd_rule_lock);
5177 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5178 		if (rule->location >= location)
5179 			break;
5180 	}
5181 
5182 	spin_unlock_bh(&hdev->fd_rule_lock);
5183 
5184 	return rule && rule->location == location;
5185 }
5186 
5187 /* make sure to be called with fd_rule_lock held */
5188 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5189 				     struct hclge_fd_rule *new_rule,
5190 				     u16 location,
5191 				     bool is_add)
5192 {
5193 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
5194 	struct hlist_node *node2;
5195 
5196 	if (is_add && !new_rule)
5197 		return -EINVAL;
5198 
5199 	hlist_for_each_entry_safe(rule, node2,
5200 				  &hdev->fd_rule_list, rule_node) {
5201 		if (rule->location >= location)
5202 			break;
5203 		parent = rule;
5204 	}
5205 
5206 	if (rule && rule->location == location) {
5207 		hlist_del(&rule->rule_node);
5208 		kfree(rule);
5209 		hdev->hclge_fd_rule_num--;
5210 
5211 		if (!is_add) {
5212 			if (!hdev->hclge_fd_rule_num)
5213 				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5214 			clear_bit(location, hdev->fd_bmap);
5215 
5216 			return 0;
5217 		}
5218 	} else if (!is_add) {
5219 		dev_err(&hdev->pdev->dev,
5220 			"delete fail, rule %d does not exist\n",
5221 			location);
5222 		return -EINVAL;
5223 	}
5224 
5225 	INIT_HLIST_NODE(&new_rule->rule_node);
5226 
5227 	if (parent)
5228 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5229 	else
5230 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5231 
5232 	set_bit(location, hdev->fd_bmap);
5233 	hdev->hclge_fd_rule_num++;
5234 	hdev->fd_active_type = new_rule->rule_type;
5235 
5236 	return 0;
5237 }
5238 
5239 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5240 			      struct ethtool_rx_flow_spec *fs,
5241 			      struct hclge_fd_rule *rule)
5242 {
5243 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5244 
5245 	switch (flow_type) {
5246 	case SCTP_V4_FLOW:
5247 	case TCP_V4_FLOW:
5248 	case UDP_V4_FLOW:
5249 		rule->tuples.src_ip[IPV4_INDEX] =
5250 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5251 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5252 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5253 
5254 		rule->tuples.dst_ip[IPV4_INDEX] =
5255 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5256 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5257 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5258 
5259 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5260 		rule->tuples_mask.src_port =
5261 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5262 
5263 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5264 		rule->tuples_mask.dst_port =
5265 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5266 
5267 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5268 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5269 
5270 		rule->tuples.ether_proto = ETH_P_IP;
5271 		rule->tuples_mask.ether_proto = 0xFFFF;
5272 
5273 		break;
5274 	case IP_USER_FLOW:
5275 		rule->tuples.src_ip[IPV4_INDEX] =
5276 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5277 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5278 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5279 
5280 		rule->tuples.dst_ip[IPV4_INDEX] =
5281 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5282 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5283 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5284 
5285 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5286 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5287 
5288 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5289 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5290 
5291 		rule->tuples.ether_proto = ETH_P_IP;
5292 		rule->tuples_mask.ether_proto = 0xFFFF;
5293 
5294 		break;
5295 	case SCTP_V6_FLOW:
5296 	case TCP_V6_FLOW:
5297 	case UDP_V6_FLOW:
5298 		be32_to_cpu_array(rule->tuples.src_ip,
5299 				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5300 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5301 				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5302 
5303 		be32_to_cpu_array(rule->tuples.dst_ip,
5304 				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5305 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5306 				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5307 
5308 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5309 		rule->tuples_mask.src_port =
5310 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5311 
5312 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5313 		rule->tuples_mask.dst_port =
5314 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5315 
5316 		rule->tuples.ether_proto = ETH_P_IPV6;
5317 		rule->tuples_mask.ether_proto = 0xFFFF;
5318 
5319 		break;
5320 	case IPV6_USER_FLOW:
5321 		be32_to_cpu_array(rule->tuples.src_ip,
5322 				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5323 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5324 				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5325 
5326 		be32_to_cpu_array(rule->tuples.dst_ip,
5327 				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5328 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5329 				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5330 
5331 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5332 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5333 
5334 		rule->tuples.ether_proto = ETH_P_IPV6;
5335 		rule->tuples_mask.ether_proto = 0xFFFF;
5336 
5337 		break;
5338 	case ETHER_FLOW:
5339 		ether_addr_copy(rule->tuples.src_mac,
5340 				fs->h_u.ether_spec.h_source);
5341 		ether_addr_copy(rule->tuples_mask.src_mac,
5342 				fs->m_u.ether_spec.h_source);
5343 
5344 		ether_addr_copy(rule->tuples.dst_mac,
5345 				fs->h_u.ether_spec.h_dest);
5346 		ether_addr_copy(rule->tuples_mask.dst_mac,
5347 				fs->m_u.ether_spec.h_dest);
5348 
5349 		rule->tuples.ether_proto =
5350 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
5351 		rule->tuples_mask.ether_proto =
5352 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
5353 
5354 		break;
5355 	default:
5356 		return -EOPNOTSUPP;
5357 	}
5358 
5359 	switch (flow_type) {
5360 	case SCTP_V4_FLOW:
5361 	case SCTP_V6_FLOW:
5362 		rule->tuples.ip_proto = IPPROTO_SCTP;
5363 		rule->tuples_mask.ip_proto = 0xFF;
5364 		break;
5365 	case TCP_V4_FLOW:
5366 	case TCP_V6_FLOW:
5367 		rule->tuples.ip_proto = IPPROTO_TCP;
5368 		rule->tuples_mask.ip_proto = 0xFF;
5369 		break;
5370 	case UDP_V4_FLOW:
5371 	case UDP_V6_FLOW:
5372 		rule->tuples.ip_proto = IPPROTO_UDP;
5373 		rule->tuples_mask.ip_proto = 0xFF;
5374 		break;
5375 	default:
5376 		break;
5377 	}
5378 
5379 	if (fs->flow_type & FLOW_EXT) {
5380 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5381 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5382 	}
5383 
5384 	if (fs->flow_type & FLOW_MAC_EXT) {
5385 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5386 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5387 	}
5388 
5389 	return 0;
5390 }
5391 
5392 /* the caller must hold fd_rule_lock before calling this function */
5393 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5394 				struct hclge_fd_rule *rule)
5395 {
5396 	int ret;
5397 
5398 	if (!rule) {
5399 		dev_err(&hdev->pdev->dev,
5400 			"The flow director rule is NULL\n");
5401 		return -EINVAL;
5402 	}
5403 
5404 	/* it will never fail here, so there is no need to check the return value */
5405 	hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5406 
5407 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5408 	if (ret)
5409 		goto clear_rule;
5410 
5411 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5412 	if (ret)
5413 		goto clear_rule;
5414 
5415 	return 0;
5416 
5417 clear_rule:
5418 	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5419 	return ret;
5420 }
5421 
5422 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5423 			      struct ethtool_rxnfc *cmd)
5424 {
5425 	struct hclge_vport *vport = hclge_get_vport(handle);
5426 	struct hclge_dev *hdev = vport->back;
5427 	u16 dst_vport_id = 0, q_index = 0;
5428 	struct ethtool_rx_flow_spec *fs;
5429 	struct hclge_fd_rule *rule;
5430 	u32 unused = 0;
5431 	u8 action;
5432 	int ret;
5433 
5434 	if (!hnae3_dev_fd_supported(hdev))
5435 		return -EOPNOTSUPP;
5436 
5437 	if (!hdev->fd_en) {
5438 		dev_warn(&hdev->pdev->dev,
5439 			 "Please enable flow director first\n");
5440 		return -EOPNOTSUPP;
5441 	}
5442 
5443 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5444 
5445 	ret = hclge_fd_check_spec(hdev, fs, &unused);
5446 	if (ret) {
5447 		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5448 		return ret;
5449 	}
5450 
5451 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5452 		action = HCLGE_FD_ACTION_DROP_PACKET;
5453 	} else {
5454 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5455 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5456 		u16 tqps;
5457 
5458 		if (vf > hdev->num_req_vfs) {
5459 			dev_err(&hdev->pdev->dev,
5460 				"Error: vf id (%d) > max vf num (%d)\n",
5461 				vf, hdev->num_req_vfs);
5462 			return -EINVAL;
5463 		}
5464 
5465 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5466 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5467 
5468 		if (ring >= tqps) {
5469 			dev_err(&hdev->pdev->dev,
5470 				"Error: queue id (%d) > max tqp num (%d)\n",
5471 				ring, tqps - 1);
5472 			return -EINVAL;
5473 		}
5474 
5475 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5476 		q_index = ring;
5477 	}
5478 
5479 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5480 	if (!rule)
5481 		return -ENOMEM;
5482 
5483 	ret = hclge_fd_get_tuple(hdev, fs, rule);
5484 	if (ret) {
5485 		kfree(rule);
5486 		return ret;
5487 	}
5488 
5489 	rule->flow_type = fs->flow_type;
5490 
5491 	rule->location = fs->location;
5492 	rule->unused_tuple = unused;
5493 	rule->vf_id = dst_vport_id;
5494 	rule->queue_id = q_index;
5495 	rule->action = action;
5496 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
5497 
5498 	/* to avoid rule conflicts, when the user configures a rule via
5499 	 * ethtool, we need to clear all arfs rules first
5500 	 */
5501 	hclge_clear_arfs_rules(handle);
5502 
5503 	spin_lock_bh(&hdev->fd_rule_lock);
5504 	ret = hclge_fd_config_rule(hdev, rule);
5505 
5506 	spin_unlock_bh(&hdev->fd_rule_lock);
5507 
5508 	return ret;
5509 }
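
/* Illustrative usage (editor's example; the command and addresses are
 * assumptions, only the ethtool ntuple interface itself is standard):
 * a rule added from userspace with
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.10 dst-port 80 action 3
 * reaches hclge_add_fd_entry() as an ethtool_rx_flow_spec;
 * hclge_fd_get_tuple() fills rule->tuples from the spec and the configured
 * TCAM entry then steers matching packets to queue 3 of the PF. Using
 * "action -1" maps to RX_CLS_FLOW_DISC and hence
 * HCLGE_FD_ACTION_DROP_PACKET.
 */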
5510 
5511 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5512 			      struct ethtool_rxnfc *cmd)
5513 {
5514 	struct hclge_vport *vport = hclge_get_vport(handle);
5515 	struct hclge_dev *hdev = vport->back;
5516 	struct ethtool_rx_flow_spec *fs;
5517 	int ret;
5518 
5519 	if (!hnae3_dev_fd_supported(hdev))
5520 		return -EOPNOTSUPP;
5521 
5522 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5523 
5524 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5525 		return -EINVAL;
5526 
5527 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
5528 		dev_err(&hdev->pdev->dev,
5529 			"Delete failed, rule %d does not exist\n", fs->location);
5530 		return -ENOENT;
5531 	}
5532 
5533 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5534 				   NULL, false);
5535 	if (ret)
5536 		return ret;
5537 
5538 	spin_lock_bh(&hdev->fd_rule_lock);
5539 	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5540 
5541 	spin_unlock_bh(&hdev->fd_rule_lock);
5542 
5543 	return ret;
5544 }
5545 
5546 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5547 				     bool clear_list)
5548 {
5549 	struct hclge_vport *vport = hclge_get_vport(handle);
5550 	struct hclge_dev *hdev = vport->back;
5551 	struct hclge_fd_rule *rule;
5552 	struct hlist_node *node;
5553 	u16 location;
5554 
5555 	if (!hnae3_dev_fd_supported(hdev))
5556 		return;
5557 
5558 	spin_lock_bh(&hdev->fd_rule_lock);
5559 	for_each_set_bit(location, hdev->fd_bmap,
5560 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5561 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5562 				     NULL, false);
5563 
5564 	if (clear_list) {
5565 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5566 					  rule_node) {
5567 			hlist_del(&rule->rule_node);
5568 			kfree(rule);
5569 		}
5570 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5571 		hdev->hclge_fd_rule_num = 0;
5572 		bitmap_zero(hdev->fd_bmap,
5573 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5574 	}
5575 
5576 	spin_unlock_bh(&hdev->fd_rule_lock);
5577 }
5578 
5579 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5580 {
5581 	struct hclge_vport *vport = hclge_get_vport(handle);
5582 	struct hclge_dev *hdev = vport->back;
5583 	struct hclge_fd_rule *rule;
5584 	struct hlist_node *node;
5585 	int ret;
5586 
5587 	/* Return ok here, because reset error handling will check this
5588 	 * return value. If error is returned here, the reset process will
5589 	 * fail.
5590 	 */
5591 	if (!hnae3_dev_fd_supported(hdev))
5592 		return 0;
5593 
5594 	/* if fd is disabled, the rules should not be restored during reset */
5595 	if (!hdev->fd_en)
5596 		return 0;
5597 
5598 	spin_lock_bh(&hdev->fd_rule_lock);
5599 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5600 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5601 		if (!ret)
5602 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5603 
5604 		if (ret) {
5605 			dev_warn(&hdev->pdev->dev,
5606 				 "Restore rule %d failed, remove it\n",
5607 				 rule->location);
5608 			clear_bit(rule->location, hdev->fd_bmap);
5609 			hlist_del(&rule->rule_node);
5610 			kfree(rule);
5611 			hdev->hclge_fd_rule_num--;
5612 		}
5613 	}
5614 
5615 	if (hdev->hclge_fd_rule_num)
5616 		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5617 
5618 	spin_unlock_bh(&hdev->fd_rule_lock);
5619 
5620 	return 0;
5621 }
5622 
5623 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5624 				 struct ethtool_rxnfc *cmd)
5625 {
5626 	struct hclge_vport *vport = hclge_get_vport(handle);
5627 	struct hclge_dev *hdev = vport->back;
5628 
5629 	if (!hnae3_dev_fd_supported(hdev))
5630 		return -EOPNOTSUPP;
5631 
5632 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
5633 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5634 
5635 	return 0;
5636 }
5637 
5638 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5639 				  struct ethtool_rxnfc *cmd)
5640 {
5641 	struct hclge_vport *vport = hclge_get_vport(handle);
5642 	struct hclge_fd_rule *rule = NULL;
5643 	struct hclge_dev *hdev = vport->back;
5644 	struct ethtool_rx_flow_spec *fs;
5645 	struct hlist_node *node2;
5646 
5647 	if (!hnae3_dev_fd_supported(hdev))
5648 		return -EOPNOTSUPP;
5649 
5650 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5651 
5652 	spin_lock_bh(&hdev->fd_rule_lock);
5653 
5654 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5655 		if (rule->location >= fs->location)
5656 			break;
5657 	}
5658 
5659 	if (!rule || fs->location != rule->location) {
5660 		spin_unlock_bh(&hdev->fd_rule_lock);
5661 
5662 		return -ENOENT;
5663 	}
5664 
5665 	fs->flow_type = rule->flow_type;
5666 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5667 	case SCTP_V4_FLOW:
5668 	case TCP_V4_FLOW:
5669 	case UDP_V4_FLOW:
5670 		fs->h_u.tcp_ip4_spec.ip4src =
5671 				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5672 		fs->m_u.tcp_ip4_spec.ip4src =
5673 			rule->unused_tuple & BIT(INNER_SRC_IP) ?
5674 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5675 
5676 		fs->h_u.tcp_ip4_spec.ip4dst =
5677 				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5678 		fs->m_u.tcp_ip4_spec.ip4dst =
5679 			rule->unused_tuple & BIT(INNER_DST_IP) ?
5680 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5681 
5682 		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5683 		fs->m_u.tcp_ip4_spec.psrc =
5684 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5685 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5686 
5687 		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5688 		fs->m_u.tcp_ip4_spec.pdst =
5689 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5690 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5691 
5692 		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5693 		fs->m_u.tcp_ip4_spec.tos =
5694 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5695 				0 : rule->tuples_mask.ip_tos;
5696 
5697 		break;
5698 	case IP_USER_FLOW:
5699 		fs->h_u.usr_ip4_spec.ip4src =
5700 				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5701 		fs->m_u.usr_ip4_spec.ip4src =
5702 			rule->unused_tuple & BIT(INNER_SRC_IP) ?
5703 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5704 
5705 		fs->h_u.usr_ip4_spec.ip4dst =
5706 				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5707 		fs->m_u.usr_ip4_spec.ip4dst =
5708 			rule->unused_tuple & BIT(INNER_DST_IP) ?
5709 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5710 
5711 		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5712 		fs->m_u.usr_ip4_spec.tos =
5713 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5714 				0 : rule->tuples_mask.ip_tos;
5715 
5716 		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5717 		fs->m_u.usr_ip4_spec.proto =
5718 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5719 				0 : rule->tuples_mask.ip_proto;
5720 
5721 		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5722 
5723 		break;
5724 	case SCTP_V6_FLOW:
5725 	case TCP_V6_FLOW:
5726 	case UDP_V6_FLOW:
5727 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5728 				  rule->tuples.src_ip, IPV6_SIZE);
5729 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5730 			memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5731 			       sizeof(int) * IPV6_SIZE);
5732 		else
5733 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5734 					  rule->tuples_mask.src_ip, IPV6_SIZE);
5735 
5736 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5737 				  rule->tuples.dst_ip, IPV6_SIZE);
5738 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5739 			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5740 			       sizeof(int) * IPV6_SIZE);
5741 		else
5742 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5743 					  rule->tuples_mask.dst_ip, IPV6_SIZE);
5744 
5745 		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5746 		fs->m_u.tcp_ip6_spec.psrc =
5747 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5748 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5749 
5750 		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5751 		fs->m_u.tcp_ip6_spec.pdst =
5752 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5753 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5754 
5755 		break;
5756 	case IPV6_USER_FLOW:
5757 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5758 				  rule->tuples.src_ip, IPV6_SIZE);
5759 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5760 			memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5761 			       sizeof(int) * IPV6_SIZE);
5762 		else
5763 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5764 					  rule->tuples_mask.src_ip, IPV6_SIZE);
5765 
5766 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5767 				  rule->tuples.dst_ip, IPV6_SIZE);
5768 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5769 			memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
5770 			       sizeof(int) * IPV6_SIZE);
5771 		else
5772 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5773 					  rule->tuples_mask.dst_ip, IPV6_SIZE);
5774 
5775 		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5776 		fs->m_u.usr_ip6_spec.l4_proto =
5777 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5778 				0 : rule->tuples_mask.ip_proto;
5779 
5780 		break;
5781 	case ETHER_FLOW:
5782 		ether_addr_copy(fs->h_u.ether_spec.h_source,
5783 				rule->tuples.src_mac);
5784 		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5785 			eth_zero_addr(fs->m_u.ether_spec.h_source);
5786 		else
5787 			ether_addr_copy(fs->m_u.ether_spec.h_source,
5788 					rule->tuples_mask.src_mac);
5789 
5790 		ether_addr_copy(fs->h_u.ether_spec.h_dest,
5791 				rule->tuples.dst_mac);
5792 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
5793 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
5794 		else
5795 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
5796 					rule->tuples_mask.dst_mac);
5797 
5798 		fs->h_u.ether_spec.h_proto =
5799 				cpu_to_be16(rule->tuples.ether_proto);
5800 		fs->m_u.ether_spec.h_proto =
5801 				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5802 				0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5803 
5804 		break;
5805 	default:
5806 		spin_unlock_bh(&hdev->fd_rule_lock);
5807 		return -EOPNOTSUPP;
5808 	}
5809 
5810 	if (fs->flow_type & FLOW_EXT) {
5811 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5812 		fs->m_ext.vlan_tci =
5813 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5814 				cpu_to_be16(VLAN_VID_MASK) :
5815 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
5816 	}
5817 
5818 	if (fs->flow_type & FLOW_MAC_EXT) {
5819 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5820 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
5821 			eth_zero_addr(fs->m_ext.h_dest);
5822 		else
5823 			ether_addr_copy(fs->m_ext.h_dest,
5824 					rule->tuples_mask.dst_mac);
5825 	}
5826 
5827 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5828 		fs->ring_cookie = RX_CLS_FLOW_DISC;
5829 	} else {
5830 		u64 vf_id;
5831 
5832 		fs->ring_cookie = rule->queue_id;
5833 		vf_id = rule->vf_id;
5834 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5835 		fs->ring_cookie |= vf_id;
5836 	}
5837 
5838 	spin_unlock_bh(&hdev->fd_rule_lock);
5839 
5840 	return 0;
5841 }
5842 
5843 static int hclge_get_all_rules(struct hnae3_handle *handle,
5844 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
5845 {
5846 	struct hclge_vport *vport = hclge_get_vport(handle);
5847 	struct hclge_dev *hdev = vport->back;
5848 	struct hclge_fd_rule *rule;
5849 	struct hlist_node *node2;
5850 	int cnt = 0;
5851 
5852 	if (!hnae3_dev_fd_supported(hdev))
5853 		return -EOPNOTSUPP;
5854 
5855 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5856 
5857 	spin_lock_bh(&hdev->fd_rule_lock);
5858 	hlist_for_each_entry_safe(rule, node2,
5859 				  &hdev->fd_rule_list, rule_node) {
5860 		if (cnt == cmd->rule_cnt) {
5861 			spin_unlock_bh(&hdev->fd_rule_lock);
5862 			return -EMSGSIZE;
5863 		}
5864 
5865 		rule_locs[cnt] = rule->location;
5866 		cnt++;
5867 	}
5868 
5869 	spin_unlock_bh(&hdev->fd_rule_lock);
5870 
5871 	cmd->rule_cnt = cnt;
5872 
5873 	return 0;
5874 }
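
/* Editor's note (illustrative): dumping rules with "ethtool -n eth0"
 * exercises the getters above in sequence: hclge_get_fd_rule_cnt()
 * reports the rule count and table size, hclge_get_all_rules() returns
 * the occupied locations, and hclge_get_fd_rule_info() is then called
 * once per location to fill in the flow spec for display.
 */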
5875 
5876 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5877 				     struct hclge_fd_rule_tuples *tuples)
5878 {
5879 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5880 	tuples->ip_proto = fkeys->basic.ip_proto;
5881 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5882 
5883 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5884 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5885 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5886 	} else {
5887 		memcpy(tuples->src_ip,
5888 		       fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5889 		       sizeof(tuples->src_ip));
5890 		memcpy(tuples->dst_ip,
5891 		       fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5892 		       sizeof(tuples->dst_ip));
5893 	}
5894 }
5895 
5896 /* traverse all rules, check whether an existing rule has the same tuples */
5897 static struct hclge_fd_rule *
5898 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5899 			  const struct hclge_fd_rule_tuples *tuples)
5900 {
5901 	struct hclge_fd_rule *rule = NULL;
5902 	struct hlist_node *node;
5903 
5904 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5905 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5906 			return rule;
5907 	}
5908 
5909 	return NULL;
5910 }
5911 
5912 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5913 				     struct hclge_fd_rule *rule)
5914 {
5915 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5916 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5917 			     BIT(INNER_SRC_PORT);
5918 	rule->action = 0;
5919 	rule->vf_id = 0;
5920 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5921 	if (tuples->ether_proto == ETH_P_IP) {
5922 		if (tuples->ip_proto == IPPROTO_TCP)
5923 			rule->flow_type = TCP_V4_FLOW;
5924 		else
5925 			rule->flow_type = UDP_V4_FLOW;
5926 	} else {
5927 		if (tuples->ip_proto == IPPROTO_TCP)
5928 			rule->flow_type = TCP_V6_FLOW;
5929 		else
5930 			rule->flow_type = UDP_V6_FLOW;
5931 	}
5932 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5933 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
5934 }
5935 
5936 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5937 				      u16 flow_id, struct flow_keys *fkeys)
5938 {
5939 	struct hclge_vport *vport = hclge_get_vport(handle);
5940 	struct hclge_fd_rule_tuples new_tuples;
5941 	struct hclge_dev *hdev = vport->back;
5942 	struct hclge_fd_rule *rule;
5943 	u16 tmp_queue_id;
5944 	u16 bit_id;
5945 	int ret;
5946 
5947 	if (!hnae3_dev_fd_supported(hdev))
5948 		return -EOPNOTSUPP;
5949 
5950 	memset(&new_tuples, 0, sizeof(new_tuples));
5951 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5952 
5953 	spin_lock_bh(&hdev->fd_rule_lock);
5954 
5955 	/* if an fd rule already exists that was added by the user,
5956 	 * arfs should not take effect
5957 	 */
5958 	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5959 		spin_unlock_bh(&hdev->fd_rule_lock);
5960 
5961 		return -EOPNOTSUPP;
5962 	}
5963 
5964 	/* check whether a flow director filter already exists for this flow:
5965 	 * if not, create a new filter for it;
5966 	 * if a filter exists with a different queue id, modify the filter;
5967 	 * if a filter exists with the same queue id, do nothing
5968 	 */
5969 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5970 	if (!rule) {
5971 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5972 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5973 			spin_unlock_bh(&hdev->fd_rule_lock);
5974 
5975 			return -ENOSPC;
5976 		}
5977 
5978 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
5979 		if (!rule) {
5980 			spin_unlock_bh(&hdev->fd_rule_lock);
5981 
5982 			return -ENOMEM;
5983 		}
5984 
5985 		set_bit(bit_id, hdev->fd_bmap);
5986 		rule->location = bit_id;
5987 		rule->flow_id = flow_id;
5988 		rule->queue_id = queue_id;
5989 		hclge_fd_build_arfs_rule(&new_tuples, rule);
5990 		ret = hclge_fd_config_rule(hdev, rule);
5991 
5992 		spin_unlock_bh(&hdev->fd_rule_lock);
5993 
5994 		if (ret)
5995 			return ret;
5996 
5997 		return rule->location;
5998 	}
5999 
6000 	spin_unlock_bh(&hdev->fd_rule_lock);
6001 
6002 	if (rule->queue_id == queue_id)
6003 		return rule->location;
6004 
6005 	tmp_queue_id = rule->queue_id;
6006 	rule->queue_id = queue_id;
6007 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6008 	if (ret) {
6009 		rule->queue_id = tmp_queue_id;
6010 		return ret;
6011 	}
6012 
6013 	return rule->location;
6014 }
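
/* Editor's note (illustrative, based on the standard aRFS wiring; the call
 * path is an assumption, not stated in this file): with CONFIG_RFS_ACCEL
 * enabled, the stack's ndo_rx_flow_steer callback ends up here with the
 * dissected flow_keys of a received flow. The returned rule->location
 * doubles as the filter id that rps_may_expire_flow() checks in
 * hclge_rfs_filter_expire() below when aging out rules the scheduler no
 * longer cares about.
 */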
6015 
6016 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6017 {
6018 #ifdef CONFIG_RFS_ACCEL
6019 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6020 	struct hclge_fd_rule *rule;
6021 	struct hlist_node *node;
6022 	HLIST_HEAD(del_list);
6023 
6024 	spin_lock_bh(&hdev->fd_rule_lock);
6025 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6026 		spin_unlock_bh(&hdev->fd_rule_lock);
6027 		return;
6028 	}
6029 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6030 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6031 					rule->flow_id, rule->location)) {
6032 			hlist_del_init(&rule->rule_node);
6033 			hlist_add_head(&rule->rule_node, &del_list);
6034 			hdev->hclge_fd_rule_num--;
6035 			clear_bit(rule->location, hdev->fd_bmap);
6036 		}
6037 	}
6038 	spin_unlock_bh(&hdev->fd_rule_lock);
6039 
6040 	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6041 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6042 				     rule->location, NULL, false);
6043 		kfree(rule);
6044 	}
6045 #endif
6046 }
6047 
6048 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6049 {
6050 #ifdef CONFIG_RFS_ACCEL
6051 	struct hclge_vport *vport = hclge_get_vport(handle);
6052 	struct hclge_dev *hdev = vport->back;
6053 
6054 	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6055 		hclge_del_all_fd_entries(handle, true);
6056 #endif
6057 }
6058 
6059 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6060 {
6061 	struct hclge_vport *vport = hclge_get_vport(handle);
6062 	struct hclge_dev *hdev = vport->back;
6063 
6064 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6065 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6066 }
6067 
6068 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6069 {
6070 	struct hclge_vport *vport = hclge_get_vport(handle);
6071 	struct hclge_dev *hdev = vport->back;
6072 
6073 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6074 }
6075 
6076 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6077 {
6078 	struct hclge_vport *vport = hclge_get_vport(handle);
6079 	struct hclge_dev *hdev = vport->back;
6080 
6081 	return hdev->rst_stats.hw_reset_done_cnt;
6082 }
6083 
6084 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6085 {
6086 	struct hclge_vport *vport = hclge_get_vport(handle);
6087 	struct hclge_dev *hdev = vport->back;
6088 	bool clear;
6089 
6090 	hdev->fd_en = enable;
6091 	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6092 	if (!enable)
6093 		hclge_del_all_fd_entries(handle, clear);
6094 	else
6095 		hclge_restore_fd_entries(handle);
6096 }
6097 
6098 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6099 {
6100 	struct hclge_desc desc;
6101 	struct hclge_config_mac_mode_cmd *req =
6102 		(struct hclge_config_mac_mode_cmd *)desc.data;
6103 	u32 loop_en = 0;
6104 	int ret;
6105 
6106 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6107 
6108 	if (enable) {
6109 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6110 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6111 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6112 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6113 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6114 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6115 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6116 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6117 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6118 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6119 	}
6120 
6121 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6122 
6123 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6124 	if (ret)
6125 		dev_err(&hdev->pdev->dev,
6126 			"mac enable fail, ret =%d.\n", ret);
6127 }
6128 
6129 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6130 {
6131 	struct hclge_config_mac_mode_cmd *req;
6132 	struct hclge_desc desc;
6133 	u32 loop_en;
6134 	int ret;
6135 
6136 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6137 	/* 1 Read out the MAC mode config first */
6138 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6139 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6140 	if (ret) {
6141 		dev_err(&hdev->pdev->dev,
6142 			"mac loopback get fail, ret =%d.\n", ret);
6143 		return ret;
6144 	}
6145 
6146 	/* 2 Then setup the loopback flag */
6147 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6148 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6149 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6150 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6151 
6152 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6153 
6154 	/* 3 Config mac work mode with the loopback flag
6155 	 * and its original configuration parameters
6156 	 */
6157 	hclge_cmd_reuse_desc(&desc, false);
6158 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6159 	if (ret)
6160 		dev_err(&hdev->pdev->dev,
6161 			"mac loopback set fail, ret =%d.\n", ret);
6162 	return ret;
6163 }
6164 
6165 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6166 				     enum hnae3_loop loop_mode)
6167 {
6168 #define HCLGE_SERDES_RETRY_MS	10
6169 #define HCLGE_SERDES_RETRY_NUM	100
6170 
6171 #define HCLGE_MAC_LINK_STATUS_MS   10
6172 #define HCLGE_MAC_LINK_STATUS_NUM  100
6173 #define HCLGE_MAC_LINK_STATUS_DOWN 0
6174 #define HCLGE_MAC_LINK_STATUS_UP   1
6175 
6176 	struct hclge_serdes_lb_cmd *req;
6177 	struct hclge_desc desc;
6178 	int mac_link_ret = 0;
6179 	int ret, i = 0;
6180 	u8 loop_mode_b;
6181 
6182 	req = (struct hclge_serdes_lb_cmd *)desc.data;
6183 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6184 
6185 	switch (loop_mode) {
6186 	case HNAE3_LOOP_SERIAL_SERDES:
6187 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6188 		break;
6189 	case HNAE3_LOOP_PARALLEL_SERDES:
6190 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6191 		break;
6192 	default:
6193 		dev_err(&hdev->pdev->dev,
6194 			"unsupported serdes loopback mode %d\n", loop_mode);
6195 		return -ENOTSUPP;
6196 	}
6197 
6198 	if (en) {
6199 		req->enable = loop_mode_b;
6200 		req->mask = loop_mode_b;
6201 		mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
6202 	} else {
6203 		req->mask = loop_mode_b;
6204 		mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
6205 	}
6206 
6207 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6208 	if (ret) {
6209 		dev_err(&hdev->pdev->dev,
6210 			"serdes loopback set fail, ret = %d\n", ret);
6211 		return ret;
6212 	}
6213 
6214 	do {
6215 		msleep(HCLGE_SERDES_RETRY_MS);
6216 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6217 					   true);
6218 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6219 		if (ret) {
6220 			dev_err(&hdev->pdev->dev,
6221 				"serdes loopback get fail, ret = %d\n", ret);
6222 			return ret;
6223 		}
6224 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
6225 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6226 
6227 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6228 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6229 		return -EBUSY;
6230 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6231 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6232 		return -EIO;
6233 	}
6234 
6235 	hclge_cfg_mac_mode(hdev, en);
6236 
6237 	i = 0;
6238 	do {
6239 		/* serdes internal loopback, independent of the network cable */
6240 		msleep(HCLGE_MAC_LINK_STATUS_MS);
6241 		ret = hclge_get_mac_link_status(hdev);
6242 		if (ret == mac_link_ret)
6243 			return 0;
6244 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6245 
6246 	dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
6247 
6248 	return -EBUSY;
6249 }
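
/* Editor's note (derived from the constants above): the two polling loops
 * bound the wait to roughly HCLGE_SERDES_RETRY_NUM * HCLGE_SERDES_RETRY_MS
 * = 100 * 10 ms = 1 s for the firmware to report the DONE bit, plus up to
 * HCLGE_MAC_LINK_STATUS_NUM * HCLGE_MAC_LINK_STATUS_MS = 1 s more for the
 * MAC link state to match the requested loopback mode.
 */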
6250 
6251 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6252 			    int stream_id, bool enable)
6253 {
6254 	struct hclge_desc desc;
6255 	struct hclge_cfg_com_tqp_queue_cmd *req =
6256 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6257 	int ret;
6258 
6259 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6260 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6261 	req->stream_id = cpu_to_le16(stream_id);
6262 	if (enable)
6263 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6264 
6265 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6266 	if (ret)
6267 		dev_err(&hdev->pdev->dev,
6268 			"Tqp enable fail, status =%d.\n", ret);
6269 	return ret;
6270 }
6271 
6272 static int hclge_set_loopback(struct hnae3_handle *handle,
6273 			      enum hnae3_loop loop_mode, bool en)
6274 {
6275 	struct hclge_vport *vport = hclge_get_vport(handle);
6276 	struct hnae3_knic_private_info *kinfo;
6277 	struct hclge_dev *hdev = vport->back;
6278 	int i, ret;
6279 
6280 	switch (loop_mode) {
6281 	case HNAE3_LOOP_APP:
6282 		ret = hclge_set_app_loopback(hdev, en);
6283 		break;
6284 	case HNAE3_LOOP_SERIAL_SERDES:
6285 	case HNAE3_LOOP_PARALLEL_SERDES:
6286 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6287 		break;
6288 	default:
6289 		ret = -ENOTSUPP;
6290 		dev_err(&hdev->pdev->dev,
6291 			"loop_mode %d is not supported\n", loop_mode);
6292 		break;
6293 	}
6294 
6295 	if (ret)
6296 		return ret;
6297 
6298 	kinfo = &vport->nic.kinfo;
6299 	for (i = 0; i < kinfo->num_tqps; i++) {
6300 		ret = hclge_tqp_enable(hdev, i, 0, en);
6301 		if (ret)
6302 			return ret;
6303 	}
6304 
6305 	return 0;
6306 }
6307 
6308 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6309 {
6310 	struct hclge_vport *vport = hclge_get_vport(handle);
6311 	struct hnae3_knic_private_info *kinfo;
6312 	struct hnae3_queue *queue;
6313 	struct hclge_tqp *tqp;
6314 	int i;
6315 
6316 	kinfo = &vport->nic.kinfo;
6317 	for (i = 0; i < kinfo->num_tqps; i++) {
6318 		queue = handle->kinfo.tqp[i];
6319 		tqp = container_of(queue, struct hclge_tqp, q);
6320 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6321 	}
6322 }
6323 
6324 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6325 {
6326 	struct hclge_vport *vport = hclge_get_vport(handle);
6327 	struct hclge_dev *hdev = vport->back;
6328 
6329 	if (enable) {
6330 		hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6331 	} else {
6332 		/* Set the DOWN flag here to prevent the service task from
6333 		 * being scheduled again
6334 		 */
6335 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
6336 		cancel_delayed_work_sync(&hdev->service_task);
6337 		clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6338 	}
6339 }
6340 
6341 static int hclge_ae_start(struct hnae3_handle *handle)
6342 {
6343 	struct hclge_vport *vport = hclge_get_vport(handle);
6344 	struct hclge_dev *hdev = vport->back;
6345 
6346 	/* mac enable */
6347 	hclge_cfg_mac_mode(hdev, true);
6348 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6349 	hdev->hw.mac.link = 0;
6350 
6351 	/* reset tqp stats */
6352 	hclge_reset_tqp_stats(handle);
6353 
6354 	hclge_mac_start_phy(hdev);
6355 
6356 	return 0;
6357 }
6358 
6359 static void hclge_ae_stop(struct hnae3_handle *handle)
6360 {
6361 	struct hclge_vport *vport = hclge_get_vport(handle);
6362 	struct hclge_dev *hdev = vport->back;
6363 	int i;
6364 
6365 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
6366 
6367 	hclge_clear_arfs_rules(handle);
6368 
6369 	/* If it is not a PF reset, the firmware will disable the MAC,
6370 	 * so we only need to stop the phy here.
6371 	 */
6372 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6373 	    hdev->reset_type != HNAE3_FUNC_RESET) {
6374 		hclge_mac_stop_phy(hdev);
6375 		hclge_update_link_status(hdev);
6376 		return;
6377 	}
6378 
6379 	for (i = 0; i < handle->kinfo.num_tqps; i++)
6380 		hclge_reset_tqp(handle, i);
6381 
6382 	/* Mac disable */
6383 	hclge_cfg_mac_mode(hdev, false);
6384 
6385 	hclge_mac_stop_phy(hdev);
6386 
6387 	/* reset tqp stats */
6388 	hclge_reset_tqp_stats(handle);
6389 	hclge_update_link_status(hdev);
6390 }
6391 
6392 int hclge_vport_start(struct hclge_vport *vport)
6393 {
6394 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6395 	vport->last_active_jiffies = jiffies;
6396 	return 0;
6397 }
6398 
6399 void hclge_vport_stop(struct hclge_vport *vport)
6400 {
6401 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6402 }
6403 
6404 static int hclge_client_start(struct hnae3_handle *handle)
6405 {
6406 	struct hclge_vport *vport = hclge_get_vport(handle);
6407 
6408 	return hclge_vport_start(vport);
6409 }
6410 
6411 static void hclge_client_stop(struct hnae3_handle *handle)
6412 {
6413 	struct hclge_vport *vport = hclge_get_vport(handle);
6414 
6415 	hclge_vport_stop(vport);
6416 }
6417 
6418 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6419 					 u16 cmdq_resp, u8  resp_code,
6420 					 enum hclge_mac_vlan_tbl_opcode op)
6421 {
6422 	struct hclge_dev *hdev = vport->back;
6423 
6424 	if (cmdq_resp) {
6425 		dev_err(&hdev->pdev->dev,
6426 			"cmdq execute failed for get_mac_vlan_cmd_status, status=%d.\n",
6427 			cmdq_resp);
6428 		return -EIO;
6429 	}
6430 
6431 	if (op == HCLGE_MAC_VLAN_ADD) {
6432 		if ((!resp_code) || (resp_code == 1)) {
6433 			return 0;
6434 		} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6435 			dev_err(&hdev->pdev->dev,
6436 				"add mac addr failed for uc_overflow.\n");
6437 			return -ENOSPC;
6438 		} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6439 			dev_err(&hdev->pdev->dev,
6440 				"add mac addr failed for mc_overflow.\n");
6441 			return -ENOSPC;
6442 		}
6443 
6444 		dev_err(&hdev->pdev->dev,
6445 			"add mac addr failed for undefined, code=%u.\n",
6446 			resp_code);
6447 		return -EIO;
6448 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
6449 		if (!resp_code) {
6450 			return 0;
6451 		} else if (resp_code == 1) {
6452 			dev_dbg(&hdev->pdev->dev,
6453 				"remove mac addr failed for miss.\n");
6454 			return -ENOENT;
6455 		}
6456 
6457 		dev_err(&hdev->pdev->dev,
6458 			"remove mac addr failed for undefined, code=%u.\n",
6459 			resp_code);
6460 		return -EIO;
6461 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
6462 		if (!resp_code) {
6463 			return 0;
6464 		} else if (resp_code == 1) {
6465 			dev_dbg(&hdev->pdev->dev,
6466 				"lookup mac addr failed for miss.\n");
6467 			return -ENOENT;
6468 		}
6469 
6470 		dev_err(&hdev->pdev->dev,
6471 			"lookup mac addr failed for undefined, code=%u.\n",
6472 			resp_code);
6473 		return -EIO;
6474 	}
6475 
6476 	dev_err(&hdev->pdev->dev,
6477 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6478 
6479 	return -EINVAL;
6480 }
6481 
6482 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6483 {
6484 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6485 
6486 	unsigned int word_num;
6487 	unsigned int bit_num;
6488 
6489 	if (vfid > 255 || vfid < 0)
6490 		return -EIO;
6491 
6492 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6493 		word_num = vfid / 32;
6494 		bit_num  = vfid % 32;
6495 		if (clr)
6496 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6497 		else
6498 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6499 	} else {
6500 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6501 		bit_num  = vfid % 32;
6502 		if (clr)
6503 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6504 		else
6505 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6506 	}
6507 
6508 	return 0;
6509 }
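
/* Worked example (editor's illustration): the VF bitmap spans desc[1] and
 * desc[2], with the first 192 function ids in desc[1]. For vfid = 40:
 * 40 < 192, so word_num = 40 / 32 = 1 and bit_num = 40 % 32 = 8, i.e.
 * bit 8 of desc[1].data[1]. For vfid = 200: word_num = (200 - 192) / 32 = 0
 * and bit_num = 200 % 32 = 8, i.e. bit 8 of desc[2].data[0].
 */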
6510 
6511 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6512 {
6513 #define HCLGE_DESC_NUMBER 3
6514 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6515 	int i, j;
6516 
6517 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6518 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6519 			if (desc[i].data[j])
6520 				return false;
6521 
6522 	return true;
6523 }
6524 
6525 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6526 				   const u8 *addr, bool is_mc)
6527 {
6528 	const unsigned char *mac_addr = addr;
6529 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6530 		       (mac_addr[0]) | (mac_addr[1] << 8);
6531 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6532 
6533 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6534 	if (is_mc) {
6535 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6536 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6537 	}
6538 
6539 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6540 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6541 }
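
/* Worked example (editor's illustration): for addr = 00:11:22:33:44:55 the
 * packing above yields high_val = 0x22 << 16 | 0x33 << 24 | 0x00 | 0x11 << 8
 * = 0x33221100 and low_val = 0x44 | 0x55 << 8 = 0x5544, so the entry stores
 * mac_addr_hi32 = 0x33221100 and mac_addr_lo16 = 0x5544, i.e. addr[0] ends
 * up in the least significant byte.
 */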
6542 
6543 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6544 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
6545 {
6546 	struct hclge_dev *hdev = vport->back;
6547 	struct hclge_desc desc;
6548 	u8 resp_code;
6549 	u16 retval;
6550 	int ret;
6551 
6552 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6553 
6554 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6555 
6556 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6557 	if (ret) {
6558 		dev_err(&hdev->pdev->dev,
6559 			"del mac addr failed for cmd_send, ret =%d.\n",
6560 			ret);
6561 		return ret;
6562 	}
6563 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6564 	retval = le16_to_cpu(desc.retval);
6565 
6566 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6567 					     HCLGE_MAC_VLAN_REMOVE);
6568 }
6569 
6570 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6571 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
6572 				     struct hclge_desc *desc,
6573 				     bool is_mc)
6574 {
6575 	struct hclge_dev *hdev = vport->back;
6576 	u8 resp_code;
6577 	u16 retval;
6578 	int ret;
6579 
6580 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6581 	if (is_mc) {
6582 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6583 		memcpy(desc[0].data,
6584 		       req,
6585 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6586 		hclge_cmd_setup_basic_desc(&desc[1],
6587 					   HCLGE_OPC_MAC_VLAN_ADD,
6588 					   true);
6589 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6590 		hclge_cmd_setup_basic_desc(&desc[2],
6591 					   HCLGE_OPC_MAC_VLAN_ADD,
6592 					   true);
6593 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
6594 	} else {
6595 		memcpy(desc[0].data,
6596 		       req,
6597 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6598 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
6599 	}
6600 	if (ret) {
6601 		dev_err(&hdev->pdev->dev,
6602 			"lookup mac addr failed for cmd_send, ret =%d.\n",
6603 			ret);
6604 		return ret;
6605 	}
6606 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6607 	retval = le16_to_cpu(desc[0].retval);
6608 
6609 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6610 					     HCLGE_MAC_VLAN_LKUP);
6611 }
6612 
6613 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6614 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
6615 				  struct hclge_desc *mc_desc)
6616 {
6617 	struct hclge_dev *hdev = vport->back;
6618 	int cfg_status;
6619 	u8 resp_code;
6620 	u16 retval;
6621 	int ret;
6622 
6623 	if (!mc_desc) {
6624 		struct hclge_desc desc;
6625 
6626 		hclge_cmd_setup_basic_desc(&desc,
6627 					   HCLGE_OPC_MAC_VLAN_ADD,
6628 					   false);
6629 		memcpy(desc.data, req,
6630 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6631 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6632 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6633 		retval = le16_to_cpu(desc.retval);
6634 
6635 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6636 							   resp_code,
6637 							   HCLGE_MAC_VLAN_ADD);
6638 	} else {
6639 		hclge_cmd_reuse_desc(&mc_desc[0], false);
6640 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6641 		hclge_cmd_reuse_desc(&mc_desc[1], false);
6642 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6643 		hclge_cmd_reuse_desc(&mc_desc[2], false);
6644 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6645 		memcpy(mc_desc[0].data, req,
6646 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6647 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6648 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6649 		retval = le16_to_cpu(mc_desc[0].retval);
6650 
6651 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6652 							   resp_code,
6653 							   HCLGE_MAC_VLAN_ADD);
6654 	}
6655 
6656 	if (ret) {
6657 		dev_err(&hdev->pdev->dev,
6658 			"add mac addr failed for cmd_send, ret =%d.\n",
6659 			ret);
6660 		return ret;
6661 	}
6662 
6663 	return cfg_status;
6664 }
6665 
6666 static int hclge_init_umv_space(struct hclge_dev *hdev)
6667 {
6668 	u16 allocated_size = 0;
6669 	int ret;
6670 
6671 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6672 				  true);
6673 	if (ret)
6674 		return ret;
6675 
6676 	if (allocated_size < hdev->wanted_umv_size)
6677 		dev_warn(&hdev->pdev->dev,
6678 			 "Alloc umv space less than wanted, want %d, got %d\n",
6679 			 hdev->wanted_umv_size, allocated_size);
6680 
6681 	mutex_init(&hdev->umv_mutex);
6682 	hdev->max_umv_size = allocated_size;
6683 	/* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
6684 	 * reserve some unicast mac vlan table entries shared by the pf
6685 	 * and its vfs (see the worked example after this function).
6686 	 */
6687 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6688 	hdev->share_umv_size = hdev->priv_umv_size +
6689 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
6690 
6691 	return 0;
6692 }
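
/* Worked example (editor's illustration; the sizes are assumptions): with
 * the request fully granted as max_umv_size = 3072 and num_req_vfs = 8, the
 * table is split across 8 + 2 = 10 consumers, so each gets priv_umv_size =
 * 3072 / 10 = 307 private entries, and the shared pool holds
 * share_umv_size = 307 + 3072 % 10 = 309 entries.
 */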
6693 
6694 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6695 {
6696 	int ret;
6697 
6698 	if (hdev->max_umv_size > 0) {
6699 		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6700 					  false);
6701 		if (ret)
6702 			return ret;
6703 		hdev->max_umv_size = 0;
6704 	}
6705 	mutex_destroy(&hdev->umv_mutex);
6706 
6707 	return 0;
6708 }
6709 
6710 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6711 			       u16 *allocated_size, bool is_alloc)
6712 {
6713 	struct hclge_umv_spc_alc_cmd *req;
6714 	struct hclge_desc desc;
6715 	int ret;
6716 
6717 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6718 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6719 	if (!is_alloc)
6720 		hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
6721 
6722 	req->space_size = cpu_to_le32(space_size);
6723 
6724 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6725 	if (ret) {
6726 		dev_err(&hdev->pdev->dev,
6727 			"%s umv space failed for cmd_send, ret =%d\n",
6728 			is_alloc ? "allocate" : "free", ret);
6729 		return ret;
6730 	}
6731 
6732 	if (is_alloc && allocated_size)
6733 		*allocated_size = le32_to_cpu(desc.data[1]);
6734 
6735 	return 0;
6736 }
6737 
6738 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6739 {
6740 	struct hclge_vport *vport;
6741 	int i;
6742 
6743 	for (i = 0; i < hdev->num_alloc_vport; i++) {
6744 		vport = &hdev->vport[i];
6745 		vport->used_umv_num = 0;
6746 	}
6747 
6748 	mutex_lock(&hdev->umv_mutex);
6749 	hdev->share_umv_size = hdev->priv_umv_size +
6750 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
6751 	mutex_unlock(&hdev->umv_mutex);
6752 }
6753 
6754 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6755 {
6756 	struct hclge_dev *hdev = vport->back;
6757 	bool is_full;
6758 
6759 	mutex_lock(&hdev->umv_mutex);
6760 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6761 		   hdev->share_umv_size == 0);
6762 	mutex_unlock(&hdev->umv_mutex);
6763 
6764 	return is_full;
6765 }
6766 
6767 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6768 {
6769 	struct hclge_dev *hdev = vport->back;
6770 
6771 	mutex_lock(&hdev->umv_mutex);
6772 	if (is_free) {
6773 		if (vport->used_umv_num > hdev->priv_umv_size)
6774 			hdev->share_umv_size++;
6775 
6776 		if (vport->used_umv_num > 0)
6777 			vport->used_umv_num--;
6778 	} else {
6779 		if (vport->used_umv_num >= hdev->priv_umv_size &&
6780 		    hdev->share_umv_size > 0)
6781 			hdev->share_umv_size--;
6782 		vport->used_umv_num++;
6783 	}
6784 	mutex_unlock(&hdev->umv_mutex);
6785 }
6786 
6787 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6788 			     const unsigned char *addr)
6789 {
6790 	struct hclge_vport *vport = hclge_get_vport(handle);
6791 
6792 	return hclge_add_uc_addr_common(vport, addr);
6793 }
6794 
6795 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6796 			     const unsigned char *addr)
6797 {
6798 	struct hclge_dev *hdev = vport->back;
6799 	struct hclge_mac_vlan_tbl_entry_cmd req;
6800 	struct hclge_desc desc;
6801 	u16 egress_port = 0;
6802 	int ret;
6803 
6804 	/* mac addr check */
6805 	if (is_zero_ether_addr(addr) ||
6806 	    is_broadcast_ether_addr(addr) ||
6807 	    is_multicast_ether_addr(addr)) {
6808 		dev_err(&hdev->pdev->dev,
6809 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6810 			 addr, is_zero_ether_addr(addr),
6811 			 is_broadcast_ether_addr(addr),
6812 			 is_multicast_ether_addr(addr));
6813 		return -EINVAL;
6814 	}
6815 
6816 	memset(&req, 0, sizeof(req));
6817 
6818 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6819 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6820 
6821 	req.egress_port = cpu_to_le16(egress_port);
6822 
6823 	hclge_prepare_mac_addr(&req, addr, false);
6824 
6825 	/* Lookup the mac address in the mac_vlan table, and add
6826 	 * it if the entry does not exist. Duplicate unicast entries
6827 	 * are not allowed in the mac vlan table.
6828 	 */
6829 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6830 	if (ret == -ENOENT) {
6831 		if (!hclge_is_umv_space_full(vport)) {
6832 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6833 			if (!ret)
6834 				hclge_update_umv_space(vport, false);
6835 			return ret;
6836 		}
6837 
6838 		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6839 			hdev->priv_umv_size);
6840 
6841 		return -ENOSPC;
6842 	}
6843 
6844 	/* check if we just hit the duplicate */
6845 	if (!ret) {
6846 		dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6847 			 vport->vport_id, addr);
6848 		return 0;
6849 	}
6850 
6851 	dev_err(&hdev->pdev->dev,
6852 		"PF failed to add unicast entry(%pM) in the MAC table\n",
6853 		addr);
6854 
6855 	return ret;
6856 }
6857 
6858 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6859 			    const unsigned char *addr)
6860 {
6861 	struct hclge_vport *vport = hclge_get_vport(handle);
6862 
6863 	return hclge_rm_uc_addr_common(vport, addr);
6864 }
6865 
6866 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6867 			    const unsigned char *addr)
6868 {
6869 	struct hclge_dev *hdev = vport->back;
6870 	struct hclge_mac_vlan_tbl_entry_cmd req;
6871 	int ret;
6872 
6873 	/* mac addr check */
6874 	if (is_zero_ether_addr(addr) ||
6875 	    is_broadcast_ether_addr(addr) ||
6876 	    is_multicast_ether_addr(addr)) {
6877 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
6878 			addr);
6879 		return -EINVAL;
6880 	}
6881 
6882 	memset(&req, 0, sizeof(req));
6883 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6884 	hclge_prepare_mac_addr(&req, addr, false);
6885 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
6886 	if (!ret)
6887 		hclge_update_umv_space(vport, true);
6888 
6889 	return ret;
6890 }
6891 
6892 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6893 			     const unsigned char *addr)
6894 {
6895 	struct hclge_vport *vport = hclge_get_vport(handle);
6896 
6897 	return hclge_add_mc_addr_common(vport, addr);
6898 }
6899 
6900 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6901 			     const unsigned char *addr)
6902 {
6903 	struct hclge_dev *hdev = vport->back;
6904 	struct hclge_mac_vlan_tbl_entry_cmd req;
6905 	struct hclge_desc desc[3];
6906 	int status;
6907 
6908 	/* mac addr check */
6909 	if (!is_multicast_ether_addr(addr)) {
6910 		dev_err(&hdev->pdev->dev,
6911 			"Add mc mac err! invalid mac:%pM.\n",
6912 			 addr);
6913 		return -EINVAL;
6914 	}
6915 	memset(&req, 0, sizeof(req));
6916 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6917 	hclge_prepare_mac_addr(&req, addr, true);
6918 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6919 	if (status) {
6920 		/* This mac addr does not exist, add a new entry for it */
6921 		memset(desc[0].data, 0, sizeof(desc[0].data));
6922 		memset(desc[1].data, 0, sizeof(desc[0].data));
6923 		memset(desc[2].data, 0, sizeof(desc[0].data));
6924 	}
6925 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
6926 	if (status)
6927 		return status;
6928 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6929 
6930 	if (status == -ENOSPC)
6931 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6932 
6933 	return status;
6934 }
6935 
6936 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6937 			    const unsigned char *addr)
6938 {
6939 	struct hclge_vport *vport = hclge_get_vport(handle);
6940 
6941 	return hclge_rm_mc_addr_common(vport, addr);
6942 }
6943 
6944 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6945 			    const unsigned char *addr)
6946 {
6947 	struct hclge_dev *hdev = vport->back;
6948 	struct hclge_mac_vlan_tbl_entry_cmd req;
6949 	enum hclge_cmd_status status;
6950 	struct hclge_desc desc[3];
6951 
6952 	/* mac addr check */
6953 	if (!is_multicast_ether_addr(addr)) {
6954 		dev_dbg(&hdev->pdev->dev,
6955 			"Remove mc mac err! invalid mac:%pM.\n",
6956 			 addr);
6957 		return -EINVAL;
6958 	}
6959 
6960 	memset(&req, 0, sizeof(req));
6961 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6962 	hclge_prepare_mac_addr(&req, addr, true);
6963 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6964 	if (!status) {
6965 		/* This mac addr exists, remove this handle's VFID for it */
6966 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
6967 		if (status)
6968 			return status;
6969 
6970 		if (hclge_is_all_function_id_zero(desc))
6971 			/* All the vfids are zero, so this entry needs to be deleted */
6972 			status = hclge_remove_mac_vlan_tbl(vport, &req);
6973 		else
6974 			/* Not all the vfids are zero, update the vfid */
6975 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6976 
6977 	} else {
6978 		/* Maybe this mac address is in the mta table, but it cannot
6979 		 * be deleted here because an mta entry represents an address
6980 		 * range rather than a specific address. The delete action for
6981 		 * all entries will take effect in update_mta_status called by
6982 		 * hns3_nic_set_rx_mode.
6983 		 */
6984 		status = 0;
6985 	}
6986 
6987 	return status;
6988 }
6989 
6990 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6991 			       enum HCLGE_MAC_ADDR_TYPE mac_type)
6992 {
6993 	struct hclge_vport_mac_addr_cfg *mac_cfg;
6994 	struct list_head *list;
6995 
6996 	if (!vport->vport_id)
6997 		return;
6998 
6999 	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7000 	if (!mac_cfg)
7001 		return;
7002 
7003 	mac_cfg->hd_tbl_status = true;
7004 	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7005 
7006 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7007 	       &vport->uc_mac_list : &vport->mc_mac_list;
7008 
7009 	list_add_tail(&mac_cfg->node, list);
7010 }
7011 
7012 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7013 			      bool is_write_tbl,
7014 			      enum HCLGE_MAC_ADDR_TYPE mac_type)
7015 {
7016 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7017 	struct list_head *list;
7018 	bool uc_flag, mc_flag;
7019 
7020 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7021 	       &vport->uc_mac_list : &vport->mc_mac_list;
7022 
7023 	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7024 	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7025 
7026 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7027 		if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7028 			if (uc_flag && mac_cfg->hd_tbl_status)
7029 				hclge_rm_uc_addr_common(vport, mac_addr);
7030 
7031 			if (mc_flag && mac_cfg->hd_tbl_status)
7032 				hclge_rm_mc_addr_common(vport, mac_addr);
7033 
7034 			list_del(&mac_cfg->node);
7035 			kfree(mac_cfg);
7036 			break;
7037 		}
7038 	}
7039 }
7040 
7041 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7042 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
7043 {
7044 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7045 	struct list_head *list;
7046 
7047 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7048 	       &vport->uc_mac_list : &vport->mc_mac_list;
7049 
7050 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7051 		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7052 			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7053 
7054 		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7055 			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7056 
7057 		mac_cfg->hd_tbl_status = false;
7058 		if (is_del_list) {
7059 			list_del(&mac_cfg->node);
7060 			kfree(mac_cfg);
7061 		}
7062 	}
7063 }
7064 
7065 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7066 {
7067 	struct hclge_vport_mac_addr_cfg *mac, *tmp;
7068 	struct hclge_vport *vport;
7069 	int i;
7070 
7071 	mutex_lock(&hdev->vport_cfg_mutex);
7072 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7073 		vport = &hdev->vport[i];
7074 		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7075 			list_del(&mac->node);
7076 			kfree(mac);
7077 		}
7078 
7079 		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7080 			list_del(&mac->node);
7081 			kfree(mac);
7082 		}
7083 	}
7084 	mutex_unlock(&hdev->vport_cfg_mutex);
7085 }
7086 
7087 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7088 					      u16 cmdq_resp, u8 resp_code)
7089 {
7090 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
7091 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
7092 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
7093 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
7094 
7095 	int return_status;
7096 
7097 	if (cmdq_resp) {
7098 		dev_err(&hdev->pdev->dev,
7099 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
7100 			cmdq_resp);
7101 		return -EIO;
7102 	}
7103 
7104 	switch (resp_code) {
7105 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
7106 	case HCLGE_ETHERTYPE_ALREADY_ADD:
7107 		return_status = 0;
7108 		break;
7109 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7110 		dev_err(&hdev->pdev->dev,
7111 			"add mac ethertype failed for manager table overflow.\n");
7112 		return_status = -EIO;
7113 		break;
7114 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
7115 		dev_err(&hdev->pdev->dev,
7116 			"add mac ethertype failed for key conflict.\n");
7117 		return_status = -EIO;
7118 		break;
7119 	default:
7120 		dev_err(&hdev->pdev->dev,
7121 			"add mac ethertype failed for undefined, code=%d.\n",
7122 			resp_code);
7123 		return_status = -EIO;
7124 	}
7125 
7126 	return return_status;
7127 }
7128 
7129 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7130 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
7131 {
7132 	struct hclge_desc desc;
7133 	u8 resp_code;
7134 	u16 retval;
7135 	int ret;
7136 
7137 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7138 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7139 
7140 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7141 	if (ret) {
7142 		dev_err(&hdev->pdev->dev,
7143 			"add mac ethertype failed for cmd_send, ret =%d.\n",
7144 			ret);
7145 		return ret;
7146 	}
7147 
7148 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7149 	retval = le16_to_cpu(desc.retval);
7150 
7151 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7152 }
7153 
7154 static int init_mgr_tbl(struct hclge_dev *hdev)
7155 {
7156 	int ret;
7157 	int i;
7158 
7159 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7160 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7161 		if (ret) {
7162 			dev_err(&hdev->pdev->dev,
7163 				"add mac ethertype failed, ret =%d.\n",
7164 				ret);
7165 			return ret;
7166 		}
7167 	}
7168 
7169 	return 0;
7170 }
7171 
7172 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7173 {
7174 	struct hclge_vport *vport = hclge_get_vport(handle);
7175 	struct hclge_dev *hdev = vport->back;
7176 
7177 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
7178 }
7179 
7180 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7181 			      bool is_first)
7182 {
7183 	const unsigned char *new_addr = (const unsigned char *)p;
7184 	struct hclge_vport *vport = hclge_get_vport(handle);
7185 	struct hclge_dev *hdev = vport->back;
7186 	int ret;
7187 
7188 	/* mac addr check */
7189 	if (is_zero_ether_addr(new_addr) ||
7190 	    is_broadcast_ether_addr(new_addr) ||
7191 	    is_multicast_ether_addr(new_addr)) {
7192 		dev_err(&hdev->pdev->dev,
7193 			"Change uc mac err! invalid mac:%p.\n",
7194 			 new_addr);
7195 		return -EINVAL;
7196 	}
7197 
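	/* Unless this is the first configuration, remove the previously
	 * configured unicast address before adding the new one. In a kdump
	 * kernel the removal is done unconditionally, presumably because the
	 * crashed kernel may have left its address in the hardware table.
	 */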
7198 	if ((!is_first || is_kdump_kernel()) &&
7199 	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7200 		dev_warn(&hdev->pdev->dev,
7201 			 "remove old uc mac address fail.\n");
7202 
7203 	ret = hclge_add_uc_addr(handle, new_addr);
7204 	if (ret) {
7205 		dev_err(&hdev->pdev->dev,
7206 			"add uc mac address fail, ret =%d.\n",
7207 			ret);
7208 
7209 		if (!is_first &&
7210 		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7211 			dev_err(&hdev->pdev->dev,
7212 				"restore uc mac address fail.\n");
7213 
7214 		return -EIO;
7215 	}
7216 
7217 	ret = hclge_pause_addr_cfg(hdev, new_addr);
7218 	if (ret) {
7219 		dev_err(&hdev->pdev->dev,
7220 			"configure mac pause address fail, ret =%d.\n",
7221 			ret);
7222 		return -EIO;
7223 	}
7224 
7225 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7226 
7227 	return 0;
7228 }
7229 
7230 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7231 			  int cmd)
7232 {
7233 	struct hclge_vport *vport = hclge_get_vport(handle);
7234 	struct hclge_dev *hdev = vport->back;
7235 
7236 	if (!hdev->hw.mac.phydev)
7237 		return -EOPNOTSUPP;
7238 
7239 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7240 }
7241 
7242 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7243 				      u8 fe_type, bool filter_en, u8 vf_id)
7244 {
7245 	struct hclge_vlan_filter_ctrl_cmd *req;
7246 	struct hclge_desc desc;
7247 	int ret;
7248 
7249 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7250 
7251 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7252 	req->vlan_type = vlan_type;
7253 	req->vlan_fe = filter_en ? fe_type : 0;
7254 	req->vf_id = vf_id;
7255 
7256 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7257 	if (ret)
7258 		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7259 			ret);
7260 
7261 	return ret;
7262 }
7263 
7264 #define HCLGE_FILTER_TYPE_VF		0
7265 #define HCLGE_FILTER_TYPE_PORT		1
7266 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
7267 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
7268 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
7269 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
7270 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
7271 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
7272 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
7273 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
7274 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
7275 
7276 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7277 {
7278 	struct hclge_vport *vport = hclge_get_vport(handle);
7279 	struct hclge_dev *hdev = vport->back;
7280 
7281 	if (hdev->pdev->revision >= 0x21) {
7282 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7283 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
7284 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7285 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
7286 	} else {
7287 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7288 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7289 					   0);
7290 	}
7291 	if (enable)
7292 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
7293 	else
7294 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7295 }
7296 
7297 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7298 				    bool is_kill, u16 vlan, u8 qos,
7299 				    __be16 proto)
7300 {
7301 #define HCLGE_MAX_VF_BYTES  16
7302 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
7303 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
7304 	struct hclge_desc desc[2];
7305 	u8 vf_byte_val;
7306 	u8 vf_byte_off;
7307 	int ret;
7308 
7309 	/* If the vf vlan table is full, firmware will close the vf vlan
7310 	 * filter; it is then neither possible nor necessary to add the vlan.
7311 	 */
7312 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7313 		return 0;
7314 
7315 	hclge_cmd_setup_basic_desc(&desc[0],
7316 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7317 	hclge_cmd_setup_basic_desc(&desc[1],
7318 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7319 
7320 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7321 
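	/* One bit per VF in the command's vf_bitmap: vfid / 8 selects the
	 * byte and vfid % 8 the bit. Bytes beyond HCLGE_MAX_VF_BYTES do not
	 * fit in the first descriptor and spill into the second one.
	 */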
7322 	vf_byte_off = vfid / 8;
7323 	vf_byte_val = 1 << (vfid % 8);
7324 
7325 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7326 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7327 
7328 	req0->vlan_id  = cpu_to_le16(vlan);
7329 	req0->vlan_cfg = is_kill;
7330 
7331 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7332 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7333 	else
7334 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7335 
7336 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
7337 	if (ret) {
7338 		dev_err(&hdev->pdev->dev,
7339 			"Send vf vlan command fail, ret =%d.\n",
7340 			ret);
7341 		return ret;
7342 	}
7343 
7344 	if (!is_kill) {
7345 #define HCLGE_VF_VLAN_NO_ENTRY	2
7346 		if (!req0->resp_code || req0->resp_code == 1)
7347 			return 0;
7348 
7349 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7350 			set_bit(vfid, hdev->vf_vlan_full);
7351 			dev_warn(&hdev->pdev->dev,
7352 				 "vf vlan table is full, vf vlan filter is disabled\n");
7353 			return 0;
7354 		}
7355 
7356 		dev_err(&hdev->pdev->dev,
7357 			"Add vf vlan filter fail, ret =%d.\n",
7358 			req0->resp_code);
7359 	} else {
7360 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
7361 		if (!req0->resp_code)
7362 			return 0;
7363 
7364 		/* The vf vlan filter is disabled when the vf vlan table is
7365 		 * full, so new vlan ids are no longer added to the table.
7366 		 * Just return 0 without a warning to avoid a flood of
7367 		 * verbose logs when unloading.
7368 		 */
7369 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7370 			return 0;
7371 
7372 		dev_err(&hdev->pdev->dev,
7373 			"Kill vf vlan filter fail, ret =%d.\n",
7374 			req0->resp_code);
7375 	}
7376 
7377 	return -EIO;
7378 }
7379 
7380 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7381 				      u16 vlan_id, bool is_kill)
7382 {
7383 	struct hclge_vlan_filter_pf_cfg_cmd *req;
7384 	struct hclge_desc desc;
7385 	u8 vlan_offset_byte_val;
7386 	u8 vlan_offset_byte;
7387 	u8 vlan_offset_160;
7388 	int ret;
7389 
7390 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7391 
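	/* The PF vlan filter is addressed in blocks of 160 vlan ids:
	 * vlan_offset selects the block, and within the block the bit for
	 * this id is at byte (vlan_id % 160) / 8, bit vlan_id % 8.
	 */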
7392 	vlan_offset_160 = vlan_id / 160;
7393 	vlan_offset_byte = (vlan_id % 160) / 8;
7394 	vlan_offset_byte_val = 1 << (vlan_id % 8);
7395 
7396 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7397 	req->vlan_offset = vlan_offset_160;
7398 	req->vlan_cfg = is_kill;
7399 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7400 
7401 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7402 	if (ret)
7403 		dev_err(&hdev->pdev->dev,
7404 			"port vlan command, send fail, ret =%d.\n", ret);
7405 	return ret;
7406 }
7407 
7408 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7409 				    u16 vport_id, u16 vlan_id, u8 qos,
7410 				    bool is_kill)
7411 {
7412 	u16 vport_idx, vport_num = 0;
7413 	int ret;
7414 
7415 	if (is_kill && !vlan_id)
7416 		return 0;
7417 
7418 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7419 				       0, proto);
7420 	if (ret) {
7421 		dev_err(&hdev->pdev->dev,
7422 			"Set %d vport vlan filter config fail, ret =%d.\n",
7423 			vport_id, ret);
7424 		return ret;
7425 	}
7426 
7427 	/* vlan 0 may be added twice when 8021q module is enabled */
7428 	if (!is_kill && !vlan_id &&
7429 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
7430 		return 0;
7431 
7432 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7433 		dev_err(&hdev->pdev->dev,
7434 			"Add port vlan failed, vport %d is already in vlan %d\n",
7435 			vport_id, vlan_id);
7436 		return -EINVAL;
7437 	}
7438 
7439 	if (is_kill &&
7440 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7441 		dev_err(&hdev->pdev->dev,
7442 			"Delete port vlan failed, vport %d is not in vlan %d\n",
7443 			vport_id, vlan_id);
7444 		return -EINVAL;
7445 	}
7446 
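	/* Only touch the port level vlan filter when the first vport adds
	 * this vlan id or the last vport removes it; otherwise the per
	 * function filter configured above is sufficient.
	 */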
7447 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7448 		vport_num++;
7449 
7450 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7451 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7452 						 is_kill);
7453 
7454 	return ret;
7455 }
7456 
7457 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7458 {
7459 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7460 	struct hclge_vport_vtag_tx_cfg_cmd *req;
7461 	struct hclge_dev *hdev = vport->back;
7462 	struct hclge_desc desc;
7463 	int status;
7464 
7465 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7466 
7467 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7468 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7469 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7470 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7471 		      vcfg->accept_tag1 ? 1 : 0);
7472 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7473 		      vcfg->accept_untag1 ? 1 : 0);
7474 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7475 		      vcfg->accept_tag2 ? 1 : 0);
7476 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7477 		      vcfg->accept_untag2 ? 1 : 0);
7478 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7479 		      vcfg->insert_tag1_en ? 1 : 0);
7480 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7481 		      vcfg->insert_tag2_en ? 1 : 0);
7482 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7483 
7484 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7485 	req->vf_bitmap[req->vf_offset] =
7486 		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7487 
7488 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7489 	if (status)
7490 		dev_err(&hdev->pdev->dev,
7491 			"Send port txvlan cfg command fail, ret =%d\n",
7492 			status);
7493 
7494 	return status;
7495 }
7496 
7497 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7498 {
7499 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7500 	struct hclge_vport_vtag_rx_cfg_cmd *req;
7501 	struct hclge_dev *hdev = vport->back;
7502 	struct hclge_desc desc;
7503 	int status;
7504 
7505 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7506 
7507 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7508 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7509 		      vcfg->strip_tag1_en ? 1 : 0);
7510 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7511 		      vcfg->strip_tag2_en ? 1 : 0);
7512 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7513 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
7514 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7515 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
7516 
7517 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7518 	req->vf_bitmap[req->vf_offset] =
7519 		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7520 
7521 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7522 	if (status)
7523 		dev_err(&hdev->pdev->dev,
7524 			"Send port rxvlan cfg command fail, ret =%d\n",
7525 			status);
7526 
7527 	return status;
7528 }
7529 
7530 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7531 				  u16 port_base_vlan_state,
7532 				  u16 vlan_tag)
7533 {
7534 	int ret;
7535 
7536 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7537 		vport->txvlan_cfg.accept_tag1 = true;
7538 		vport->txvlan_cfg.insert_tag1_en = false;
7539 		vport->txvlan_cfg.default_tag1 = 0;
7540 	} else {
7541 		vport->txvlan_cfg.accept_tag1 = false;
7542 		vport->txvlan_cfg.insert_tag1_en = true;
7543 		vport->txvlan_cfg.default_tag1 = vlan_tag;
7544 	}
7545 
7546 	vport->txvlan_cfg.accept_untag1 = true;
7547 
7548 	/* accept_tag2 and accept_untag2 are not supported on
7549 	 * pdev revision 0x20; newer revisions support them, but
7550 	 * these two fields cannot be configured by the user.
7551 	 */
7552 	vport->txvlan_cfg.accept_tag2 = true;
7553 	vport->txvlan_cfg.accept_untag2 = true;
7554 	vport->txvlan_cfg.insert_tag2_en = false;
7555 	vport->txvlan_cfg.default_tag2 = 0;
7556 
7557 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7558 		vport->rxvlan_cfg.strip_tag1_en = false;
7559 		vport->rxvlan_cfg.strip_tag2_en =
7560 				vport->rxvlan_cfg.rx_vlan_offload_en;
7561 	} else {
7562 		vport->rxvlan_cfg.strip_tag1_en =
7563 				vport->rxvlan_cfg.rx_vlan_offload_en;
7564 		vport->rxvlan_cfg.strip_tag2_en = true;
7565 	}
7566 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7567 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7568 
7569 	ret = hclge_set_vlan_tx_offload_cfg(vport);
7570 	if (ret)
7571 		return ret;
7572 
7573 	return hclge_set_vlan_rx_offload_cfg(vport);
7574 }
7575 
7576 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7577 {
7578 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7579 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7580 	struct hclge_desc desc;
7581 	int status;
7582 
7583 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7584 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7585 	rx_req->ot_fst_vlan_type =
7586 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7587 	rx_req->ot_sec_vlan_type =
7588 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7589 	rx_req->in_fst_vlan_type =
7590 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7591 	rx_req->in_sec_vlan_type =
7592 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7593 
7594 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7595 	if (status) {
7596 		dev_err(&hdev->pdev->dev,
7597 			"Send rxvlan protocol type command fail, ret =%d\n",
7598 			status);
7599 		return status;
7600 	}
7601 
7602 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7603 
7604 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7605 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7606 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7607 
7608 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7609 	if (status)
7610 		dev_err(&hdev->pdev->dev,
7611 			"Send txvlan protocol type command fail, ret =%d\n",
7612 			status);
7613 
7614 	return status;
7615 }
7616 
7617 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7618 {
7619 #define HCLGE_DEF_VLAN_TYPE		0x8100
7620 
7621 	struct hnae3_handle *handle = &hdev->vport[0].nic;
7622 	struct hclge_vport *vport;
7623 	int ret;
7624 	int i;
7625 
7626 	if (hdev->pdev->revision >= 0x21) {
7627 		/* for revision 0x21, vf vlan filter is per function */
7628 		for (i = 0; i < hdev->num_alloc_vport; i++) {
7629 			vport = &hdev->vport[i];
7630 			ret = hclge_set_vlan_filter_ctrl(hdev,
7631 							 HCLGE_FILTER_TYPE_VF,
7632 							 HCLGE_FILTER_FE_EGRESS,
7633 							 true,
7634 							 vport->vport_id);
7635 			if (ret)
7636 				return ret;
7637 		}
7638 
7639 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7640 						 HCLGE_FILTER_FE_INGRESS, true,
7641 						 0);
7642 		if (ret)
7643 			return ret;
7644 	} else {
7645 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7646 						 HCLGE_FILTER_FE_EGRESS_V1_B,
7647 						 true, 0);
7648 		if (ret)
7649 			return ret;
7650 	}
7651 
7652 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
7653 
7654 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7655 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7656 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7657 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7658 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7659 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7660 
7661 	ret = hclge_set_vlan_protocol_type(hdev);
7662 	if (ret)
7663 		return ret;
7664 
7665 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7666 		u16 vlan_tag;
7667 
7668 		vport = &hdev->vport[i];
7669 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7670 
7671 		ret = hclge_vlan_offload_cfg(vport,
7672 					     vport->port_base_vlan_cfg.state,
7673 					     vlan_tag);
7674 		if (ret)
7675 			return ret;
7676 	}
7677 
7678 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7679 }
7680 
7681 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7682 				       bool writen_to_tbl)
7683 {
7684 	struct hclge_vport_vlan_cfg *vlan;
7685 
7686 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7687 	if (!vlan)
7688 		return;
7689 
7690 	vlan->hd_tbl_status = writen_to_tbl;
7691 	vlan->vlan_id = vlan_id;
7692 
7693 	list_add_tail(&vlan->node, &vport->vlan_list);
7694 }
7695 
7696 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7697 {
7698 	struct hclge_vport_vlan_cfg *vlan, *tmp;
7699 	struct hclge_dev *hdev = vport->back;
7700 	int ret;
7701 
7702 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7703 		if (!vlan->hd_tbl_status) {
7704 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7705 						       vport->vport_id,
7706 						       vlan->vlan_id, 0, false);
7707 			if (ret) {
7708 				dev_err(&hdev->pdev->dev,
7709 					"restore vport vlan list failed, ret=%d\n",
7710 					ret);
7711 				return ret;
7712 			}
7713 		}
7714 		vlan->hd_tbl_status = true;
7715 	}
7716 
7717 	return 0;
7718 }
7719 
7720 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7721 				      bool is_write_tbl)
7722 {
7723 	struct hclge_vport_vlan_cfg *vlan, *tmp;
7724 	struct hclge_dev *hdev = vport->back;
7725 
7726 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7727 		if (vlan->vlan_id == vlan_id) {
7728 			if (is_write_tbl && vlan->hd_tbl_status)
7729 				hclge_set_vlan_filter_hw(hdev,
7730 							 htons(ETH_P_8021Q),
7731 							 vport->vport_id,
7732 							 vlan_id, 0,
7733 							 true);
7734 
7735 			list_del(&vlan->node);
7736 			kfree(vlan);
7737 			break;
7738 		}
7739 	}
7740 }
7741 
7742 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7743 {
7744 	struct hclge_vport_vlan_cfg *vlan, *tmp;
7745 	struct hclge_dev *hdev = vport->back;
7746 
7747 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7748 		if (vlan->hd_tbl_status)
7749 			hclge_set_vlan_filter_hw(hdev,
7750 						 htons(ETH_P_8021Q),
7751 						 vport->vport_id,
7752 						 vlan->vlan_id, 0,
7753 						 true);
7754 
7755 		vlan->hd_tbl_status = false;
7756 		if (is_del_list) {
7757 			list_del(&vlan->node);
7758 			kfree(vlan);
7759 		}
7760 	}
7761 }
7762 
7763 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7764 {
7765 	struct hclge_vport_vlan_cfg *vlan, *tmp;
7766 	struct hclge_vport *vport;
7767 	int i;
7768 
7769 	mutex_lock(&hdev->vport_cfg_mutex);
7770 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7771 		vport = &hdev->vport[i];
7772 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7773 			list_del(&vlan->node);
7774 			kfree(vlan);
7775 		}
7776 	}
7777 	mutex_unlock(&hdev->vport_cfg_mutex);
7778 }
7779 
7780 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
7781 {
7782 	struct hclge_vport *vport = hclge_get_vport(handle);
7783 	struct hclge_vport_vlan_cfg *vlan, *tmp;
7784 	struct hclge_dev *hdev = vport->back;
7785 	u16 vlan_proto, qos;
7786 	u16 state, vlan_id;
7787 	int i;
7788 
7789 	mutex_lock(&hdev->vport_cfg_mutex);
7790 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7791 		vport = &hdev->vport[i];
7792 		vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
7793 		vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7794 		qos = vport->port_base_vlan_cfg.vlan_info.qos;
7795 		state = vport->port_base_vlan_cfg.state;
7796 
7797 		if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
7798 			hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
7799 						 vport->vport_id, vlan_id, qos,
7800 						 false);
7801 			continue;
7802 		}
7803 
7804 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7805 			if (vlan->hd_tbl_status)
7806 				hclge_set_vlan_filter_hw(hdev,
7807 							 htons(ETH_P_8021Q),
7808 							 vport->vport_id,
7809 							 vlan->vlan_id, 0,
7810 							 false);
7811 		}
7812 	}
7813 
7814 	mutex_unlock(&hdev->vport_cfg_mutex);
7815 }
7816 
7817 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7818 {
7819 	struct hclge_vport *vport = hclge_get_vport(handle);
7820 
7821 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7822 		vport->rxvlan_cfg.strip_tag1_en = false;
7823 		vport->rxvlan_cfg.strip_tag2_en = enable;
7824 	} else {
7825 		vport->rxvlan_cfg.strip_tag1_en = enable;
7826 		vport->rxvlan_cfg.strip_tag2_en = true;
7827 	}
7828 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7829 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7830 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7831 
7832 	return hclge_set_vlan_rx_offload_cfg(vport);
7833 }
7834 
7835 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7836 					    u16 port_base_vlan_state,
7837 					    struct hclge_vlan_info *new_info,
7838 					    struct hclge_vlan_info *old_info)
7839 {
7840 	struct hclge_dev *hdev = vport->back;
7841 	int ret;
7842 
7843 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7844 		hclge_rm_vport_all_vlan_table(vport, false);
7845 		return hclge_set_vlan_filter_hw(hdev,
7846 						 htons(new_info->vlan_proto),
7847 						 vport->vport_id,
7848 						 new_info->vlan_tag,
7849 						 new_info->qos, false);
7850 	}
7851 
7852 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7853 				       vport->vport_id, old_info->vlan_tag,
7854 				       old_info->qos, true);
7855 	if (ret)
7856 		return ret;
7857 
7858 	return hclge_add_vport_all_vlan_table(vport);
7859 }
7860 
7861 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7862 				    struct hclge_vlan_info *vlan_info)
7863 {
7864 	struct hnae3_handle *nic = &vport->nic;
7865 	struct hclge_vlan_info *old_vlan_info;
7866 	struct hclge_dev *hdev = vport->back;
7867 	int ret;
7868 
7869 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7870 
7871 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7872 	if (ret)
7873 		return ret;
7874 
7875 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7876 		/* add new VLAN tag */
7877 		ret = hclge_set_vlan_filter_hw(hdev,
7878 					       htons(vlan_info->vlan_proto),
7879 					       vport->vport_id,
7880 					       vlan_info->vlan_tag,
7881 					       vlan_info->qos, false);
7882 		if (ret)
7883 			return ret;
7884 
7885 		/* remove old VLAN tag */
7886 		ret = hclge_set_vlan_filter_hw(hdev,
7887 					       htons(old_vlan_info->vlan_proto),
7888 					       vport->vport_id,
7889 					       old_vlan_info->vlan_tag,
7890 					       old_vlan_info->qos, true);
7891 		if (ret)
7892 			return ret;
7893 
7894 		goto update;
7895 	}
7896 
7897 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7898 					       old_vlan_info);
7899 	if (ret)
7900 		return ret;
7901 
7902 	/* update state only when disable/enable port based VLAN */
7903 	vport->port_base_vlan_cfg.state = state;
7904 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7905 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7906 	else
7907 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7908 
7909 update:
7910 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7911 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7912 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7913 
7914 	return 0;
7915 }
7916 
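/* Decide how the requested vlan changes the port based vlan state:
 * currently disabled + vlan 0       -> no change
 * currently disabled + nonzero vlan -> enable
 * currently enabled  + vlan 0       -> disable
 * currently enabled  + same vlan    -> no change
 * currently enabled  + other vlan   -> modify
 */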
7917 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7918 					  enum hnae3_port_base_vlan_state state,
7919 					  u16 vlan)
7920 {
7921 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7922 		if (!vlan)
7923 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7924 		else
7925 			return HNAE3_PORT_BASE_VLAN_ENABLE;
7926 	} else {
7927 		if (!vlan)
7928 			return HNAE3_PORT_BASE_VLAN_DISABLE;
7929 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7930 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7931 		else
7932 			return HNAE3_PORT_BASE_VLAN_MODIFY;
7933 	}
7934 }
7935 
7936 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7937 				    u16 vlan, u8 qos, __be16 proto)
7938 {
7939 	struct hclge_vport *vport = hclge_get_vport(handle);
7940 	struct hclge_dev *hdev = vport->back;
7941 	struct hclge_vlan_info vlan_info;
7942 	u16 state;
7943 	int ret;
7944 
7945 	if (hdev->pdev->revision == 0x20)
7946 		return -EOPNOTSUPP;
7947 
7948 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
7949 	if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7950 		return -EINVAL;
7951 	if (proto != htons(ETH_P_8021Q))
7952 		return -EPROTONOSUPPORT;
7953 
7954 	vport = &hdev->vport[vfid];
7955 	state = hclge_get_port_base_vlan_state(vport,
7956 					       vport->port_base_vlan_cfg.state,
7957 					       vlan);
7958 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7959 		return 0;
7960 
7961 	vlan_info.vlan_tag = vlan;
7962 	vlan_info.qos = qos;
7963 	vlan_info.vlan_proto = ntohs(proto);
7964 
7965 	/* update port based VLAN for PF */
7966 	if (!vfid) {
7967 		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7968 		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7969 		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7970 
7971 		return ret;
7972 	}
7973 
7974 	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7975 		return hclge_update_port_base_vlan_cfg(vport, state,
7976 						       &vlan_info);
7977 	} else {
7978 		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7979 							(u8)vfid, state,
7980 							vlan, qos,
7981 							ntohs(proto));
7982 		return ret;
7983 	}
7984 }
7985 
7986 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7987 			  u16 vlan_id, bool is_kill)
7988 {
7989 	struct hclge_vport *vport = hclge_get_vport(handle);
7990 	struct hclge_dev *hdev = vport->back;
7991 	bool writen_to_tbl = false;
7992 	int ret = 0;
7993 
7994 	/* When the device is resetting, firmware cannot handle the
7995 	 * mailbox. Just record the vlan id and remove it after the
7996 	 * reset has finished.
7997 	 */
7998 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
7999 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
8000 		return -EBUSY;
8001 	}
8002 
8003 	/* When port based vlan is enabled, the port based vlan is used as
8004 	 * the vlan filter entry. In this case, the vlan filter table is not
8005 	 * updated when the user adds or removes a vlan; only the vport vlan
8006 	 * list is updated. The vlan ids in that list are not written to the
8007 	 * vlan filter table until port based vlan is disabled.
8008 	 */
8009 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8010 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8011 					       vlan_id, 0, is_kill);
8012 		writen_to_tbl = true;
8013 	}
8014 
8015 	if (!ret) {
8016 		if (is_kill)
8017 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
8018 		else
8019 			hclge_add_vport_vlan_table(vport, vlan_id,
8020 						   writen_to_tbl);
8021 	} else if (is_kill) {
8022 		/* When removing the hw vlan filter failed, record the vlan
8023 		 * id and try to remove it from hw later, to stay consistent
8024 		 * with the stack.
8025 		 */
8026 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
8027 	}
8028 	return ret;
8029 }
8030 
8031 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8032 {
8033 #define HCLGE_MAX_SYNC_COUNT	60
8034 
8035 	int i, ret, sync_cnt = 0;
8036 	u16 vlan_id;
8037 
8038 	/* retry the failed vlan deletions for every vport, including the PF */
8039 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8040 		struct hclge_vport *vport = &hdev->vport[i];
8041 
8042 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8043 					 VLAN_N_VID);
8044 		while (vlan_id != VLAN_N_VID) {
8045 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8046 						       vport->vport_id, vlan_id,
8047 						       0, true);
8048 			if (ret && ret != -EINVAL)
8049 				return;
8050 
8051 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8052 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
8053 
8054 			sync_cnt++;
8055 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8056 				return;
8057 
8058 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8059 						 VLAN_N_VID);
8060 		}
8061 	}
8062 }
8063 
8064 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8065 {
8066 	struct hclge_config_max_frm_size_cmd *req;
8067 	struct hclge_desc desc;
8068 
8069 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8070 
8071 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8072 	req->max_frm_size = cpu_to_le16(new_mps);
8073 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8074 
8075 	return hclge_cmd_send(&hdev->hw, &desc, 1);
8076 }
8077 
8078 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8079 {
8080 	struct hclge_vport *vport = hclge_get_vport(handle);
8081 
8082 	return hclge_set_vport_mtu(vport, new_mtu);
8083 }
8084 
8085 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8086 {
8087 	struct hclge_dev *hdev = vport->back;
8088 	int i, max_frm_size, ret;
8089 
8090 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
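	/* The hardware takes a maximum frame size rather than an MTU, so add
	 * the Ethernet header, FCS and room for two vlan tags (double tagged
	 * frames) on top of the requested MTU.
	 */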
8091 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8092 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
8093 		return -EINVAL;
8094 
8095 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8096 	mutex_lock(&hdev->vport_lock);
8097 	/* VF's mps must fit within hdev->mps */
8098 	if (vport->vport_id && max_frm_size > hdev->mps) {
8099 		mutex_unlock(&hdev->vport_lock);
8100 		return -EINVAL;
8101 	} else if (vport->vport_id) {
8102 		vport->mps = max_frm_size;
8103 		mutex_unlock(&hdev->vport_lock);
8104 		return 0;
8105 	}
8106 
8107 	/* PF's mps must be greater than every VF's mps */
8108 	for (i = 1; i < hdev->num_alloc_vport; i++)
8109 		if (max_frm_size < hdev->vport[i].mps) {
8110 			mutex_unlock(&hdev->vport_lock);
8111 			return -EINVAL;
8112 		}
8113 
8114 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8115 
8116 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
8117 	if (ret) {
8118 		dev_err(&hdev->pdev->dev,
8119 			"Change mtu fail, ret =%d\n", ret);
8120 		goto out;
8121 	}
8122 
8123 	hdev->mps = max_frm_size;
8124 	vport->mps = max_frm_size;
8125 
8126 	ret = hclge_buffer_alloc(hdev);
8127 	if (ret)
8128 		dev_err(&hdev->pdev->dev,
8129 			"Allocate buffer fail, ret =%d\n", ret);
8130 
8131 out:
8132 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8133 	mutex_unlock(&hdev->vport_lock);
8134 	return ret;
8135 }
8136 
8137 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8138 				    bool enable)
8139 {
8140 	struct hclge_reset_tqp_queue_cmd *req;
8141 	struct hclge_desc desc;
8142 	int ret;
8143 
8144 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8145 
8146 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8147 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8148 	if (enable)
8149 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8150 
8151 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8152 	if (ret) {
8153 		dev_err(&hdev->pdev->dev,
8154 			"Send tqp reset cmd error, status =%d\n", ret);
8155 		return ret;
8156 	}
8157 
8158 	return 0;
8159 }
8160 
8161 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8162 {
8163 	struct hclge_reset_tqp_queue_cmd *req;
8164 	struct hclge_desc desc;
8165 	int ret;
8166 
8167 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8168 
8169 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8170 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8171 
8172 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8173 	if (ret) {
8174 		dev_err(&hdev->pdev->dev,
8175 			"Get reset status error, status =%d\n", ret);
8176 		return ret;
8177 	}
8178 
8179 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8180 }
8181 
8182 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8183 {
8184 	struct hnae3_queue *queue;
8185 	struct hclge_tqp *tqp;
8186 
8187 	queue = handle->kinfo.tqp[queue_id];
8188 	tqp = container_of(queue, struct hclge_tqp, q);
8189 
8190 	return tqp->index;
8191 }
8192 
8193 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8194 {
8195 	struct hclge_vport *vport = hclge_get_vport(handle);
8196 	struct hclge_dev *hdev = vport->back;
8197 	int reset_try_times = 0;
8198 	int reset_status;
8199 	u16 queue_gid;
8200 	int ret;
8201 
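	/* TQP reset sequence: disable the queue, request the reset via the
	 * command queue, poll the ready_to_reset status (20 ms per try, up
	 * to HCLGE_TQP_RESET_TRY_TIMES tries), then deassert the reset.
	 */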
8202 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8203 
8204 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8205 	if (ret) {
8206 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8207 		return ret;
8208 	}
8209 
8210 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8211 	if (ret) {
8212 		dev_err(&hdev->pdev->dev,
8213 			"Send reset tqp cmd fail, ret = %d\n", ret);
8214 		return ret;
8215 	}
8216 
8217 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8218 		/* Wait for tqp hw reset */
8219 		msleep(20);
8220 		reset_status = hclge_get_reset_status(hdev, queue_gid);
8221 		if (reset_status)
8222 			break;
8223 	}
8224 
8225 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8226 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8227 		return -ETIME;
8228 	}
8229 
8230 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8231 	if (ret)
8232 		dev_err(&hdev->pdev->dev,
8233 			"Deassert the soft reset fail, ret = %d\n", ret);
8234 
8235 	return ret;
8236 }
8237 
8238 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8239 {
8240 	struct hclge_dev *hdev = vport->back;
8241 	int reset_try_times = 0;
8242 	int reset_status;
8243 	u16 queue_gid;
8244 	int ret;
8245 
8246 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8247 
8248 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8249 	if (ret) {
8250 		dev_warn(&hdev->pdev->dev,
8251 			 "Send reset tqp cmd fail, ret = %d\n", ret);
8252 		return;
8253 	}
8254 
8255 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8256 		/* Wait for tqp hw reset */
8257 		msleep(20);
8258 		reset_status = hclge_get_reset_status(hdev, queue_gid);
8259 		if (reset_status)
8260 			break;
8261 	}
8262 
8263 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8264 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8265 		return;
8266 	}
8267 
8268 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8269 	if (ret)
8270 		dev_warn(&hdev->pdev->dev,
8271 			 "Deassert the soft reset fail, ret = %d\n", ret);
8272 }
8273 
8274 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8275 {
8276 	struct hclge_vport *vport = hclge_get_vport(handle);
8277 	struct hclge_dev *hdev = vport->back;
8278 
8279 	return hdev->fw_version;
8280 }
8281 
8282 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8283 {
8284 	struct phy_device *phydev = hdev->hw.mac.phydev;
8285 
8286 	if (!phydev)
8287 		return;
8288 
8289 	phy_set_asym_pause(phydev, rx_en, tx_en);
8290 }
8291 
8292 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8293 {
8294 	int ret;
8295 
8296 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8297 		return 0;
8298 
8299 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8300 	if (ret)
8301 		dev_err(&hdev->pdev->dev,
8302 			"configure pauseparam error, ret = %d.\n", ret);
8303 
8304 	return ret;
8305 }
8306 
8307 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8308 {
8309 	struct phy_device *phydev = hdev->hw.mac.phydev;
8310 	u16 remote_advertising = 0;
8311 	u16 local_advertising;
8312 	u32 rx_pause, tx_pause;
8313 	u8 flowctl;
8314 
8315 	if (!phydev->link || !phydev->autoneg)
8316 		return 0;
8317 
8318 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8319 
8320 	if (phydev->pause)
8321 		remote_advertising = LPA_PAUSE_CAP;
8322 
8323 	if (phydev->asym_pause)
8324 		remote_advertising |= LPA_PAUSE_ASYM;
8325 
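	/* Resolve the TX/RX pause configuration from the local and link
	 * partner pause/asym-pause advertisements, using the standard flow
	 * control resolution implemented by mii_resolve_flowctrl_fdx().
	 */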
8326 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8327 					   remote_advertising);
8328 	tx_pause = flowctl & FLOW_CTRL_TX;
8329 	rx_pause = flowctl & FLOW_CTRL_RX;
8330 
8331 	if (phydev->duplex == HCLGE_MAC_HALF) {
8332 		tx_pause = 0;
8333 		rx_pause = 0;
8334 	}
8335 
8336 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8337 }
8338 
8339 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8340 				 u32 *rx_en, u32 *tx_en)
8341 {
8342 	struct hclge_vport *vport = hclge_get_vport(handle);
8343 	struct hclge_dev *hdev = vport->back;
8344 	struct phy_device *phydev = hdev->hw.mac.phydev;
8345 
8346 	*auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8347 
8348 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8349 		*rx_en = 0;
8350 		*tx_en = 0;
8351 		return;
8352 	}
8353 
8354 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8355 		*rx_en = 1;
8356 		*tx_en = 0;
8357 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8358 		*tx_en = 1;
8359 		*rx_en = 0;
8360 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8361 		*rx_en = 1;
8362 		*tx_en = 1;
8363 	} else {
8364 		*rx_en = 0;
8365 		*tx_en = 0;
8366 	}
8367 }
8368 
8369 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8370 					 u32 rx_en, u32 tx_en)
8371 {
8372 	if (rx_en && tx_en)
8373 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
8374 	else if (rx_en && !tx_en)
8375 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8376 	else if (!rx_en && tx_en)
8377 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8378 	else
8379 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
8380 
8381 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8382 }
8383 
8384 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8385 				u32 rx_en, u32 tx_en)
8386 {
8387 	struct hclge_vport *vport = hclge_get_vport(handle);
8388 	struct hclge_dev *hdev = vport->back;
8389 	struct phy_device *phydev = hdev->hw.mac.phydev;
8390 	u32 fc_autoneg;
8391 
8392 	if (phydev) {
8393 		fc_autoneg = hclge_get_autoneg(handle);
8394 		if (auto_neg != fc_autoneg) {
8395 			dev_info(&hdev->pdev->dev,
8396 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8397 			return -EOPNOTSUPP;
8398 		}
8399 	}
8400 
8401 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8402 		dev_info(&hdev->pdev->dev,
8403 			 "Priority flow control enabled. Cannot set link flow control.\n");
8404 		return -EOPNOTSUPP;
8405 	}
8406 
8407 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8408 
8409 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8410 
8411 	if (!auto_neg)
8412 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8413 
8414 	if (phydev)
8415 		return phy_start_aneg(phydev);
8416 
8417 	return -EOPNOTSUPP;
8418 }
8419 
8420 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8421 					  u8 *auto_neg, u32 *speed, u8 *duplex)
8422 {
8423 	struct hclge_vport *vport = hclge_get_vport(handle);
8424 	struct hclge_dev *hdev = vport->back;
8425 
8426 	if (speed)
8427 		*speed = hdev->hw.mac.speed;
8428 	if (duplex)
8429 		*duplex = hdev->hw.mac.duplex;
8430 	if (auto_neg)
8431 		*auto_neg = hdev->hw.mac.autoneg;
8432 }
8433 
8434 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8435 				 u8 *module_type)
8436 {
8437 	struct hclge_vport *vport = hclge_get_vport(handle);
8438 	struct hclge_dev *hdev = vport->back;
8439 
8440 	if (media_type)
8441 		*media_type = hdev->hw.mac.media_type;
8442 
8443 	if (module_type)
8444 		*module_type = hdev->hw.mac.module_type;
8445 }
8446 
8447 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8448 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
8449 {
8450 	struct hclge_vport *vport = hclge_get_vport(handle);
8451 	struct hclge_dev *hdev = vport->back;
8452 	struct phy_device *phydev = hdev->hw.mac.phydev;
8453 	int mdix_ctrl, mdix, is_resolved;
8454 	unsigned int retval;
8455 
8456 	if (!phydev) {
8457 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8458 		*tp_mdix = ETH_TP_MDI_INVALID;
8459 		return;
8460 	}
8461 
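	/* Select the PHY page that exposes the MDI/MDI-X registers, read the
	 * control and status registers, then switch back to the copper page.
	 */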
8462 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8463 
8464 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8465 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8466 				    HCLGE_PHY_MDIX_CTRL_S);
8467 
8468 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8469 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8470 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8471 
8472 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8473 
8474 	switch (mdix_ctrl) {
8475 	case 0x0:
8476 		*tp_mdix_ctrl = ETH_TP_MDI;
8477 		break;
8478 	case 0x1:
8479 		*tp_mdix_ctrl = ETH_TP_MDI_X;
8480 		break;
8481 	case 0x3:
8482 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8483 		break;
8484 	default:
8485 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8486 		break;
8487 	}
8488 
8489 	if (!is_resolved)
8490 		*tp_mdix = ETH_TP_MDI_INVALID;
8491 	else if (mdix)
8492 		*tp_mdix = ETH_TP_MDI_X;
8493 	else
8494 		*tp_mdix = ETH_TP_MDI;
8495 }
8496 
8497 static void hclge_info_show(struct hclge_dev *hdev)
8498 {
8499 	struct device *dev = &hdev->pdev->dev;
8500 
8501 	dev_info(dev, "PF info begin:\n");
8502 
8503 	dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8504 	dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8505 	dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8506 	dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8507 	dev_info(dev, "Numbers of vmdq vports: %d\n", hdev->num_vmdq_vport);
8508 	dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8509 	dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8510 	dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8511 	dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8512 	dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8513 	dev_info(dev, "This is %s PF\n",
8514 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8515 	dev_info(dev, "DCB %s\n",
8516 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8517 	dev_info(dev, "MQPRIO %s\n",
8518 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8519 
8520 	dev_info(dev, "PF info end.\n");
8521 }
8522 
8523 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8524 					  struct hclge_vport *vport)
8525 {
8526 	struct hnae3_client *client = vport->nic.client;
8527 	struct hclge_dev *hdev = ae_dev->priv;
8528 	int rst_cnt;
8529 	int ret;
8530 
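	/* Snapshot the reset counter before initializing the client; if a
	 * reset starts or completes while init_instance() runs, roll the
	 * instance back and return -EBUSY.
	 */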
8531 	rst_cnt = hdev->rst_stats.reset_cnt;
8532 	ret = client->ops->init_instance(&vport->nic);
8533 	if (ret)
8534 		return ret;
8535 
8536 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8537 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8538 	    rst_cnt != hdev->rst_stats.reset_cnt) {
8539 		ret = -EBUSY;
8540 		goto init_nic_err;
8541 	}
8542 
8543 	/* Enable nic hw error interrupts */
8544 	ret = hclge_config_nic_hw_error(hdev, true);
8545 	if (ret) {
8546 		dev_err(&ae_dev->pdev->dev,
8547 			"fail(%d) to enable hw error interrupts\n", ret);
8548 		goto init_nic_err;
8549 	}
8550 
8551 	hnae3_set_client_init_flag(client, ae_dev, 1);
8552 
8553 	if (netif_msg_drv(&hdev->vport->nic))
8554 		hclge_info_show(hdev);
8555 
8556 	return ret;
8557 
8558 init_nic_err:
8559 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8560 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8561 		msleep(HCLGE_WAIT_RESET_DONE);
8562 
8563 	client->ops->uninit_instance(&vport->nic, 0);
8564 
8565 	return ret;
8566 }
8567 
8568 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8569 					   struct hclge_vport *vport)
8570 {
8571 	struct hnae3_client *client = vport->roce.client;
8572 	struct hclge_dev *hdev = ae_dev->priv;
8573 	int rst_cnt;
8574 	int ret;
8575 
8576 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8577 	    !hdev->nic_client)
8578 		return 0;
8579 
8580 	client = hdev->roce_client;
8581 	ret = hclge_init_roce_base_info(vport);
8582 	if (ret)
8583 		return ret;
8584 
8585 	rst_cnt = hdev->rst_stats.reset_cnt;
8586 	ret = client->ops->init_instance(&vport->roce);
8587 	if (ret)
8588 		return ret;
8589 
8590 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8591 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8592 	    rst_cnt != hdev->rst_stats.reset_cnt) {
8593 		ret = -EBUSY;
8594 		goto init_roce_err;
8595 	}
8596 
8597 	/* Enable roce ras interrupts */
8598 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
8599 	if (ret) {
8600 		dev_err(&ae_dev->pdev->dev,
8601 			"fail(%d) to enable roce ras interrupts\n", ret);
8602 		goto init_roce_err;
8603 	}
8604 
8605 	hnae3_set_client_init_flag(client, ae_dev, 1);
8606 
8607 	return 0;
8608 
8609 init_roce_err:
8610 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8611 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8612 		msleep(HCLGE_WAIT_RESET_DONE);
8613 
8614 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
8615 
8616 	return ret;
8617 }
8618 
8619 static int hclge_init_client_instance(struct hnae3_client *client,
8620 				      struct hnae3_ae_dev *ae_dev)
8621 {
8622 	struct hclge_dev *hdev = ae_dev->priv;
8623 	struct hclge_vport *vport;
8624 	int i, ret;
8625 
8626 	for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
8627 		vport = &hdev->vport[i];
8628 
8629 		switch (client->type) {
8630 		case HNAE3_CLIENT_KNIC:
8631 
8632 			hdev->nic_client = client;
8633 			vport->nic.client = client;
8634 			ret = hclge_init_nic_client_instance(ae_dev, vport);
8635 			if (ret)
8636 				goto clear_nic;
8637 
8638 			ret = hclge_init_roce_client_instance(ae_dev, vport);
8639 			if (ret)
8640 				goto clear_roce;
8641 
8642 			break;
8643 		case HNAE3_CLIENT_ROCE:
8644 			if (hnae3_dev_roce_supported(hdev)) {
8645 				hdev->roce_client = client;
8646 				vport->roce.client = client;
8647 			}
8648 
8649 			ret = hclge_init_roce_client_instance(ae_dev, vport);
8650 			if (ret)
8651 				goto clear_roce;
8652 
8653 			break;
8654 		default:
8655 			return -EINVAL;
8656 		}
8657 	}
8658 
8659 	return ret;
8660 
8661 clear_nic:
8662 	hdev->nic_client = NULL;
8663 	vport->nic.client = NULL;
8664 	return ret;
8665 clear_roce:
8666 	hdev->roce_client = NULL;
8667 	vport->roce.client = NULL;
8668 	return ret;
8669 }
8670 
8671 static void hclge_uninit_client_instance(struct hnae3_client *client,
8672 					 struct hnae3_ae_dev *ae_dev)
8673 {
8674 	struct hclge_dev *hdev = ae_dev->priv;
8675 	struct hclge_vport *vport;
8676 	int i;
8677 
8678 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8679 		vport = &hdev->vport[i];
8680 		if (hdev->roce_client) {
8681 			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8682 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8683 				msleep(HCLGE_WAIT_RESET_DONE);
8684 
8685 			hdev->roce_client->ops->uninit_instance(&vport->roce,
8686 								0);
8687 			hdev->roce_client = NULL;
8688 			vport->roce.client = NULL;
8689 		}
8690 		if (client->type == HNAE3_CLIENT_ROCE)
8691 			return;
8692 		if (hdev->nic_client && client->ops->uninit_instance) {
8693 			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8694 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8695 				msleep(HCLGE_WAIT_RESET_DONE);
8696 
8697 			client->ops->uninit_instance(&vport->nic, 0);
8698 			hdev->nic_client = NULL;
8699 			vport->nic.client = NULL;
8700 		}
8701 	}
8702 }
8703 
8704 static int hclge_pci_init(struct hclge_dev *hdev)
8705 {
8706 	struct pci_dev *pdev = hdev->pdev;
8707 	struct hclge_hw *hw;
8708 	int ret;
8709 
8710 	ret = pci_enable_device(pdev);
8711 	if (ret) {
8712 		dev_err(&pdev->dev, "failed to enable PCI device\n");
8713 		return ret;
8714 	}
8715 
8716 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8717 	if (ret) {
8718 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8719 		if (ret) {
8720 			dev_err(&pdev->dev,
8721 				"can't set consistent PCI DMA");
8722 			goto err_disable_device;
8723 		}
8724 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8725 	}
8726 
8727 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8728 	if (ret) {
8729 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8730 		goto err_disable_device;
8731 	}
8732 
8733 	pci_set_master(pdev);
8734 	hw = &hdev->hw;
8735 	hw->io_base = pcim_iomap(pdev, 2, 0);
8736 	if (!hw->io_base) {
8737 		dev_err(&pdev->dev, "Can't map configuration register space\n");
8738 		ret = -ENOMEM;
8739 		goto err_clr_master;
8740 	}
8741 
8742 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8743 
8744 	return 0;
8745 err_clr_master:
8746 	pci_clear_master(pdev);
8747 	pci_release_regions(pdev);
8748 err_disable_device:
8749 	pci_disable_device(pdev);
8750 
8751 	return ret;
8752 }
8753 
8754 static void hclge_pci_uninit(struct hclge_dev *hdev)
8755 {
8756 	struct pci_dev *pdev = hdev->pdev;
8757 
8758 	pcim_iounmap(pdev, hdev->hw.io_base);
8759 	pci_free_irq_vectors(pdev);
8760 	pci_clear_master(pdev);
8761 	pci_release_mem_regions(pdev);
8762 	pci_disable_device(pdev);
8763 }
8764 
8765 static void hclge_state_init(struct hclge_dev *hdev)
8766 {
8767 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8768 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
8769 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8770 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8771 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8772 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8773 }
8774 
8775 static void hclge_state_uninit(struct hclge_dev *hdev)
8776 {
8777 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
8778 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
8779 
8780 	if (hdev->reset_timer.function)
8781 		del_timer_sync(&hdev->reset_timer);
8782 	if (hdev->service_task.work.func)
8783 		cancel_delayed_work_sync(&hdev->service_task);
8784 	if (hdev->rst_service_task.func)
8785 		cancel_work_sync(&hdev->rst_service_task);
8786 	if (hdev->mbx_service_task.func)
8787 		cancel_work_sync(&hdev->mbx_service_task);
8788 }
8789 
8790 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8791 {
8792 #define HCLGE_FLR_WAIT_MS	100
8793 #define HCLGE_FLR_WAIT_CNT	50
8794 	struct hclge_dev *hdev = ae_dev->priv;
8795 	int cnt = 0;
8796 
8797 	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8798 	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8799 	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8800 	hclge_reset_event(hdev->pdev, NULL);
8801 
8802 	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8803 	       cnt++ < HCLGE_FLR_WAIT_CNT)
8804 		msleep(HCLGE_FLR_WAIT_MS);
8805 
8806 	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8807 		dev_err(&hdev->pdev->dev,
8808 			"flr wait down timeout: %d\n", cnt);
8809 }
8810 
8811 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8812 {
8813 	struct hclge_dev *hdev = ae_dev->priv;
8814 
8815 	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8816 }
8817 
8818 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
8819 {
8820 	u16 i;
8821 
8822 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8823 		struct hclge_vport *vport = &hdev->vport[i];
8824 		int ret;
8825 
8826 		/* Send cmd to clear VF's FUNC_RST_ING */
8827 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
8828 		if (ret)
8829 			dev_warn(&hdev->pdev->dev,
8830 				 "clear vf(%d) rst failed %d!\n",
8831 				 vport->vport_id, ret);
8832 	}
8833 }
8834 
8835 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8836 {
8837 	struct pci_dev *pdev = ae_dev->pdev;
8838 	struct hclge_dev *hdev;
8839 	int ret;
8840 
8841 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8842 	if (!hdev) {
8843 		ret = -ENOMEM;
8844 		goto out;
8845 	}
8846 
8847 	hdev->pdev = pdev;
8848 	hdev->ae_dev = ae_dev;
8849 	hdev->reset_type = HNAE3_NONE_RESET;
8850 	hdev->reset_level = HNAE3_FUNC_RESET;
8851 	ae_dev->priv = hdev;
8852 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8853 
8854 	mutex_init(&hdev->vport_lock);
8855 	mutex_init(&hdev->vport_cfg_mutex);
8856 	spin_lock_init(&hdev->fd_rule_lock);
8857 
8858 	ret = hclge_pci_init(hdev);
8859 	if (ret) {
8860 		dev_err(&pdev->dev, "PCI init failed\n");
8861 		goto out;
8862 	}
8863 
8864 	/* Firmware command queue initialization */
8865 	ret = hclge_cmd_queue_init(hdev);
8866 	if (ret) {
8867 		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8868 		goto err_pci_uninit;
8869 	}
8870 
8871 	/* Firmware command initialization */
8872 	ret = hclge_cmd_init(hdev);
8873 	if (ret)
8874 		goto err_cmd_uninit;
8875 
8876 	ret = hclge_get_cap(hdev);
8877 	if (ret) {
8878 		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8879 			ret);
8880 		goto err_cmd_uninit;
8881 	}
8882 
8883 	ret = hclge_configure(hdev);
8884 	if (ret) {
8885 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8886 		goto err_cmd_uninit;
8887 	}
8888 
8889 	ret = hclge_init_msi(hdev);
8890 	if (ret) {
8891 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8892 		goto err_cmd_uninit;
8893 	}
8894 
8895 	ret = hclge_misc_irq_init(hdev);
8896 	if (ret) {
8897 		dev_err(&pdev->dev,
8898 			"Misc IRQ(vector0) init error, ret = %d.\n",
8899 			ret);
8900 		goto err_msi_uninit;
8901 	}
8902 
8903 	ret = hclge_alloc_tqps(hdev);
8904 	if (ret) {
8905 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8906 		goto err_msi_irq_uninit;
8907 	}
8908 
8909 	ret = hclge_alloc_vport(hdev);
8910 	if (ret) {
8911 		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8912 		goto err_msi_irq_uninit;
8913 	}
8914 
8915 	ret = hclge_map_tqp(hdev);
8916 	if (ret) {
8917 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8918 		goto err_msi_irq_uninit;
8919 	}
8920 
8921 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8922 		ret = hclge_mac_mdio_config(hdev);
8923 		if (ret) {
8924 			dev_err(&hdev->pdev->dev,
8925 				"mdio config fail ret=%d\n", ret);
8926 			goto err_msi_irq_uninit;
8927 		}
8928 	}
8929 
8930 	ret = hclge_init_umv_space(hdev);
8931 	if (ret) {
8932 		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8933 		goto err_mdiobus_unreg;
8934 	}
8935 
8936 	ret = hclge_mac_init(hdev);
8937 	if (ret) {
8938 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8939 		goto err_mdiobus_unreg;
8940 	}
8941 
8942 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8943 	if (ret) {
8944 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8945 		goto err_mdiobus_unreg;
8946 	}
8947 
8948 	ret = hclge_config_gro(hdev, true);
8949 	if (ret)
8950 		goto err_mdiobus_unreg;
8951 
8952 	ret = hclge_init_vlan_config(hdev);
8953 	if (ret) {
8954 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8955 		goto err_mdiobus_unreg;
8956 	}
8957 
8958 	ret = hclge_tm_schd_init(hdev);
8959 	if (ret) {
8960 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8961 		goto err_mdiobus_unreg;
8962 	}
8963 
8964 	hclge_rss_init_cfg(hdev);
8965 	ret = hclge_rss_init_hw(hdev);
8966 	if (ret) {
8967 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8968 		goto err_mdiobus_unreg;
8969 	}
8970 
8971 	ret = init_mgr_tbl(hdev);
8972 	if (ret) {
8973 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8974 		goto err_mdiobus_unreg;
8975 	}
8976 
8977 	ret = hclge_init_fd_config(hdev);
8978 	if (ret) {
8979 		dev_err(&pdev->dev,
8980 			"fd table init fail, ret=%d\n", ret);
8981 		goto err_mdiobus_unreg;
8982 	}
8983 
8984 	INIT_KFIFO(hdev->mac_tnl_log);
8985 
8986 	hclge_dcb_ops_set(hdev);
8987 
8988 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8989 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
8990 	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8991 	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8992 
8993 	/* Set up affinity after the service timer setup because add_timer_on
8994 	 * is called in the affinity notify callback.
8995 	 */
8996 	hclge_misc_affinity_setup(hdev);
8997 
8998 	hclge_clear_all_event_cause(hdev);
8999 	hclge_clear_resetting_state(hdev);
9000 
9001 	/* Log and clear the hw errors that have already occurred */
9002 	hclge_handle_all_hns_hw_errors(ae_dev);
9003 
9004 	/* Request a delayed reset for error recovery because an immediate
9005 	 * global reset on a PF may affect the pending initialization of other PFs
9006 	 */
9007 	if (ae_dev->hw_err_reset_req) {
9008 		enum hnae3_reset_type reset_level;
9009 
9010 		reset_level = hclge_get_reset_level(ae_dev,
9011 						    &ae_dev->hw_err_reset_req);
9012 		hclge_set_def_reset_request(ae_dev, reset_level);
9013 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9014 	}
9015 
9016 	/* Enable MISC vector(vector0) */
9017 	hclge_enable_vector(&hdev->misc_vector, true);
9018 
9019 	hclge_state_init(hdev);
9020 	hdev->last_reset_time = jiffies;
9021 
9022 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9023 		 HCLGE_DRIVER_NAME);
9024 
9025 	return 0;
9026 
9027 err_mdiobus_unreg:
9028 	if (hdev->hw.mac.phydev)
9029 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
9030 err_msi_irq_uninit:
9031 	hclge_misc_irq_uninit(hdev);
9032 err_msi_uninit:
9033 	pci_free_irq_vectors(pdev);
9034 err_cmd_uninit:
9035 	hclge_cmd_uninit(hdev);
9036 err_pci_uninit:
9037 	pcim_iounmap(pdev, hdev->hw.io_base);
9038 	pci_clear_master(pdev);
9039 	pci_release_regions(pdev);
9040 	pci_disable_device(pdev);
9041 out:
9042 	return ret;
9043 }
9044 
9045 static void hclge_stats_clear(struct hclge_dev *hdev)
9046 {
9047 	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
9048 }
9049 
9050 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9051 {
9052 	struct hclge_vport *vport = hdev->vport;
9053 	int i;
9054 
9055 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9056 		hclge_vport_stop(vport);
9057 		vport++;
9058 	}
9059 }
9060 
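/* hclge_reset_ae_dev - re-initialize the hardware after a reset. Unlike
 * hclge_init_ae_dev(), no resources are allocated here; the already existing
 * vports, vectors and TQPs are reused and only the hardware configuration
 * (command queue, MAC, VLAN, TM, RSS, flow director and error interrupts)
 * is re-programmed.
 */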
9061 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9062 {
9063 	struct hclge_dev *hdev = ae_dev->priv;
9064 	struct pci_dev *pdev = ae_dev->pdev;
9065 	int ret;
9066 
9067 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9068 
9069 	hclge_stats_clear(hdev);
9070 	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9071 	memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9072 
9073 	ret = hclge_cmd_init(hdev);
9074 	if (ret) {
9075 		dev_err(&pdev->dev, "Cmd queue init failed\n");
9076 		return ret;
9077 	}
9078 
9079 	ret = hclge_map_tqp(hdev);
9080 	if (ret) {
9081 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9082 		return ret;
9083 	}
9084 
9085 	hclge_reset_umv_space(hdev);
9086 
9087 	ret = hclge_mac_init(hdev);
9088 	if (ret) {
9089 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9090 		return ret;
9091 	}
9092 
9093 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9094 	if (ret) {
9095 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9096 		return ret;
9097 	}
9098 
9099 	ret = hclge_config_gro(hdev, true);
9100 	if (ret)
9101 		return ret;
9102 
9103 	ret = hclge_init_vlan_config(hdev);
9104 	if (ret) {
9105 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9106 		return ret;
9107 	}
9108 
9109 	ret = hclge_tm_init_hw(hdev, true);
9110 	if (ret) {
9111 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9112 		return ret;
9113 	}
9114 
9115 	ret = hclge_rss_init_hw(hdev);
9116 	if (ret) {
9117 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9118 		return ret;
9119 	}
9120 
9121 	ret = hclge_init_fd_config(hdev);
9122 	if (ret) {
9123 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9124 		return ret;
9125 	}
9126 
9127 	/* Re-enable the hw error interrupts because
9128 	 * the interrupts get disabled on global reset.
9129 	 */
9130 	ret = hclge_config_nic_hw_error(hdev, true);
9131 	if (ret) {
9132 		dev_err(&pdev->dev,
9133 			"fail(%d) to re-enable NIC hw error interrupts\n",
9134 			ret);
9135 		return ret;
9136 	}
9137 
9138 	if (hdev->roce_client) {
9139 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
9140 		if (ret) {
9141 			dev_err(&pdev->dev,
9142 				"fail(%d) to re-enable roce ras interrupts\n",
9143 				ret);
9144 			return ret;
9145 		}
9146 	}
9147 
9148 	hclge_reset_vport_state(hdev);
9149 
9150 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9151 		 HCLGE_DRIVER_NAME);
9152 
9153 	return 0;
9154 }
9155 
9156 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9157 {
9158 	struct hclge_dev *hdev = ae_dev->priv;
9159 	struct hclge_mac *mac = &hdev->hw.mac;
9160 
9161 	hclge_misc_affinity_teardown(hdev);
9162 	hclge_state_uninit(hdev);
9163 
9164 	if (mac->phydev)
9165 		mdiobus_unregister(mac->mdio_bus);
9166 
9167 	hclge_uninit_umv_space(hdev);
9168 
9169 	/* Disable MISC vector(vector0) */
9170 	hclge_enable_vector(&hdev->misc_vector, false);
9171 	synchronize_irq(hdev->misc_vector.vector_irq);
9172 
9173 	/* Disable all hw interrupts */
9174 	hclge_config_mac_tnl_int(hdev, false);
9175 	hclge_config_nic_hw_error(hdev, false);
9176 	hclge_config_rocee_ras_interrupt(hdev, false);
9177 
9178 	hclge_cmd_uninit(hdev);
9179 	hclge_misc_irq_uninit(hdev);
9180 	hclge_pci_uninit(hdev);
9181 	mutex_destroy(&hdev->vport_lock);
9182 	hclge_uninit_vport_mac_table(hdev);
9183 	hclge_uninit_vport_vlan_table(hdev);
9184 	mutex_destroy(&hdev->vport_cfg_mutex);
9185 	ae_dev->priv = NULL;
9186 }
9187 
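/* The maximum combined channel count is limited by both the RSS capability
 * of the device and the TQPs available per TC on this vport.
 */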
9188 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9189 {
9190 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9191 	struct hclge_vport *vport = hclge_get_vport(handle);
9192 	struct hclge_dev *hdev = vport->back;
9193 
9194 	return min_t(u32, hdev->rss_size_max,
9195 		     vport->alloc_tqps / kinfo->num_tc);
9196 }
9197 
9198 static void hclge_get_channels(struct hnae3_handle *handle,
9199 			       struct ethtool_channels *ch)
9200 {
9201 	ch->max_combined = hclge_get_max_channels(handle);
9202 	ch->other_count = 1;
9203 	ch->max_other = 1;
9204 	ch->combined_count = handle->kinfo.rss_size;
9205 }
9206 
9207 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9208 					u16 *alloc_tqps, u16 *max_rss_size)
9209 {
9210 	struct hclge_vport *vport = hclge_get_vport(handle);
9211 	struct hclge_dev *hdev = vport->back;
9212 
9213 	*alloc_tqps = vport->alloc_tqps;
9214 	*max_rss_size = hdev->rss_size_max;
9215 }
9216 
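/* Change the number of combined channels: record the requested RSS size,
 * remap the vport TQPs via the TM module, reprogram the RSS TC mode and,
 * unless the user has configured the RSS indirection table explicitly,
 * rebuild it for the new RSS size.
 */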
9217 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9218 			      bool rxfh_configured)
9219 {
9220 	struct hclge_vport *vport = hclge_get_vport(handle);
9221 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9222 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9223 	struct hclge_dev *hdev = vport->back;
9224 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9225 	int cur_rss_size = kinfo->rss_size;
9226 	int cur_tqps = kinfo->num_tqps;
9227 	u16 tc_valid[HCLGE_MAX_TC_NUM];
9228 	u16 roundup_size;
9229 	u32 *rss_indir;
9230 	unsigned int i;
9231 	int ret;
9232 
9233 	kinfo->req_rss_size = new_tqps_num;
9234 
9235 	ret = hclge_tm_vport_map_update(hdev);
9236 	if (ret) {
9237 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
9238 		return ret;
9239 	}
9240 
9241 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
9242 	roundup_size = ilog2(roundup_size);
9243 	/* Set the RSS TC mode according to the new RSS size */
9244 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
9245 		tc_valid[i] = 0;
9246 
9247 		if (!(hdev->hw_tc_map & BIT(i)))
9248 			continue;
9249 
9250 		tc_valid[i] = 1;
9251 		tc_size[i] = roundup_size;
9252 		tc_offset[i] = kinfo->rss_size * i;
9253 	}
9254 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
9255 	if (ret)
9256 		return ret;
9257 
9258 	/* RSS indirection table has been configured by the user */
9259 	if (rxfh_configured)
9260 		goto out;
9261 
9262 	/* Reinitialize the RSS indirection table according to the new RSS size */
9263 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
9264 	if (!rss_indir)
9265 		return -ENOMEM;
9266 
9267 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
9268 		rss_indir[i] = i % kinfo->rss_size;
9269 
9270 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
9271 	if (ret)
9272 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
9273 			ret);
9274 
9275 	kfree(rss_indir);
9276 
9277 out:
9278 	if (!ret)
9279 		dev_info(&hdev->pdev->dev,
9280 			 "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
9281 			 cur_rss_size, kinfo->rss_size,
9282 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
9283 
9284 	return ret;
9285 }
9286 
9287 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
9288 			      u32 *regs_num_64_bit)
9289 {
9290 	struct hclge_desc desc;
9291 	u32 total_num;
9292 	int ret;
9293 
9294 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
9295 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9296 	if (ret) {
9297 		dev_err(&hdev->pdev->dev,
9298 			"Query register number cmd failed, ret = %d.\n", ret);
9299 		return ret;
9300 	}
9301 
9302 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
9303 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
9304 
9305 	total_num = *regs_num_32_bit + *regs_num_64_bit;
9306 	if (!total_num)
9307 		return -EINVAL;
9308 
9309 	return 0;
9310 }
9311 
9312 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9313 				 void *data)
9314 {
9315 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
9316 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
9317 
9318 	struct hclge_desc *desc;
9319 	u32 *reg_val = data;
9320 	__le32 *desc_data;
9321 	int nodata_num;
9322 	int cmd_num;
9323 	int i, k, n;
9324 	int ret;
9325 
9326 	if (regs_num == 0)
9327 		return 0;
9328 
9329 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
9330 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
9331 			       HCLGE_32_BIT_REG_RTN_DATANUM);
9332 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9333 	if (!desc)
9334 		return -ENOMEM;
9335 
9336 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
9337 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9338 	if (ret) {
9339 		dev_err(&hdev->pdev->dev,
9340 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
9341 		kfree(desc);
9342 		return ret;
9343 	}
9344 
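	/* Copy the register values out of the descriptors. The first
	 * descriptor returns nodata_num fewer words because its command
	 * header is not register data; continuation descriptors are read
	 * in full.
	 */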
9345 	for (i = 0; i < cmd_num; i++) {
9346 		if (i == 0) {
9347 			desc_data = (__le32 *)(&desc[i].data[0]);
9348 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
9349 		} else {
9350 			desc_data = (__le32 *)(&desc[i]);
9351 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
9352 		}
9353 		for (k = 0; k < n; k++) {
9354 			*reg_val++ = le32_to_cpu(*desc_data++);
9355 
9356 			regs_num--;
9357 			if (!regs_num)
9358 				break;
9359 		}
9360 	}
9361 
9362 	kfree(desc);
9363 	return 0;
9364 }
9365 
9366 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9367 				 void *data)
9368 {
9369 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
9370 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
9371 
9372 	struct hclge_desc *desc;
9373 	u64 *reg_val = data;
9374 	__le64 *desc_data;
9375 	int nodata_len;
9376 	int cmd_num;
9377 	int i, k, n;
9378 	int ret;
9379 
9380 	if (regs_num == 0)
9381 		return 0;
9382 
9383 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
9384 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
9385 			       HCLGE_64_BIT_REG_RTN_DATANUM);
9386 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9387 	if (!desc)
9388 		return -ENOMEM;
9389 
9390 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
9391 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9392 	if (ret) {
9393 		dev_err(&hdev->pdev->dev,
9394 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
9395 		kfree(desc);
9396 		return ret;
9397 	}
9398 
9399 	for (i = 0; i < cmd_num; i++) {
9400 		if (i == 0) {
9401 			desc_data = (__le64 *)(&desc[i].data[0]);
9402 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
9403 		} else {
9404 			desc_data = (__le64 *)(&desc[i]);
9405 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
9406 		}
9407 		for (k = 0; k < n; k++) {
9408 			*reg_val++ = le64_to_cpu(*desc_data++);
9409 
9410 			regs_num--;
9411 			if (!regs_num)
9412 				break;
9413 		}
9414 	}
9415 
9416 	kfree(desc);
9417 	return 0;
9418 }
9419 
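/* Layout constants for the register dump: values are grouped in lines of
 * REG_NUM_PER_LINE u32 words and each register block is terminated with
 * SEPARATOR_VALUE padding words.
 */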
9420 #define MAX_SEPARATE_NUM	4
9421 #define SEPARATOR_VALUE		0xFDFCFBFA
9422 #define REG_NUM_PER_LINE	4
9423 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
9424 #define REG_SEPARATOR_LINE	1
9425 #define REG_NUM_REMAIN_MASK	3
9426 #define BD_LIST_MAX_NUM		30
9427 
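/* Send a chain of four descriptors so the firmware returns, in one query,
 * the BD count required for each DFX register type.
 */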
9428 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
9429 {
9430 	/* Prepare 4 commands to query the DFX BD number */
9431 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
9432 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9433 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
9434 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9435 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
9436 	desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9437 	hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
9438 
9439 	return hclge_cmd_send(&hdev->hw, desc, 4);
9440 }
9441 
9442 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
9443 				    int *bd_num_list,
9444 				    u32 type_num)
9445 {
9446 #define HCLGE_DFX_REG_BD_NUM	4
9447 
9448 	u32 entries_per_desc, desc_index, index, offset, i;
9449 	struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
9450 	int ret;
9451 
9452 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
9453 	if (ret) {
9454 		dev_err(&hdev->pdev->dev,
9455 			"Get dfx bd num fail, status is %d.\n", ret);
9456 		return ret;
9457 	}
9458 
9459 	entries_per_desc = ARRAY_SIZE(desc[0].data);
9460 	for (i = 0; i < type_num; i++) {
9461 		offset = hclge_dfx_bd_offset_list[i];
9462 		index = offset % entries_per_desc;
9463 		desc_index = offset / entries_per_desc;
9464 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
9465 	}
9466 
9467 	return ret;
9468 }
9469 
9470 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
9471 				  struct hclge_desc *desc_src, int bd_num,
9472 				  enum hclge_opcode_type cmd)
9473 {
9474 	struct hclge_desc *desc = desc_src;
9475 	int i, ret;
9476 
9477 	hclge_cmd_setup_basic_desc(desc, cmd, true);
9478 	for (i = 0; i < bd_num - 1; i++) {
9479 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9480 		desc++;
9481 		hclge_cmd_setup_basic_desc(desc, cmd, true);
9482 	}
9483 
9484 	desc = desc_src;
9485 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
9486 	if (ret)
9487 		dev_err(&hdev->pdev->dev,
9488 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
9489 			cmd, ret);
9490 
9491 	return ret;
9492 }
9493 
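/* Copy the DFX register values out of the descriptors, append the
 * SEPARATOR_VALUE padding words and return the total number of u32 words
 * written to the dump buffer.
 */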
9494 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
9495 				    void *data)
9496 {
9497 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
9498 	struct hclge_desc *desc = desc_src;
9499 	u32 *reg = data;
9500 
9501 	entries_per_desc = ARRAY_SIZE(desc->data);
9502 	reg_num = entries_per_desc * bd_num;
9503 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
9504 	for (i = 0; i < reg_num; i++) {
9505 		index = i % entries_per_desc;
9506 		desc_index = i / entries_per_desc;
9507 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
9508 	}
9509 	for (i = 0; i < separator_num; i++)
9510 		*reg++ = SEPARATOR_VALUE;
9511 
9512 	return reg_num + separator_num;
9513 }
9514 
9515 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
9516 {
9517 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
9518 	int data_len_per_desc, data_len, bd_num, i;
9519 	int bd_num_list[BD_LIST_MAX_NUM];
9520 	int ret;
9521 
9522 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
9523 	if (ret) {
9524 		dev_err(&hdev->pdev->dev,
9525 			"Get dfx reg bd num fail, status is %d.\n", ret);
9526 		return ret;
9527 	}
9528 
9529 	data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data);
9530 	*len = 0;
9531 	for (i = 0; i < dfx_reg_type_num; i++) {
9532 		bd_num = bd_num_list[i];
9533 		data_len = data_len_per_desc * bd_num;
9534 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
9535 	}
9536 
9537 	return ret;
9538 }
9539 
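/* Read every DFX register block into the dump buffer, one query per
 * register type, using the per-type BD counts to size the descriptor array.
 */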
9540 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
9541 {
9542 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
9543 	int bd_num, bd_num_max, buf_len, i;
9544 	int bd_num_list[BD_LIST_MAX_NUM];
9545 	struct hclge_desc *desc_src;
9546 	u32 *reg = data;
9547 	int ret;
9548 
9549 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
9550 	if (ret) {
9551 		dev_err(&hdev->pdev->dev,
9552 			"Get dfx reg bd num fail, status is %d.\n", ret);
9553 		return ret;
9554 	}
9555 
9556 	bd_num_max = bd_num_list[0];
9557 	for (i = 1; i < dfx_reg_type_num; i++)
9558 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
9559 
9560 	buf_len = sizeof(*desc_src) * bd_num_max;
9561 	desc_src = kzalloc(buf_len, GFP_KERNEL);
9562 	if (!desc_src) {
9563 		dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
9564 		return -ENOMEM;
9565 	}
9566 
9567 	for (i = 0; i < dfx_reg_type_num; i++) {
9568 		bd_num = bd_num_list[i];
9569 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
9570 					     hclge_dfx_reg_opcode_list[i]);
9571 		if (ret) {
9572 			dev_err(&hdev->pdev->dev,
9573 				"Get dfx reg fail, status is %d.\n", ret);
9574 			break;
9575 		}
9576 
9577 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
9578 	}
9579 
9580 	kfree(desc_src);
9581 	return ret;
9582 }
9583 
9584 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
9585 			      struct hnae3_knic_private_info *kinfo)
9586 {
9587 #define HCLGE_RING_REG_OFFSET		0x200
9588 #define HCLGE_RING_INT_REG_OFFSET	0x4
9589 
9590 	int i, j, reg_num, separator_num;
9591 	int data_num_sum;
9592 	u32 *reg = data;
9593 
9594 	/* fetch per-PF register values from the PF PCIe register space */
9595 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
9596 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9597 	for (i = 0; i < reg_num; i++)
9598 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9599 	for (i = 0; i < separator_num; i++)
9600 		*reg++ = SEPARATOR_VALUE;
9601 	data_num_sum = reg_num + separator_num;
9602 
9603 	reg_num = ARRAY_SIZE(common_reg_addr_list);
9604 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9605 	for (i = 0; i < reg_num; i++)
9606 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9607 	for (i = 0; i < separator_num; i++)
9608 		*reg++ = SEPARATOR_VALUE;
9609 	data_num_sum += reg_num + separator_num;
9610 
9611 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
9612 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9613 	for (j = 0; j < kinfo->num_tqps; j++) {
9614 		for (i = 0; i < reg_num; i++)
9615 			*reg++ = hclge_read_dev(&hdev->hw,
9616 						ring_reg_addr_list[i] +
9617 						HCLGE_RING_REG_OFFSET * j);
9618 		for (i = 0; i < separator_num; i++)
9619 			*reg++ = SEPARATOR_VALUE;
9620 	}
9621 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
9622 
9623 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
9624 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9625 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
9626 		for (i = 0; i < reg_num; i++)
9627 			*reg++ = hclge_read_dev(&hdev->hw,
9628 						tqp_intr_reg_addr_list[i] +
9629 						HCLGE_RING_INT_REG_OFFSET * j);
9630 		for (i = 0; i < separator_num; i++)
9631 			*reg++ = SEPARATOR_VALUE;
9632 	}
9633 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
9634 
9635 	return data_num_sum;
9636 }
9637 
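/* Return the size in bytes of the buffer needed by hclge_get_regs():
 * one separator-terminated group of lines per register block plus the
 * DFX register area.
 */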
9638 static int hclge_get_regs_len(struct hnae3_handle *handle)
9639 {
9640 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
9641 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9642 	struct hclge_vport *vport = hclge_get_vport(handle);
9643 	struct hclge_dev *hdev = vport->back;
9644 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
9645 	int regs_lines_32_bit, regs_lines_64_bit;
9646 	int ret;
9647 
9648 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9649 	if (ret) {
9650 		dev_err(&hdev->pdev->dev,
9651 			"Get register number failed, ret = %d.\n", ret);
9652 		return ret;
9653 	}
9654 
9655 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
9656 	if (ret) {
9657 		dev_err(&hdev->pdev->dev,
9658 			"Get dfx reg len failed, ret = %d.\n", ret);
9659 		return ret;
9660 	}
9661 
9662 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
9663 		REG_SEPARATOR_LINE;
9664 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
9665 		REG_SEPARATOR_LINE;
9666 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
9667 		REG_SEPARATOR_LINE;
9668 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
9669 		REG_SEPARATOR_LINE;
9670 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
9671 		REG_SEPARATOR_LINE;
9672 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
9673 		REG_SEPARATOR_LINE;
9674 
9675 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9676 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
9677 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
9678 }
9679 
9680 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9681 			   void *data)
9682 {
9683 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9684 	struct hclge_vport *vport = hclge_get_vport(handle);
9685 	struct hclge_dev *hdev = vport->back;
9686 	u32 regs_num_32_bit, regs_num_64_bit;
9687 	int i, reg_num, separator_num, ret;
9688 	u32 *reg = data;
9689 
9690 	*version = hdev->fw_version;
9691 
9692 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9693 	if (ret) {
9694 		dev_err(&hdev->pdev->dev,
9695 			"Get register number failed, ret = %d.\n", ret);
9696 		return;
9697 	}
9698 
9699 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
9700 
9701 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9702 	if (ret) {
9703 		dev_err(&hdev->pdev->dev,
9704 			"Get 32 bit register failed, ret = %d.\n", ret);
9705 		return;
9706 	}
9707 	reg_num = regs_num_32_bit;
9708 	reg += reg_num;
9709 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9710 	for (i = 0; i < separator_num; i++)
9711 		*reg++ = SEPARATOR_VALUE;
9712 
9713 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9714 	if (ret) {
9715 		dev_err(&hdev->pdev->dev,
9716 			"Get 64 bit register failed, ret = %d.\n", ret);
9717 		return;
9718 	}
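	/* each 64-bit register occupies two u32 slots in the dump buffer */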
9719 	reg_num = regs_num_64_bit * 2;
9720 	reg += reg_num;
9721 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9722 	for (i = 0; i < separator_num; i++)
9723 		*reg++ = SEPARATOR_VALUE;
9724 
9725 	ret = hclge_get_dfx_reg(hdev, reg);
9726 	if (ret)
9727 		dev_err(&hdev->pdev->dev,
9728 			"Get dfx register failed, ret = %d.\n", ret);
9729 }
9730 
9731 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9732 {
9733 	struct hclge_set_led_state_cmd *req;
9734 	struct hclge_desc desc;
9735 	int ret;
9736 
9737 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9738 
9739 	req = (struct hclge_set_led_state_cmd *)desc.data;
9740 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9741 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9742 
9743 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9744 	if (ret)
9745 		dev_err(&hdev->pdev->dev,
9746 			"Send set led state cmd error, ret =%d\n", ret);
9747 
9748 	return ret;
9749 }
9750 
9751 enum hclge_led_status {
9752 	HCLGE_LED_OFF,
9753 	HCLGE_LED_ON,
9754 	HCLGE_LED_NO_CHANGE = 0xFF,
9755 };
9756 
9757 static int hclge_set_led_id(struct hnae3_handle *handle,
9758 			    enum ethtool_phys_id_state status)
9759 {
9760 	struct hclge_vport *vport = hclge_get_vport(handle);
9761 	struct hclge_dev *hdev = vport->back;
9762 
9763 	switch (status) {
9764 	case ETHTOOL_ID_ACTIVE:
9765 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
9766 	case ETHTOOL_ID_INACTIVE:
9767 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
9768 	default:
9769 		return -EINVAL;
9770 	}
9771 }
9772 
9773 static void hclge_get_link_mode(struct hnae3_handle *handle,
9774 				unsigned long *supported,
9775 				unsigned long *advertising)
9776 {
9777 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
9778 	struct hclge_vport *vport = hclge_get_vport(handle);
9779 	struct hclge_dev *hdev = vport->back;
9780 	unsigned int idx = 0;
9781 
9782 	for (; idx < size; idx++) {
9783 		supported[idx] = hdev->hw.mac.supported[idx];
9784 		advertising[idx] = hdev->hw.mac.advertising[idx];
9785 	}
9786 }
9787 
9788 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
9789 {
9790 	struct hclge_vport *vport = hclge_get_vport(handle);
9791 	struct hclge_dev *hdev = vport->back;
9792 
9793 	return hclge_config_gro(hdev, enable);
9794 }
9795 
9796 static const struct hnae3_ae_ops hclge_ops = {
9797 	.init_ae_dev = hclge_init_ae_dev,
9798 	.uninit_ae_dev = hclge_uninit_ae_dev,
9799 	.flr_prepare = hclge_flr_prepare,
9800 	.flr_done = hclge_flr_done,
9801 	.init_client_instance = hclge_init_client_instance,
9802 	.uninit_client_instance = hclge_uninit_client_instance,
9803 	.map_ring_to_vector = hclge_map_ring_to_vector,
9804 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
9805 	.get_vector = hclge_get_vector,
9806 	.put_vector = hclge_put_vector,
9807 	.set_promisc_mode = hclge_set_promisc_mode,
9808 	.set_loopback = hclge_set_loopback,
9809 	.start = hclge_ae_start,
9810 	.stop = hclge_ae_stop,
9811 	.client_start = hclge_client_start,
9812 	.client_stop = hclge_client_stop,
9813 	.get_status = hclge_get_status,
9814 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
9815 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
9816 	.get_media_type = hclge_get_media_type,
9817 	.check_port_speed = hclge_check_port_speed,
9818 	.get_fec = hclge_get_fec,
9819 	.set_fec = hclge_set_fec,
9820 	.get_rss_key_size = hclge_get_rss_key_size,
9821 	.get_rss_indir_size = hclge_get_rss_indir_size,
9822 	.get_rss = hclge_get_rss,
9823 	.set_rss = hclge_set_rss,
9824 	.set_rss_tuple = hclge_set_rss_tuple,
9825 	.get_rss_tuple = hclge_get_rss_tuple,
9826 	.get_tc_size = hclge_get_tc_size,
9827 	.get_mac_addr = hclge_get_mac_addr,
9828 	.set_mac_addr = hclge_set_mac_addr,
9829 	.do_ioctl = hclge_do_ioctl,
9830 	.add_uc_addr = hclge_add_uc_addr,
9831 	.rm_uc_addr = hclge_rm_uc_addr,
9832 	.add_mc_addr = hclge_add_mc_addr,
9833 	.rm_mc_addr = hclge_rm_mc_addr,
9834 	.set_autoneg = hclge_set_autoneg,
9835 	.get_autoneg = hclge_get_autoneg,
9836 	.restart_autoneg = hclge_restart_autoneg,
9837 	.halt_autoneg = hclge_halt_autoneg,
9838 	.get_pauseparam = hclge_get_pauseparam,
9839 	.set_pauseparam = hclge_set_pauseparam,
9840 	.set_mtu = hclge_set_mtu,
9841 	.reset_queue = hclge_reset_tqp,
9842 	.get_stats = hclge_get_stats,
9843 	.get_mac_stats = hclge_get_mac_stat,
9844 	.update_stats = hclge_update_stats,
9845 	.get_strings = hclge_get_strings,
9846 	.get_sset_count = hclge_get_sset_count,
9847 	.get_fw_version = hclge_get_fw_version,
9848 	.get_mdix_mode = hclge_get_mdix_mode,
9849 	.enable_vlan_filter = hclge_enable_vlan_filter,
9850 	.set_vlan_filter = hclge_set_vlan_filter,
9851 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
9852 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
9853 	.reset_event = hclge_reset_event,
9854 	.get_reset_level = hclge_get_reset_level,
9855 	.set_default_reset_request = hclge_set_def_reset_request,
9856 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
9857 	.set_channels = hclge_set_channels,
9858 	.get_channels = hclge_get_channels,
9859 	.get_regs_len = hclge_get_regs_len,
9860 	.get_regs = hclge_get_regs,
9861 	.set_led_id = hclge_set_led_id,
9862 	.get_link_mode = hclge_get_link_mode,
9863 	.add_fd_entry = hclge_add_fd_entry,
9864 	.del_fd_entry = hclge_del_fd_entry,
9865 	.del_all_fd_entries = hclge_del_all_fd_entries,
9866 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
9867 	.get_fd_rule_info = hclge_get_fd_rule_info,
9868 	.get_fd_all_rules = hclge_get_all_rules,
9869 	.restore_fd_rules = hclge_restore_fd_entries,
9870 	.enable_fd = hclge_enable_fd,
9871 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
9872 	.dbg_run_cmd = hclge_dbg_run_cmd,
9873 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
9874 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
9875 	.ae_dev_resetting = hclge_ae_dev_resetting,
9876 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
9877 	.set_gro_en = hclge_gro_en,
9878 	.get_global_queue_id = hclge_covert_handle_qid_global,
9879 	.set_timer_task = hclge_set_timer_task,
9880 	.mac_connect_phy = hclge_mac_connect_phy,
9881 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
9882 	.restore_vlan_table = hclge_restore_vlan_table,
9883 };
9884 
9885 static struct hnae3_ae_algo ae_algo = {
9886 	.ops = &hclge_ops,
9887 	.pdev_id_table = ae_algo_pci_tbl,
9888 };
9889 
9890 static int hclge_init(void)
9891 {
9892 	pr_info("%s is initializing\n", HCLGE_NAME);
9893 
9894 	hnae3_register_ae_algo(&ae_algo);
9895 
9896 	return 0;
9897 }
9898 
9899 static void hclge_exit(void)
9900 {
9901 	hnae3_unregister_ae_algo(&ae_algo);
9902 }
9903 module_init(hclge_init);
9904 module_exit(hclge_exit);
9905 
9906 MODULE_LICENSE("GPL");
9907 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
9908 MODULE_DESCRIPTION("HCLGE Driver");
9909 MODULE_VERSION(HCLGE_MOD_VERSION);
9910