xref: /linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c (revision 9009b455811b0fa1f6b0adfa94db136984db5a38)
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26 
27 #define HCLGE_NAME			"hclge"
28 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
29 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
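/* Usage note (illustrative): the two macros above are combined as
 * HCLGE_STATS_READ(&hdev->mac_stats,
 *		    HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num))
 * to read one u64 counter from struct hclge_mac_stats by its byte offset.
 */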
30 
31 #define HCLGE_BUF_SIZE_UNIT	256U
32 #define HCLGE_BUF_MUL_BY	2
33 #define HCLGE_BUF_DIV_BY	2
34 #define NEED_RESERVE_TC_NUM	2
35 #define BUF_MAX_PERCENT		100
36 #define BUF_RESERVE_PERCENT	90
37 
38 #define HCLGE_RESET_MAX_FAIL_CNT	5
39 #define HCLGE_RESET_SYNC_TIME		100
40 #define HCLGE_PF_RESET_SYNC_TIME	20
41 #define HCLGE_PF_RESET_SYNC_CNT		1500
42 
43 /* Get DFX BD number offset */
44 #define HCLGE_DFX_BIOS_BD_OFFSET        1
45 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
46 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
47 #define HCLGE_DFX_IGU_BD_OFFSET         4
48 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
49 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
50 #define HCLGE_DFX_NCSI_BD_OFFSET        7
51 #define HCLGE_DFX_RTC_BD_OFFSET         8
52 #define HCLGE_DFX_PPP_BD_OFFSET         9
53 #define HCLGE_DFX_RCB_BD_OFFSET         10
54 #define HCLGE_DFX_TQP_BD_OFFSET         11
55 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
56 
57 #define HCLGE_LINK_STATUS_MS	10
58 
59 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
60 static int hclge_init_vlan_config(struct hclge_dev *hdev);
61 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
62 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
63 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
64 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
65 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
66 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
67 						   unsigned long *addr);
68 static int hclge_set_default_loopback(struct hclge_dev *hdev);
69 
70 static void hclge_sync_mac_table(struct hclge_dev *hdev);
71 static void hclge_restore_hw_table(struct hclge_dev *hdev);
72 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
73 static void hclge_sync_fd_table(struct hclge_dev *hdev);
74 
75 static struct hnae3_ae_algo ae_algo;
76 
77 static struct workqueue_struct *hclge_wq;
78 
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
88 	/* required last entry */
89 	{0, }
90 };
91 
92 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
93 
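/* Register address lists: these appear to be iterated generically by the
 * register dump helpers further down in this file (e.g. for ethtool
 * register dumps).
 */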
94 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95 					 HCLGE_CMDQ_TX_ADDR_H_REG,
96 					 HCLGE_CMDQ_TX_DEPTH_REG,
97 					 HCLGE_CMDQ_TX_TAIL_REG,
98 					 HCLGE_CMDQ_TX_HEAD_REG,
99 					 HCLGE_CMDQ_RX_ADDR_L_REG,
100 					 HCLGE_CMDQ_RX_ADDR_H_REG,
101 					 HCLGE_CMDQ_RX_DEPTH_REG,
102 					 HCLGE_CMDQ_RX_TAIL_REG,
103 					 HCLGE_CMDQ_RX_HEAD_REG,
104 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
105 					 HCLGE_CMDQ_INTR_STS_REG,
106 					 HCLGE_CMDQ_INTR_EN_REG,
107 					 HCLGE_CMDQ_INTR_GEN_REG};
108 
109 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110 					   HCLGE_VECTOR0_OTER_EN_REG,
111 					   HCLGE_MISC_RESET_STS_REG,
112 					   HCLGE_MISC_VECTOR_INT_STS,
113 					   HCLGE_GLOBAL_RESET_REG,
114 					   HCLGE_FUN_RST_ING,
115 					   HCLGE_GRO_EN_REG};
116 
117 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118 					 HCLGE_RING_RX_ADDR_H_REG,
119 					 HCLGE_RING_RX_BD_NUM_REG,
120 					 HCLGE_RING_RX_BD_LENGTH_REG,
121 					 HCLGE_RING_RX_MERGE_EN_REG,
122 					 HCLGE_RING_RX_TAIL_REG,
123 					 HCLGE_RING_RX_HEAD_REG,
124 					 HCLGE_RING_RX_FBD_NUM_REG,
125 					 HCLGE_RING_RX_OFFSET_REG,
126 					 HCLGE_RING_RX_FBD_OFFSET_REG,
127 					 HCLGE_RING_RX_STASH_REG,
128 					 HCLGE_RING_RX_BD_ERR_REG,
129 					 HCLGE_RING_TX_ADDR_L_REG,
130 					 HCLGE_RING_TX_ADDR_H_REG,
131 					 HCLGE_RING_TX_BD_NUM_REG,
132 					 HCLGE_RING_TX_PRIORITY_REG,
133 					 HCLGE_RING_TX_TC_REG,
134 					 HCLGE_RING_TX_MERGE_EN_REG,
135 					 HCLGE_RING_TX_TAIL_REG,
136 					 HCLGE_RING_TX_HEAD_REG,
137 					 HCLGE_RING_TX_FBD_NUM_REG,
138 					 HCLGE_RING_TX_OFFSET_REG,
139 					 HCLGE_RING_TX_EBD_NUM_REG,
140 					 HCLGE_RING_TX_EBD_OFFSET_REG,
141 					 HCLGE_RING_TX_BD_ERR_REG,
142 					 HCLGE_RING_EN_REG};
143 
144 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145 					     HCLGE_TQP_INTR_GL0_REG,
146 					     HCLGE_TQP_INTR_GL1_REG,
147 					     HCLGE_TQP_INTR_GL2_REG,
148 					     HCLGE_TQP_INTR_RL_REG};
149 
150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
151 	"App    Loopback test",
152 	"Serdes serial Loopback test",
153 	"Serdes parallel Loopback test",
154 	"Phy    Loopback test"
155 };
156 
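/* Each entry maps an ethtool statistics string to the byte offset of the
 * corresponding u64 counter in struct hclge_mac_stats; the offset is later
 * dereferenced via HCLGE_STATS_READ() in hclge_comm_get_stats().
 */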
157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158 	{"mac_tx_mac_pause_num",
159 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160 	{"mac_rx_mac_pause_num",
161 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162 	{"mac_tx_control_pkt_num",
163 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164 	{"mac_rx_control_pkt_num",
165 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166 	{"mac_tx_pfc_pkt_num",
167 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
168 	{"mac_tx_pfc_pri0_pkt_num",
169 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
170 	{"mac_tx_pfc_pri1_pkt_num",
171 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
172 	{"mac_tx_pfc_pri2_pkt_num",
173 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
174 	{"mac_tx_pfc_pri3_pkt_num",
175 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
176 	{"mac_tx_pfc_pri4_pkt_num",
177 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
178 	{"mac_tx_pfc_pri5_pkt_num",
179 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
180 	{"mac_tx_pfc_pri6_pkt_num",
181 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
182 	{"mac_tx_pfc_pri7_pkt_num",
183 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
184 	{"mac_rx_pfc_pkt_num",
185 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
186 	{"mac_rx_pfc_pri0_pkt_num",
187 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
188 	{"mac_rx_pfc_pri1_pkt_num",
189 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
190 	{"mac_rx_pfc_pri2_pkt_num",
191 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
192 	{"mac_rx_pfc_pri3_pkt_num",
193 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
194 	{"mac_rx_pfc_pri4_pkt_num",
195 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
196 	{"mac_rx_pfc_pri5_pkt_num",
197 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
198 	{"mac_rx_pfc_pri6_pkt_num",
199 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
200 	{"mac_rx_pfc_pri7_pkt_num",
201 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
202 	{"mac_tx_total_pkt_num",
203 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
204 	{"mac_tx_total_oct_num",
205 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
206 	{"mac_tx_good_pkt_num",
207 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
208 	{"mac_tx_bad_pkt_num",
209 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
210 	{"mac_tx_good_oct_num",
211 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
212 	{"mac_tx_bad_oct_num",
213 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
214 	{"mac_tx_uni_pkt_num",
215 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
216 	{"mac_tx_multi_pkt_num",
217 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
218 	{"mac_tx_broad_pkt_num",
219 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
220 	{"mac_tx_undersize_pkt_num",
221 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
222 	{"mac_tx_oversize_pkt_num",
223 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
224 	{"mac_tx_64_oct_pkt_num",
225 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
226 	{"mac_tx_65_127_oct_pkt_num",
227 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
228 	{"mac_tx_128_255_oct_pkt_num",
229 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
230 	{"mac_tx_256_511_oct_pkt_num",
231 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
232 	{"mac_tx_512_1023_oct_pkt_num",
233 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
234 	{"mac_tx_1024_1518_oct_pkt_num",
235 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
236 	{"mac_tx_1519_2047_oct_pkt_num",
237 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
238 	{"mac_tx_2048_4095_oct_pkt_num",
239 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
240 	{"mac_tx_4096_8191_oct_pkt_num",
241 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
242 	{"mac_tx_8192_9216_oct_pkt_num",
243 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
244 	{"mac_tx_9217_12287_oct_pkt_num",
245 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
246 	{"mac_tx_12288_16383_oct_pkt_num",
247 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
248 	{"mac_tx_1519_max_good_pkt_num",
249 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
250 	{"mac_tx_1519_max_bad_pkt_num",
251 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
252 	{"mac_rx_total_pkt_num",
253 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
254 	{"mac_rx_total_oct_num",
255 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
256 	{"mac_rx_good_pkt_num",
257 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
258 	{"mac_rx_bad_pkt_num",
259 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
260 	{"mac_rx_good_oct_num",
261 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
262 	{"mac_rx_bad_oct_num",
263 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
264 	{"mac_rx_uni_pkt_num",
265 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
266 	{"mac_rx_multi_pkt_num",
267 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
268 	{"mac_rx_broad_pkt_num",
269 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
270 	{"mac_rx_undersize_pkt_num",
271 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
272 	{"mac_rx_oversize_pkt_num",
273 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
274 	{"mac_rx_64_oct_pkt_num",
275 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
276 	{"mac_rx_65_127_oct_pkt_num",
277 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
278 	{"mac_rx_128_255_oct_pkt_num",
279 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
280 	{"mac_rx_256_511_oct_pkt_num",
281 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
282 	{"mac_rx_512_1023_oct_pkt_num",
283 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
284 	{"mac_rx_1024_1518_oct_pkt_num",
285 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
286 	{"mac_rx_1519_2047_oct_pkt_num",
287 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
288 	{"mac_rx_2048_4095_oct_pkt_num",
289 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
290 	{"mac_rx_4096_8191_oct_pkt_num",
291 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
292 	{"mac_rx_8192_9216_oct_pkt_num",
293 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
294 	{"mac_rx_9217_12287_oct_pkt_num",
295 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
296 	{"mac_rx_12288_16383_oct_pkt_num",
297 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
298 	{"mac_rx_1519_max_good_pkt_num",
299 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
300 	{"mac_rx_1519_max_bad_pkt_num",
301 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
302 
303 	{"mac_tx_fragment_pkt_num",
304 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
305 	{"mac_tx_undermin_pkt_num",
306 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
307 	{"mac_tx_jabber_pkt_num",
308 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
309 	{"mac_tx_err_all_pkt_num",
310 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
311 	{"mac_tx_from_app_good_pkt_num",
312 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
313 	{"mac_tx_from_app_bad_pkt_num",
314 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
315 	{"mac_rx_fragment_pkt_num",
316 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
317 	{"mac_rx_undermin_pkt_num",
318 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
319 	{"mac_rx_jabber_pkt_num",
320 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
321 	{"mac_rx_fcs_err_pkt_num",
322 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
323 	{"mac_rx_send_app_good_pkt_num",
324 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
325 	{"mac_rx_send_app_bad_pkt_num",
326 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
327 };
328 
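/* MAC manager table: the single entry matches LLDP frames (EtherType
 * ETH_P_LLDP sent to the nearest-bridge multicast address
 * 01:80:c2:00:00:0e).
 */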
329 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
330 	{
331 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
332 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
333 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
334 		.i_port_bitmap = 0x1,
335 	},
336 };
337 
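/* Default RSS hash key; this matches the widely used default Toeplitz key
 * shipped by many NIC drivers.
 */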
338 static const u8 hclge_hash_key[] = {
339 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
344 };
345 
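/* The two DFX arrays below are index-matched: hclge_dfx_bd_offset_list[i]
 * gives the BD-number offset used together with the query opcode
 * hclge_dfx_reg_opcode_list[i], so keep them in the same order.
 */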
346 static const u32 hclge_dfx_bd_offset_list[] = {
347 	HCLGE_DFX_BIOS_BD_OFFSET,
348 	HCLGE_DFX_SSU_0_BD_OFFSET,
349 	HCLGE_DFX_SSU_1_BD_OFFSET,
350 	HCLGE_DFX_IGU_BD_OFFSET,
351 	HCLGE_DFX_RPU_0_BD_OFFSET,
352 	HCLGE_DFX_RPU_1_BD_OFFSET,
353 	HCLGE_DFX_NCSI_BD_OFFSET,
354 	HCLGE_DFX_RTC_BD_OFFSET,
355 	HCLGE_DFX_PPP_BD_OFFSET,
356 	HCLGE_DFX_RCB_BD_OFFSET,
357 	HCLGE_DFX_TQP_BD_OFFSET,
358 	HCLGE_DFX_SSU_2_BD_OFFSET
359 };
360 
361 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
363 	HCLGE_OPC_DFX_SSU_REG_0,
364 	HCLGE_OPC_DFX_SSU_REG_1,
365 	HCLGE_OPC_DFX_IGU_EGU_REG,
366 	HCLGE_OPC_DFX_RPU_REG_0,
367 	HCLGE_OPC_DFX_RPU_REG_1,
368 	HCLGE_OPC_DFX_NCSI_REG,
369 	HCLGE_OPC_DFX_RTC_REG,
370 	HCLGE_OPC_DFX_PPP_REG,
371 	HCLGE_OPC_DFX_RCB_REG,
372 	HCLGE_OPC_DFX_TQP_REG,
373 	HCLGE_OPC_DFX_SSU_REG_2
374 };
375 
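/* Flow director meta-data key layout: each entry is { field id, width of
 * the field in bits }.
 */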
376 static const struct key_info meta_data_key_info[] = {
377 	{ PACKET_TYPE_ID, 6},
378 	{ IP_FRAGEMENT, 1},
379 	{ ROCE_TYPE, 1},
380 	{ NEXT_KEY, 5},
381 	{ VLAN_NUMBER, 2},
382 	{ SRC_VPORT, 12},
383 	{ DST_VPORT, 12},
384 	{ TUNNEL_PACKET, 1},
385 };
386 
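/* Flow director tuple key layout: each entry appears to be { tuple id,
 * width in bits, key option, offset of the tuple value in struct
 * hclge_fd_rule, offset of the tuple mask }, with -1 offsets for tuples
 * that have no corresponding user-visible rule field.
 */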
387 static const struct key_info tuple_key_info[] = {
388 	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
389 	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
390 	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
391 	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
392 	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
393 	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
394 	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
395 	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
396 	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
397 	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
398 	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
399 	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
400 	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
401 	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
402 	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
403 	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
404 	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
405 	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
406 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
407 	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
408 	  offsetof(struct hclge_fd_rule, tuples.src_mac),
409 	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
410 	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
411 	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
412 	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
413 	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
414 	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
415 	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
416 	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
417 	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
418 	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
419 	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
420 	{ INNER_IP_TOS, 8, KEY_OPT_U8,
421 	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
422 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
423 	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
424 	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
425 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
426 	{ INNER_SRC_IP, 32, KEY_OPT_IP,
427 	  offsetof(struct hclge_fd_rule, tuples.src_ip),
428 	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
429 	{ INNER_DST_IP, 32, KEY_OPT_IP,
430 	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
431 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
432 	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
433 	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
434 	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
435 	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
436 	  offsetof(struct hclge_fd_rule, tuples.src_port),
437 	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
438 	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
439 	  offsetof(struct hclge_fd_rule, tuples.dst_port),
440 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
441 	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
442 	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
443 	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
444 };
445 
446 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
447 {
448 #define HCLGE_MAC_CMD_NUM 21
449 
450 	u64 *data = (u64 *)(&hdev->mac_stats);
451 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
452 	__le64 *desc_data;
453 	int i, k, n;
454 	int ret;
455 
456 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
457 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
458 	if (ret) {
459 		dev_err(&hdev->pdev->dev,
460 			"Get MAC pkt stats fail, status = %d.\n", ret);
461 
462 		return ret;
463 	}
464 
465 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
466 		/* for special opcode 0x0032, only the first desc has the header */
467 		if (unlikely(i == 0)) {
468 			desc_data = (__le64 *)(&desc[i].data[0]);
469 			n = HCLGE_RD_FIRST_STATS_NUM;
470 		} else {
471 			desc_data = (__le64 *)(&desc[i]);
472 			n = HCLGE_RD_OTHER_STATS_NUM;
473 		}
474 
475 		for (k = 0; k < n; k++) {
476 			*data += le64_to_cpu(*desc_data);
477 			data++;
478 			desc_data++;
479 		}
480 	}
481 
482 	return 0;
483 }
484 
485 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
486 {
487 	u64 *data = (u64 *)(&hdev->mac_stats);
488 	struct hclge_desc *desc;
489 	__le64 *desc_data;
490 	u16 i, k, n;
491 	int ret;
492 
493 	/* This may be called inside atomic sections,
494 	 * so GFP_ATOMIC is more suitable here
495 	 */
496 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
497 	if (!desc)
498 		return -ENOMEM;
499 
500 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
501 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
502 	if (ret) {
503 		kfree(desc);
504 		return ret;
505 	}
506 
507 	for (i = 0; i < desc_num; i++) {
508 		/* for special opcode 0x0034, only the first desc has the header */
509 		if (i == 0) {
510 			desc_data = (__le64 *)(&desc[i].data[0]);
511 			n = HCLGE_RD_FIRST_STATS_NUM;
512 		} else {
513 			desc_data = (__le64 *)(&desc[i]);
514 			n = HCLGE_RD_OTHER_STATS_NUM;
515 		}
516 
517 		for (k = 0; k < n; k++) {
518 			*data += le64_to_cpu(*desc_data);
519 			data++;
520 			desc_data++;
521 		}
522 	}
523 
524 	kfree(desc);
525 
526 	return 0;
527 }
528 
529 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
530 {
531 	struct hclge_desc desc;
532 	__le32 *desc_data;
533 	u32 reg_num;
534 	int ret;
535 
536 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
537 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
538 	if (ret)
539 		return ret;
540 
541 	desc_data = (__le32 *)(&desc.data[0]);
542 	reg_num = le32_to_cpu(*desc_data);
543 
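	/* one descriptor holds the first 3 stats registers alongside the
	 * header; the remaining registers are packed 4 per descriptor and
	 * the tail is rounded up to a whole descriptor.
	 */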
544 	*desc_num = 1 + ((reg_num - 3) >> 2) +
545 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
546 
547 	return 0;
548 }
549 
550 static int hclge_mac_update_stats(struct hclge_dev *hdev)
551 {
552 	u32 desc_num;
553 	int ret;
554 
555 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
556 	/* if the query succeeds, the firmware supports the new stats method */
557 	if (!ret)
558 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
559 	else if (ret == -EOPNOTSUPP)
560 		ret = hclge_mac_update_stats_defective(hdev);
561 	else
562 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
563 
564 	return ret;
565 }
566 
567 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
568 {
569 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
570 	struct hclge_vport *vport = hclge_get_vport(handle);
571 	struct hclge_dev *hdev = vport->back;
572 	struct hnae3_queue *queue;
573 	struct hclge_desc desc[1];
574 	struct hclge_tqp *tqp;
575 	int ret, i;
576 
577 	for (i = 0; i < kinfo->num_tqps; i++) {
578 		queue = handle->kinfo.tqp[i];
579 		tqp = container_of(queue, struct hclge_tqp, q);
580 		/* command : HCLGE_OPC_QUERY_IGU_STAT */
581 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
582 					   true);
583 
584 		desc[0].data[0] = cpu_to_le32(tqp->index);
585 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
586 		if (ret) {
587 			dev_err(&hdev->pdev->dev,
588 				"Query tqp stat fail, status = %d, queue = %d\n",
589 				ret, i);
590 			return ret;
591 		}
592 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
593 			le32_to_cpu(desc[0].data[1]);
594 	}
595 
596 	for (i = 0; i < kinfo->num_tqps; i++) {
597 		queue = handle->kinfo.tqp[i];
598 		tqp = container_of(queue, struct hclge_tqp, q);
599 		/* command : HCLGE_OPC_QUERY_IGU_STAT */
600 		hclge_cmd_setup_basic_desc(&desc[0],
601 					   HCLGE_OPC_QUERY_TX_STATS,
602 					   true);
603 
604 		desc[0].data[0] = cpu_to_le32(tqp->index);
605 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
606 		if (ret) {
607 			dev_err(&hdev->pdev->dev,
608 				"Query tqp stat fail, status = %d, queue = %d\n",
609 				ret, i);
610 			return ret;
611 		}
612 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
613 			le32_to_cpu(desc[0].data[1]);
614 	}
615 
616 	return 0;
617 }
618 
619 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
620 {
621 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622 	struct hclge_tqp *tqp;
623 	u64 *buff = data;
624 	int i;
625 
626 	for (i = 0; i < kinfo->num_tqps; i++) {
627 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
628 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
629 	}
630 
631 	for (i = 0; i < kinfo->num_tqps; i++) {
632 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
633 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
634 	}
635 
636 	return buff;
637 }
638 
639 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
640 {
641 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
642 
643 	/* each tqp has a TX queue and an RX queue */
644 	return kinfo->num_tqps * (2);
645 }
646 
647 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
648 {
649 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
650 	u8 *buff = data;
651 	int i;
652 
653 	for (i = 0; i < kinfo->num_tqps; i++) {
654 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
655 			struct hclge_tqp, q);
656 		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
657 			 tqp->index);
658 		buff = buff + ETH_GSTRING_LEN;
659 	}
660 
661 	for (i = 0; i < kinfo->num_tqps; i++) {
662 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
663 			struct hclge_tqp, q);
664 		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
665 			 tqp->index);
666 		buff = buff + ETH_GSTRING_LEN;
667 	}
668 
669 	return buff;
670 }
671 
672 static u64 *hclge_comm_get_stats(const void *comm_stats,
673 				 const struct hclge_comm_stats_str strs[],
674 				 int size, u64 *data)
675 {
676 	u64 *buf = data;
677 	u32 i;
678 
679 	for (i = 0; i < size; i++)
680 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
681 
682 	return buf + size;
683 }
684 
685 static u8 *hclge_comm_get_strings(u32 stringset,
686 				  const struct hclge_comm_stats_str strs[],
687 				  int size, u8 *data)
688 {
689 	char *buff = (char *)data;
690 	u32 i;
691 
692 	if (stringset != ETH_SS_STATS)
693 		return buff;
694 
695 	for (i = 0; i < size; i++) {
696 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
697 		buff = buff + ETH_GSTRING_LEN;
698 	}
699 
700 	return (u8 *)buff;
701 }
702 
703 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
704 {
705 	struct hnae3_handle *handle;
706 	int status;
707 
708 	handle = &hdev->vport[0].nic;
709 	if (handle->client) {
710 		status = hclge_tqps_update_stats(handle);
711 		if (status) {
712 			dev_err(&hdev->pdev->dev,
713 				"Update TQPS stats fail, status = %d.\n",
714 				status);
715 		}
716 	}
717 
718 	status = hclge_mac_update_stats(hdev);
719 	if (status)
720 		dev_err(&hdev->pdev->dev,
721 			"Update MAC stats fail, status = %d.\n", status);
722 }
723 
724 static void hclge_update_stats(struct hnae3_handle *handle,
725 			       struct net_device_stats *net_stats)
726 {
727 	struct hclge_vport *vport = hclge_get_vport(handle);
728 	struct hclge_dev *hdev = vport->back;
729 	int status;
730 
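	/* HCLGE_STATE_STATISTICS_UPDATING serializes updaters: if another
	 * context is already refreshing the statistics, just return.
	 */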
731 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
732 		return;
733 
734 	status = hclge_mac_update_stats(hdev);
735 	if (status)
736 		dev_err(&hdev->pdev->dev,
737 			"Update MAC stats fail, status = %d.\n",
738 			status);
739 
740 	status = hclge_tqps_update_stats(handle);
741 	if (status)
742 		dev_err(&hdev->pdev->dev,
743 			"Update TQPS stats fail, status = %d.\n",
744 			status);
745 
746 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
747 }
748 
749 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
750 {
751 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
752 		HNAE3_SUPPORT_PHY_LOOPBACK |\
753 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
754 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
755 
756 	struct hclge_vport *vport = hclge_get_vport(handle);
757 	struct hclge_dev *hdev = vport->back;
758 	int count = 0;
759 
760 	/* Loopback test support rules:
761 	 * mac: only GE mode is supported
762 	 * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
763 	 * phy: only supported when a phy device exists on the board
764 	 */
765 	if (stringset == ETH_SS_TEST) {
766 		/* clear loopback bit flags at first */
767 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
768 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
769 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
770 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
771 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
772 			count += 1;
773 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
774 		}
775 
776 		count += 2;
777 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
778 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
779 
780 		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
781 		     hdev->hw.mac.phydev->drv->set_loopback) ||
782 		    hnae3_dev_phy_imp_supported(hdev)) {
783 			count += 1;
784 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
785 		}
786 	} else if (stringset == ETH_SS_STATS) {
787 		count = ARRAY_SIZE(g_mac_stats_string) +
788 			hclge_tqps_get_sset_count(handle, stringset);
789 	}
790 
791 	return count;
792 }
793 
794 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
795 			      u8 *data)
796 {
797 	u8 *p = data;
798 	int size;
799 
800 	if (stringset == ETH_SS_STATS) {
801 		size = ARRAY_SIZE(g_mac_stats_string);
802 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
803 					   size, p);
804 		p = hclge_tqps_get_strings(handle, p);
805 	} else if (stringset == ETH_SS_TEST) {
806 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
807 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
808 			       ETH_GSTRING_LEN);
809 			p += ETH_GSTRING_LEN;
810 		}
811 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
812 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
813 			       ETH_GSTRING_LEN);
814 			p += ETH_GSTRING_LEN;
815 		}
816 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
817 			memcpy(p,
818 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
819 			       ETH_GSTRING_LEN);
820 			p += ETH_GSTRING_LEN;
821 		}
822 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
823 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
824 			       ETH_GSTRING_LEN);
825 			p += ETH_GSTRING_LEN;
826 		}
827 	}
828 }
829 
830 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
831 {
832 	struct hclge_vport *vport = hclge_get_vport(handle);
833 	struct hclge_dev *hdev = vport->back;
834 	u64 *p;
835 
836 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
837 				 ARRAY_SIZE(g_mac_stats_string), data);
838 	p = hclge_tqps_get_stats(handle, p);
839 }
840 
841 static void hclge_get_mac_stat(struct hnae3_handle *handle,
842 			       struct hns3_mac_stats *mac_stats)
843 {
844 	struct hclge_vport *vport = hclge_get_vport(handle);
845 	struct hclge_dev *hdev = vport->back;
846 
847 	hclge_update_stats(handle, NULL);
848 
849 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
850 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
851 }
852 
853 static int hclge_parse_func_status(struct hclge_dev *hdev,
854 				   struct hclge_func_status_cmd *status)
855 {
856 #define HCLGE_MAC_ID_MASK	0xF
857 
858 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
859 		return -EINVAL;
860 
861 	/* record whether this pf is the main pf */
862 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
863 		hdev->flag |= HCLGE_FLAG_MAIN;
864 	else
865 		hdev->flag &= ~HCLGE_FLAG_MAIN;
866 
867 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
868 	return 0;
869 }
870 
871 static int hclge_query_function_status(struct hclge_dev *hdev)
872 {
873 #define HCLGE_QUERY_MAX_CNT	5
874 
875 	struct hclge_func_status_cmd *req;
876 	struct hclge_desc desc;
877 	int timeout = 0;
878 	int ret;
879 
880 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
881 	req = (struct hclge_func_status_cmd *)desc.data;
882 
883 	do {
884 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
885 		if (ret) {
886 			dev_err(&hdev->pdev->dev,
887 				"query function status failed %d.\n", ret);
888 			return ret;
889 		}
890 
891 		/* check whether pf reset is done */
892 		if (req->pf_state)
893 			break;
894 		usleep_range(1000, 2000);
895 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
896 
897 	return hclge_parse_func_status(hdev, req);
898 }
899 
900 static int hclge_query_pf_resource(struct hclge_dev *hdev)
901 {
902 	struct hclge_pf_res_cmd *req;
903 	struct hclge_desc desc;
904 	int ret;
905 
906 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
907 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
908 	if (ret) {
909 		dev_err(&hdev->pdev->dev,
910 			"query pf resource failed %d.\n", ret);
911 		return ret;
912 	}
913 
914 	req = (struct hclge_pf_res_cmd *)desc.data;
915 	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
916 			 le16_to_cpu(req->ext_tqp_num);
917 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
918 
919 	if (req->tx_buf_size)
920 		hdev->tx_buf_size =
921 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
922 	else
923 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
924 
925 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
926 
927 	if (req->dv_buf_size)
928 		hdev->dv_buf_size =
929 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
930 	else
931 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
932 
933 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
934 
935 	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
936 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
937 		dev_err(&hdev->pdev->dev,
938 			"only %u msi resources available, not enough for pf(min:2).\n",
939 			hdev->num_nic_msi);
940 		return -EINVAL;
941 	}
942 
943 	if (hnae3_dev_roce_supported(hdev)) {
944 		hdev->num_roce_msi =
945 			le16_to_cpu(req->pf_intr_vector_number_roce);
946 
947 		/* The PF should have both NIC and RoCE vectors;
948 		 * NIC vectors are queued before RoCE vectors.
949 		 */
950 		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
951 	} else {
952 		hdev->num_msi = hdev->num_nic_msi;
953 	}
954 
955 	return 0;
956 }
957 
958 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
959 {
960 	switch (speed_cmd) {
961 	case 6:
962 		*speed = HCLGE_MAC_SPEED_10M;
963 		break;
964 	case 7:
965 		*speed = HCLGE_MAC_SPEED_100M;
966 		break;
967 	case 0:
968 		*speed = HCLGE_MAC_SPEED_1G;
969 		break;
970 	case 1:
971 		*speed = HCLGE_MAC_SPEED_10G;
972 		break;
973 	case 2:
974 		*speed = HCLGE_MAC_SPEED_25G;
975 		break;
976 	case 3:
977 		*speed = HCLGE_MAC_SPEED_40G;
978 		break;
979 	case 4:
980 		*speed = HCLGE_MAC_SPEED_50G;
981 		break;
982 	case 5:
983 		*speed = HCLGE_MAC_SPEED_100G;
984 		break;
985 	case 8:
986 		*speed = HCLGE_MAC_SPEED_200G;
987 		break;
988 	default:
989 		return -EINVAL;
990 	}
991 
992 	return 0;
993 }
994 
995 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
996 {
997 	struct hclge_vport *vport = hclge_get_vport(handle);
998 	struct hclge_dev *hdev = vport->back;
999 	u32 speed_ability = hdev->hw.mac.speed_ability;
1000 	u32 speed_bit = 0;
1001 
1002 	switch (speed) {
1003 	case HCLGE_MAC_SPEED_10M:
1004 		speed_bit = HCLGE_SUPPORT_10M_BIT;
1005 		break;
1006 	case HCLGE_MAC_SPEED_100M:
1007 		speed_bit = HCLGE_SUPPORT_100M_BIT;
1008 		break;
1009 	case HCLGE_MAC_SPEED_1G:
1010 		speed_bit = HCLGE_SUPPORT_1G_BIT;
1011 		break;
1012 	case HCLGE_MAC_SPEED_10G:
1013 		speed_bit = HCLGE_SUPPORT_10G_BIT;
1014 		break;
1015 	case HCLGE_MAC_SPEED_25G:
1016 		speed_bit = HCLGE_SUPPORT_25G_BIT;
1017 		break;
1018 	case HCLGE_MAC_SPEED_40G:
1019 		speed_bit = HCLGE_SUPPORT_40G_BIT;
1020 		break;
1021 	case HCLGE_MAC_SPEED_50G:
1022 		speed_bit = HCLGE_SUPPORT_50G_BIT;
1023 		break;
1024 	case HCLGE_MAC_SPEED_100G:
1025 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1026 		break;
1027 	case HCLGE_MAC_SPEED_200G:
1028 		speed_bit = HCLGE_SUPPORT_200G_BIT;
1029 		break;
1030 	default:
1031 		return -EINVAL;
1032 	}
1033 
1034 	if (speed_bit & speed_ability)
1035 		return 0;
1036 
1037 	return -EINVAL;
1038 }
1039 
1040 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1041 {
1042 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1043 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1044 				 mac->supported);
1045 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1046 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1047 				 mac->supported);
1048 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1049 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1050 				 mac->supported);
1051 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1052 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1053 				 mac->supported);
1054 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1055 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1056 				 mac->supported);
1057 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1058 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1059 				 mac->supported);
1060 }
1061 
1062 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1063 {
1064 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1065 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1066 				 mac->supported);
1067 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1068 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1069 				 mac->supported);
1070 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1072 				 mac->supported);
1073 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1074 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1075 				 mac->supported);
1076 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1077 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1078 				 mac->supported);
1079 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1080 		linkmode_set_bit(
1081 			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1082 			mac->supported);
1083 }
1084 
1085 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1086 {
1087 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1088 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1089 				 mac->supported);
1090 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1091 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1092 				 mac->supported);
1093 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1094 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1095 				 mac->supported);
1096 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1097 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1098 				 mac->supported);
1099 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1100 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1101 				 mac->supported);
1102 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1103 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1104 				 mac->supported);
1105 }
1106 
1107 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1108 {
1109 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1110 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1111 				 mac->supported);
1112 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1113 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1114 				 mac->supported);
1115 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1116 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1117 				 mac->supported);
1118 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1119 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1120 				 mac->supported);
1121 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1122 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1123 				 mac->supported);
1124 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1125 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1126 				 mac->supported);
1127 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1128 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1129 				 mac->supported);
1130 }
1131 
1132 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1133 {
1134 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1135 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1136 
1137 	switch (mac->speed) {
1138 	case HCLGE_MAC_SPEED_10G:
1139 	case HCLGE_MAC_SPEED_40G:
1140 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1141 				 mac->supported);
1142 		mac->fec_ability =
1143 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1144 		break;
1145 	case HCLGE_MAC_SPEED_25G:
1146 	case HCLGE_MAC_SPEED_50G:
1147 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1148 				 mac->supported);
1149 		mac->fec_ability =
1150 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1151 			BIT(HNAE3_FEC_AUTO);
1152 		break;
1153 	case HCLGE_MAC_SPEED_100G:
1154 	case HCLGE_MAC_SPEED_200G:
1155 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1156 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1157 		break;
1158 	default:
1159 		mac->fec_ability = 0;
1160 		break;
1161 	}
1162 }
1163 
1164 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1165 					u16 speed_ability)
1166 {
1167 	struct hclge_mac *mac = &hdev->hw.mac;
1168 
1169 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1171 				 mac->supported);
1172 
1173 	hclge_convert_setting_sr(mac, speed_ability);
1174 	hclge_convert_setting_lr(mac, speed_ability);
1175 	hclge_convert_setting_cr(mac, speed_ability);
1176 	if (hnae3_dev_fec_supported(hdev))
1177 		hclge_convert_setting_fec(mac);
1178 
1179 	if (hnae3_dev_pause_supported(hdev))
1180 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1181 
1182 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1183 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1184 }
1185 
1186 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1187 					    u16 speed_ability)
1188 {
1189 	struct hclge_mac *mac = &hdev->hw.mac;
1190 
1191 	hclge_convert_setting_kr(mac, speed_ability);
1192 	if (hnae3_dev_fec_supported(hdev))
1193 		hclge_convert_setting_fec(mac);
1194 
1195 	if (hnae3_dev_pause_supported(hdev))
1196 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1197 
1198 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1199 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1200 }
1201 
1202 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1203 					 u16 speed_ability)
1204 {
1205 	unsigned long *supported = hdev->hw.mac.supported;
1206 
1207 	/* default to supporting all speeds for a GE port */
1208 	if (!speed_ability)
1209 		speed_ability = HCLGE_SUPPORT_GE;
1210 
1211 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1212 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1213 				 supported);
1214 
1215 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1216 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1217 				 supported);
1218 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1219 				 supported);
1220 	}
1221 
1222 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1223 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1224 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1225 	}
1226 
1227 	if (hnae3_dev_pause_supported(hdev)) {
1228 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1229 		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1230 	}
1231 
1232 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1233 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1234 }
1235 
1236 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1237 {
1238 	u8 media_type = hdev->hw.mac.media_type;
1239 
1240 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1241 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1242 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1243 		hclge_parse_copper_link_mode(hdev, speed_ability);
1244 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1245 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1246 }
1247 
1248 static u32 hclge_get_max_speed(u16 speed_ability)
1249 {
1250 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1251 		return HCLGE_MAC_SPEED_200G;
1252 
1253 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1254 		return HCLGE_MAC_SPEED_100G;
1255 
1256 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1257 		return HCLGE_MAC_SPEED_50G;
1258 
1259 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1260 		return HCLGE_MAC_SPEED_40G;
1261 
1262 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1263 		return HCLGE_MAC_SPEED_25G;
1264 
1265 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1266 		return HCLGE_MAC_SPEED_10G;
1267 
1268 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1269 		return HCLGE_MAC_SPEED_1G;
1270 
1271 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1272 		return HCLGE_MAC_SPEED_100M;
1273 
1274 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1275 		return HCLGE_MAC_SPEED_10M;
1276 
1277 	return HCLGE_MAC_SPEED_1G;
1278 }
1279 
1280 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1281 {
1282 #define SPEED_ABILITY_EXT_SHIFT			8
1283 
1284 	struct hclge_cfg_param_cmd *req;
1285 	u64 mac_addr_tmp_high;
1286 	u16 speed_ability_ext;
1287 	u64 mac_addr_tmp;
1288 	unsigned int i;
1289 
1290 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1291 
1292 	/* get the configuration */
1293 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1294 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1295 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1296 					    HCLGE_CFG_TQP_DESC_N_M,
1297 					    HCLGE_CFG_TQP_DESC_N_S);
1298 
1299 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1300 					HCLGE_CFG_PHY_ADDR_M,
1301 					HCLGE_CFG_PHY_ADDR_S);
1302 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1303 					  HCLGE_CFG_MEDIA_TP_M,
1304 					  HCLGE_CFG_MEDIA_TP_S);
1305 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1306 					  HCLGE_CFG_RX_BUF_LEN_M,
1307 					  HCLGE_CFG_RX_BUF_LEN_S);
1308 	/* get mac_address */
1309 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1310 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1311 					    HCLGE_CFG_MAC_ADDR_H_M,
1312 					    HCLGE_CFG_MAC_ADDR_H_S);
1313 
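	/* fold the high 16 bits of the MAC address into bits 32..47; the
	 * two-step shift is equivalent to shifting left by 32.
	 */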
1314 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1315 
1316 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1317 					     HCLGE_CFG_DEFAULT_SPEED_M,
1318 					     HCLGE_CFG_DEFAULT_SPEED_S);
1319 	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1320 					       HCLGE_CFG_RSS_SIZE_M,
1321 					       HCLGE_CFG_RSS_SIZE_S);
1322 
1323 	for (i = 0; i < ETH_ALEN; i++)
1324 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1325 
1326 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1327 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1328 
1329 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1330 					     HCLGE_CFG_SPEED_ABILITY_M,
1331 					     HCLGE_CFG_SPEED_ABILITY_S);
1332 	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1333 					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
1334 					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
1335 	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1336 
1337 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1338 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1339 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1340 	if (!cfg->umv_space)
1341 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1342 
1343 	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1344 					       HCLGE_CFG_PF_RSS_SIZE_M,
1345 					       HCLGE_CFG_PF_RSS_SIZE_S);
1346 
1347 	/* HCLGE_CFG_PF_RSS_SIZE_M holds the exponent of the PF max RSS size
1348 	 * (a power of 2) rather than the size itself, which is more
1349 	 * flexible for future changes and expansions.
1350 	 * When the PF field is 0, fall back to the VF max RSS size read
1351 	 * above, i.e. PF and VF then share the same max RSS size field
1352 	 * (HCLGE_CFG_RSS_SIZE_M/S).
1353 	 */
1354 	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1355 			       1U << cfg->pf_rss_size_max :
1356 			       cfg->vf_rss_size_max;
1357 }
1358 
1359 /* hclge_get_cfg: query the static parameters from flash
1360  * @hdev: pointer to struct hclge_dev
1361  * @hcfg: the config structure to be filled in
1362  */
1363 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1364 {
1365 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1366 	struct hclge_cfg_param_cmd *req;
1367 	unsigned int i;
1368 	int ret;
1369 
1370 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1371 		u32 offset = 0;
1372 
1373 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1374 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1375 					   true);
1376 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1377 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1378 		/* length is in units of 4 bytes when sent to the hardware */
1379 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1380 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1381 		req->offset = cpu_to_le32(offset);
1382 	}
1383 
1384 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1385 	if (ret) {
1386 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1387 		return ret;
1388 	}
1389 
1390 	hclge_parse_cfg(hcfg, desc);
1391 
1392 	return 0;
1393 }
1394 
1395 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1396 {
1397 #define HCLGE_MAX_NON_TSO_BD_NUM			8U
1398 
1399 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1400 
1401 	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1402 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1403 	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1404 	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1405 	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1406 	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1407 	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1408 }
1409 
1410 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1411 				  struct hclge_desc *desc)
1412 {
1413 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1414 	struct hclge_dev_specs_0_cmd *req0;
1415 	struct hclge_dev_specs_1_cmd *req1;
1416 
1417 	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1418 	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1419 
1420 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1421 	ae_dev->dev_specs.rss_ind_tbl_size =
1422 		le16_to_cpu(req0->rss_ind_tbl_size);
1423 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1424 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1425 	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1426 	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1427 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1428 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1429 }
1430 
1431 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1432 {
1433 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1434 
1435 	if (!dev_specs->max_non_tso_bd_num)
1436 		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1437 	if (!dev_specs->rss_ind_tbl_size)
1438 		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1439 	if (!dev_specs->rss_key_size)
1440 		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1441 	if (!dev_specs->max_tm_rate)
1442 		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1443 	if (!dev_specs->max_qset_num)
1444 		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1445 	if (!dev_specs->max_int_gl)
1446 		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1447 	if (!dev_specs->max_frm_size)
1448 		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1449 }
1450 
1451 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1452 {
1453 	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1454 	int ret;
1455 	int i;
1456 
1457 	/* set default specifications as devices lower than version V3 do not
1458 	 * support querying specifications from firmware.
1459 	 */
1460 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1461 		hclge_set_default_dev_specs(hdev);
1462 		return 0;
1463 	}
1464 
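	/* every descriptor except the last carries HCLGE_CMD_FLAG_NEXT so the
	 * firmware treats the set as one chained query.
	 */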
1465 	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1466 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1467 					   true);
1468 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1469 	}
1470 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1471 
1472 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1473 	if (ret)
1474 		return ret;
1475 
1476 	hclge_parse_dev_specs(hdev, desc);
1477 	hclge_check_dev_specs(hdev);
1478 
1479 	return 0;
1480 }
1481 
1482 static int hclge_get_cap(struct hclge_dev *hdev)
1483 {
1484 	int ret;
1485 
1486 	ret = hclge_query_function_status(hdev);
1487 	if (ret) {
1488 		dev_err(&hdev->pdev->dev,
1489 			"query function status error %d.\n", ret);
1490 		return ret;
1491 	}
1492 
1493 	/* get pf resource */
1494 	return hclge_query_pf_resource(hdev);
1495 }
1496 
1497 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1498 {
1499 #define HCLGE_MIN_TX_DESC	64
1500 #define HCLGE_MIN_RX_DESC	64
1501 
1502 	if (!is_kdump_kernel())
1503 		return;
1504 
1505 	dev_info(&hdev->pdev->dev,
1506 		 "Running kdump kernel. Using minimal resources\n");
1507 
1508 	/* the minimal number of queue pairs equals the number of vports */
1509 	hdev->num_tqps = hdev->num_req_vfs + 1;
1510 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1511 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1512 }
1513 
1514 static int hclge_configure(struct hclge_dev *hdev)
1515 {
1516 	struct hclge_cfg cfg;
1517 	unsigned int i;
1518 	int ret;
1519 
1520 	ret = hclge_get_cfg(hdev, &cfg);
1521 	if (ret)
1522 		return ret;
1523 
1524 	hdev->base_tqp_pid = 0;
1525 	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1526 	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1527 	hdev->rx_buf_len = cfg.rx_buf_len;
1528 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1529 	hdev->hw.mac.media_type = cfg.media_type;
1530 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1531 	hdev->num_tx_desc = cfg.tqp_desc_num;
1532 	hdev->num_rx_desc = cfg.tqp_desc_num;
1533 	hdev->tm_info.num_pg = 1;
1534 	hdev->tc_max = cfg.tc_num;
1535 	hdev->tm_info.hw_pfc_map = 0;
1536 	hdev->wanted_umv_size = cfg.umv_space;
1537 
1538 	if (hnae3_dev_fd_supported(hdev)) {
1539 		hdev->fd_en = true;
1540 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1541 	}
1542 
1543 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1544 	if (ret) {
1545 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1546 			cfg.default_speed, ret);
1547 		return ret;
1548 	}
1549 
1550 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1551 
1552 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1553 
1554 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1555 	    (hdev->tc_max < 1)) {
1556 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1557 			 hdev->tc_max);
1558 		hdev->tc_max = 1;
1559 	}
1560 
1561 	/* Dev does not support DCB */
1562 	if (!hnae3_dev_dcb_supported(hdev)) {
1563 		hdev->tc_max = 1;
1564 		hdev->pfc_max = 0;
1565 	} else {
1566 		hdev->pfc_max = hdev->tc_max;
1567 	}
1568 
1569 	hdev->tm_info.num_tc = 1;
1570 
1571 	/* non-contiguous tc configurations are currently not supported */
1572 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1573 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1574 
1575 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1576 
1577 	hclge_init_kdump_kernel_config(hdev);
1578 
1579 	/* Set the init affinity based on pci func number */
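	/* i.e. pick the (PCI function number % local CPU count)-th CPU of the
	 * device's NUMA node, so different PFs prefer different local CPUs.
	 */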
1580 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1581 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1582 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1583 			&hdev->affinity_mask);
1584 
1585 	return ret;
1586 }
1587 
1588 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1589 			    u16 tso_mss_max)
1590 {
1591 	struct hclge_cfg_tso_status_cmd *req;
1592 	struct hclge_desc desc;
1593 
1594 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1595 
1596 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1597 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1598 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1599 
1600 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1601 }
1602 
1603 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1604 {
1605 	struct hclge_cfg_gro_status_cmd *req;
1606 	struct hclge_desc desc;
1607 	int ret;
1608 
1609 	if (!hnae3_dev_gro_supported(hdev))
1610 		return 0;
1611 
1612 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1613 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1614 
1615 	req->gro_en = en ? 1 : 0;
1616 
1617 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1618 	if (ret)
1619 		dev_err(&hdev->pdev->dev,
1620 			"GRO hardware config cmd failed, ret = %d\n", ret);
1621 
1622 	return ret;
1623 }
1624 
1625 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1626 {
1627 	struct hclge_tqp *tqp;
1628 	int i;
1629 
1630 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1631 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1632 	if (!hdev->htqp)
1633 		return -ENOMEM;
1634 
1635 	tqp = hdev->htqp;
1636 
1637 	for (i = 0; i < hdev->num_tqps; i++) {
1638 		tqp->dev = &hdev->pdev->dev;
1639 		tqp->index = i;
1640 
1641 		tqp->q.ae_algo = &ae_algo;
1642 		tqp->q.buf_size = hdev->rx_buf_len;
1643 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1644 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1645 
1646 		/* need an extended offset to configure queues >=
1647 		 * HCLGE_TQP_MAX_SIZE_DEV_V2
1648 		 */
1649 		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1650 			tqp->q.io_base = hdev->hw.io_base +
1651 					 HCLGE_TQP_REG_OFFSET +
1652 					 i * HCLGE_TQP_REG_SIZE;
1653 		else
1654 			tqp->q.io_base = hdev->hw.io_base +
1655 					 HCLGE_TQP_REG_OFFSET +
1656 					 HCLGE_TQP_EXT_REG_OFFSET +
1657 					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1658 					 HCLGE_TQP_REG_SIZE;
1659 
1660 		tqp++;
1661 	}
1662 
1663 	return 0;
1664 }
1665 
1666 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1667 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1668 {
1669 	struct hclge_tqp_map_cmd *req;
1670 	struct hclge_desc desc;
1671 	int ret;
1672 
1673 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1674 
1675 	req = (struct hclge_tqp_map_cmd *)desc.data;
1676 	req->tqp_id = cpu_to_le16(tqp_pid);
1677 	req->tqp_vf = func_id;
1678 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1679 	if (!is_pf)
1680 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1681 	req->tqp_vid = cpu_to_le16(tqp_vid);
1682 
1683 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1684 	if (ret)
1685 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1686 
1687 	return ret;
1688 }
1689 
1690 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1691 {
1692 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1693 	struct hclge_dev *hdev = vport->back;
1694 	int i, alloced;
1695 
1696 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1697 	     alloced < num_tqps; i++) {
1698 		if (!hdev->htqp[i].alloced) {
1699 			hdev->htqp[i].q.handle = &vport->nic;
1700 			hdev->htqp[i].q.tqp_index = alloced;
1701 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1702 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1703 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1704 			hdev->htqp[i].alloced = true;
1705 			alloced++;
1706 		}
1707 	}
1708 	vport->alloc_tqps = alloced;
1709 	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1710 				vport->alloc_tqps / hdev->tm_info.num_tc);
1711 
1712 	/* ensure one-to-one mapping between irq and queue by default */
1713 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1714 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
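	/* Illustrative example: with 16 allocated TQPs, 4 TCs,
	 * pf_rss_size_max = 64 and 9 NIC MSI vectors, rss_size is first
	 * min(64, 16 / 4) = 4 and is then capped to (9 - 1) / 4 = 2, so
	 * that each queue can keep its own interrupt vector.
	 */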
1715 
1716 	return 0;
1717 }
1718 
1719 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1720 			    u16 num_tx_desc, u16 num_rx_desc)
1721 
1722 {
1723 	struct hnae3_handle *nic = &vport->nic;
1724 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1725 	struct hclge_dev *hdev = vport->back;
1726 	int ret;
1727 
1728 	kinfo->num_tx_desc = num_tx_desc;
1729 	kinfo->num_rx_desc = num_rx_desc;
1730 
1731 	kinfo->rx_buf_len = hdev->rx_buf_len;
1732 
1733 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1734 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1735 	if (!kinfo->tqp)
1736 		return -ENOMEM;
1737 
1738 	ret = hclge_assign_tqp(vport, num_tqps);
1739 	if (ret)
1740 		dev_err(&hdev->pdev->dev, "failed to assign TQPs %d.\n", ret);
1741 
1742 	return ret;
1743 }
1744 
1745 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1746 				  struct hclge_vport *vport)
1747 {
1748 	struct hnae3_handle *nic = &vport->nic;
1749 	struct hnae3_knic_private_info *kinfo;
1750 	u16 i;
1751 
1752 	kinfo = &nic->kinfo;
1753 	for (i = 0; i < vport->alloc_tqps; i++) {
1754 		struct hclge_tqp *q =
1755 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1756 		bool is_pf;
1757 		int ret;
1758 
1759 		is_pf = !(vport->vport_id);
1760 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1761 					     i, is_pf);
1762 		if (ret)
1763 			return ret;
1764 	}
1765 
1766 	return 0;
1767 }
1768 
1769 static int hclge_map_tqp(struct hclge_dev *hdev)
1770 {
1771 	struct hclge_vport *vport = hdev->vport;
1772 	u16 i, num_vport;
1773 
1774 	num_vport = hdev->num_req_vfs + 1;
1775 	for (i = 0; i < num_vport; i++)	{
1776 		int ret;
1777 
1778 		ret = hclge_map_tqp_to_vport(hdev, vport);
1779 		if (ret)
1780 			return ret;
1781 
1782 		vport++;
1783 	}
1784 
1785 	return 0;
1786 }
1787 
1788 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1789 {
1790 	struct hnae3_handle *nic = &vport->nic;
1791 	struct hclge_dev *hdev = vport->back;
1792 	int ret;
1793 
1794 	nic->pdev = hdev->pdev;
1795 	nic->ae_algo = &ae_algo;
1796 	nic->numa_node_mask = hdev->numa_node_mask;
1797 
1798 	ret = hclge_knic_setup(vport, num_tqps,
1799 			       hdev->num_tx_desc, hdev->num_rx_desc);
1800 	if (ret)
1801 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1802 
1803 	return ret;
1804 }
1805 
1806 static int hclge_alloc_vport(struct hclge_dev *hdev)
1807 {
1808 	struct pci_dev *pdev = hdev->pdev;
1809 	struct hclge_vport *vport;
1810 	u32 tqp_main_vport;
1811 	u32 tqp_per_vport;
1812 	int num_vport, i;
1813 	int ret;
1814 
1815 	/* We need to alloc a vport for the main NIC of the PF */
1816 	num_vport = hdev->num_req_vfs + 1;
1817 
1818 	if (hdev->num_tqps < num_vport) {
1819 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)\n",
1820 			hdev->num_tqps, num_vport);
1821 		return -EINVAL;
1822 	}
1823 
1824 	/* Alloc the same number of TQPs for every vport */
1825 	tqp_per_vport = hdev->num_tqps / num_vport;
1826 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1827 
1828 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1829 			     GFP_KERNEL);
1830 	if (!vport)
1831 		return -ENOMEM;
1832 
1833 	hdev->vport = vport;
1834 	hdev->num_alloc_vport = num_vport;
1835 
1836 	if (IS_ENABLED(CONFIG_PCI_IOV))
1837 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1838 
1839 	for (i = 0; i < num_vport; i++) {
1840 		vport->back = hdev;
1841 		vport->vport_id = i;
1842 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1843 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1844 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1845 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1846 		INIT_LIST_HEAD(&vport->vlan_list);
1847 		INIT_LIST_HEAD(&vport->uc_mac_list);
1848 		INIT_LIST_HEAD(&vport->mc_mac_list);
1849 		spin_lock_init(&vport->mac_list_lock);
1850 
1851 		if (i == 0)
1852 			ret = hclge_vport_setup(vport, tqp_main_vport);
1853 		else
1854 			ret = hclge_vport_setup(vport, tqp_per_vport);
1855 		if (ret) {
1856 			dev_err(&pdev->dev,
1857 				"vport setup failed for vport %d, %d\n",
1858 				i, ret);
1859 			return ret;
1860 		}
1861 
1862 		vport++;
1863 	}
1864 
1865 	return 0;
1866 }
1867 
1868 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1869 				    struct hclge_pkt_buf_alloc *buf_alloc)
1870 {
1871 /* TX buffer size is allocated in units of 128 bytes */
1872 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1873 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
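/* Each per-TC TX buffer size is encoded below as (size >> 7) with BIT(15)
 * set so that firmware applies the new value, e.g. an illustrative 0x4000
 * byte buffer is written as (0x4000 >> 7) | BIT(15) = 0x8080.
 */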
1874 	struct hclge_tx_buff_alloc_cmd *req;
1875 	struct hclge_desc desc;
1876 	int ret;
1877 	u8 i;
1878 
1879 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1880 
1881 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, false);
1882 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1883 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1884 
1885 		req->tx_pkt_buff[i] =
1886 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1887 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1888 	}
1889 
1890 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1891 	if (ret)
1892 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1893 			ret);
1894 
1895 	return ret;
1896 }
1897 
1898 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1899 				 struct hclge_pkt_buf_alloc *buf_alloc)
1900 {
1901 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1902 
1903 	if (ret)
1904 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1905 
1906 	return ret;
1907 }
1908 
1909 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1910 {
1911 	unsigned int i;
1912 	u32 cnt = 0;
1913 
1914 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1915 		if (hdev->hw_tc_map & BIT(i))
1916 			cnt++;
1917 	return cnt;
1918 }
1919 
1920 /* Get the number of PFC-enabled TCs that have a private buffer */
1921 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1922 				  struct hclge_pkt_buf_alloc *buf_alloc)
1923 {
1924 	struct hclge_priv_buf *priv;
1925 	unsigned int i;
1926 	int cnt = 0;
1927 
1928 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1929 		priv = &buf_alloc->priv_buf[i];
1930 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1931 		    priv->enable)
1932 			cnt++;
1933 	}
1934 
1935 	return cnt;
1936 }
1937 
1938 /* Get the number of PFC-disabled TCs that have a private buffer */
1939 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1940 				     struct hclge_pkt_buf_alloc *buf_alloc)
1941 {
1942 	struct hclge_priv_buf *priv;
1943 	unsigned int i;
1944 	int cnt = 0;
1945 
1946 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1947 		priv = &buf_alloc->priv_buf[i];
1948 		if (hdev->hw_tc_map & BIT(i) &&
1949 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1950 		    priv->enable)
1951 			cnt++;
1952 	}
1953 
1954 	return cnt;
1955 }
1956 
1957 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1958 {
1959 	struct hclge_priv_buf *priv;
1960 	u32 rx_priv = 0;
1961 	int i;
1962 
1963 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1964 		priv = &buf_alloc->priv_buf[i];
1965 		if (priv->enable)
1966 			rx_priv += priv->buf_size;
1967 	}
1968 	return rx_priv;
1969 }
1970 
1971 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1972 {
1973 	u32 i, total_tx_size = 0;
1974 
1975 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1976 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1977 
1978 	return total_tx_size;
1979 }
1980 
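/* Check whether the remaining RX packet buffer (rx_all) can hold every
 * enabled TC's private buffer plus a large enough shared buffer. On success
 * the shared buffer size and its self/per-TC high and low thresholds are
 * filled into buf_alloc->s_buf.
 */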
1981 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1982 				struct hclge_pkt_buf_alloc *buf_alloc,
1983 				u32 rx_all)
1984 {
1985 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1986 	u32 tc_num = hclge_get_tc_num(hdev);
1987 	u32 shared_buf, aligned_mps;
1988 	u32 rx_priv;
1989 	int i;
1990 
1991 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1992 
1993 	if (hnae3_dev_dcb_supported(hdev))
1994 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1995 					hdev->dv_buf_size;
1996 	else
1997 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1998 					+ hdev->dv_buf_size;
1999 
2000 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2001 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2002 			     HCLGE_BUF_SIZE_UNIT);
2003 
2004 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2005 	if (rx_all < rx_priv + shared_std)
2006 		return false;
2007 
2008 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2009 	buf_alloc->s_buf.buf_size = shared_buf;
2010 	if (hnae3_dev_dcb_supported(hdev)) {
2011 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2012 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2013 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2014 				  HCLGE_BUF_SIZE_UNIT);
2015 	} else {
2016 		buf_alloc->s_buf.self.high = aligned_mps +
2017 						HCLGE_NON_DCB_ADDITIONAL_BUF;
2018 		buf_alloc->s_buf.self.low = aligned_mps;
2019 	}
2020 
2021 	if (hnae3_dev_dcb_supported(hdev)) {
2022 		hi_thrd = shared_buf - hdev->dv_buf_size;
2023 
2024 		if (tc_num <= NEED_RESERVE_TC_NUM)
2025 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2026 					/ BUF_MAX_PERCENT;
2027 
2028 		if (tc_num)
2029 			hi_thrd = hi_thrd / tc_num;
2030 
2031 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2032 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2033 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2034 	} else {
2035 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2036 		lo_thrd = aligned_mps;
2037 	}
2038 
2039 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2040 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2041 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2042 	}
2043 
2044 	return true;
2045 }
2046 
2047 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2048 				struct hclge_pkt_buf_alloc *buf_alloc)
2049 {
2050 	u32 i, total_size;
2051 
2052 	total_size = hdev->pkt_buf_size;
2053 
2054 	/* alloc a tx buffer for every enabled TC */
2055 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2056 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2057 
2058 		if (hdev->hw_tc_map & BIT(i)) {
2059 			if (total_size < hdev->tx_buf_size)
2060 				return -ENOMEM;
2061 
2062 			priv->tx_buf_size = hdev->tx_buf_size;
2063 		} else {
2064 			priv->tx_buf_size = 0;
2065 		}
2066 
2067 		total_size -= priv->tx_buf_size;
2068 	}
2069 
2070 	return 0;
2071 }
2072 
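/* Set up a private buffer for every enabled TC: TCs with PFC enabled get a
 * non-zero low watermark, other TCs only a high watermark, and "max" selects
 * the larger watermark variant. Each buffer size is wl.high + dv_buf_size.
 * Returns whether the resulting layout still fits into the remaining RX
 * packet buffer.
 */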
2073 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2074 				  struct hclge_pkt_buf_alloc *buf_alloc)
2075 {
2076 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2077 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2078 	unsigned int i;
2079 
2080 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2081 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2082 
2083 		priv->enable = 0;
2084 		priv->wl.low = 0;
2085 		priv->wl.high = 0;
2086 		priv->buf_size = 0;
2087 
2088 		if (!(hdev->hw_tc_map & BIT(i)))
2089 			continue;
2090 
2091 		priv->enable = 1;
2092 
2093 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2094 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2095 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
2096 						HCLGE_BUF_SIZE_UNIT);
2097 		} else {
2098 			priv->wl.low = 0;
2099 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2100 					aligned_mps;
2101 		}
2102 
2103 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2104 	}
2105 
2106 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2107 }
2108 
2109 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2110 					  struct hclge_pkt_buf_alloc *buf_alloc)
2111 {
2112 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2113 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2114 	int i;
2115 
2116 	/* let the last TC be cleared first */
2117 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2118 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2119 		unsigned int mask = BIT((unsigned int)i);
2120 
2121 		if (hdev->hw_tc_map & mask &&
2122 		    !(hdev->tm_info.hw_pfc_map & mask)) {
2123 			/* Clear the private buffer of this non-PFC TC */
2124 			priv->wl.low = 0;
2125 			priv->wl.high = 0;
2126 			priv->buf_size = 0;
2127 			priv->enable = 0;
2128 			no_pfc_priv_num--;
2129 		}
2130 
2131 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2132 		    no_pfc_priv_num == 0)
2133 			break;
2134 	}
2135 
2136 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2137 }
2138 
2139 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2140 					struct hclge_pkt_buf_alloc *buf_alloc)
2141 {
2142 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2143 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2144 	int i;
2145 
2146 	/* let the last TC be cleared first */
2147 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2148 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2149 		unsigned int mask = BIT((unsigned int)i);
2150 
2151 		if (hdev->hw_tc_map & mask &&
2152 		    hdev->tm_info.hw_pfc_map & mask) {
2153 			/* Reduce the number of PFC TCs with a private buffer */
2154 			priv->wl.low = 0;
2155 			priv->enable = 0;
2156 			priv->wl.high = 0;
2157 			priv->buf_size = 0;
2158 			pfc_priv_num--;
2159 		}
2160 
2161 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2162 		    pfc_priv_num == 0)
2163 			break;
2164 	}
2165 
2166 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2167 }
2168 
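/* Try to split the remaining RX packet buffer evenly across the enabled TCs
 * as private buffers, leaving no shared buffer. The split only succeeds when
 * each TC's share is at least the minimum private buffer size derived from
 * dv_buf_size, COMPENSATE_BUFFER and COMPENSATE_HALF_MPS_NUM half-MPS units.
 */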
2169 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2170 				       struct hclge_pkt_buf_alloc *buf_alloc)
2171 {
2172 #define COMPENSATE_BUFFER	0x3C00
2173 #define COMPENSATE_HALF_MPS_NUM	5
2174 #define PRIV_WL_GAP		0x1800
2175 
2176 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2177 	u32 tc_num = hclge_get_tc_num(hdev);
2178 	u32 half_mps = hdev->mps >> 1;
2179 	u32 min_rx_priv;
2180 	unsigned int i;
2181 
2182 	if (tc_num)
2183 		rx_priv = rx_priv / tc_num;
2184 
2185 	if (tc_num <= NEED_RESERVE_TC_NUM)
2186 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2187 
2188 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2189 			COMPENSATE_HALF_MPS_NUM * half_mps;
2190 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2191 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2192 	if (rx_priv < min_rx_priv)
2193 		return false;
2194 
2195 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2196 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2197 
2198 		priv->enable = 0;
2199 		priv->wl.low = 0;
2200 		priv->wl.high = 0;
2201 		priv->buf_size = 0;
2202 
2203 		if (!(hdev->hw_tc_map & BIT(i)))
2204 			continue;
2205 
2206 		priv->enable = 1;
2207 		priv->buf_size = rx_priv;
2208 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2209 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2210 	}
2211 
2212 	buf_alloc->s_buf.buf_size = 0;
2213 
2214 	return true;
2215 }
2216 
2217 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2218  * @hdev: pointer to struct hclge_dev
2219  * @buf_alloc: pointer to buffer calculation data
2220  * @return: 0 on success, negative error code on failure
2221  */
2222 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2223 				struct hclge_pkt_buf_alloc *buf_alloc)
2224 {
2225 	/* When DCB is not supported, rx private buffer is not allocated. */
2226 	if (!hnae3_dev_dcb_supported(hdev)) {
2227 		u32 rx_all = hdev->pkt_buf_size;
2228 
2229 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2230 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2231 			return -ENOMEM;
2232 
2233 		return 0;
2234 	}
2235 
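	/* For DCB-capable devices, try the allocation strategies below in
	 * order: 1) private buffers only with no shared buffer, 2) private
	 * buffers with full watermarks plus a shared buffer, 3) the same
	 * with reduced watermarks, 4) drop the private buffers of non-PFC
	 * TCs, 5) drop the private buffers of PFC TCs. Return -ENOMEM if
	 * none of them fits.
	 */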
2236 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2237 		return 0;
2238 
2239 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2240 		return 0;
2241 
2242 	/* try to decrease the buffer size */
2243 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2244 		return 0;
2245 
2246 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2247 		return 0;
2248 
2249 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2250 		return 0;
2251 
2252 	return -ENOMEM;
2253 }
2254 
2255 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2256 				   struct hclge_pkt_buf_alloc *buf_alloc)
2257 {
2258 	struct hclge_rx_priv_buff_cmd *req;
2259 	struct hclge_desc desc;
2260 	int ret;
2261 	int i;
2262 
2263 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2264 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2265 
2266 	/* Alloc private buffers for TCs */
2267 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2268 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2269 
2270 		req->buf_num[i] =
2271 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2272 		req->buf_num[i] |=
2273 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2274 	}
2275 
2276 	req->shared_buf =
2277 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2278 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2279 
2280 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2281 	if (ret)
2282 		dev_err(&hdev->pdev->dev,
2283 			"rx private buffer alloc cmd failed %d\n", ret);
2284 
2285 	return ret;
2286 }
2287 
2288 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2289 				   struct hclge_pkt_buf_alloc *buf_alloc)
2290 {
2291 	struct hclge_rx_priv_wl_buf *req;
2292 	struct hclge_priv_buf *priv;
2293 	struct hclge_desc desc[2];
2294 	int i, j;
2295 	int ret;
2296 
2297 	for (i = 0; i < 2; i++) {
2298 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2299 					   false);
2300 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2301 
2302 		/* The first descriptor sets the NEXT bit to 1 */
2303 		if (i == 0)
2304 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2305 		else
2306 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2307 
2308 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2309 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2310 
2311 			priv = &buf_alloc->priv_buf[idx];
2312 			req->tc_wl[j].high =
2313 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2314 			req->tc_wl[j].high |=
2315 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2316 			req->tc_wl[j].low =
2317 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2318 			req->tc_wl[j].low |=
2319 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2320 		}
2321 	}
2322 
2323 	/* Send 2 descriptors at one time */
2324 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2325 	if (ret)
2326 		dev_err(&hdev->pdev->dev,
2327 			"rx private waterline config cmd failed %d\n",
2328 			ret);
2329 	return ret;
2330 }
2331 
2332 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2333 				    struct hclge_pkt_buf_alloc *buf_alloc)
2334 {
2335 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2336 	struct hclge_rx_com_thrd *req;
2337 	struct hclge_desc desc[2];
2338 	struct hclge_tc_thrd *tc;
2339 	int i, j;
2340 	int ret;
2341 
2342 	for (i = 0; i < 2; i++) {
2343 		hclge_cmd_setup_basic_desc(&desc[i],
2344 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2345 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2346 
2347 		/* The first descriptor sets the NEXT bit to 1 */
2348 		if (i == 0)
2349 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2350 		else
2351 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2352 
2353 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2354 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2355 
2356 			req->com_thrd[j].high =
2357 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2358 			req->com_thrd[j].high |=
2359 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2360 			req->com_thrd[j].low =
2361 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2362 			req->com_thrd[j].low |=
2363 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2364 		}
2365 	}
2366 
2367 	/* Send 2 descriptors at one time */
2368 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2369 	if (ret)
2370 		dev_err(&hdev->pdev->dev,
2371 			"common threshold config cmd failed %d\n", ret);
2372 	return ret;
2373 }
2374 
2375 static int hclge_common_wl_config(struct hclge_dev *hdev,
2376 				  struct hclge_pkt_buf_alloc *buf_alloc)
2377 {
2378 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2379 	struct hclge_rx_com_wl *req;
2380 	struct hclge_desc desc;
2381 	int ret;
2382 
2383 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2384 
2385 	req = (struct hclge_rx_com_wl *)desc.data;
2386 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2387 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2388 
2389 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2390 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2391 
2392 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2393 	if (ret)
2394 		dev_err(&hdev->pdev->dev,
2395 			"common waterline config cmd failed %d\n", ret);
2396 
2397 	return ret;
2398 }
2399 
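/* Calculate and program the whole packet buffer layout: TX buffers first,
 * then the RX private buffers, then the RX private waterline and common
 * threshold registers (DCB-capable devices only), and finally the common
 * waterline.
 */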
2400 int hclge_buffer_alloc(struct hclge_dev *hdev)
2401 {
2402 	struct hclge_pkt_buf_alloc *pkt_buf;
2403 	int ret;
2404 
2405 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2406 	if (!pkt_buf)
2407 		return -ENOMEM;
2408 
2409 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2410 	if (ret) {
2411 		dev_err(&hdev->pdev->dev,
2412 			"could not calc tx buffer size for all TCs %d\n", ret);
2413 		goto out;
2414 	}
2415 
2416 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2417 	if (ret) {
2418 		dev_err(&hdev->pdev->dev,
2419 			"could not alloc tx buffers %d\n", ret);
2420 		goto out;
2421 	}
2422 
2423 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2424 	if (ret) {
2425 		dev_err(&hdev->pdev->dev,
2426 			"could not calc rx priv buffer size for all TCs %d\n",
2427 			ret);
2428 		goto out;
2429 	}
2430 
2431 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2432 	if (ret) {
2433 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2434 			ret);
2435 		goto out;
2436 	}
2437 
2438 	if (hnae3_dev_dcb_supported(hdev)) {
2439 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2440 		if (ret) {
2441 			dev_err(&hdev->pdev->dev,
2442 				"could not configure rx private waterline %d\n",
2443 				ret);
2444 			goto out;
2445 		}
2446 
2447 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2448 		if (ret) {
2449 			dev_err(&hdev->pdev->dev,
2450 				"could not configure common threshold %d\n",
2451 				ret);
2452 			goto out;
2453 		}
2454 	}
2455 
2456 	ret = hclge_common_wl_config(hdev, pkt_buf);
2457 	if (ret)
2458 		dev_err(&hdev->pdev->dev,
2459 			"could not configure common waterline %d\n", ret);
2460 
2461 out:
2462 	kfree(pkt_buf);
2463 	return ret;
2464 }
2465 
2466 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2467 {
2468 	struct hnae3_handle *roce = &vport->roce;
2469 	struct hnae3_handle *nic = &vport->nic;
2470 	struct hclge_dev *hdev = vport->back;
2471 
2472 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2473 
2474 	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2475 		return -EINVAL;
2476 
2477 	roce->rinfo.base_vector = hdev->roce_base_vector;
2478 
2479 	roce->rinfo.netdev = nic->kinfo.netdev;
2480 	roce->rinfo.roce_io_base = hdev->hw.io_base;
2481 	roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2482 
2483 	roce->pdev = nic->pdev;
2484 	roce->ae_algo = nic->ae_algo;
2485 	roce->numa_node_mask = nic->numa_node_mask;
2486 
2487 	return 0;
2488 }
2489 
2490 static int hclge_init_msi(struct hclge_dev *hdev)
2491 {
2492 	struct pci_dev *pdev = hdev->pdev;
2493 	int vectors;
2494 	int i;
2495 
2496 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2497 					hdev->num_msi,
2498 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2499 	if (vectors < 0) {
2500 		dev_err(&pdev->dev,
2501 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2502 			vectors);
2503 		return vectors;
2504 	}
2505 	if (vectors < hdev->num_msi)
2506 		dev_warn(&hdev->pdev->dev,
2507 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2508 			 hdev->num_msi, vectors);
2509 
2510 	hdev->num_msi = vectors;
2511 	hdev->num_msi_left = vectors;
2512 
2513 	hdev->base_msi_vector = pdev->irq;
2514 	hdev->roce_base_vector = hdev->base_msi_vector +
2515 				hdev->num_nic_msi;
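	/* The RoCE vectors are laid out right after the first num_nic_msi
	 * vectors used by the NIC side (vector 0 of which is the misc
	 * vector).
	 */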
2516 
2517 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2518 					   sizeof(u16), GFP_KERNEL);
2519 	if (!hdev->vector_status) {
2520 		pci_free_irq_vectors(pdev);
2521 		return -ENOMEM;
2522 	}
2523 
2524 	for (i = 0; i < hdev->num_msi; i++)
2525 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2526 
2527 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2528 					sizeof(int), GFP_KERNEL);
2529 	if (!hdev->vector_irq) {
2530 		pci_free_irq_vectors(pdev);
2531 		return -ENOMEM;
2532 	}
2533 
2534 	return 0;
2535 }
2536 
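/* Only 10M and 100M links may run at half duplex; any other speed is forced
 * to full duplex.
 */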
2537 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2538 {
2539 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2540 		duplex = HCLGE_MAC_FULL;
2541 
2542 	return duplex;
2543 }
2544 
2545 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2546 				      u8 duplex)
2547 {
2548 	struct hclge_config_mac_speed_dup_cmd *req;
2549 	struct hclge_desc desc;
2550 	int ret;
2551 
2552 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2553 
2554 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2555 
2556 	if (duplex)
2557 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2558 
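	/* Firmware speed encoding used below: 1G = 0, 10G = 1, 25G = 2,
	 * 40G = 3, 50G = 4, 100G = 5, 10M = 6, 100M = 7, 200G = 8.
	 */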
2559 	switch (speed) {
2560 	case HCLGE_MAC_SPEED_10M:
2561 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2562 				HCLGE_CFG_SPEED_S, 6);
2563 		break;
2564 	case HCLGE_MAC_SPEED_100M:
2565 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2566 				HCLGE_CFG_SPEED_S, 7);
2567 		break;
2568 	case HCLGE_MAC_SPEED_1G:
2569 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2570 				HCLGE_CFG_SPEED_S, 0);
2571 		break;
2572 	case HCLGE_MAC_SPEED_10G:
2573 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2574 				HCLGE_CFG_SPEED_S, 1);
2575 		break;
2576 	case HCLGE_MAC_SPEED_25G:
2577 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2578 				HCLGE_CFG_SPEED_S, 2);
2579 		break;
2580 	case HCLGE_MAC_SPEED_40G:
2581 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2582 				HCLGE_CFG_SPEED_S, 3);
2583 		break;
2584 	case HCLGE_MAC_SPEED_50G:
2585 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2586 				HCLGE_CFG_SPEED_S, 4);
2587 		break;
2588 	case HCLGE_MAC_SPEED_100G:
2589 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2590 				HCLGE_CFG_SPEED_S, 5);
2591 		break;
2592 	case HCLGE_MAC_SPEED_200G:
2593 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2594 				HCLGE_CFG_SPEED_S, 8);
2595 		break;
2596 	default:
2597 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2598 		return -EINVAL;
2599 	}
2600 
2601 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2602 		      1);
2603 
2604 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2605 	if (ret) {
2606 		dev_err(&hdev->pdev->dev,
2607 			"mac speed/duplex config cmd failed %d.\n", ret);
2608 		return ret;
2609 	}
2610 
2611 	return 0;
2612 }
2613 
2614 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2615 {
2616 	struct hclge_mac *mac = &hdev->hw.mac;
2617 	int ret;
2618 
2619 	duplex = hclge_check_speed_dup(duplex, speed);
2620 	if (!mac->support_autoneg && mac->speed == speed &&
2621 	    mac->duplex == duplex)
2622 		return 0;
2623 
2624 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2625 	if (ret)
2626 		return ret;
2627 
2628 	hdev->hw.mac.speed = speed;
2629 	hdev->hw.mac.duplex = duplex;
2630 
2631 	return 0;
2632 }
2633 
2634 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2635 				     u8 duplex)
2636 {
2637 	struct hclge_vport *vport = hclge_get_vport(handle);
2638 	struct hclge_dev *hdev = vport->back;
2639 
2640 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2641 }
2642 
2643 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2644 {
2645 	struct hclge_config_auto_neg_cmd *req;
2646 	struct hclge_desc desc;
2647 	u32 flag = 0;
2648 	int ret;
2649 
2650 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2651 
2652 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2653 	if (enable)
2654 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2655 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2656 
2657 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2658 	if (ret)
2659 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2660 			ret);
2661 
2662 	return ret;
2663 }
2664 
2665 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2666 {
2667 	struct hclge_vport *vport = hclge_get_vport(handle);
2668 	struct hclge_dev *hdev = vport->back;
2669 
2670 	if (!hdev->hw.mac.support_autoneg) {
2671 		if (enable) {
2672 			dev_err(&hdev->pdev->dev,
2673 				"autoneg is not supported by current port\n");
2674 			return -EOPNOTSUPP;
2675 		} else {
2676 			return 0;
2677 		}
2678 	}
2679 
2680 	return hclge_set_autoneg_en(hdev, enable);
2681 }
2682 
2683 static int hclge_get_autoneg(struct hnae3_handle *handle)
2684 {
2685 	struct hclge_vport *vport = hclge_get_vport(handle);
2686 	struct hclge_dev *hdev = vport->back;
2687 	struct phy_device *phydev = hdev->hw.mac.phydev;
2688 
2689 	if (phydev)
2690 		return phydev->autoneg;
2691 
2692 	return hdev->hw.mac.autoneg;
2693 }
2694 
2695 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2696 {
2697 	struct hclge_vport *vport = hclge_get_vport(handle);
2698 	struct hclge_dev *hdev = vport->back;
2699 	int ret;
2700 
2701 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2702 
2703 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2704 	if (ret)
2705 		return ret;
2706 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2707 }
2708 
2709 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2710 {
2711 	struct hclge_vport *vport = hclge_get_vport(handle);
2712 	struct hclge_dev *hdev = vport->back;
2713 
2714 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2715 		return hclge_set_autoneg_en(hdev, !halt);
2716 
2717 	return 0;
2718 }
2719 
2720 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2721 {
2722 	struct hclge_config_fec_cmd *req;
2723 	struct hclge_desc desc;
2724 	int ret;
2725 
2726 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2727 
2728 	req = (struct hclge_config_fec_cmd *)desc.data;
2729 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2730 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2731 	if (fec_mode & BIT(HNAE3_FEC_RS))
2732 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2733 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2734 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2735 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2736 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2737 
2738 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2739 	if (ret)
2740 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2741 
2742 	return ret;
2743 }
2744 
2745 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2746 {
2747 	struct hclge_vport *vport = hclge_get_vport(handle);
2748 	struct hclge_dev *hdev = vport->back;
2749 	struct hclge_mac *mac = &hdev->hw.mac;
2750 	int ret;
2751 
2752 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2753 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2754 		return -EINVAL;
2755 	}
2756 
2757 	ret = hclge_set_fec_hw(hdev, fec_mode);
2758 	if (ret)
2759 		return ret;
2760 
2761 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2762 	return 0;
2763 }
2764 
2765 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2766 			  u8 *fec_mode)
2767 {
2768 	struct hclge_vport *vport = hclge_get_vport(handle);
2769 	struct hclge_dev *hdev = vport->back;
2770 	struct hclge_mac *mac = &hdev->hw.mac;
2771 
2772 	if (fec_ability)
2773 		*fec_ability = mac->fec_ability;
2774 	if (fec_mode)
2775 		*fec_mode = mac->fec_mode;
2776 }
2777 
2778 static int hclge_mac_init(struct hclge_dev *hdev)
2779 {
2780 	struct hclge_mac *mac = &hdev->hw.mac;
2781 	int ret;
2782 
2783 	hdev->support_sfp_query = true;
2784 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2785 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2786 					 hdev->hw.mac.duplex);
2787 	if (ret)
2788 		return ret;
2789 
2790 	if (hdev->hw.mac.support_autoneg) {
2791 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2792 		if (ret)
2793 			return ret;
2794 	}
2795 
2796 	mac->link = 0;
2797 
2798 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2799 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2800 		if (ret)
2801 			return ret;
2802 	}
2803 
2804 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2805 	if (ret) {
2806 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2807 		return ret;
2808 	}
2809 
2810 	ret = hclge_set_default_loopback(hdev);
2811 	if (ret)
2812 		return ret;
2813 
2814 	ret = hclge_buffer_alloc(hdev);
2815 	if (ret)
2816 		dev_err(&hdev->pdev->dev,
2817 			"allocate buffer fail, ret=%d\n", ret);
2818 
2819 	return ret;
2820 }
2821 
2822 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2823 {
2824 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2825 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2826 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2827 				    hclge_wq, &hdev->service_task, 0);
2828 }
2829 
2830 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2831 {
2832 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2833 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2834 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2835 				    hclge_wq, &hdev->service_task, 0);
2836 }
2837 
2838 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2839 {
2840 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2841 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2842 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2843 				    hclge_wq, &hdev->service_task,
2844 				    delay_time);
2845 }
2846 
2847 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2848 {
2849 	struct hclge_link_status_cmd *req;
2850 	struct hclge_desc desc;
2851 	int ret;
2852 
2853 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2854 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2855 	if (ret) {
2856 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2857 			ret);
2858 		return ret;
2859 	}
2860 
2861 	req = (struct hclge_link_status_cmd *)desc.data;
2862 	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2863 		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2864 
2865 	return 0;
2866 }
2867 
2868 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2869 {
2870 	struct phy_device *phydev = hdev->hw.mac.phydev;
2871 
2872 	*link_status = HCLGE_LINK_STATUS_DOWN;
2873 
2874 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2875 		return 0;
2876 
2877 	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2878 		return 0;
2879 
2880 	return hclge_get_mac_link_status(hdev, link_status);
2881 }
2882 
2883 static void hclge_push_link_status(struct hclge_dev *hdev)
2884 {
2885 	struct hclge_vport *vport;
2886 	int ret;
2887 	u16 i;
2888 
2889 	for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2890 		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2891 
2892 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2893 		    vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2894 			continue;
2895 
2896 		ret = hclge_push_vf_link_status(vport);
2897 		if (ret) {
2898 			dev_err(&hdev->pdev->dev,
2899 				"failed to push link status to vf%u, ret = %d\n",
2900 				i, ret);
2901 		}
2902 	}
2903 }
2904 
2905 static void hclge_update_link_status(struct hclge_dev *hdev)
2906 {
2907 	struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2908 	struct hnae3_handle *handle = &hdev->vport[0].nic;
2909 	struct hnae3_client *rclient = hdev->roce_client;
2910 	struct hnae3_client *client = hdev->nic_client;
2911 	int state;
2912 	int ret;
2913 
2914 	if (!client)
2915 		return;
2916 
2917 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2918 		return;
2919 
2920 	ret = hclge_get_mac_phy_link(hdev, &state);
2921 	if (ret) {
2922 		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2923 		return;
2924 	}
2925 
2926 	if (state != hdev->hw.mac.link) {
2927 		client->ops->link_status_change(handle, state);
2928 		hclge_config_mac_tnl_int(hdev, state);
2929 		if (rclient && rclient->ops->link_status_change)
2930 			rclient->ops->link_status_change(rhandle, state);
2931 
2932 		hdev->hw.mac.link = state;
2933 		hclge_push_link_status(hdev);
2934 	}
2935 
2936 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2937 }
2938 
2939 static void hclge_update_port_capability(struct hclge_dev *hdev,
2940 					 struct hclge_mac *mac)
2941 {
2942 	if (hnae3_dev_fec_supported(hdev))
2943 		/* update fec ability by speed */
2944 		hclge_convert_setting_fec(mac);
2945 
2946 	/* The firmware cannot identify the backplane type; the media type
2947 	 * read from the configuration helps to deal with it
2948 	 */
2949 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2950 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2951 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2952 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2953 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2954 
2955 	if (mac->support_autoneg) {
2956 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2957 		linkmode_copy(mac->advertising, mac->supported);
2958 	} else {
2959 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2960 				   mac->supported);
2961 		linkmode_zero(mac->advertising);
2962 	}
2963 }
2964 
2965 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2966 {
2967 	struct hclge_sfp_info_cmd *resp;
2968 	struct hclge_desc desc;
2969 	int ret;
2970 
2971 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2972 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2973 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2974 	if (ret == -EOPNOTSUPP) {
2975 		dev_warn(&hdev->pdev->dev,
2976 			 "IMP does not support getting SFP speed %d\n", ret);
2977 		return ret;
2978 	} else if (ret) {
2979 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2980 		return ret;
2981 	}
2982 
2983 	*speed = le32_to_cpu(resp->speed);
2984 
2985 	return 0;
2986 }
2987 
2988 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2989 {
2990 	struct hclge_sfp_info_cmd *resp;
2991 	struct hclge_desc desc;
2992 	int ret;
2993 
2994 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2995 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2996 
2997 	resp->query_type = QUERY_ACTIVE_SPEED;
2998 
2999 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3000 	if (ret == -EOPNOTSUPP) {
3001 		dev_warn(&hdev->pdev->dev,
3002 			 "IMP does not support getting SFP info %d\n", ret);
3003 		return ret;
3004 	} else if (ret) {
3005 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3006 		return ret;
3007 	}
3008 
3009 	/* In some cases, the MAC speed got from IMP may be 0; it should not
3010 	 * be set to mac->speed.
3011 	 */
3012 	if (!le32_to_cpu(resp->speed))
3013 		return 0;
3014 
3015 	mac->speed = le32_to_cpu(resp->speed);
3016 	/* if resp->speed_ability is 0, it means the firmware is an old
3017 	 * version, so do not update these params
3018 	 */
3019 	if (resp->speed_ability) {
3020 		mac->module_type = le32_to_cpu(resp->module_type);
3021 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
3022 		mac->autoneg = resp->autoneg;
3023 		mac->support_autoneg = resp->autoneg_ability;
3024 		mac->speed_type = QUERY_ACTIVE_SPEED;
3025 		if (!resp->active_fec)
3026 			mac->fec_mode = 0;
3027 		else
3028 			mac->fec_mode = BIT(resp->active_fec);
3029 	} else {
3030 		mac->speed_type = QUERY_SFP_SPEED;
3031 	}
3032 
3033 	return 0;
3034 }
3035 
3036 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3037 					struct ethtool_link_ksettings *cmd)
3038 {
3039 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3040 	struct hclge_vport *vport = hclge_get_vport(handle);
3041 	struct hclge_phy_link_ksetting_0_cmd *req0;
3042 	struct hclge_phy_link_ksetting_1_cmd *req1;
3043 	u32 supported, advertising, lp_advertising;
3044 	struct hclge_dev *hdev = vport->back;
3045 	int ret;
3046 
3047 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3048 				   true);
3049 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3050 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3051 				   true);
3052 
3053 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3054 	if (ret) {
3055 		dev_err(&hdev->pdev->dev,
3056 			"failed to get phy link ksetting, ret = %d.\n", ret);
3057 		return ret;
3058 	}
3059 
3060 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3061 	cmd->base.autoneg = req0->autoneg;
3062 	cmd->base.speed = le32_to_cpu(req0->speed);
3063 	cmd->base.duplex = req0->duplex;
3064 	cmd->base.port = req0->port;
3065 	cmd->base.transceiver = req0->transceiver;
3066 	cmd->base.phy_address = req0->phy_address;
3067 	cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3068 	cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3069 	supported = le32_to_cpu(req0->supported);
3070 	advertising = le32_to_cpu(req0->advertising);
3071 	lp_advertising = le32_to_cpu(req0->lp_advertising);
3072 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3073 						supported);
3074 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3075 						advertising);
3076 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3077 						lp_advertising);
3078 
3079 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3080 	cmd->base.master_slave_cfg = req1->master_slave_cfg;
3081 	cmd->base.master_slave_state = req1->master_slave_state;
3082 
3083 	return 0;
3084 }
3085 
3086 static int
3087 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3088 			     const struct ethtool_link_ksettings *cmd)
3089 {
3090 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3091 	struct hclge_vport *vport = hclge_get_vport(handle);
3092 	struct hclge_phy_link_ksetting_0_cmd *req0;
3093 	struct hclge_phy_link_ksetting_1_cmd *req1;
3094 	struct hclge_dev *hdev = vport->back;
3095 	u32 advertising;
3096 	int ret;
3097 
3098 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
3099 	    ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3100 	     (cmd->base.duplex != DUPLEX_HALF &&
3101 	      cmd->base.duplex != DUPLEX_FULL)))
3102 		return -EINVAL;
3103 
3104 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3105 				   false);
3106 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3107 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3108 				   false);
3109 
3110 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3111 	req0->autoneg = cmd->base.autoneg;
3112 	req0->speed = cpu_to_le32(cmd->base.speed);
3113 	req0->duplex = cmd->base.duplex;
3114 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
3115 						cmd->link_modes.advertising);
3116 	req0->advertising = cpu_to_le32(advertising);
3117 	req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3118 
3119 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3120 	req1->master_slave_cfg = cmd->base.master_slave_cfg;
3121 
3122 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3123 	if (ret) {
3124 		dev_err(&hdev->pdev->dev,
3125 			"failed to set phy link ksettings, ret = %d.\n", ret);
3126 		return ret;
3127 	}
3128 
3129 	hdev->hw.mac.autoneg = cmd->base.autoneg;
3130 	hdev->hw.mac.speed = cmd->base.speed;
3131 	hdev->hw.mac.duplex = cmd->base.duplex;
3132 	linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3133 
3134 	return 0;
3135 }
3136 
3137 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3138 {
3139 	struct ethtool_link_ksettings cmd;
3140 	int ret;
3141 
3142 	if (!hnae3_dev_phy_imp_supported(hdev))
3143 		return 0;
3144 
3145 	ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3146 	if (ret)
3147 		return ret;
3148 
3149 	hdev->hw.mac.autoneg = cmd.base.autoneg;
3150 	hdev->hw.mac.speed = cmd.base.speed;
3151 	hdev->hw.mac.duplex = cmd.base.duplex;
3152 
3153 	return 0;
3154 }
3155 
3156 static int hclge_tp_port_init(struct hclge_dev *hdev)
3157 {
3158 	struct ethtool_link_ksettings cmd;
3159 
3160 	if (!hnae3_dev_phy_imp_supported(hdev))
3161 		return 0;
3162 
3163 	cmd.base.autoneg = hdev->hw.mac.autoneg;
3164 	cmd.base.speed = hdev->hw.mac.speed;
3165 	cmd.base.duplex = hdev->hw.mac.duplex;
3166 	linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3167 
3168 	return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3169 }
3170 
3171 static int hclge_update_port_info(struct hclge_dev *hdev)
3172 {
3173 	struct hclge_mac *mac = &hdev->hw.mac;
3174 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
3175 	int ret;
3176 
3177 	/* get the port info from SFP cmd if not copper port */
3178 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3179 		return hclge_update_tp_port_info(hdev);
3180 
3181 	/* if IMP does not support getting SFP/qSFP info, return directly */
3182 	if (!hdev->support_sfp_query)
3183 		return 0;
3184 
3185 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3186 		ret = hclge_get_sfp_info(hdev, mac);
3187 	else
3188 		ret = hclge_get_sfp_speed(hdev, &speed);
3189 
3190 	if (ret == -EOPNOTSUPP) {
3191 		hdev->support_sfp_query = false;
3192 		return ret;
3193 	} else if (ret) {
3194 		return ret;
3195 	}
3196 
3197 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3198 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3199 			hclge_update_port_capability(hdev, mac);
3200 			return 0;
3201 		}
3202 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3203 					       HCLGE_MAC_FULL);
3204 	} else {
3205 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3206 			return 0; /* do nothing if no SFP */
3207 
3208 		/* must configure full duplex for SFP */
3209 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3210 	}
3211 }
3212 
3213 static int hclge_get_status(struct hnae3_handle *handle)
3214 {
3215 	struct hclge_vport *vport = hclge_get_vport(handle);
3216 	struct hclge_dev *hdev = vport->back;
3217 
3218 	hclge_update_link_status(hdev);
3219 
3220 	return hdev->hw.mac.link;
3221 }
3222 
3223 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3224 {
3225 	if (!pci_num_vf(hdev->pdev)) {
3226 		dev_err(&hdev->pdev->dev,
3227 			"SRIOV is disabled, cannot get vport(%d) info.\n", vf);
3228 		return NULL;
3229 	}
3230 
3231 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3232 		dev_err(&hdev->pdev->dev,
3233 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
3234 			vf, pci_num_vf(hdev->pdev));
3235 		return NULL;
3236 	}
3237 
3238 	/* VFs start from index 1 in the vport array */
3239 	vf += HCLGE_VF_VPORT_START_NUM;
3240 	return &hdev->vport[vf];
3241 }
3242 
3243 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3244 			       struct ifla_vf_info *ivf)
3245 {
3246 	struct hclge_vport *vport = hclge_get_vport(handle);
3247 	struct hclge_dev *hdev = vport->back;
3248 
3249 	vport = hclge_get_vf_vport(hdev, vf);
3250 	if (!vport)
3251 		return -EINVAL;
3252 
3253 	ivf->vf = vf;
3254 	ivf->linkstate = vport->vf_info.link_state;
3255 	ivf->spoofchk = vport->vf_info.spoofchk;
3256 	ivf->trusted = vport->vf_info.trusted;
3257 	ivf->min_tx_rate = 0;
3258 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3259 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3260 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3261 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3262 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
3263 
3264 	return 0;
3265 }
3266 
3267 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3268 				   int link_state)
3269 {
3270 	struct hclge_vport *vport = hclge_get_vport(handle);
3271 	struct hclge_dev *hdev = vport->back;
3272 	int link_state_old;
3273 	int ret;
3274 
3275 	vport = hclge_get_vf_vport(hdev, vf);
3276 	if (!vport)
3277 		return -EINVAL;
3278 
3279 	link_state_old = vport->vf_info.link_state;
3280 	vport->vf_info.link_state = link_state;
3281 
3282 	ret = hclge_push_vf_link_status(vport);
3283 	if (ret) {
3284 		vport->vf_info.link_state = link_state_old;
3285 		dev_err(&hdev->pdev->dev,
3286 			"failed to push vf%d link status, ret = %d\n", vf, ret);
3287 	}
3288 
3289 	return ret;
3290 }
3291 
3292 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3293 {
3294 	u32 cmdq_src_reg, msix_src_reg;
3295 
3296 	/* fetch the events from their corresponding regs */
3297 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3298 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
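	/* The sources below are checked in priority order: IMP reset,
	 * global reset, hardware error (MSI-X), mailbox, then any other
	 * vector0 event.
	 */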
3299 
3300 	/* Assumption: if reset and mailbox events happen to be reported
3301 	 * together, we only process the reset event in this pass and defer
3302 	 * the processing of the mailbox events. Since we would not have
3303 	 * cleared the RX CMDQ event this time, the hardware will raise
3304 	 * another interrupt just for the mailbox.
3305 	 *
3306 	 * check for vector0 reset event sources
3307 	 */
3308 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3309 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3310 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3311 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3312 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3313 		hdev->rst_stats.imp_rst_cnt++;
3314 		return HCLGE_VECTOR0_EVENT_RST;
3315 	}
3316 
3317 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3318 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3319 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3320 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3321 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3322 		hdev->rst_stats.global_rst_cnt++;
3323 		return HCLGE_VECTOR0_EVENT_RST;
3324 	}
3325 
3326 	/* check for vector0 msix event source */
3327 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3328 		*clearval = msix_src_reg;
3329 		return HCLGE_VECTOR0_EVENT_ERR;
3330 	}
3331 
3332 	/* check for vector0 mailbox(=CMDQ RX) event source */
3333 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3334 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3335 		*clearval = cmdq_src_reg;
3336 		return HCLGE_VECTOR0_EVENT_MBX;
3337 	}
3338 
3339 	/* print other vector0 event source */
3340 	dev_info(&hdev->pdev->dev,
3341 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3342 		 cmdq_src_reg, msix_src_reg);
3343 	*clearval = msix_src_reg;
3344 
3345 	return HCLGE_VECTOR0_EVENT_OTHER;
3346 }
3347 
3348 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3349 				    u32 regclr)
3350 {
3351 	switch (event_type) {
3352 	case HCLGE_VECTOR0_EVENT_RST:
3353 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3354 		break;
3355 	case HCLGE_VECTOR0_EVENT_MBX:
3356 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3357 		break;
3358 	default:
3359 		break;
3360 	}
3361 }
3362 
3363 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3364 {
3365 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3366 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3367 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3368 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3369 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3370 }
3371 
3372 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3373 {
3374 	writel(enable ? 1 : 0, vector->addr);
3375 }
3376 
3377 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3378 {
3379 	struct hclge_dev *hdev = data;
3380 	u32 clearval = 0;
3381 	u32 event_cause;
3382 
3383 	hclge_enable_vector(&hdev->misc_vector, false);
3384 	event_cause = hclge_check_event_cause(hdev, &clearval);
3385 
3386 	/* vector 0 interrupt is shared with reset and mailbox source events. */
3387 	switch (event_cause) {
3388 	case HCLGE_VECTOR0_EVENT_ERR:
3389 		/* we do not know what type of reset is required now. This could
3390 		 * only be decided after we fetch the type of errors which
3391 		 * caused this event. Therefore, we will do below for now:
3392 		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3393 		 *    have deferred the choice of reset type.
3394 		 * 2. Schedule the reset service task.
3395 		 * 3. When the service task receives HNAE3_UNKNOWN_RESET, it
3396 		 *    will fetch the correct type of reset by first decoding
3397 		 *    the types of errors that caused this event.
3398 		 */
3399 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3400 		fallthrough;
3401 	case HCLGE_VECTOR0_EVENT_RST:
3402 		hclge_reset_task_schedule(hdev);
3403 		break;
3404 	case HCLGE_VECTOR0_EVENT_MBX:
3405 		/* If we are here then either:
3406 		 * 1. we are not handling any mbx task and have not been
3407 		 *    scheduled as well,
3408 		 *                        OR
3409 		 * 2. we are handling a mbx task but nothing more has been
3410 		 *    scheduled.
3411 		 * In both cases we should schedule the mbx task, as this
3412 		 * interrupt reports more mbx messages.
3413 		 */
3414 		hclge_mbx_task_schedule(hdev);
3415 		break;
3416 	default:
3417 		dev_warn(&hdev->pdev->dev,
3418 			 "received unknown or unhandled event of vector0\n");
3419 		break;
3420 	}
3421 
3422 	hclge_clear_event_cause(hdev, event_cause, clearval);
3423 
3424 	/* Enable the interrupt if it was not caused by reset. When clearval
3425 	 * equals 0, the interrupt status may have been cleared by hardware
3426 	 * before the driver read the status register; for this case, the
3427 	 * vector0 interrupt should also be re-enabled.
3428 	 */
3429 	if (!clearval ||
3430 	    event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3431 		hclge_enable_vector(&hdev->misc_vector, true);
3432 	}
3433 
3434 	return IRQ_HANDLED;
3435 }
3436 
3437 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3438 {
3439 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3440 		dev_warn(&hdev->pdev->dev,
3441 			 "vector(vector_id %d) has been freed.\n", vector_id);
3442 		return;
3443 	}
3444 
3445 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3446 	hdev->num_msi_left += 1;
3447 	hdev->num_msi_used -= 1;
3448 }
3449 
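/* reserve MSI-X vector 0 for the misc interrupt (reset, mailbox, error) */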
3450 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3451 {
3452 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3453 
3454 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3455 
3456 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3457 	hdev->vector_status[0] = 0;
3458 
3459 	hdev->num_msi_left -= 1;
3460 	hdev->num_msi_used += 1;
3461 }
3462 
3463 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3464 				      const cpumask_t *mask)
3465 {
3466 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3467 					      affinity_notify);
3468 
3469 	cpumask_copy(&hdev->affinity_mask, mask);
3470 }
3471 
3472 static void hclge_irq_affinity_release(struct kref *ref)
3473 {
3474 }
3475 
3476 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3477 {
3478 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3479 			      &hdev->affinity_mask);
3480 
3481 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3482 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3483 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3484 				  &hdev->affinity_notify);
3485 }
3486 
3487 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3488 {
3489 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3490 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3491 }
3492 
3493 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3494 {
3495 	int ret;
3496 
3497 	hclge_get_misc_vector(hdev);
3498 
3499 	/* the IRQ requested here is freed explicitly in hclge_misc_irq_uninit() */
3500 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3501 		 HCLGE_NAME, pci_name(hdev->pdev));
3502 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3503 			  0, hdev->misc_vector.name, hdev);
3504 	if (ret) {
3505 		hclge_free_vector(hdev, 0);
3506 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3507 			hdev->misc_vector.vector_irq);
3508 	}
3509 
3510 	return ret;
3511 }
3512 
3513 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3514 {
3515 	free_irq(hdev->misc_vector.vector_irq, hdev);
3516 	hclge_free_vector(hdev, 0);
3517 }
3518 
3519 int hclge_notify_client(struct hclge_dev *hdev,
3520 			enum hnae3_reset_notify_type type)
3521 {
3522 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3523 	struct hnae3_client *client = hdev->nic_client;
3524 	int ret;
3525 
3526 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3527 		return 0;
3528 
3529 	if (!client->ops->reset_notify)
3530 		return -EOPNOTSUPP;
3531 
3532 	ret = client->ops->reset_notify(handle, type);
3533 	if (ret)
3534 		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3535 			type, ret);
3536 
3537 	return ret;
3538 }
3539 
3540 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3541 				    enum hnae3_reset_notify_type type)
3542 {
3543 	struct hnae3_handle *handle = &hdev->vport[0].roce;
3544 	struct hnae3_client *client = hdev->roce_client;
3545 	int ret;
3546 
3547 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3548 		return 0;
3549 
3550 	if (!client->ops->reset_notify)
3551 		return -EOPNOTSUPP;
3552 
3553 	ret = client->ops->reset_notify(handle, type);
3554 	if (ret)
3555 		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3556 			type, ret);
3557 
3558 	return ret;
3559 }
3560 
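/* poll the hardware reset status register until the reset bit is cleared
 * or the wait times out
 */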
3561 static int hclge_reset_wait(struct hclge_dev *hdev)
3562 {
3563 #define HCLGE_RESET_WAIT_MS	100
3564 #define HCLGE_RESET_WAIT_CNT	350
3565 
3566 	u32 val, reg, reg_bit;
3567 	u32 cnt = 0;
3568 
3569 	switch (hdev->reset_type) {
3570 	case HNAE3_IMP_RESET:
3571 		reg = HCLGE_GLOBAL_RESET_REG;
3572 		reg_bit = HCLGE_IMP_RESET_BIT;
3573 		break;
3574 	case HNAE3_GLOBAL_RESET:
3575 		reg = HCLGE_GLOBAL_RESET_REG;
3576 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3577 		break;
3578 	case HNAE3_FUNC_RESET:
3579 		reg = HCLGE_FUN_RST_ING;
3580 		reg_bit = HCLGE_FUN_RST_ING_B;
3581 		break;
3582 	default:
3583 		dev_err(&hdev->pdev->dev,
3584 			"Wait for unsupported reset type: %d\n",
3585 			hdev->reset_type);
3586 		return -EINVAL;
3587 	}
3588 
3589 	val = hclge_read_dev(&hdev->hw, reg);
3590 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3591 		msleep(HCLGE_RESET_WAIT_MS);
3592 		val = hclge_read_dev(&hdev->hw, reg);
3593 		cnt++;
3594 	}
3595 
3596 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3597 		dev_warn(&hdev->pdev->dev,
3598 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3599 		return -EBUSY;
3600 	}
3601 
3602 	return 0;
3603 }
3604 
3605 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3606 {
3607 	struct hclge_vf_rst_cmd *req;
3608 	struct hclge_desc desc;
3609 
3610 	req = (struct hclge_vf_rst_cmd *)desc.data;
3611 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3612 	req->dest_vfid = func_id;
3613 
3614 	if (reset)
3615 		req->vf_rst = 0x1;
3616 
3617 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3618 }
3619 
3620 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3621 {
3622 	int i;
3623 
3624 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3625 		struct hclge_vport *vport = &hdev->vport[i];
3626 		int ret;
3627 
3628 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3629 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3630 		if (ret) {
3631 			dev_err(&hdev->pdev->dev,
3632 				"set vf(%u) rst failed %d!\n",
3633 				vport->vport_id, ret);
3634 			return ret;
3635 		}
3636 
3637 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3638 			continue;
3639 
3640 		/* Inform VF to process the reset.
3641 		 * hclge_inform_reset_assert_to_vf may fail if VF
3642 		 * driver is not loaded.
3643 		 */
3644 		ret = hclge_inform_reset_assert_to_vf(vport);
3645 		if (ret)
3646 			dev_warn(&hdev->pdev->dev,
3647 				 "inform reset to vf(%u) failed %d!\n",
3648 				 vport->vport_id, ret);
3649 	}
3650 
3651 	return 0;
3652 }
3653 
3654 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3655 {
3656 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3657 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3658 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3659 		return;
3660 
3661 	hclge_mbx_handler(hdev);
3662 
3663 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3664 }
3665 
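/* wait for all VFs to report that they have stopped IO before the PF or FLR
 * reset proceeds
 */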
3666 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3667 {
3668 	struct hclge_pf_rst_sync_cmd *req;
3669 	struct hclge_desc desc;
3670 	int cnt = 0;
3671 	int ret;
3672 
3673 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3674 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3675 
3676 	do {
3677 		/* VF needs to down its netdev by mbx during PF or FLR reset */
3678 		hclge_mailbox_service_task(hdev);
3679 
3680 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3681 		/* to be compatible with old firmware, wait
3682 		 * 100 ms for the VF to stop IO
3683 		 */
3684 		if (ret == -EOPNOTSUPP) {
3685 			msleep(HCLGE_RESET_SYNC_TIME);
3686 			return;
3687 		} else if (ret) {
3688 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3689 				 ret);
3690 			return;
3691 		} else if (req->all_vf_ready) {
3692 			return;
3693 		}
3694 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3695 		hclge_cmd_reuse_desc(&desc, true);
3696 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3697 
3698 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3699 }
3700 
3701 void hclge_report_hw_error(struct hclge_dev *hdev,
3702 			   enum hnae3_hw_error_type type)
3703 {
3704 	struct hnae3_client *client = hdev->nic_client;
3705 
3706 	if (!client || !client->ops->process_hw_error ||
3707 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3708 		return;
3709 
3710 	client->ops->process_hw_error(&hdev->vport[0].nic, type);
3711 }
3712 
3713 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3714 {
3715 	u32 reg_val;
3716 
3717 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3718 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3719 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3720 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3721 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3722 	}
3723 
3724 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3725 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3726 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3727 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3728 	}
3729 }
3730 
3731 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3732 {
3733 	struct hclge_desc desc;
3734 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3735 	int ret;
3736 
3737 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3738 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3739 	req->fun_reset_vfid = func_id;
3740 
3741 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3742 	if (ret)
3743 		dev_err(&hdev->pdev->dev,
3744 			"send function reset cmd fail, status =%d\n", ret);
3745 
3746 	return ret;
3747 }
3748 
3749 static void hclge_do_reset(struct hclge_dev *hdev)
3750 {
3751 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3752 	struct pci_dev *pdev = hdev->pdev;
3753 	u32 val;
3754 
3755 	if (hclge_get_hw_reset_stat(handle)) {
3756 		dev_info(&pdev->dev, "hardware reset not finish\n");
3757 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3758 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3759 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3760 		return;
3761 	}
3762 
3763 	switch (hdev->reset_type) {
3764 	case HNAE3_GLOBAL_RESET:
3765 		dev_info(&pdev->dev, "global reset requested\n");
3766 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3767 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3768 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3769 		break;
3770 	case HNAE3_FUNC_RESET:
3771 		dev_info(&pdev->dev, "PF reset requested\n");
3772 		/* schedule again to check later */
3773 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3774 		hclge_reset_task_schedule(hdev);
3775 		break;
3776 	default:
3777 		dev_warn(&pdev->dev,
3778 			 "unsupported reset type: %d\n", hdev->reset_type);
3779 		break;
3780 	}
3781 }
3782 
3783 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3784 						   unsigned long *addr)
3785 {
3786 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3787 	struct hclge_dev *hdev = ae_dev->priv;
3788 
3789 	/* first, resolve any unknown reset type to the known type(s) */
3790 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3791 		u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3792 					HCLGE_MISC_VECTOR_INT_STS);
3793 		/* we will intentionally ignore any errors from this function
3794 		 * as we will end up in *some* reset request in any case
3795 		 */
3796 		if (hclge_handle_hw_msix_error(hdev, addr))
3797 			dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3798 				 msix_sts_reg);
3799 
3800 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
3801 		/* We deferred the clearing of the error event which caused
3802 		 * the interrupt since it was not possible to do that in
3803 		 * interrupt context (and this is the reason we introduced
3804 		 * the new UNKNOWN reset type). Now that the errors have been
3805 		 * handled and cleared in hardware, we can safely enable
3806 		 * interrupts. This is an exception to the norm.
3807 		 */
3808 		hclge_enable_vector(&hdev->misc_vector, true);
3809 	}
3810 
3811 	/* return the highest priority reset level amongst all */
3812 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3813 		rst_level = HNAE3_IMP_RESET;
3814 		clear_bit(HNAE3_IMP_RESET, addr);
3815 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3816 		clear_bit(HNAE3_FUNC_RESET, addr);
3817 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3818 		rst_level = HNAE3_GLOBAL_RESET;
3819 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3820 		clear_bit(HNAE3_FUNC_RESET, addr);
3821 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3822 		rst_level = HNAE3_FUNC_RESET;
3823 		clear_bit(HNAE3_FUNC_RESET, addr);
3824 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3825 		rst_level = HNAE3_FLR_RESET;
3826 		clear_bit(HNAE3_FLR_RESET, addr);
3827 	}
3828 
3829 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3830 	    rst_level < hdev->reset_type)
3831 		return HNAE3_NONE_RESET;
3832 
3833 	return rst_level;
3834 }
3835 
3836 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3837 {
3838 	u32 clearval = 0;
3839 
3840 	switch (hdev->reset_type) {
3841 	case HNAE3_IMP_RESET:
3842 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3843 		break;
3844 	case HNAE3_GLOBAL_RESET:
3845 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3846 		break;
3847 	default:
3848 		break;
3849 	}
3850 
3851 	if (!clearval)
3852 		return;
3853 
3854 	/* For revision 0x20, the reset interrupt source
3855 	 * can only be cleared after the hardware reset is done
3856 	 */
3857 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3858 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3859 				clearval);
3860 
3861 	hclge_enable_vector(&hdev->misc_vector, true);
3862 }
3863 
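/* set or clear the software ready bit used to handshake the reset with the
 * hardware
 */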
3864 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3865 {
3866 	u32 reg_val;
3867 
3868 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3869 	if (enable)
3870 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3871 	else
3872 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3873 
3874 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3875 }
3876 
3877 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3878 {
3879 	int ret;
3880 
3881 	ret = hclge_set_all_vf_rst(hdev, true);
3882 	if (ret)
3883 		return ret;
3884 
3885 	hclge_func_reset_sync_vf(hdev);
3886 
3887 	return 0;
3888 }
3889 
3890 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3891 {
3892 	u32 reg_val;
3893 	int ret = 0;
3894 
3895 	switch (hdev->reset_type) {
3896 	case HNAE3_FUNC_RESET:
3897 		ret = hclge_func_reset_notify_vf(hdev);
3898 		if (ret)
3899 			return ret;
3900 
3901 		ret = hclge_func_reset_cmd(hdev, 0);
3902 		if (ret) {
3903 			dev_err(&hdev->pdev->dev,
3904 				"asserting function reset fail %d!\n", ret);
3905 			return ret;
3906 		}
3907 
3908 		/* After performing PF reset, it is not necessary to do the
3909 		 * mailbox handling or send any command to firmware, because
3910 		 * any mailbox handling or command to firmware is only valid
3911 		 * after hclge_cmd_init is called.
3912 		 */
3913 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3914 		hdev->rst_stats.pf_rst_cnt++;
3915 		break;
3916 	case HNAE3_FLR_RESET:
3917 		ret = hclge_func_reset_notify_vf(hdev);
3918 		if (ret)
3919 			return ret;
3920 		break;
3921 	case HNAE3_IMP_RESET:
3922 		hclge_handle_imp_error(hdev);
3923 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3924 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3925 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3926 		break;
3927 	default:
3928 		break;
3929 	}
3930 
3931 	/* inform hardware that preparatory work is done */
3932 	msleep(HCLGE_RESET_SYNC_TIME);
3933 	hclge_reset_handshake(hdev, true);
3934 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3935 
3936 	return ret;
3937 }
3938 
3939 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3940 {
3941 #define MAX_RESET_FAIL_CNT 5
3942 
3943 	if (hdev->reset_pending) {
3944 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3945 			 hdev->reset_pending);
3946 		return true;
3947 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3948 		   HCLGE_RESET_INT_M) {
3949 		dev_info(&hdev->pdev->dev,
3950 			 "reset failed because of new reset interrupt\n");
3951 		hclge_clear_reset_cause(hdev);
3952 		return false;
3953 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3954 		hdev->rst_stats.reset_fail_cnt++;
3955 		set_bit(hdev->reset_type, &hdev->reset_pending);
3956 		dev_info(&hdev->pdev->dev,
3957 			 "re-schedule reset task(%u)\n",
3958 			 hdev->rst_stats.reset_fail_cnt);
3959 		return true;
3960 	}
3961 
3962 	hclge_clear_reset_cause(hdev);
3963 
3964 	/* recover the handshake status when reset fails */
3965 	hclge_reset_handshake(hdev, true);
3966 
3967 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3968 
3969 	hclge_dbg_dump_rst_info(hdev);
3970 
3971 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3972 
3973 	return false;
3974 }
3975 
3976 static void hclge_update_reset_level(struct hclge_dev *hdev)
3977 {
3978 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3979 	enum hnae3_reset_type reset_level;
3980 
3981 	/* if default_reset_request has a higher level reset request,
3982 	 * it should be handled as soon as possible, since some errors
3983 	 * need this kind of reset to be fixed.
3984 	 */
3985 	reset_level = hclge_get_reset_level(ae_dev,
3986 					    &hdev->default_reset_request);
3987 	if (reset_level != HNAE3_NONE_RESET)
3988 		set_bit(reset_level, &hdev->reset_request);
3989 }
3990 
3991 static int hclge_set_rst_done(struct hclge_dev *hdev)
3992 {
3993 	struct hclge_pf_rst_done_cmd *req;
3994 	struct hclge_desc desc;
3995 	int ret;
3996 
3997 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
3998 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3999 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4000 
4001 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4002 	/* To be compatible with the old firmware, which does not support
4003 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4004 	 * return success
4005 	 */
4006 	if (ret == -EOPNOTSUPP) {
4007 		dev_warn(&hdev->pdev->dev,
4008 			 "current firmware does not support command(0x%x)!\n",
4009 			 HCLGE_OPC_PF_RST_DONE);
4010 		return 0;
4011 	} else if (ret) {
4012 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4013 			ret);
4014 	}
4015 
4016 	return ret;
4017 }
4018 
4019 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4020 {
4021 	int ret = 0;
4022 
4023 	switch (hdev->reset_type) {
4024 	case HNAE3_FUNC_RESET:
4025 	case HNAE3_FLR_RESET:
4026 		ret = hclge_set_all_vf_rst(hdev, false);
4027 		break;
4028 	case HNAE3_GLOBAL_RESET:
4029 	case HNAE3_IMP_RESET:
4030 		ret = hclge_set_rst_done(hdev);
4031 		break;
4032 	default:
4033 		break;
4034 	}
4035 
4036 	/* clear up the handshake status after re-initialization is done */
4037 	hclge_reset_handshake(hdev, false);
4038 
4039 	return ret;
4040 }
4041 
4042 static int hclge_reset_stack(struct hclge_dev *hdev)
4043 {
4044 	int ret;
4045 
4046 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4047 	if (ret)
4048 		return ret;
4049 
4050 	ret = hclge_reset_ae_dev(hdev->ae_dev);
4051 	if (ret)
4052 		return ret;
4053 
4054 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4055 }
4056 
4057 static int hclge_reset_prepare(struct hclge_dev *hdev)
4058 {
4059 	int ret;
4060 
4061 	hdev->rst_stats.reset_cnt++;
4062 	/* perform reset of the stack & ae device for a client */
4063 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4064 	if (ret)
4065 		return ret;
4066 
4067 	rtnl_lock();
4068 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4069 	rtnl_unlock();
4070 	if (ret)
4071 		return ret;
4072 
4073 	return hclge_reset_prepare_wait(hdev);
4074 }
4075 
4076 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4077 {
4078 	int ret;
4079 
4080 	hdev->rst_stats.hw_reset_done_cnt++;
4081 
4082 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4083 	if (ret)
4084 		return ret;
4085 
4086 	rtnl_lock();
4087 	ret = hclge_reset_stack(hdev);
4088 	rtnl_unlock();
4089 	if (ret)
4090 		return ret;
4091 
4092 	hclge_clear_reset_cause(hdev);
4093 
4094 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4095 	/* ignore the RoCE notify error once reset has failed
4096 	 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
4097 	 */
4098 	if (ret &&
4099 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4100 		return ret;
4101 
4102 	ret = hclge_reset_prepare_up(hdev);
4103 	if (ret)
4104 		return ret;
4105 
4106 	rtnl_lock();
4107 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4108 	rtnl_unlock();
4109 	if (ret)
4110 		return ret;
4111 
4112 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4113 	if (ret)
4114 		return ret;
4115 
4116 	hdev->last_reset_time = jiffies;
4117 	hdev->rst_stats.reset_fail_cnt = 0;
4118 	hdev->rst_stats.reset_done_cnt++;
4119 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4120 
4121 	hclge_update_reset_level(hdev);
4122 
4123 	return 0;
4124 }
4125 
4126 static void hclge_reset(struct hclge_dev *hdev)
4127 {
4128 	if (hclge_reset_prepare(hdev))
4129 		goto err_reset;
4130 
4131 	if (hclge_reset_wait(hdev))
4132 		goto err_reset;
4133 
4134 	if (hclge_reset_rebuild(hdev))
4135 		goto err_reset;
4136 
4137 	return;
4138 
4139 err_reset:
4140 	if (hclge_reset_err_handle(hdev))
4141 		hclge_reset_task_schedule(hdev);
4142 }
4143 
4144 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4145 {
4146 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4147 	struct hclge_dev *hdev = ae_dev->priv;
4148 
4149 	/* We might end up getting called broadly because of the 2 cases below:
4150 	 * 1. A recoverable error was conveyed through APEI and the only way
4151 	 *    to bring back normalcy is to reset.
4152 	 * 2. A new reset request from the stack due to timeout.
4153 	 *
4154 	 * Check if this is a new reset request and we are not here just because
4155 	 * the last reset attempt did not succeed and the watchdog hit us again.
4156 	 * We will know this if the last reset request did not occur very
4157 	 * recently (watchdog timer = 5 * HZ, so check after a sufficiently
4158 	 * large time, say 4 * 5 * HZ). In case of a new request we reset the
4159 	 * "reset level" to PF reset. And if it is a repeat of the most recent
4160 	 * reset request, then we want to make sure we throttle it. Therefore,
4161 	 * we will not allow it again before 3 * HZ.
4162 	 */
4163 
4164 	if (time_before(jiffies, (hdev->last_reset_time +
4165 				  HCLGE_RESET_INTERVAL))) {
4166 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4167 		return;
4168 	}
4169 
4170 	if (hdev->default_reset_request) {
4171 		hdev->reset_level =
4172 			hclge_get_reset_level(ae_dev,
4173 					      &hdev->default_reset_request);
4174 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4175 		hdev->reset_level = HNAE3_FUNC_RESET;
4176 	}
4177 
4178 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4179 		 hdev->reset_level);
4180 
4181 	/* request reset & schedule reset task */
4182 	set_bit(hdev->reset_level, &hdev->reset_request);
4183 	hclge_reset_task_schedule(hdev);
4184 
4185 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4186 		hdev->reset_level++;
4187 }
4188 
4189 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4190 					enum hnae3_reset_type rst_type)
4191 {
4192 	struct hclge_dev *hdev = ae_dev->priv;
4193 
4194 	set_bit(rst_type, &hdev->default_reset_request);
4195 }
4196 
4197 static void hclge_reset_timer(struct timer_list *t)
4198 {
4199 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4200 
4201 	/* if default_reset_request has no value, it means that this reset
4202 	 * request has already been handled, so just return here
4203 	 */
4204 	if (!hdev->default_reset_request)
4205 		return;
4206 
4207 	dev_info(&hdev->pdev->dev,
4208 		 "triggering reset in reset timer\n");
4209 	hclge_reset_event(hdev->pdev, NULL);
4210 }
4211 
4212 static void hclge_reset_subtask(struct hclge_dev *hdev)
4213 {
4214 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4215 
4216 	/* check if there is any ongoing reset in the hardware. This status can
4217 	 * be checked from reset_pending. If there is, then we need to wait for
4218 	 * hardware to complete the reset.
4219 	 *    a. If we are able to figure out in a reasonable time that hardware
4220 	 *       has fully reset, then we can proceed with the driver and client
4221 	 *       reset.
4222 	 *    b. Else, we can come back later to check this status, so
4223 	 *       reschedule now.
4224 	 */
4225 	hdev->last_reset_time = jiffies;
4226 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4227 	if (hdev->reset_type != HNAE3_NONE_RESET)
4228 		hclge_reset(hdev);
4229 
4230 	/* check if we got any *new* reset requests to be honored */
4231 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4232 	if (hdev->reset_type != HNAE3_NONE_RESET)
4233 		hclge_do_reset(hdev);
4234 
4235 	hdev->reset_type = HNAE3_NONE_RESET;
4236 }
4237 
4238 static void hclge_reset_service_task(struct hclge_dev *hdev)
4239 {
4240 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4241 		return;
4242 
4243 	down(&hdev->reset_sem);
4244 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4245 
4246 	hclge_reset_subtask(hdev);
4247 
4248 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4249 	up(&hdev->reset_sem);
4250 }
4251 
4252 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4253 {
4254 	int i;
4255 
4256 	/* start from vport 1 since the PF (vport 0) is always alive */
4257 	for (i = 1; i < hdev->num_alloc_vport; i++) {
4258 		struct hclge_vport *vport = &hdev->vport[i];
4259 
4260 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4261 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4262 
4263 		/* If vf is not alive, set to default value */
4264 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4265 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4266 	}
4267 }
4268 
4269 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4270 {
4271 	unsigned long delta = round_jiffies_relative(HZ);
4272 
4273 	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4274 		return;
4275 
4276 	/* Always handle the link updating to make sure link state is
4277 	 * updated when it is triggered by mbx.
4278 	 */
4279 	hclge_update_link_status(hdev);
4280 	hclge_sync_mac_table(hdev);
4281 	hclge_sync_promisc_mode(hdev);
4282 	hclge_sync_fd_table(hdev);
4283 
4284 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4285 		delta = jiffies - hdev->last_serv_processed;
4286 
4287 		if (delta < round_jiffies_relative(HZ)) {
4288 			delta = round_jiffies_relative(HZ) - delta;
4289 			goto out;
4290 		}
4291 	}
4292 
4293 	hdev->serv_processed_cnt++;
4294 	hclge_update_vport_alive(hdev);
4295 
4296 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4297 		hdev->last_serv_processed = jiffies;
4298 		goto out;
4299 	}
4300 
4301 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4302 		hclge_update_stats_for_all(hdev);
4303 
4304 	hclge_update_port_info(hdev);
4305 	hclge_sync_vlan_filter(hdev);
4306 
4307 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4308 		hclge_rfs_filter_expire(hdev);
4309 
4310 	hdev->last_serv_processed = jiffies;
4311 
4312 out:
4313 	hclge_task_schedule(hdev, delta);
4314 }
4315 
4316 static void hclge_service_task(struct work_struct *work)
4317 {
4318 	struct hclge_dev *hdev =
4319 		container_of(work, struct hclge_dev, service_task.work);
4320 
4321 	hclge_reset_service_task(hdev);
4322 	hclge_mailbox_service_task(hdev);
4323 	hclge_periodic_service_task(hdev);
4324 
4325 	/* Handle reset and mbx again in case periodical task delays the
4326 	 * handling by calling hclge_task_schedule() in
4327 	 * hclge_periodic_service_task().
4328 	 */
4329 	hclge_reset_service_task(hdev);
4330 	hclge_mailbox_service_task(hdev);
4331 }
4332 
4333 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4334 {
4335 	/* VF handle has no client */
4336 	if (!handle->client)
4337 		return container_of(handle, struct hclge_vport, nic);
4338 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4339 		return container_of(handle, struct hclge_vport, roce);
4340 	else
4341 		return container_of(handle, struct hclge_vport, nic);
4342 }
4343 
4344 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4345 				  struct hnae3_vector_info *vector_info)
4346 {
4347 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64
4348 
4349 	vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4350 
4351 	/* need an extended offset to configure vectors >= 64 */
4352 	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4353 		vector_info->io_addr = hdev->hw.io_base +
4354 				HCLGE_VECTOR_REG_BASE +
4355 				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4356 	else
4357 		vector_info->io_addr = hdev->hw.io_base +
4358 				HCLGE_VECTOR_EXT_REG_BASE +
4359 				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4360 				HCLGE_VECTOR_REG_OFFSET_H +
4361 				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4362 				HCLGE_VECTOR_REG_OFFSET;
4363 
4364 	hdev->vector_status[idx] = hdev->vport[0].vport_id;
4365 	hdev->vector_irq[idx] = vector_info->vector;
4366 }
4367 
4368 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4369 			    struct hnae3_vector_info *vector_info)
4370 {
4371 	struct hclge_vport *vport = hclge_get_vport(handle);
4372 	struct hnae3_vector_info *vector = vector_info;
4373 	struct hclge_dev *hdev = vport->back;
4374 	int alloc = 0;
4375 	u16 i = 0;
4376 	u16 j;
4377 
4378 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4379 	vector_num = min(hdev->num_msi_left, vector_num);
4380 
4381 	for (j = 0; j < vector_num; j++) {
4382 		while (++i < hdev->num_nic_msi) {
4383 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4384 				hclge_get_vector_info(hdev, i, vector);
4385 				vector++;
4386 				alloc++;
4387 
4388 				break;
4389 			}
4390 		}
4391 	}
4392 	hdev->num_msi_left -= alloc;
4393 	hdev->num_msi_used += alloc;
4394 
4395 	return alloc;
4396 }
4397 
4398 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4399 {
4400 	int i;
4401 
4402 	for (i = 0; i < hdev->num_msi; i++)
4403 		if (vector == hdev->vector_irq[i])
4404 			return i;
4405 
4406 	return -EINVAL;
4407 }
4408 
4409 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4410 {
4411 	struct hclge_vport *vport = hclge_get_vport(handle);
4412 	struct hclge_dev *hdev = vport->back;
4413 	int vector_id;
4414 
4415 	vector_id = hclge_get_vector_index(hdev, vector);
4416 	if (vector_id < 0) {
4417 		dev_err(&hdev->pdev->dev,
4418 			"Get vector index fail. vector = %d\n", vector);
4419 		return vector_id;
4420 	}
4421 
4422 	hclge_free_vector(hdev, vector_id);
4423 
4424 	return 0;
4425 }
4426 
4427 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4428 {
4429 	return HCLGE_RSS_KEY_SIZE;
4430 }
4431 
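/* write the RSS hash algorithm and hash key to hardware, splitting the key
 * across multiple descriptors of HCLGE_RSS_HASH_KEY_NUM bytes each
 */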
4432 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4433 				  const u8 hfunc, const u8 *key)
4434 {
4435 	struct hclge_rss_config_cmd *req;
4436 	unsigned int key_offset = 0;
4437 	struct hclge_desc desc;
4438 	int key_counts;
4439 	int key_size;
4440 	int ret;
4441 
4442 	key_counts = HCLGE_RSS_KEY_SIZE;
4443 	req = (struct hclge_rss_config_cmd *)desc.data;
4444 
4445 	while (key_counts) {
4446 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4447 					   false);
4448 
4449 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4450 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4451 
4452 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4453 		memcpy(req->hash_key,
4454 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4455 
4456 		key_counts -= key_size;
4457 		key_offset++;
4458 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4459 		if (ret) {
4460 			dev_err(&hdev->pdev->dev,
4461 				"Configure RSS config fail, status = %d\n",
4462 				ret);
4463 			return ret;
4464 		}
4465 	}
4466 	return 0;
4467 }
4468 
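/* program the RSS indirection table, HCLGE_RSS_CFG_TBL_SIZE entries per
 * command descriptor
 */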
4469 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4470 {
4471 	struct hclge_rss_indirection_table_cmd *req;
4472 	struct hclge_desc desc;
4473 	int rss_cfg_tbl_num;
4474 	u8 rss_msb_oft;
4475 	u8 rss_msb_val;
4476 	int ret;
4477 	u16 qid;
4478 	int i;
4479 	u32 j;
4480 
4481 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4482 	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4483 			  HCLGE_RSS_CFG_TBL_SIZE;
4484 
4485 	for (i = 0; i < rss_cfg_tbl_num; i++) {
4486 		hclge_cmd_setup_basic_desc
4487 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4488 
4489 		req->start_table_index =
4490 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4491 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4492 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4493 			qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4494 			req->rss_qid_l[j] = qid & 0xff;
4495 			rss_msb_oft =
4496 				j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4497 			rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4498 				(j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4499 			req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4500 		}
4501 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4502 		if (ret) {
4503 			dev_err(&hdev->pdev->dev,
4504 				"Configure rss indir table fail,status = %d\n",
4505 				ret);
4506 			return ret;
4507 		}
4508 	}
4509 	return 0;
4510 }
4511 
4512 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4513 				 u16 *tc_size, u16 *tc_offset)
4514 {
4515 	struct hclge_rss_tc_mode_cmd *req;
4516 	struct hclge_desc desc;
4517 	int ret;
4518 	int i;
4519 
4520 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4521 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4522 
4523 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4524 		u16 mode = 0;
4525 
4526 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4527 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4528 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4529 		hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4530 			      tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4531 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4532 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4533 
4534 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4535 	}
4536 
4537 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4538 	if (ret)
4539 		dev_err(&hdev->pdev->dev,
4540 			"Configure rss tc mode fail, status = %d\n", ret);
4541 
4542 	return ret;
4543 }
4544 
4545 static void hclge_get_rss_type(struct hclge_vport *vport)
4546 {
4547 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4548 	    vport->rss_tuple_sets.ipv4_udp_en ||
4549 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4550 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4551 	    vport->rss_tuple_sets.ipv6_udp_en ||
4552 	    vport->rss_tuple_sets.ipv6_sctp_en)
4553 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4554 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4555 		 vport->rss_tuple_sets.ipv6_fragment_en)
4556 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4557 	else
4558 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4559 }
4560 
4561 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4562 {
4563 	struct hclge_rss_input_tuple_cmd *req;
4564 	struct hclge_desc desc;
4565 	int ret;
4566 
4567 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4568 
4569 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4570 
4571 	/* Get the tuple cfg from the PF */
4572 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4573 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4574 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4575 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4576 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4577 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4578 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4579 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4580 	hclge_get_rss_type(&hdev->vport[0]);
4581 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4582 	if (ret)
4583 		dev_err(&hdev->pdev->dev,
4584 			"Configure rss input fail, status = %d\n", ret);
4585 	return ret;
4586 }
4587 
4588 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4589 			 u8 *key, u8 *hfunc)
4590 {
4591 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4592 	struct hclge_vport *vport = hclge_get_vport(handle);
4593 	int i;
4594 
4595 	/* Get hash algorithm */
4596 	if (hfunc) {
4597 		switch (vport->rss_algo) {
4598 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4599 			*hfunc = ETH_RSS_HASH_TOP;
4600 			break;
4601 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4602 			*hfunc = ETH_RSS_HASH_XOR;
4603 			break;
4604 		default:
4605 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4606 			break;
4607 		}
4608 	}
4609 
4610 	/* Get the RSS Key required by the user */
4611 	if (key)
4612 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4613 
4614 	/* Get indirect table */
4615 	if (indir)
4616 		for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4617 			indir[i] = vport->rss_indirection_tbl[i];
4618 
4619 	return 0;
4620 }
4621 
4622 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4623 			 const  u8 *key, const  u8 hfunc)
4624 {
4625 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4626 	struct hclge_vport *vport = hclge_get_vport(handle);
4627 	struct hclge_dev *hdev = vport->back;
4628 	u8 hash_algo;
4629 	int ret, i;
4630 
4631 	/* Set the RSS Hash Key if specified by the user */
4632 	if (key) {
4633 		switch (hfunc) {
4634 		case ETH_RSS_HASH_TOP:
4635 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4636 			break;
4637 		case ETH_RSS_HASH_XOR:
4638 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4639 			break;
4640 		case ETH_RSS_HASH_NO_CHANGE:
4641 			hash_algo = vport->rss_algo;
4642 			break;
4643 		default:
4644 			return -EINVAL;
4645 		}
4646 
4647 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4648 		if (ret)
4649 			return ret;
4650 
4651 		/* Update the shadow RSS key with the user specified key */
4652 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4653 		vport->rss_algo = hash_algo;
4654 	}
4655 
4656 	/* Update the shadow RSS table with user specified qids */
4657 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4658 		vport->rss_indirection_tbl[i] = indir[i];
4659 
4660 	/* Update the hardware */
4661 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4662 }
4663 
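/* convert the ethtool RXH_* flags in nfc->data into the driver's tuple bits */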
4664 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4665 {
4666 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4667 
4668 	if (nfc->data & RXH_L4_B_2_3)
4669 		hash_sets |= HCLGE_D_PORT_BIT;
4670 	else
4671 		hash_sets &= ~HCLGE_D_PORT_BIT;
4672 
4673 	if (nfc->data & RXH_IP_SRC)
4674 		hash_sets |= HCLGE_S_IP_BIT;
4675 	else
4676 		hash_sets &= ~HCLGE_S_IP_BIT;
4677 
4678 	if (nfc->data & RXH_IP_DST)
4679 		hash_sets |= HCLGE_D_IP_BIT;
4680 	else
4681 		hash_sets &= ~HCLGE_D_IP_BIT;
4682 
4683 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4684 		hash_sets |= HCLGE_V_TAG_BIT;
4685 
4686 	return hash_sets;
4687 }
4688 
4689 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4690 				    struct ethtool_rxnfc *nfc,
4691 				    struct hclge_rss_input_tuple_cmd *req)
4692 {
4693 	struct hclge_dev *hdev = vport->back;
4694 	u8 tuple_sets;
4695 
4696 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4697 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4698 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4699 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4700 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4701 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4702 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4703 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4704 
4705 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4706 	switch (nfc->flow_type) {
4707 	case TCP_V4_FLOW:
4708 		req->ipv4_tcp_en = tuple_sets;
4709 		break;
4710 	case TCP_V6_FLOW:
4711 		req->ipv6_tcp_en = tuple_sets;
4712 		break;
4713 	case UDP_V4_FLOW:
4714 		req->ipv4_udp_en = tuple_sets;
4715 		break;
4716 	case UDP_V6_FLOW:
4717 		req->ipv6_udp_en = tuple_sets;
4718 		break;
4719 	case SCTP_V4_FLOW:
4720 		req->ipv4_sctp_en = tuple_sets;
4721 		break;
4722 	case SCTP_V6_FLOW:
4723 		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4724 		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4725 			return -EINVAL;
4726 
4727 		req->ipv6_sctp_en = tuple_sets;
4728 		break;
4729 	case IPV4_FLOW:
4730 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4731 		break;
4732 	case IPV6_FLOW:
4733 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4734 		break;
4735 	default:
4736 		return -EINVAL;
4737 	}
4738 
4739 	return 0;
4740 }
4741 
4742 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4743 			       struct ethtool_rxnfc *nfc)
4744 {
4745 	struct hclge_vport *vport = hclge_get_vport(handle);
4746 	struct hclge_dev *hdev = vport->back;
4747 	struct hclge_rss_input_tuple_cmd *req;
4748 	struct hclge_desc desc;
4749 	int ret;
4750 
4751 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4752 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4753 		return -EINVAL;
4754 
4755 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4756 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4757 
4758 	ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4759 	if (ret) {
4760 		dev_err(&hdev->pdev->dev,
4761 			"failed to init rss tuple cmd, ret = %d\n", ret);
4762 		return ret;
4763 	}
4764 
4765 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4766 	if (ret) {
4767 		dev_err(&hdev->pdev->dev,
4768 			"Set rss tuple fail, status = %d\n", ret);
4769 		return ret;
4770 	}
4771 
4772 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4773 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4774 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4775 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4776 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4777 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4778 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4779 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4780 	hclge_get_rss_type(vport);
4781 	return 0;
4782 }
4783 
4784 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4785 				     u8 *tuple_sets)
4786 {
4787 	switch (flow_type) {
4788 	case TCP_V4_FLOW:
4789 		*tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4790 		break;
4791 	case UDP_V4_FLOW:
4792 		*tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4793 		break;
4794 	case TCP_V6_FLOW:
4795 		*tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4796 		break;
4797 	case UDP_V6_FLOW:
4798 		*tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4799 		break;
4800 	case SCTP_V4_FLOW:
4801 		*tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4802 		break;
4803 	case SCTP_V6_FLOW:
4804 		*tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4805 		break;
4806 	case IPV4_FLOW:
4807 	case IPV6_FLOW:
4808 		*tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4809 		break;
4810 	default:
4811 		return -EINVAL;
4812 	}
4813 
4814 	return 0;
4815 }
4816 
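/* convert the driver's tuple bits back into ethtool RXH_* flags */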
4817 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4818 {
4819 	u64 tuple_data = 0;
4820 
4821 	if (tuple_sets & HCLGE_D_PORT_BIT)
4822 		tuple_data |= RXH_L4_B_2_3;
4823 	if (tuple_sets & HCLGE_S_PORT_BIT)
4824 		tuple_data |= RXH_L4_B_0_1;
4825 	if (tuple_sets & HCLGE_D_IP_BIT)
4826 		tuple_data |= RXH_IP_DST;
4827 	if (tuple_sets & HCLGE_S_IP_BIT)
4828 		tuple_data |= RXH_IP_SRC;
4829 
4830 	return tuple_data;
4831 }
4832 
4833 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4834 			       struct ethtool_rxnfc *nfc)
4835 {
4836 	struct hclge_vport *vport = hclge_get_vport(handle);
4837 	u8 tuple_sets;
4838 	int ret;
4839 
4840 	nfc->data = 0;
4841 
4842 	ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4843 	if (ret || !tuple_sets)
4844 		return ret;
4845 
4846 	nfc->data = hclge_convert_rss_tuple(tuple_sets);
4847 
4848 	return 0;
4849 }
4850 
4851 static int hclge_get_tc_size(struct hnae3_handle *handle)
4852 {
4853 	struct hclge_vport *vport = hclge_get_vport(handle);
4854 	struct hclge_dev *hdev = vport->back;
4855 
4856 	return hdev->pf_rss_size_max;
4857 }
4858 
4859 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4860 {
4861 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4862 	struct hclge_vport *vport = hdev->vport;
4863 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4864 	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4865 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4866 	struct hnae3_tc_info *tc_info;
4867 	u16 roundup_size;
4868 	u16 rss_size;
4869 	int i;
4870 
4871 	tc_info = &vport->nic.kinfo.tc_info;
4872 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4873 		rss_size = tc_info->tqp_count[i];
4874 		tc_valid[i] = 0;
4875 
4876 		if (!(hdev->hw_tc_map & BIT(i)))
4877 			continue;
4878 
4879 		/* tc_size set to hardware is the log2 of the roundup power of
4880 		 * two of rss_size; the actual queue size is limited by the
4881 		 * indirection table.
4882 		 */
4883 		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4884 		    rss_size == 0) {
4885 			dev_err(&hdev->pdev->dev,
4886 				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
4887 				rss_size);
4888 			return -EINVAL;
4889 		}
4890 
4891 		roundup_size = roundup_pow_of_two(rss_size);
4892 		roundup_size = ilog2(roundup_size);
4893 
4894 		tc_valid[i] = 1;
4895 		tc_size[i] = roundup_size;
4896 		tc_offset[i] = tc_info->tqp_offset[i];
4897 	}
4898 
4899 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4900 }
4901 
4902 int hclge_rss_init_hw(struct hclge_dev *hdev)
4903 {
4904 	struct hclge_vport *vport = hdev->vport;
4905 	u16 *rss_indir = vport[0].rss_indirection_tbl;
4906 	u8 *key = vport[0].rss_hash_key;
4907 	u8 hfunc = vport[0].rss_algo;
4908 	int ret;
4909 
4910 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4911 	if (ret)
4912 		return ret;
4913 
4914 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4915 	if (ret)
4916 		return ret;
4917 
4918 	ret = hclge_set_rss_input_tuple(hdev);
4919 	if (ret)
4920 		return ret;
4921 
4922 	return hclge_init_rss_tc_mode(hdev);
4923 }
4924 
4925 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4926 {
4927 	struct hclge_vport *vport = &hdev->vport[0];
4928 	int i;
4929 
4930 	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
4931 		vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
4932 }
4933 
4934 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
4935 {
4936 	u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
4937 	int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4938 	struct hclge_vport *vport = &hdev->vport[0];
4939 	u16 *rss_ind_tbl;
4940 
4941 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4942 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4943 
4944 	vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4945 	vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4946 	vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
4947 	vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4948 	vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4949 	vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4950 	vport->rss_tuple_sets.ipv6_sctp_en =
4951 		hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4952 		HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4953 		HCLGE_RSS_INPUT_TUPLE_SCTP;
4954 	vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4955 
4956 	vport->rss_algo = rss_algo;
4957 
4958 	rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
4959 				   sizeof(*rss_ind_tbl), GFP_KERNEL);
4960 	if (!rss_ind_tbl)
4961 		return -ENOMEM;
4962 
4963 	vport->rss_indirection_tbl = rss_ind_tbl;
4964 	memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
4965 
4966 	hclge_rss_indir_init_cfg(hdev);
4967 
4968 	return 0;
4969 }
4970 
4971 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4972 				int vector_id, bool en,
4973 				struct hnae3_ring_chain_node *ring_chain)
4974 {
4975 	struct hclge_dev *hdev = vport->back;
4976 	struct hnae3_ring_chain_node *node;
4977 	struct hclge_desc desc;
4978 	struct hclge_ctrl_vector_chain_cmd *req =
4979 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
4980 	enum hclge_cmd_status status;
4981 	enum hclge_opcode_type op;
4982 	u16 tqp_type_and_id;
4983 	int i;
4984 
4985 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4986 	hclge_cmd_setup_basic_desc(&desc, op, false);
4987 	req->int_vector_id_l = hnae3_get_field(vector_id,
4988 					       HCLGE_VECTOR_ID_L_M,
4989 					       HCLGE_VECTOR_ID_L_S);
4990 	req->int_vector_id_h = hnae3_get_field(vector_id,
4991 					       HCLGE_VECTOR_ID_H_M,
4992 					       HCLGE_VECTOR_ID_H_S);
4993 
4994 	i = 0;
4995 	for (node = ring_chain; node; node = node->next) {
4996 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4997 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4998 				HCLGE_INT_TYPE_S,
4999 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5000 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5001 				HCLGE_TQP_ID_S, node->tqp_index);
5002 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5003 				HCLGE_INT_GL_IDX_S,
5004 				hnae3_get_field(node->int_gl_idx,
5005 						HNAE3_RING_GL_IDX_M,
5006 						HNAE3_RING_GL_IDX_S));
5007 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5008 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5009 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5010 			req->vfid = vport->vport_id;
5011 
5012 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
5013 			if (status) {
5014 				dev_err(&hdev->pdev->dev,
5015 					"Map TQP fail, status is %d.\n",
5016 					status);
5017 				return -EIO;
5018 			}
5019 			i = 0;
5020 
5021 			hclge_cmd_setup_basic_desc(&desc,
5022 						   op,
5023 						   false);
5024 			req->int_vector_id_l =
5025 				hnae3_get_field(vector_id,
5026 						HCLGE_VECTOR_ID_L_M,
5027 						HCLGE_VECTOR_ID_L_S);
5028 			req->int_vector_id_h =
5029 				hnae3_get_field(vector_id,
5030 						HCLGE_VECTOR_ID_H_M,
5031 						HCLGE_VECTOR_ID_H_S);
5032 		}
5033 	}
5034 
5035 	if (i > 0) {
5036 		req->int_cause_num = i;
5037 		req->vfid = vport->vport_id;
5038 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
5039 		if (status) {
5040 			dev_err(&hdev->pdev->dev,
5041 				"Map TQP fail, status is %d.\n", status);
5042 			return -EIO;
5043 		}
5044 	}
5045 
5046 	return 0;
5047 }
5048 
5049 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5050 				    struct hnae3_ring_chain_node *ring_chain)
5051 {
5052 	struct hclge_vport *vport = hclge_get_vport(handle);
5053 	struct hclge_dev *hdev = vport->back;
5054 	int vector_id;
5055 
5056 	vector_id = hclge_get_vector_index(hdev, vector);
5057 	if (vector_id < 0) {
5058 		dev_err(&hdev->pdev->dev,
5059 			"failed to get vector index. vector=%d\n", vector);
5060 		return vector_id;
5061 	}
5062 
5063 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5064 }
5065 
5066 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5067 				       struct hnae3_ring_chain_node *ring_chain)
5068 {
5069 	struct hclge_vport *vport = hclge_get_vport(handle);
5070 	struct hclge_dev *hdev = vport->back;
5071 	int vector_id, ret;
5072 
5073 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5074 		return 0;
5075 
5076 	vector_id = hclge_get_vector_index(hdev, vector);
5077 	if (vector_id < 0) {
5078 		dev_err(&handle->pdev->dev,
5079 			"Get vector index fail. ret =%d\n", vector_id);
5080 		return vector_id;
5081 	}
5082 
5083 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5084 	if (ret)
5085 		dev_err(&handle->pdev->dev,
5086 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5087 			vector_id, ret);
5088 
5089 	return ret;
5090 }
5091 
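/* configure unicast/multicast/broadcast promiscuous mode for a function */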
5092 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5093 				      bool en_uc, bool en_mc, bool en_bc)
5094 {
5095 	struct hclge_vport *vport = &hdev->vport[vf_id];
5096 	struct hnae3_handle *handle = &vport->nic;
5097 	struct hclge_promisc_cfg_cmd *req;
5098 	struct hclge_desc desc;
5099 	bool uc_tx_en = en_uc;
5100 	u8 promisc_cfg = 0;
5101 	int ret;
5102 
5103 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5104 
5105 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
5106 	req->vf_id = vf_id;
5107 
5108 	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5109 		uc_tx_en = false;
5110 
5111 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5112 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5113 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5114 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5115 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5116 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5117 	req->extend_promisc = promisc_cfg;
5118 
5119 	/* to be compatible with DEVICE_VERSION_V1/2 */
5120 	promisc_cfg = 0;
5121 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5122 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5123 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5124 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5125 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5126 	req->promisc = promisc_cfg;
5127 
5128 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5129 	if (ret)
5130 		dev_err(&hdev->pdev->dev,
5131 			"failed to set vport %u promisc mode, ret = %d.\n",
5132 			vf_id, ret);
5133 
5134 	return ret;
5135 }
5136 
5137 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5138 				 bool en_mc_pmc, bool en_bc_pmc)
5139 {
5140 	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5141 					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
5142 }
5143 
5144 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5145 				  bool en_mc_pmc)
5146 {
5147 	struct hclge_vport *vport = hclge_get_vport(handle);
5148 	struct hclge_dev *hdev = vport->back;
5149 	bool en_bc_pmc = true;
5150 
5151 	/* For devices whose version is below V2, if broadcast promisc is
5152 	 * enabled, the vlan filter is always bypassed. So broadcast promisc
5153 	 * should be disabled until the user enables promisc mode.
5154 	 */
5155 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5156 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5157 
5158 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5159 					    en_bc_pmc);
5160 }
5161 
5162 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5163 {
5164 	struct hclge_vport *vport = hclge_get_vport(handle);
5165 	struct hclge_dev *hdev = vport->back;
5166 
5167 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
5168 }
5169 
5170 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5171 {
5172 	if (hlist_empty(&hdev->fd_rule_list))
5173 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5174 }
5175 
5176 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5177 {
5178 	if (!test_bit(location, hdev->fd_bmap)) {
5179 		set_bit(location, hdev->fd_bmap);
5180 		hdev->hclge_fd_rule_num++;
5181 	}
5182 }
5183 
5184 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5185 {
5186 	if (test_bit(location, hdev->fd_bmap)) {
5187 		clear_bit(location, hdev->fd_bmap);
5188 		hdev->hclge_fd_rule_num--;
5189 	}
5190 }
5191 
5192 static void hclge_fd_free_node(struct hclge_dev *hdev,
5193 			       struct hclge_fd_rule *rule)
5194 {
5195 	hlist_del(&rule->rule_node);
5196 	kfree(rule);
5197 	hclge_sync_fd_state(hdev);
5198 }
5199 
5200 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5201 				      struct hclge_fd_rule *old_rule,
5202 				      struct hclge_fd_rule *new_rule,
5203 				      enum HCLGE_FD_NODE_STATE state)
5204 {
5205 	switch (state) {
5206 	case HCLGE_FD_TO_ADD:
5207 	case HCLGE_FD_ACTIVE:
5208 		/* 1) if the new state is TO_ADD, just replace the old rule
5209 		 * with the same location, no matter its state, because the
5210 		 * new rule will be configured to the hardware.
5211 		 * 2) if the new state is ACTIVE, it means the new rule
5212 		 * has been configured to the hardware, so just replace
5213 		 * the old rule node with the same location.
5214 		 * 3) since it doesn't add a new node to the list, it's
5215 		 * unnecessary to update the rule number and fd_bmap.
5216 		 */
5217 		new_rule->rule_node.next = old_rule->rule_node.next;
5218 		new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5219 		memcpy(old_rule, new_rule, sizeof(*old_rule));
5220 		kfree(new_rule);
5221 		break;
5222 	case HCLGE_FD_DELETED:
5223 		hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5224 		hclge_fd_free_node(hdev, old_rule);
5225 		break;
5226 	case HCLGE_FD_TO_DEL:
5227 		/* if the new request is TO_DEL, and the old rule exists:
5228 		 * 1) if the state of the old rule is TO_DEL, we need do
5229 		 * nothing, because we delete the rule by location and the
5230 		 * other rule content is unnecessary.
5231 		 * 2) if the state of the old rule is ACTIVE, we need to change
5232 		 * its state to TO_DEL, so the rule will be deleted when the
5233 		 * periodic task is scheduled.
5234 		 * 3) if the state of the old rule is TO_ADD, the rule hasn't
5235 		 * been added to the hardware yet, so we just delete the rule
5236 		 * node from fd_rule_list directly.
5237 		 */
5238 		if (old_rule->state == HCLGE_FD_TO_ADD) {
5239 			hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5240 			hclge_fd_free_node(hdev, old_rule);
5241 			return;
5242 		}
5243 		old_rule->state = HCLGE_FD_TO_DEL;
5244 		break;
5245 	}
5246 }
5247 
5248 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5249 						u16 location,
5250 						struct hclge_fd_rule **parent)
5251 {
5252 	struct hclge_fd_rule *rule;
5253 	struct hlist_node *node;
5254 
5255 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5256 		if (rule->location == location)
5257 			return rule;
5258 		else if (rule->location > location)
5259 			return NULL;
5260 		/* record the parent node, used to keep the nodes in
5261 		 * fd_rule_list in ascending order.
5262 		 */
5263 		*parent = rule;
5264 	}
5265 
5266 	return NULL;
5267 }
5268 
5269 /* insert fd rule node in ascending order according to rule->location */
5270 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5271 				      struct hclge_fd_rule *rule,
5272 				      struct hclge_fd_rule *parent)
5273 {
5274 	INIT_HLIST_NODE(&rule->rule_node);
5275 
5276 	if (parent)
5277 		hlist_add_behind(&rule->rule_node, &parent->rule_node);
5278 	else
5279 		hlist_add_head(&rule->rule_node, hlist);
5280 }
5281 
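/* Write the flow director user-defined field configuration to hardware:
 * one 16-bit config word per user-def layer (L2/L3/L4). The enable bit
 * is set only while the layer's ref_cnt is non-zero, and the byte offset
 * is taken from the matching user_def_cfg entry.
 */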
5282 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5283 				     struct hclge_fd_user_def_cfg *cfg)
5284 {
5285 	struct hclge_fd_user_def_cfg_cmd *req;
5286 	struct hclge_desc desc;
5287 	u16 data = 0;
5288 	int ret;
5289 
5290 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5291 
5292 	req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5293 
5294 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5295 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5296 			HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5297 	req->ol2_cfg = cpu_to_le16(data);
5298 
5299 	data = 0;
5300 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5301 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5302 			HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5303 	req->ol3_cfg = cpu_to_le16(data);
5304 
5305 	data = 0;
5306 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5307 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5308 			HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5309 	req->ol4_cfg = cpu_to_le16(data);
5310 
5311 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5312 	if (ret)
5313 		dev_err(&hdev->pdev->dev,
5314 			"failed to set fd user def data, ret= %d\n", ret);
5315 	return ret;
5316 }
5317 
5318 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5319 {
5320 	int ret;
5321 
5322 	if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5323 		return;
5324 
5325 	if (!locked)
5326 		spin_lock_bh(&hdev->fd_rule_lock);
5327 
5328 	ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5329 	if (ret)
5330 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5331 
5332 	if (!locked)
5333 		spin_unlock_bh(&hdev->fd_rule_lock);
5334 }
5335 
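/* Each user-def layer supports a single byte offset, shared by all rules
 * on that layer. A rule requesting a different offset is only accepted
 * when the sole current user is the rule being replaced at the same
 * location; otherwise it is rejected with -ENOSPC.
 */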
5336 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5337 					  struct hclge_fd_rule *rule)
5338 {
5339 	struct hlist_head *hlist = &hdev->fd_rule_list;
5340 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5341 	struct hclge_fd_user_def_info *info, *old_info;
5342 	struct hclge_fd_user_def_cfg *cfg;
5343 
5344 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5345 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5346 		return 0;
5347 
5348 	/* the valid layer starts from 1, so minus 1 to get the cfg index */
5349 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5350 	info = &rule->ep.user_def;
5351 
5352 	if (!cfg->ref_cnt || cfg->offset == info->offset)
5353 		return 0;
5354 
5355 	if (cfg->ref_cnt > 1)
5356 		goto error;
5357 
5358 	fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5359 	if (fd_rule) {
5360 		old_info = &fd_rule->ep.user_def;
5361 		if (info->layer == old_info->layer)
5362 			return 0;
5363 	}
5364 
5365 error:
5366 	dev_err(&hdev->pdev->dev,
5367 		"No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5368 		info->layer + 1);
5369 	return -ENOSPC;
5370 }
5371 
5372 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5373 					 struct hclge_fd_rule *rule)
5374 {
5375 	struct hclge_fd_user_def_cfg *cfg;
5376 
5377 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5378 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5379 		return;
5380 
5381 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5382 	if (!cfg->ref_cnt) {
5383 		cfg->offset = rule->ep.user_def.offset;
5384 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5385 	}
5386 	cfg->ref_cnt++;
5387 }
5388 
5389 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5390 					 struct hclge_fd_rule *rule)
5391 {
5392 	struct hclge_fd_user_def_cfg *cfg;
5393 
5394 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5395 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5396 		return;
5397 
5398 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5399 	if (!cfg->ref_cnt)
5400 		return;
5401 
5402 	cfg->ref_cnt--;
5403 	if (!cfg->ref_cnt) {
5404 		cfg->offset = 0;
5405 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5406 	}
5407 }
5408 
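/* Look up the rule at @location in fd_rule_list. If it exists, rebalance
 * the user-def refcounts and let hclge_update_fd_rule_node() handle the
 * state transition; otherwise insert a new node (a deletion request for a
 * missing rule only triggers a warning). A TO_ADD insertion also schedules
 * the periodic task, which pushes the rule to hardware later.
 */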
5409 static void hclge_update_fd_list(struct hclge_dev *hdev,
5410 				 enum HCLGE_FD_NODE_STATE state, u16 location,
5411 				 struct hclge_fd_rule *new_rule)
5412 {
5413 	struct hlist_head *hlist = &hdev->fd_rule_list;
5414 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5415 
5416 	fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5417 	if (fd_rule) {
5418 		hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5419 		if (state == HCLGE_FD_ACTIVE)
5420 			hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5421 		hclge_sync_fd_user_def_cfg(hdev, true);
5422 
5423 		hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5424 		return;
5425 	}
5426 
5427 	/* it's unlikely to fail here, because we have checked that the rule
5428 	 * exists before.
5429 	 */
5430 	if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5431 		dev_warn(&hdev->pdev->dev,
5432 			 "failed to delete fd rule %u, it's inexistent\n",
5433 			 location);
5434 		return;
5435 	}
5436 
5437 	hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5438 	hclge_sync_fd_user_def_cfg(hdev, true);
5439 
5440 	hclge_fd_insert_rule_node(hlist, new_rule, parent);
5441 	hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5442 
5443 	if (state == HCLGE_FD_TO_ADD) {
5444 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5445 		hclge_task_schedule(hdev, 0);
5446 	}
5447 }
5448 
5449 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5450 {
5451 	struct hclge_get_fd_mode_cmd *req;
5452 	struct hclge_desc desc;
5453 	int ret;
5454 
5455 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5456 
5457 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
5458 
5459 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5460 	if (ret) {
5461 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5462 		return ret;
5463 	}
5464 
5465 	*fd_mode = req->mode;
5466 
5467 	return ret;
5468 }
5469 
5470 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5471 				   u32 *stage1_entry_num,
5472 				   u32 *stage2_entry_num,
5473 				   u16 *stage1_counter_num,
5474 				   u16 *stage2_counter_num)
5475 {
5476 	struct hclge_get_fd_allocation_cmd *req;
5477 	struct hclge_desc desc;
5478 	int ret;
5479 
5480 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5481 
5482 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5483 
5484 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5485 	if (ret) {
5486 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5487 			ret);
5488 		return ret;
5489 	}
5490 
5491 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5492 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5493 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5494 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5495 
5496 	return ret;
5497 }
5498 
5499 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5500 				   enum HCLGE_FD_STAGE stage_num)
5501 {
5502 	struct hclge_set_fd_key_config_cmd *req;
5503 	struct hclge_fd_key_cfg *stage;
5504 	struct hclge_desc desc;
5505 	int ret;
5506 
5507 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5508 
5509 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5510 	stage = &hdev->fd_cfg.key_cfg[stage_num];
5511 	req->stage = stage_num;
5512 	req->key_select = stage->key_sel;
5513 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5514 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5515 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5516 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5517 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5518 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5519 
5520 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5521 	if (ret)
5522 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5523 
5524 	return ret;
5525 }
5526 
5527 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5528 {
5529 	struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5530 
5531 	spin_lock_bh(&hdev->fd_rule_lock);
5532 	memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5533 	spin_unlock_bh(&hdev->fd_rule_lock);
5534 
5535 	hclge_fd_set_user_def_cmd(hdev, cfg);
5536 }
5537 
5538 static int hclge_init_fd_config(struct hclge_dev *hdev)
5539 {
5540 #define LOW_2_WORDS		0x03
5541 	struct hclge_fd_key_cfg *key_cfg;
5542 	int ret;
5543 
5544 	if (!hnae3_dev_fd_supported(hdev))
5545 		return 0;
5546 
5547 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5548 	if (ret)
5549 		return ret;
5550 
5551 	switch (hdev->fd_cfg.fd_mode) {
5552 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5553 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5554 		break;
5555 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5556 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5557 		break;
5558 	default:
5559 		dev_err(&hdev->pdev->dev,
5560 			"Unsupported flow director mode %u\n",
5561 			hdev->fd_cfg.fd_mode);
5562 		return -EOPNOTSUPP;
5563 	}
5564 
5565 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5566 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5567 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5568 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5569 	key_cfg->outer_sipv6_word_en = 0;
5570 	key_cfg->outer_dipv6_word_en = 0;
5571 
5572 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5573 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5574 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5575 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5576 
5577 	/* If using the max 400-bit key, we can support tuples of ether type */
5578 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5579 		key_cfg->tuple_active |=
5580 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5581 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5582 			key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5583 	}
5584 
5585 	/* roce_type is used to filter roce frames
5586 	 * dst_vport is used to specify the rule
5587 	 */
5588 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5589 
5590 	ret = hclge_get_fd_allocation(hdev,
5591 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5592 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5593 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5594 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5595 	if (ret)
5596 		return ret;
5597 
5598 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5599 }
5600 
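/* Write one half (x or y, selected by @sel_x) of a TCAM entry. The key is
 * wider than a single command descriptor, so it is split over three
 * chained descriptors; @key may be NULL when only invalidating the entry.
 * The entry valid bit is carried with the x half and cleared when
 * @is_add is false.
 */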
5601 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5602 				int loc, u8 *key, bool is_add)
5603 {
5604 	struct hclge_fd_tcam_config_1_cmd *req1;
5605 	struct hclge_fd_tcam_config_2_cmd *req2;
5606 	struct hclge_fd_tcam_config_3_cmd *req3;
5607 	struct hclge_desc desc[3];
5608 	int ret;
5609 
5610 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5611 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5612 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5613 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5614 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5615 
5616 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5617 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5618 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5619 
5620 	req1->stage = stage;
5621 	req1->xy_sel = sel_x ? 1 : 0;
5622 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5623 	req1->index = cpu_to_le32(loc);
5624 	req1->entry_vld = sel_x ? is_add : 0;
5625 
5626 	if (key) {
5627 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5628 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5629 		       sizeof(req2->tcam_data));
5630 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5631 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5632 	}
5633 
5634 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
5635 	if (ret)
5636 		dev_err(&hdev->pdev->dev,
5637 			"config tcam key fail, ret=%d\n",
5638 			ret);
5639 
5640 	return ret;
5641 }
5642 
5643 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5644 			      struct hclge_fd_ad_data *action)
5645 {
5646 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5647 	struct hclge_fd_ad_config_cmd *req;
5648 	struct hclge_desc desc;
5649 	u64 ad_data = 0;
5650 	int ret;
5651 
5652 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5653 
5654 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
5655 	req->index = cpu_to_le32(loc);
5656 	req->stage = stage;
5657 
5658 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5659 		      action->write_rule_id_to_bd);
5660 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5661 			action->rule_id);
5662 	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5663 		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5664 			      action->override_tc);
5665 		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5666 				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5667 	}
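	/* The rule-id and TC fields set above occupy the upper 32 bits of
	 * ad_data; shift them up before filling the forwarding action into
	 * the lower 32 bits.
	 */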
5668 	ad_data <<= 32;
5669 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5670 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5671 		      action->forward_to_direct_queue);
5672 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5673 			action->queue_id);
5674 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5675 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5676 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5677 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5678 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5679 			action->counter_id);
5680 
5681 	req->ad_data = cpu_to_le64(ad_data);
5682 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5683 	if (ret)
5684 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5685 
5686 	return ret;
5687 }
5688 
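/* Convert one tuple of @rule into its TCAM x/y key bytes. The tuple value
 * and mask are read from the rule at the offsets recorded in
 * tuple_key_info[] and combined with calc_x()/calc_y(). An unused tuple
 * returns true with its key bytes left zeroed, so the caller still
 * advances past it; false is returned only for an unhandled key option.
 */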
5689 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5690 				   struct hclge_fd_rule *rule)
5691 {
5692 	int offset, moffset, ip_offset;
5693 	enum HCLGE_FD_KEY_OPT key_opt;
5694 	u16 tmp_x_s, tmp_y_s;
5695 	u32 tmp_x_l, tmp_y_l;
5696 	u8 *p = (u8 *)rule;
5697 	int i;
5698 
5699 	if (rule->unused_tuple & BIT(tuple_bit))
5700 		return true;
5701 
5702 	key_opt = tuple_key_info[tuple_bit].key_opt;
5703 	offset = tuple_key_info[tuple_bit].offset;
5704 	moffset = tuple_key_info[tuple_bit].moffset;
5705 
5706 	switch (key_opt) {
5707 	case KEY_OPT_U8:
5708 		calc_x(*key_x, p[offset], p[moffset]);
5709 		calc_y(*key_y, p[offset], p[moffset]);
5710 
5711 		return true;
5712 	case KEY_OPT_LE16:
5713 		calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5714 		calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5715 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5716 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5717 
5718 		return true;
5719 	case KEY_OPT_LE32:
5720 		calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5721 		calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5722 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5723 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5724 
5725 		return true;
5726 	case KEY_OPT_MAC:
5727 		for (i = 0; i < ETH_ALEN; i++) {
5728 			calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5729 			       p[moffset + i]);
5730 			calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5731 			       p[moffset + i]);
5732 		}
5733 
5734 		return true;
5735 	case KEY_OPT_IP:
5736 		ip_offset = IPV4_INDEX * sizeof(u32);
5737 		calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5738 		       *(u32 *)(&p[moffset + ip_offset]));
5739 		calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5740 		       *(u32 *)(&p[moffset + ip_offset]));
5741 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5742 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5743 
5744 		return true;
5745 	default:
5746 		return false;
5747 	}
5748 }
5749 
5750 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5751 				 u8 vf_id, u8 network_port_id)
5752 {
5753 	u32 port_number = 0;
5754 
5755 	if (port_type == HOST_PORT) {
5756 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5757 				pf_id);
5758 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5759 				vf_id);
5760 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5761 	} else {
5762 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5763 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5764 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5765 	}
5766 
5767 	return port_number;
5768 }
5769 
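/* Build the meta data portion of the TCAM key. The active meta data
 * fields (packet type and destination vport) are packed from bit 0
 * upwards, converted to x/y form, and then left-shifted by the number of
 * unused bits so the packed fields land in the most significant bits of
 * the 32-bit meta data word.
 */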
5770 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5771 				       __le32 *key_x, __le32 *key_y,
5772 				       struct hclge_fd_rule *rule)
5773 {
5774 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5775 	u8 cur_pos = 0, tuple_size, shift_bits;
5776 	unsigned int i;
5777 
5778 	for (i = 0; i < MAX_META_DATA; i++) {
5779 		tuple_size = meta_data_key_info[i].key_length;
5780 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5781 
5782 		switch (tuple_bit) {
5783 		case BIT(ROCE_TYPE):
5784 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5785 			cur_pos += tuple_size;
5786 			break;
5787 		case BIT(DST_VPORT):
5788 			port_number = hclge_get_port_number(HOST_PORT, 0,
5789 							    rule->vf_id, 0);
5790 			hnae3_set_field(meta_data,
5791 					GENMASK(cur_pos + tuple_size, cur_pos),
5792 					cur_pos, port_number);
5793 			cur_pos += tuple_size;
5794 			break;
5795 		default:
5796 			break;
5797 		}
5798 	}
5799 
5800 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5801 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5802 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5803 
5804 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5805 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5806 }
5807 
5808 /* A complete key consists of a meta data key and a tuple key.
5809  * The meta data key is stored in the MSB region, the tuple key in the
5810  * LSB region, and unused bits are filled with 0.
5811  */
5812 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5813 			    struct hclge_fd_rule *rule)
5814 {
5815 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5816 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5817 	u8 *cur_key_x, *cur_key_y;
5818 	u8 meta_data_region;
5819 	u8 tuple_size;
5820 	int ret;
5821 	u32 i;
5822 
5823 	memset(key_x, 0, sizeof(key_x));
5824 	memset(key_y, 0, sizeof(key_y));
5825 	cur_key_x = key_x;
5826 	cur_key_y = key_y;
5827 
5828 	for (i = 0; i < MAX_TUPLE; i++) {
5829 		bool tuple_valid;
5830 
5831 		tuple_size = tuple_key_info[i].key_length / 8;
5832 		if (!(key_cfg->tuple_active & BIT(i)))
5833 			continue;
5834 
5835 		tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5836 						     cur_key_y, rule);
5837 		if (tuple_valid) {
5838 			cur_key_x += tuple_size;
5839 			cur_key_y += tuple_size;
5840 		}
5841 	}
5842 
5843 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5844 			MAX_META_DATA_LENGTH / 8;
5845 
5846 	hclge_fd_convert_meta_data(key_cfg,
5847 				   (__le32 *)(key_x + meta_data_region),
5848 				   (__le32 *)(key_y + meta_data_region),
5849 				   rule);
5850 
5851 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5852 				   true);
5853 	if (ret) {
5854 		dev_err(&hdev->pdev->dev,
5855 			"fd key_y config fail, loc=%u, ret=%d\n",
5856 			rule->location, ret);
5857 		return ret;
5858 	}
5859 
5860 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5861 				   true);
5862 	if (ret)
5863 		dev_err(&hdev->pdev->dev,
5864 			"fd key_x config fail, loc=%u, ret=%d\n",
5865 			rule->location, ret);
5866 	return ret;
5867 }
5868 
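/* Translate the rule's action into the action data written to hardware:
 * drop the packet, override the TC (queue offset and size taken from the
 * TC's tqp info), or forward to the requested queue directly. The rule
 * location doubles as the ad index and the rule id.
 */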
5869 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5870 			       struct hclge_fd_rule *rule)
5871 {
5872 	struct hclge_vport *vport = hdev->vport;
5873 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5874 	struct hclge_fd_ad_data ad_data;
5875 
5876 	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5877 	ad_data.ad_id = rule->location;
5878 
5879 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5880 		ad_data.drop_packet = true;
5881 	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5882 		ad_data.override_tc = true;
5883 		ad_data.queue_id =
5884 			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5885 		ad_data.tc_size =
5886 			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5887 	} else {
5888 		ad_data.forward_to_direct_queue = true;
5889 		ad_data.queue_id = rule->queue_id;
5890 	}
5891 
5892 	ad_data.use_counter = false;
5893 	ad_data.counter_id = 0;
5894 
5895 	ad_data.use_next_stage = false;
5896 	ad_data.next_input_key = 0;
5897 
5898 	ad_data.write_rule_id_to_bd = true;
5899 	ad_data.rule_id = rule->location;
5900 
5901 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5902 }
5903 
5904 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5905 				       u32 *unused_tuple)
5906 {
5907 	if (!spec || !unused_tuple)
5908 		return -EINVAL;
5909 
5910 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5911 
5912 	if (!spec->ip4src)
5913 		*unused_tuple |= BIT(INNER_SRC_IP);
5914 
5915 	if (!spec->ip4dst)
5916 		*unused_tuple |= BIT(INNER_DST_IP);
5917 
5918 	if (!spec->psrc)
5919 		*unused_tuple |= BIT(INNER_SRC_PORT);
5920 
5921 	if (!spec->pdst)
5922 		*unused_tuple |= BIT(INNER_DST_PORT);
5923 
5924 	if (!spec->tos)
5925 		*unused_tuple |= BIT(INNER_IP_TOS);
5926 
5927 	return 0;
5928 }
5929 
5930 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5931 				    u32 *unused_tuple)
5932 {
5933 	if (!spec || !unused_tuple)
5934 		return -EINVAL;
5935 
5936 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5937 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5938 
5939 	if (!spec->ip4src)
5940 		*unused_tuple |= BIT(INNER_SRC_IP);
5941 
5942 	if (!spec->ip4dst)
5943 		*unused_tuple |= BIT(INNER_DST_IP);
5944 
5945 	if (!spec->tos)
5946 		*unused_tuple |= BIT(INNER_IP_TOS);
5947 
5948 	if (!spec->proto)
5949 		*unused_tuple |= BIT(INNER_IP_PROTO);
5950 
5951 	if (spec->l4_4_bytes)
5952 		return -EOPNOTSUPP;
5953 
5954 	if (spec->ip_ver != ETH_RX_NFC_IP4)
5955 		return -EOPNOTSUPP;
5956 
5957 	return 0;
5958 }
5959 
5960 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5961 				       u32 *unused_tuple)
5962 {
5963 	if (!spec || !unused_tuple)
5964 		return -EINVAL;
5965 
5966 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5967 
5968 	/* check whether the src/dst ip addresses are used */
5969 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5970 		*unused_tuple |= BIT(INNER_SRC_IP);
5971 
5972 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5973 		*unused_tuple |= BIT(INNER_DST_IP);
5974 
5975 	if (!spec->psrc)
5976 		*unused_tuple |= BIT(INNER_SRC_PORT);
5977 
5978 	if (!spec->pdst)
5979 		*unused_tuple |= BIT(INNER_DST_PORT);
5980 
5981 	if (!spec->tclass)
5982 		*unused_tuple |= BIT(INNER_IP_TOS);
5983 
5984 	return 0;
5985 }
5986 
5987 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5988 				    u32 *unused_tuple)
5989 {
5990 	if (!spec || !unused_tuple)
5991 		return -EINVAL;
5992 
5993 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5994 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5995 
5996 	/* check whether the src/dst ip addresses are used */
5997 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5998 		*unused_tuple |= BIT(INNER_SRC_IP);
5999 
6000 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6001 		*unused_tuple |= BIT(INNER_DST_IP);
6002 
6003 	if (!spec->l4_proto)
6004 		*unused_tuple |= BIT(INNER_IP_PROTO);
6005 
6006 	if (!spec->tclass)
6007 		*unused_tuple |= BIT(INNER_IP_TOS);
6008 
6009 	if (spec->l4_4_bytes)
6010 		return -EOPNOTSUPP;
6011 
6012 	return 0;
6013 }
6014 
6015 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6016 {
6017 	if (!spec || !unused_tuple)
6018 		return -EINVAL;
6019 
6020 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6021 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6022 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6023 
6024 	if (is_zero_ether_addr(spec->h_source))
6025 		*unused_tuple |= BIT(INNER_SRC_MAC);
6026 
6027 	if (is_zero_ether_addr(spec->h_dest))
6028 		*unused_tuple |= BIT(INNER_DST_MAC);
6029 
6030 	if (!spec->h_proto)
6031 		*unused_tuple |= BIT(INNER_ETH_TYPE);
6032 
6033 	return 0;
6034 }
6035 
6036 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6037 				    struct ethtool_rx_flow_spec *fs,
6038 				    u32 *unused_tuple)
6039 {
6040 	if (fs->flow_type & FLOW_EXT) {
6041 		if (fs->h_ext.vlan_etype) {
6042 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6043 			return -EOPNOTSUPP;
6044 		}
6045 
6046 		if (!fs->h_ext.vlan_tci)
6047 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6048 
6049 		if (fs->m_ext.vlan_tci &&
6050 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6051 			dev_err(&hdev->pdev->dev,
6052 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6053 				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6054 			return -EINVAL;
6055 		}
6056 	} else {
6057 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6058 	}
6059 
6060 	if (fs->flow_type & FLOW_MAC_EXT) {
6061 		if (hdev->fd_cfg.fd_mode !=
6062 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6063 			dev_err(&hdev->pdev->dev,
6064 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
6065 			return -EOPNOTSUPP;
6066 		}
6067 
6068 		if (is_zero_ether_addr(fs->h_ext.h_dest))
6069 			*unused_tuple |= BIT(INNER_DST_MAC);
6070 		else
6071 			*unused_tuple &= ~BIT(INNER_DST_MAC);
6072 	}
6073 
6074 	return 0;
6075 }
6076 
6077 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6078 				       struct hclge_fd_user_def_info *info)
6079 {
6080 	switch (flow_type) {
6081 	case ETHER_FLOW:
6082 		info->layer = HCLGE_FD_USER_DEF_L2;
6083 		*unused_tuple &= ~BIT(INNER_L2_RSV);
6084 		break;
6085 	case IP_USER_FLOW:
6086 	case IPV6_USER_FLOW:
6087 		info->layer = HCLGE_FD_USER_DEF_L3;
6088 		*unused_tuple &= ~BIT(INNER_L3_RSV);
6089 		break;
6090 	case TCP_V4_FLOW:
6091 	case UDP_V4_FLOW:
6092 	case TCP_V6_FLOW:
6093 	case UDP_V6_FLOW:
6094 		info->layer = HCLGE_FD_USER_DEF_L4;
6095 		*unused_tuple &= ~BIT(INNER_L4_RSV);
6096 		break;
6097 	default:
6098 		return -EOPNOTSUPP;
6099 	}
6100 
6101 	return 0;
6102 }
6103 
6104 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6105 {
6106 	return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6107 }
6108 
6109 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6110 					 struct ethtool_rx_flow_spec *fs,
6111 					 u32 *unused_tuple,
6112 					 struct hclge_fd_user_def_info *info)
6113 {
6114 	u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6115 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6116 	u16 data, offset, data_mask, offset_mask;
6117 	int ret;
6118 
6119 	info->layer = HCLGE_FD_USER_DEF_NONE;
6120 	*unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6121 
6122 	if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6123 		return 0;
6124 
6125 	/* user-def data from ethtool is a 64-bit value; bits 0~15 are used
6126 	 * for data, and bits 32~47 are used for offset.
6127 	 */
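	/* For example, a user-def value of 0x0000000400000002 encodes
	 * offset 4 (bits 32~47) and match data 0x0002 (bits 0~15).
	 */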
6128 	data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6129 	data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6130 	offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6131 	offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6132 
6133 	if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6134 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6135 		return -EOPNOTSUPP;
6136 	}
6137 
6138 	if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6139 		dev_err(&hdev->pdev->dev,
6140 			"user-def offset[%u] should be no more than %u\n",
6141 			offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6142 		return -EINVAL;
6143 	}
6144 
6145 	if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6146 		dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6147 		return -EINVAL;
6148 	}
6149 
6150 	ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6151 	if (ret) {
6152 		dev_err(&hdev->pdev->dev,
6153 			"unsupported flow type for user-def bytes, ret = %d\n",
6154 			ret);
6155 		return ret;
6156 	}
6157 
6158 	info->data = data;
6159 	info->data_mask = data_mask;
6160 	info->offset = offset;
6161 
6162 	return 0;
6163 }
6164 
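/* Validate an ethtool flow spec before turning it into an fd rule: the
 * location must be inside stage 1, the user-def field (if any) must be
 * parseable, the flow-type specific tuple check must pass, and finally
 * the FLOW_EXT/FLOW_MAC_EXT extensions are checked. @unused_tuple
 * collects the tuples that do not need to be matched.
 */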
6165 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6166 			       struct ethtool_rx_flow_spec *fs,
6167 			       u32 *unused_tuple,
6168 			       struct hclge_fd_user_def_info *info)
6169 {
6170 	u32 flow_type;
6171 	int ret;
6172 
6173 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6174 		dev_err(&hdev->pdev->dev,
6175 			"failed to config fd rules, invalid rule location: %u, max is %u\n.",
6176 			fs->location,
6177 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6178 		return -EINVAL;
6179 	}
6180 
6181 	ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6182 	if (ret)
6183 		return ret;
6184 
6185 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6186 	switch (flow_type) {
6187 	case SCTP_V4_FLOW:
6188 	case TCP_V4_FLOW:
6189 	case UDP_V4_FLOW:
6190 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6191 						  unused_tuple);
6192 		break;
6193 	case IP_USER_FLOW:
6194 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6195 					       unused_tuple);
6196 		break;
6197 	case SCTP_V6_FLOW:
6198 	case TCP_V6_FLOW:
6199 	case UDP_V6_FLOW:
6200 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6201 						  unused_tuple);
6202 		break;
6203 	case IPV6_USER_FLOW:
6204 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6205 					       unused_tuple);
6206 		break;
6207 	case ETHER_FLOW:
6208 		if (hdev->fd_cfg.fd_mode !=
6209 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6210 			dev_err(&hdev->pdev->dev,
6211 				"ETHER_FLOW is not supported in current fd mode!\n");
6212 			return -EOPNOTSUPP;
6213 		}
6214 
6215 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6216 						 unused_tuple);
6217 		break;
6218 	default:
6219 		dev_err(&hdev->pdev->dev,
6220 			"unsupported protocol type, protocol type = %#x\n",
6221 			flow_type);
6222 		return -EOPNOTSUPP;
6223 	}
6224 
6225 	if (ret) {
6226 		dev_err(&hdev->pdev->dev,
6227 			"failed to check flow union tuple, ret = %d\n",
6228 			ret);
6229 		return ret;
6230 	}
6231 
6232 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6233 }
6234 
6235 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6236 				      struct ethtool_rx_flow_spec *fs,
6237 				      struct hclge_fd_rule *rule, u8 ip_proto)
6238 {
6239 	rule->tuples.src_ip[IPV4_INDEX] =
6240 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6241 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6242 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6243 
6244 	rule->tuples.dst_ip[IPV4_INDEX] =
6245 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6246 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6247 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6248 
6249 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6250 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6251 
6252 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6253 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6254 
6255 	rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6256 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6257 
6258 	rule->tuples.ether_proto = ETH_P_IP;
6259 	rule->tuples_mask.ether_proto = 0xFFFF;
6260 
6261 	rule->tuples.ip_proto = ip_proto;
6262 	rule->tuples_mask.ip_proto = 0xFF;
6263 }
6264 
6265 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6266 				   struct ethtool_rx_flow_spec *fs,
6267 				   struct hclge_fd_rule *rule)
6268 {
6269 	rule->tuples.src_ip[IPV4_INDEX] =
6270 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6271 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6272 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6273 
6274 	rule->tuples.dst_ip[IPV4_INDEX] =
6275 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6276 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6277 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6278 
6279 	rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6280 	rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6281 
6282 	rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6283 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6284 
6285 	rule->tuples.ether_proto = ETH_P_IP;
6286 	rule->tuples_mask.ether_proto = 0xFFFF;
6287 }
6288 
6289 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6290 				      struct ethtool_rx_flow_spec *fs,
6291 				      struct hclge_fd_rule *rule, u8 ip_proto)
6292 {
6293 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6294 			  IPV6_SIZE);
6295 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6296 			  IPV6_SIZE);
6297 
6298 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6299 			  IPV6_SIZE);
6300 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6301 			  IPV6_SIZE);
6302 
6303 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6304 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6305 
6306 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6307 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6308 
6309 	rule->tuples.ether_proto = ETH_P_IPV6;
6310 	rule->tuples_mask.ether_proto = 0xFFFF;
6311 
6312 	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6313 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6314 
6315 	rule->tuples.ip_proto = ip_proto;
6316 	rule->tuples_mask.ip_proto = 0xFF;
6317 }
6318 
6319 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6320 				   struct ethtool_rx_flow_spec *fs,
6321 				   struct hclge_fd_rule *rule)
6322 {
6323 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6324 			  IPV6_SIZE);
6325 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6326 			  IPV6_SIZE);
6327 
6328 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6329 			  IPV6_SIZE);
6330 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6331 			  IPV6_SIZE);
6332 
6333 	rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6334 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6335 
6336 	rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
6337 	rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6338 
6339 	rule->tuples.ether_proto = ETH_P_IPV6;
6340 	rule->tuples_mask.ether_proto = 0xFFFF;
6341 }
6342 
6343 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6344 				     struct ethtool_rx_flow_spec *fs,
6345 				     struct hclge_fd_rule *rule)
6346 {
6347 	ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6348 	ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6349 
6350 	ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6351 	ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6352 
6353 	rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6354 	rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6355 }
6356 
6357 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6358 					struct hclge_fd_rule *rule)
6359 {
6360 	switch (info->layer) {
6361 	case HCLGE_FD_USER_DEF_L2:
6362 		rule->tuples.l2_user_def = info->data;
6363 		rule->tuples_mask.l2_user_def = info->data_mask;
6364 		break;
6365 	case HCLGE_FD_USER_DEF_L3:
6366 		rule->tuples.l3_user_def = info->data;
6367 		rule->tuples_mask.l3_user_def = info->data_mask;
6368 		break;
6369 	case HCLGE_FD_USER_DEF_L4:
6370 		rule->tuples.l4_user_def = (u32)info->data << 16;
6371 		rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6372 		break;
6373 	default:
6374 		break;
6375 	}
6376 
6377 	rule->ep.user_def = *info;
6378 }
6379 
6380 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6381 			      struct ethtool_rx_flow_spec *fs,
6382 			      struct hclge_fd_rule *rule,
6383 			      struct hclge_fd_user_def_info *info)
6384 {
6385 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6386 
6387 	switch (flow_type) {
6388 	case SCTP_V4_FLOW:
6389 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6390 		break;
6391 	case TCP_V4_FLOW:
6392 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6393 		break;
6394 	case UDP_V4_FLOW:
6395 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6396 		break;
6397 	case IP_USER_FLOW:
6398 		hclge_fd_get_ip4_tuple(hdev, fs, rule);
6399 		break;
6400 	case SCTP_V6_FLOW:
6401 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6402 		break;
6403 	case TCP_V6_FLOW:
6404 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6405 		break;
6406 	case UDP_V6_FLOW:
6407 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6408 		break;
6409 	case IPV6_USER_FLOW:
6410 		hclge_fd_get_ip6_tuple(hdev, fs, rule);
6411 		break;
6412 	case ETHER_FLOW:
6413 		hclge_fd_get_ether_tuple(hdev, fs, rule);
6414 		break;
6415 	default:
6416 		return -EOPNOTSUPP;
6417 	}
6418 
6419 	if (fs->flow_type & FLOW_EXT) {
6420 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6421 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6422 		hclge_fd_get_user_def_tuple(info, rule);
6423 	}
6424 
6425 	if (fs->flow_type & FLOW_MAC_EXT) {
6426 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6427 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6428 	}
6429 
6430 	return 0;
6431 }
6432 
6433 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6434 				struct hclge_fd_rule *rule)
6435 {
6436 	int ret;
6437 
6438 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6439 	if (ret)
6440 		return ret;
6441 
6442 	return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6443 }
6444 
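/* Common path for adding an ethtool or tc-flower rule. Under fd_rule_lock:
 * reject the rule if a different rule type is already active, check the
 * user-def offset refcount, clear any aRFS rules, program the action and
 * key to hardware, and only then mark the rule ACTIVE and insert it into
 * fd_rule_list.
 */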
6445 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6446 				     struct hclge_fd_rule *rule)
6447 {
6448 	int ret;
6449 
6450 	spin_lock_bh(&hdev->fd_rule_lock);
6451 
6452 	if (hdev->fd_active_type != rule->rule_type &&
6453 	    (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6454 	     hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6455 		dev_err(&hdev->pdev->dev,
6456 			"mode conflict(new type %d, active type %d), please delete existent rules first\n",
6457 			rule->rule_type, hdev->fd_active_type);
6458 		spin_unlock_bh(&hdev->fd_rule_lock);
6459 		return -EINVAL;
6460 	}
6461 
6462 	ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6463 	if (ret)
6464 		goto out;
6465 
6466 	ret = hclge_clear_arfs_rules(hdev);
6467 	if (ret)
6468 		goto out;
6469 
6470 	ret = hclge_fd_config_rule(hdev, rule);
6471 	if (ret)
6472 		goto out;
6473 
6474 	rule->state = HCLGE_FD_ACTIVE;
6475 	hdev->fd_active_type = rule->rule_type;
6476 	hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6477 
6478 out:
6479 	spin_unlock_bh(&hdev->fd_rule_lock);
6480 	return ret;
6481 }
6482 
6483 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6484 {
6485 	struct hclge_vport *vport = hclge_get_vport(handle);
6486 	struct hclge_dev *hdev = vport->back;
6487 
6488 	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6489 }
6490 
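/* Decode an ethtool ring_cookie: RX_CLS_FLOW_DISC means drop the packet;
 * otherwise the cookie carries a VF index (0 for the PF) and a ring
 * index, which are validated against the VF count and the target vport's
 * queue count before being returned as the destination vport and queue.
 */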
6491 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6492 				      u16 *vport_id, u8 *action, u16 *queue_id)
6493 {
6494 	struct hclge_vport *vport = hdev->vport;
6495 
6496 	if (ring_cookie == RX_CLS_FLOW_DISC) {
6497 		*action = HCLGE_FD_ACTION_DROP_PACKET;
6498 	} else {
6499 		u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6500 		u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6501 		u16 tqps;
6502 
6503 		if (vf > hdev->num_req_vfs) {
6504 			dev_err(&hdev->pdev->dev,
6505 				"Error: vf id (%u) > max vf num (%u)\n",
6506 				vf, hdev->num_req_vfs);
6507 			return -EINVAL;
6508 		}
6509 
6510 		*vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6511 		tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6512 
6513 		if (ring >= tqps) {
6514 			dev_err(&hdev->pdev->dev,
6515 				"Error: queue id (%u) > max tqp num (%u)\n",
6516 				ring, tqps - 1);
6517 			return -EINVAL;
6518 		}
6519 
6520 		*action = HCLGE_FD_ACTION_SELECT_QUEUE;
6521 		*queue_id = ring;
6522 	}
6523 
6524 	return 0;
6525 }
6526 
6527 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6528 			      struct ethtool_rxnfc *cmd)
6529 {
6530 	struct hclge_vport *vport = hclge_get_vport(handle);
6531 	struct hclge_dev *hdev = vport->back;
6532 	struct hclge_fd_user_def_info info;
6533 	u16 dst_vport_id = 0, q_index = 0;
6534 	struct ethtool_rx_flow_spec *fs;
6535 	struct hclge_fd_rule *rule;
6536 	u32 unused = 0;
6537 	u8 action;
6538 	int ret;
6539 
6540 	if (!hnae3_dev_fd_supported(hdev)) {
6541 		dev_err(&hdev->pdev->dev,
6542 			"flow table director is not supported\n");
6543 		return -EOPNOTSUPP;
6544 	}
6545 
6546 	if (!hdev->fd_en) {
6547 		dev_err(&hdev->pdev->dev,
6548 			"please enable flow director first\n");
6549 		return -EOPNOTSUPP;
6550 	}
6551 
6552 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6553 
6554 	ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6555 	if (ret)
6556 		return ret;
6557 
6558 	ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6559 					 &action, &q_index);
6560 	if (ret)
6561 		return ret;
6562 
6563 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6564 	if (!rule)
6565 		return -ENOMEM;
6566 
6567 	ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6568 	if (ret) {
6569 		kfree(rule);
6570 		return ret;
6571 	}
6572 
6573 	rule->flow_type = fs->flow_type;
6574 	rule->location = fs->location;
6575 	rule->unused_tuple = unused;
6576 	rule->vf_id = dst_vport_id;
6577 	rule->queue_id = q_index;
6578 	rule->action = action;
6579 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
6580 
6581 	ret = hclge_add_fd_entry_common(hdev, rule);
6582 	if (ret)
6583 		kfree(rule);
6584 
6585 	return ret;
6586 }
6587 
6588 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6589 			      struct ethtool_rxnfc *cmd)
6590 {
6591 	struct hclge_vport *vport = hclge_get_vport(handle);
6592 	struct hclge_dev *hdev = vport->back;
6593 	struct ethtool_rx_flow_spec *fs;
6594 	int ret;
6595 
6596 	if (!hnae3_dev_fd_supported(hdev))
6597 		return -EOPNOTSUPP;
6598 
6599 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6600 
6601 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6602 		return -EINVAL;
6603 
6604 	spin_lock_bh(&hdev->fd_rule_lock);
6605 	if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6606 	    !test_bit(fs->location, hdev->fd_bmap)) {
6607 		dev_err(&hdev->pdev->dev,
6608 			"Delete fail, rule %u is inexistent\n", fs->location);
6609 		spin_unlock_bh(&hdev->fd_rule_lock);
6610 		return -ENOENT;
6611 	}
6612 
6613 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6614 				   NULL, false);
6615 	if (ret)
6616 		goto out;
6617 
6618 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6619 
6620 out:
6621 	spin_unlock_bh(&hdev->fd_rule_lock);
6622 	return ret;
6623 }
6624 
6625 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6626 					 bool clear_list)
6627 {
6628 	struct hclge_fd_rule *rule;
6629 	struct hlist_node *node;
6630 	u16 location;
6631 
6632 	if (!hnae3_dev_fd_supported(hdev))
6633 		return;
6634 
6635 	spin_lock_bh(&hdev->fd_rule_lock);
6636 
6637 	for_each_set_bit(location, hdev->fd_bmap,
6638 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6639 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6640 				     NULL, false);
6641 
6642 	if (clear_list) {
6643 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6644 					  rule_node) {
6645 			hlist_del(&rule->rule_node);
6646 			kfree(rule);
6647 		}
6648 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6649 		hdev->hclge_fd_rule_num = 0;
6650 		bitmap_zero(hdev->fd_bmap,
6651 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6652 	}
6653 
6654 	spin_unlock_bh(&hdev->fd_rule_lock);
6655 }
6656 
6657 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6658 {
6659 	hclge_clear_fd_rules_in_list(hdev, true);
6660 	hclge_fd_disable_user_def(hdev);
6661 }
6662 
6663 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6664 {
6665 	struct hclge_vport *vport = hclge_get_vport(handle);
6666 	struct hclge_dev *hdev = vport->back;
6667 	struct hclge_fd_rule *rule;
6668 	struct hlist_node *node;
6669 
6670 	/* Return ok here, because the reset error handling will check this
6671 	 * return value. If an error is returned here, the reset process will
6672 	 * fail.
6673 	 */
6674 	if (!hnae3_dev_fd_supported(hdev))
6675 		return 0;
6676 
6677 	/* if fd is disabled, it should not be restored during reset */
6678 	if (!hdev->fd_en)
6679 		return 0;
6680 
6681 	spin_lock_bh(&hdev->fd_rule_lock);
6682 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6683 		if (rule->state == HCLGE_FD_ACTIVE)
6684 			rule->state = HCLGE_FD_TO_ADD;
6685 	}
6686 	spin_unlock_bh(&hdev->fd_rule_lock);
6687 	set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6688 
6689 	return 0;
6690 }
6691 
6692 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6693 				 struct ethtool_rxnfc *cmd)
6694 {
6695 	struct hclge_vport *vport = hclge_get_vport(handle);
6696 	struct hclge_dev *hdev = vport->back;
6697 
6698 	if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6699 		return -EOPNOTSUPP;
6700 
6701 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
6702 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6703 
6704 	return 0;
6705 }
6706 
6707 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6708 				     struct ethtool_tcpip4_spec *spec,
6709 				     struct ethtool_tcpip4_spec *spec_mask)
6710 {
6711 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6712 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6713 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6714 
6715 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6716 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6717 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6718 
6719 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6720 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6721 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6722 
6723 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6724 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6725 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6726 
6727 	spec->tos = rule->tuples.ip_tos;
6728 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6729 			0 : rule->tuples_mask.ip_tos;
6730 }
6731 
6732 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6733 				  struct ethtool_usrip4_spec *spec,
6734 				  struct ethtool_usrip4_spec *spec_mask)
6735 {
6736 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6737 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6738 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6739 
6740 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6741 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6742 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6743 
6744 	spec->tos = rule->tuples.ip_tos;
6745 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6746 			0 : rule->tuples_mask.ip_tos;
6747 
6748 	spec->proto = rule->tuples.ip_proto;
6749 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6750 			0 : rule->tuples_mask.ip_proto;
6751 
6752 	spec->ip_ver = ETH_RX_NFC_IP4;
6753 }
6754 
6755 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6756 				     struct ethtool_tcpip6_spec *spec,
6757 				     struct ethtool_tcpip6_spec *spec_mask)
6758 {
6759 	cpu_to_be32_array(spec->ip6src,
6760 			  rule->tuples.src_ip, IPV6_SIZE);
6761 	cpu_to_be32_array(spec->ip6dst,
6762 			  rule->tuples.dst_ip, IPV6_SIZE);
6763 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6764 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6765 	else
6766 		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6767 				  IPV6_SIZE);
6768 
6769 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6770 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6771 	else
6772 		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6773 				  IPV6_SIZE);
6774 
6775 	spec->tclass = rule->tuples.ip_tos;
6776 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6777 			0 : rule->tuples_mask.ip_tos;
6778 
6779 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6780 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6781 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6782 
6783 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6784 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6785 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6786 }
6787 
6788 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6789 				  struct ethtool_usrip6_spec *spec,
6790 				  struct ethtool_usrip6_spec *spec_mask)
6791 {
6792 	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6793 	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6794 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6795 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6796 	else
6797 		cpu_to_be32_array(spec_mask->ip6src,
6798 				  rule->tuples_mask.src_ip, IPV6_SIZE);
6799 
6800 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6801 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6802 	else
6803 		cpu_to_be32_array(spec_mask->ip6dst,
6804 				  rule->tuples_mask.dst_ip, IPV6_SIZE);
6805 
6806 	spec->tclass = rule->tuples.ip_tos;
6807 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6808 			0 : rule->tuples_mask.ip_tos;
6809 
6810 	spec->l4_proto = rule->tuples.ip_proto;
6811 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6812 			0 : rule->tuples_mask.ip_proto;
6813 }
6814 
6815 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6816 				    struct ethhdr *spec,
6817 				    struct ethhdr *spec_mask)
6818 {
6819 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6820 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6821 
6822 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6823 		eth_zero_addr(spec_mask->h_source);
6824 	else
6825 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6826 
6827 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
6828 		eth_zero_addr(spec_mask->h_dest);
6829 	else
6830 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6831 
6832 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6833 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6834 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6835 }
6836 
6837 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6838 				       struct hclge_fd_rule *rule)
6839 {
6840 	if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6841 	    HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6842 		fs->h_ext.data[0] = 0;
6843 		fs->h_ext.data[1] = 0;
6844 		fs->m_ext.data[0] = 0;
6845 		fs->m_ext.data[1] = 0;
6846 	} else {
6847 		fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6848 		fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6849 		fs->m_ext.data[0] =
6850 				cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6851 		fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6852 	}
6853 }
6854 
6855 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6856 				  struct hclge_fd_rule *rule)
6857 {
6858 	if (fs->flow_type & FLOW_EXT) {
6859 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6860 		fs->m_ext.vlan_tci =
6861 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6862 				0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6863 
6864 		hclge_fd_get_user_def_info(fs, rule);
6865 	}
6866 
6867 	if (fs->flow_type & FLOW_MAC_EXT) {
6868 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6869 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6870 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6871 		else
6872 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
6873 					rule->tuples_mask.dst_mac);
6874 	}
6875 }
6876 
6877 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6878 				  struct ethtool_rxnfc *cmd)
6879 {
6880 	struct hclge_vport *vport = hclge_get_vport(handle);
6881 	struct hclge_fd_rule *rule = NULL;
6882 	struct hclge_dev *hdev = vport->back;
6883 	struct ethtool_rx_flow_spec *fs;
6884 	struct hlist_node *node2;
6885 
6886 	if (!hnae3_dev_fd_supported(hdev))
6887 		return -EOPNOTSUPP;
6888 
6889 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6890 
6891 	spin_lock_bh(&hdev->fd_rule_lock);
6892 
6893 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6894 		if (rule->location >= fs->location)
6895 			break;
6896 	}
6897 
6898 	if (!rule || fs->location != rule->location) {
6899 		spin_unlock_bh(&hdev->fd_rule_lock);
6900 
6901 		return -ENOENT;
6902 	}
6903 
6904 	fs->flow_type = rule->flow_type;
6905 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6906 	case SCTP_V4_FLOW:
6907 	case TCP_V4_FLOW:
6908 	case UDP_V4_FLOW:
6909 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6910 					 &fs->m_u.tcp_ip4_spec);
6911 		break;
6912 	case IP_USER_FLOW:
6913 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6914 				      &fs->m_u.usr_ip4_spec);
6915 		break;
6916 	case SCTP_V6_FLOW:
6917 	case TCP_V6_FLOW:
6918 	case UDP_V6_FLOW:
6919 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6920 					 &fs->m_u.tcp_ip6_spec);
6921 		break;
6922 	case IPV6_USER_FLOW:
6923 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6924 				      &fs->m_u.usr_ip6_spec);
6925 		break;
6926 	/* The flow type of the fd rule has been checked before it is added to
6927 	 * the rule list. As the other flow types have been handled above, the
6928 	 * default case must be ETHER_FLOW.
6929 	 */
6930 	default:
6931 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6932 					&fs->m_u.ether_spec);
6933 		break;
6934 	}
6935 
6936 	hclge_fd_get_ext_info(fs, rule);
6937 
6938 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6939 		fs->ring_cookie = RX_CLS_FLOW_DISC;
6940 	} else {
6941 		u64 vf_id;
6942 
6943 		fs->ring_cookie = rule->queue_id;
6944 		vf_id = rule->vf_id;
6945 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6946 		fs->ring_cookie |= vf_id;
6947 	}
6948 
6949 	spin_unlock_bh(&hdev->fd_rule_lock);
6950 
6951 	return 0;
6952 }
6953 
6954 static int hclge_get_all_rules(struct hnae3_handle *handle,
6955 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
6956 {
6957 	struct hclge_vport *vport = hclge_get_vport(handle);
6958 	struct hclge_dev *hdev = vport->back;
6959 	struct hclge_fd_rule *rule;
6960 	struct hlist_node *node2;
6961 	int cnt = 0;
6962 
6963 	if (!hnae3_dev_fd_supported(hdev))
6964 		return -EOPNOTSUPP;
6965 
6966 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6967 
6968 	spin_lock_bh(&hdev->fd_rule_lock);
6969 	hlist_for_each_entry_safe(rule, node2,
6970 				  &hdev->fd_rule_list, rule_node) {
6971 		if (cnt == cmd->rule_cnt) {
6972 			spin_unlock_bh(&hdev->fd_rule_lock);
6973 			return -EMSGSIZE;
6974 		}
6975 
6976 		if (rule->state == HCLGE_FD_TO_DEL)
6977 			continue;
6978 
6979 		rule_locs[cnt] = rule->location;
6980 		cnt++;
6981 	}
6982 
6983 	spin_unlock_bh(&hdev->fd_rule_lock);
6984 
6985 	cmd->rule_cnt = cnt;
6986 
6987 	return 0;
6988 }
6989 
6990 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6991 				     struct hclge_fd_rule_tuples *tuples)
6992 {
6993 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6994 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6995 
6996 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6997 	tuples->ip_proto = fkeys->basic.ip_proto;
6998 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6999 
7000 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7001 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7002 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7003 	} else {
7004 		int i;
7005 
7006 		for (i = 0; i < IPV6_SIZE; i++) {
7007 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7008 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7009 		}
7010 	}
7011 }
7012 
7013 /* traverse all rules to check whether an existing rule has the same tuples */
7014 static struct hclge_fd_rule *
7015 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7016 			  const struct hclge_fd_rule_tuples *tuples)
7017 {
7018 	struct hclge_fd_rule *rule = NULL;
7019 	struct hlist_node *node;
7020 
7021 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7022 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7023 			return rule;
7024 	}
7025 
7026 	return NULL;
7027 }
7028 
7029 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7030 				     struct hclge_fd_rule *rule)
7031 {
7032 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7033 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7034 			     BIT(INNER_SRC_PORT);
7035 	rule->action = 0;
7036 	rule->vf_id = 0;
7037 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7038 	rule->state = HCLGE_FD_TO_ADD;
7039 	if (tuples->ether_proto == ETH_P_IP) {
7040 		if (tuples->ip_proto == IPPROTO_TCP)
7041 			rule->flow_type = TCP_V4_FLOW;
7042 		else
7043 			rule->flow_type = UDP_V4_FLOW;
7044 	} else {
7045 		if (tuples->ip_proto == IPPROTO_TCP)
7046 			rule->flow_type = TCP_V6_FLOW;
7047 		else
7048 			rule->flow_type = UDP_V6_FLOW;
7049 	}
7050 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7051 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7052 }
7053 
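/* aRFS entry point: it refuses to run while ethtool/tc configured rules are
 * active, allocates the first free location from fd_bmap for a new flow, or
 * retargets an existing rule whose queue has changed. The returned location
 * doubles as the filter id that is later passed to rps_may_expire_flow() in
 * hclge_rfs_filter_expire().
 */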
7054 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7055 				      u16 flow_id, struct flow_keys *fkeys)
7056 {
7057 	struct hclge_vport *vport = hclge_get_vport(handle);
7058 	struct hclge_fd_rule_tuples new_tuples = {};
7059 	struct hclge_dev *hdev = vport->back;
7060 	struct hclge_fd_rule *rule;
7061 	u16 bit_id;
7062 
7063 	if (!hnae3_dev_fd_supported(hdev))
7064 		return -EOPNOTSUPP;
7065 
7066 	/* when an fd rule added by the user already exists,
7067 	 * arfs should not work
7068 	 */
7069 	spin_lock_bh(&hdev->fd_rule_lock);
7070 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7071 	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7072 		spin_unlock_bh(&hdev->fd_rule_lock);
7073 		return -EOPNOTSUPP;
7074 	}
7075 
7076 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7077 
7078 	/* check whether a flow director filter already exists for this flow:
7079 	 * if not, create a new filter for it;
7080 	 * if a filter exists with a different queue id, modify the filter;
7081 	 * if a filter exists with the same queue id, do nothing
7082 	 */
7083 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7084 	if (!rule) {
7085 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7086 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7087 			spin_unlock_bh(&hdev->fd_rule_lock);
7088 			return -ENOSPC;
7089 		}
7090 
7091 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7092 		if (!rule) {
7093 			spin_unlock_bh(&hdev->fd_rule_lock);
7094 			return -ENOMEM;
7095 		}
7096 
7097 		rule->location = bit_id;
7098 		rule->arfs.flow_id = flow_id;
7099 		rule->queue_id = queue_id;
7100 		hclge_fd_build_arfs_rule(&new_tuples, rule);
7101 		hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7102 		hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7103 	} else if (rule->queue_id != queue_id) {
7104 		rule->queue_id = queue_id;
7105 		rule->state = HCLGE_FD_TO_ADD;
7106 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7107 		hclge_task_schedule(hdev, 0);
7108 	}
7109 	spin_unlock_bh(&hdev->fd_rule_lock);
7110 	return rule->location;
7111 }
7112 
7113 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7114 {
7115 #ifdef CONFIG_RFS_ACCEL
7116 	struct hnae3_handle *handle = &hdev->vport[0].nic;
7117 	struct hclge_fd_rule *rule;
7118 	struct hlist_node *node;
7119 
7120 	spin_lock_bh(&hdev->fd_rule_lock);
7121 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7122 		spin_unlock_bh(&hdev->fd_rule_lock);
7123 		return;
7124 	}
7125 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7126 		if (rule->state != HCLGE_FD_ACTIVE)
7127 			continue;
7128 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7129 					rule->arfs.flow_id, rule->location)) {
7130 			rule->state = HCLGE_FD_TO_DEL;
7131 			set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7132 		}
7133 	}
7134 	spin_unlock_bh(&hdev->fd_rule_lock);
7135 #endif
7136 }
7137 
7138 /* the caller must hold fd_rule_lock before calling this function */
7139 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7140 {
7141 #ifdef CONFIG_RFS_ACCEL
7142 	struct hclge_fd_rule *rule;
7143 	struct hlist_node *node;
7144 	int ret;
7145 
7146 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7147 		return 0;
7148 
7149 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7150 		switch (rule->state) {
7151 		case HCLGE_FD_TO_DEL:
7152 		case HCLGE_FD_ACTIVE:
7153 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7154 						   rule->location, NULL, false);
7155 			if (ret)
7156 				return ret;
7157 			fallthrough;
7158 		case HCLGE_FD_TO_ADD:
7159 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7160 			hlist_del(&rule->rule_node);
7161 			kfree(rule);
7162 			break;
7163 		default:
7164 			break;
7165 		}
7166 	}
7167 	hclge_sync_fd_state(hdev);
7168 
7169 #endif
7170 	return 0;
7171 }
7172 
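/* The hclge_get_cls_key_*() helpers below each copy one tc-flower dissector
 * key (basic, eth addrs, vlan, ip addrs, ports) into the rule tuples. When a
 * key is not present in the match, the corresponding INNER_* bits are set in
 * rule->unused_tuple instead, so the tuple is ignored when the flow director
 * entry is built.
 */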
7173 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7174 				    struct hclge_fd_rule *rule)
7175 {
7176 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7177 		struct flow_match_basic match;
7178 		u16 ethtype_key, ethtype_mask;
7179 
7180 		flow_rule_match_basic(flow, &match);
7181 		ethtype_key = ntohs(match.key->n_proto);
7182 		ethtype_mask = ntohs(match.mask->n_proto);
7183 
7184 		if (ethtype_key == ETH_P_ALL) {
7185 			ethtype_key = 0;
7186 			ethtype_mask = 0;
7187 		}
7188 		rule->tuples.ether_proto = ethtype_key;
7189 		rule->tuples_mask.ether_proto = ethtype_mask;
7190 		rule->tuples.ip_proto = match.key->ip_proto;
7191 		rule->tuples_mask.ip_proto = match.mask->ip_proto;
7192 	} else {
7193 		rule->unused_tuple |= BIT(INNER_IP_PROTO);
7194 		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7195 	}
7196 }
7197 
7198 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7199 				  struct hclge_fd_rule *rule)
7200 {
7201 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7202 		struct flow_match_eth_addrs match;
7203 
7204 		flow_rule_match_eth_addrs(flow, &match);
7205 		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7206 		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7207 		ether_addr_copy(rule->tuples.src_mac, match.key->src);
7208 		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7209 	} else {
7210 		rule->unused_tuple |= BIT(INNER_DST_MAC);
7211 		rule->unused_tuple |= BIT(INNER_SRC_MAC);
7212 	}
7213 }
7214 
7215 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7216 				   struct hclge_fd_rule *rule)
7217 {
7218 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7219 		struct flow_match_vlan match;
7220 
7221 		flow_rule_match_vlan(flow, &match);
7222 		rule->tuples.vlan_tag1 = match.key->vlan_id |
7223 				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
7224 		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7225 				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7226 	} else {
7227 		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7228 	}
7229 }
7230 
7231 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7232 				 struct hclge_fd_rule *rule)
7233 {
7234 	u16 addr_type = 0;
7235 
7236 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7237 		struct flow_match_control match;
7238 
7239 		flow_rule_match_control(flow, &match);
7240 		addr_type = match.key->addr_type;
7241 	}
7242 
7243 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7244 		struct flow_match_ipv4_addrs match;
7245 
7246 		flow_rule_match_ipv4_addrs(flow, &match);
7247 		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7248 		rule->tuples_mask.src_ip[IPV4_INDEX] =
7249 						be32_to_cpu(match.mask->src);
7250 		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7251 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
7252 						be32_to_cpu(match.mask->dst);
7253 	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7254 		struct flow_match_ipv6_addrs match;
7255 
7256 		flow_rule_match_ipv6_addrs(flow, &match);
7257 		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7258 				  IPV6_SIZE);
7259 		be32_to_cpu_array(rule->tuples_mask.src_ip,
7260 				  match.mask->src.s6_addr32, IPV6_SIZE);
7261 		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7262 				  IPV6_SIZE);
7263 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
7264 				  match.mask->dst.s6_addr32, IPV6_SIZE);
7265 	} else {
7266 		rule->unused_tuple |= BIT(INNER_SRC_IP);
7267 		rule->unused_tuple |= BIT(INNER_DST_IP);
7268 	}
7269 }
7270 
7271 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7272 				   struct hclge_fd_rule *rule)
7273 {
7274 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7275 		struct flow_match_ports match;
7276 
7277 		flow_rule_match_ports(flow, &match);
7278 
7279 		rule->tuples.src_port = be16_to_cpu(match.key->src);
7280 		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7281 		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7282 		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7283 	} else {
7284 		rule->unused_tuple |= BIT(INNER_SRC_PORT);
7285 		rule->unused_tuple |= BIT(INNER_DST_PORT);
7286 	}
7287 }
7288 
7289 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7290 				  struct flow_cls_offload *cls_flower,
7291 				  struct hclge_fd_rule *rule)
7292 {
7293 	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7294 	struct flow_dissector *dissector = flow->match.dissector;
7295 
7296 	if (dissector->used_keys &
7297 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7298 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
7299 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7300 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
7301 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7302 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7303 	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7304 		dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7305 			dissector->used_keys);
7306 		return -EOPNOTSUPP;
7307 	}
7308 
7309 	hclge_get_cls_key_basic(flow, rule);
7310 	hclge_get_cls_key_mac(flow, rule);
7311 	hclge_get_cls_key_vlan(flow, rule);
7312 	hclge_get_cls_key_ip(flow, rule);
7313 	hclge_get_cls_key_port(flow, rule);
7314 
7315 	return 0;
7316 }
7317 
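/* For tc flower rules the flower priority maps directly to the flow director
 * location: rule->location = prio - 1. Hence prio must lie in
 * [1, rule_num[HCLGE_FD_STAGE_1]] and the corresponding bit in fd_bmap must
 * still be free; tc must be within [0, hdev->tc_max].
 */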
7318 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7319 				  struct flow_cls_offload *cls_flower, int tc)
7320 {
7321 	u32 prio = cls_flower->common.prio;
7322 
7323 	if (tc < 0 || tc > hdev->tc_max) {
7324 		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7325 		return -EINVAL;
7326 	}
7327 
7328 	if (prio == 0 ||
7329 	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7330 		dev_err(&hdev->pdev->dev,
7331 			"prio %u should be in range[1, %u]\n",
7332 			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7333 		return -EINVAL;
7334 	}
7335 
7336 	if (test_bit(prio - 1, hdev->fd_bmap)) {
7337 		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7338 		return -EINVAL;
7339 	}
7340 	return 0;
7341 }
7342 
7343 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7344 				struct flow_cls_offload *cls_flower,
7345 				int tc)
7346 {
7347 	struct hclge_vport *vport = hclge_get_vport(handle);
7348 	struct hclge_dev *hdev = vport->back;
7349 	struct hclge_fd_rule *rule;
7350 	int ret;
7351 
7352 	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7353 	if (ret) {
7354 		dev_err(&hdev->pdev->dev,
7355 			"failed to check cls flower params, ret = %d\n", ret);
7356 		return ret;
7357 	}
7358 
7359 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7360 	if (!rule)
7361 		return -ENOMEM;
7362 
7363 	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7364 	if (ret) {
7365 		kfree(rule);
7366 		return ret;
7367 	}
7368 
7369 	rule->action = HCLGE_FD_ACTION_SELECT_TC;
7370 	rule->cls_flower.tc = tc;
7371 	rule->location = cls_flower->common.prio - 1;
7372 	rule->vf_id = 0;
7373 	rule->cls_flower.cookie = cls_flower->cookie;
7374 	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7375 
7376 	ret = hclge_add_fd_entry_common(hdev, rule);
7377 	if (ret)
7378 		kfree(rule);
7379 
7380 	return ret;
7381 }
7382 
7383 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7384 						   unsigned long cookie)
7385 {
7386 	struct hclge_fd_rule *rule;
7387 	struct hlist_node *node;
7388 
7389 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7390 		if (rule->cls_flower.cookie == cookie)
7391 			return rule;
7392 	}
7393 
7394 	return NULL;
7395 }
7396 
7397 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7398 				struct flow_cls_offload *cls_flower)
7399 {
7400 	struct hclge_vport *vport = hclge_get_vport(handle);
7401 	struct hclge_dev *hdev = vport->back;
7402 	struct hclge_fd_rule *rule;
7403 	int ret;
7404 
7405 	spin_lock_bh(&hdev->fd_rule_lock);
7406 
7407 	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7408 	if (!rule) {
7409 		spin_unlock_bh(&hdev->fd_rule_lock);
7410 		return -EINVAL;
7411 	}
7412 
7413 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7414 				   NULL, false);
7415 	if (ret) {
7416 		spin_unlock_bh(&hdev->fd_rule_lock);
7417 		return ret;
7418 	}
7419 
7420 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7421 	spin_unlock_bh(&hdev->fd_rule_lock);
7422 
7423 	return 0;
7424 }
7425 
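/* Push pending flow director changes to hardware. This only runs when
 * HCLGE_STATE_FD_TBL_CHANGED is set: TO_ADD rules are programmed via
 * hclge_fd_config_rule() and become ACTIVE, TO_DEL rules are removed from the
 * TCAM and freed. On any failure the changed bit is set again so the
 * remaining work is retried on a later pass.
 */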
7426 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7427 {
7428 	struct hclge_fd_rule *rule;
7429 	struct hlist_node *node;
7430 	int ret = 0;
7431 
7432 	if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7433 		return;
7434 
7435 	spin_lock_bh(&hdev->fd_rule_lock);
7436 
7437 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7438 		switch (rule->state) {
7439 		case HCLGE_FD_TO_ADD:
7440 			ret = hclge_fd_config_rule(hdev, rule);
7441 			if (ret)
7442 				goto out;
7443 			rule->state = HCLGE_FD_ACTIVE;
7444 			break;
7445 		case HCLGE_FD_TO_DEL:
7446 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7447 						   rule->location, NULL, false);
7448 			if (ret)
7449 				goto out;
7450 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7451 			hclge_fd_free_node(hdev, rule);
7452 			break;
7453 		default:
7454 			break;
7455 		}
7456 	}
7457 
7458 out:
7459 	if (ret)
7460 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7461 
7462 	spin_unlock_bh(&hdev->fd_rule_lock);
7463 }
7464 
7465 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7466 {
7467 	if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7468 		bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7469 
7470 		hclge_clear_fd_rules_in_list(hdev, clear_list);
7471 	}
7472 
7473 	hclge_sync_fd_user_def_cfg(hdev, false);
7474 
7475 	hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7476 }
7477 
7478 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7479 {
7480 	struct hclge_vport *vport = hclge_get_vport(handle);
7481 	struct hclge_dev *hdev = vport->back;
7482 
7483 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7484 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7485 }
7486 
7487 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7488 {
7489 	struct hclge_vport *vport = hclge_get_vport(handle);
7490 	struct hclge_dev *hdev = vport->back;
7491 
7492 	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7493 }
7494 
7495 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7496 {
7497 	struct hclge_vport *vport = hclge_get_vport(handle);
7498 	struct hclge_dev *hdev = vport->back;
7499 
7500 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7501 }
7502 
7503 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7504 {
7505 	struct hclge_vport *vport = hclge_get_vport(handle);
7506 	struct hclge_dev *hdev = vport->back;
7507 
7508 	return hdev->rst_stats.hw_reset_done_cnt;
7509 }
7510 
7511 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7512 {
7513 	struct hclge_vport *vport = hclge_get_vport(handle);
7514 	struct hclge_dev *hdev = vport->back;
7515 
7516 	hdev->fd_en = enable;
7517 
7518 	if (!enable)
7519 		set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7520 	else
7521 		hclge_restore_fd_entries(handle);
7522 
7523 	hclge_task_schedule(hdev, 0);
7524 }
7525 
7526 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7527 {
7528 	struct hclge_desc desc;
7529 	struct hclge_config_mac_mode_cmd *req =
7530 		(struct hclge_config_mac_mode_cmd *)desc.data;
7531 	u32 loop_en = 0;
7532 	int ret;
7533 
7534 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7535 
7536 	if (enable) {
7537 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7538 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7539 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7540 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7541 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7542 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7543 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7544 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7545 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7546 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7547 	}
7548 
7549 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7550 
7551 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7552 	if (ret)
7553 		dev_err(&hdev->pdev->dev,
7554 			"mac enable fail, ret =%d.\n", ret);
7555 }
7556 
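/* Read-modify-write of the per-function mac vlan switch parameter: the
 * current value is read first, the bits selected by param_mask are updated
 * with switch_param, and the same descriptor is reused for the write back so
 * the remaining switch bits are preserved.
 */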
7557 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7558 				     u8 switch_param, u8 param_mask)
7559 {
7560 	struct hclge_mac_vlan_switch_cmd *req;
7561 	struct hclge_desc desc;
7562 	u32 func_id;
7563 	int ret;
7564 
7565 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7566 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7567 
7568 	/* read current config parameter */
7569 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7570 				   true);
7571 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7572 	req->func_id = cpu_to_le32(func_id);
7573 
7574 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7575 	if (ret) {
7576 		dev_err(&hdev->pdev->dev,
7577 			"read mac vlan switch parameter fail, ret = %d\n", ret);
7578 		return ret;
7579 	}
7580 
7581 	/* modify and write new config parameter */
7582 	hclge_cmd_reuse_desc(&desc, false);
7583 	req->switch_param = (req->switch_param & param_mask) | switch_param;
7584 	req->param_mask = param_mask;
7585 
7586 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7587 	if (ret)
7588 		dev_err(&hdev->pdev->dev,
7589 			"set mac vlan switch parameter fail, ret = %d\n", ret);
7590 	return ret;
7591 }
7592 
7593 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7594 				       int link_ret)
7595 {
7596 #define HCLGE_PHY_LINK_STATUS_NUM  200
7597 
7598 	struct phy_device *phydev = hdev->hw.mac.phydev;
7599 	int i = 0;
7600 	int ret;
7601 
7602 	do {
7603 		ret = phy_read_status(phydev);
7604 		if (ret) {
7605 			dev_err(&hdev->pdev->dev,
7606 				"phy update link status fail, ret = %d\n", ret);
7607 			return;
7608 		}
7609 
7610 		if (phydev->link == link_ret)
7611 			break;
7612 
7613 		msleep(HCLGE_LINK_STATUS_MS);
7614 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7615 }
7616 
7617 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7618 {
7619 #define HCLGE_MAC_LINK_STATUS_NUM  100
7620 
7621 	int link_status;
7622 	int i = 0;
7623 	int ret;
7624 
7625 	do {
7626 		ret = hclge_get_mac_link_status(hdev, &link_status);
7627 		if (ret)
7628 			return ret;
7629 		if (link_status == link_ret)
7630 			return 0;
7631 
7632 		msleep(HCLGE_LINK_STATUS_MS);
7633 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7634 	return -EBUSY;
7635 }
7636 
7637 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7638 					  bool is_phy)
7639 {
7640 	int link_ret;
7641 
7642 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7643 
7644 	if (is_phy)
7645 		hclge_phy_link_status_wait(hdev, link_ret);
7646 
7647 	return hclge_mac_link_status_wait(hdev, link_ret);
7648 }
7649 
7650 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7651 {
7652 	struct hclge_config_mac_mode_cmd *req;
7653 	struct hclge_desc desc;
7654 	u32 loop_en;
7655 	int ret;
7656 
7657 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7658 	/* 1 Read out the MAC mode config at first */
7659 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7660 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7661 	if (ret) {
7662 		dev_err(&hdev->pdev->dev,
7663 			"mac loopback get fail, ret =%d.\n", ret);
7664 		return ret;
7665 	}
7666 
7667 	/* 2 Then setup the loopback flag */
7668 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7669 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7670 
7671 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7672 
7673 	/* 3 Config mac work mode with loopback flag
7674 	 * and its original configure parameters
7675 	 */
7676 	hclge_cmd_reuse_desc(&desc, false);
7677 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7678 	if (ret)
7679 		dev_err(&hdev->pdev->dev,
7680 			"mac loopback set fail, ret =%d.\n", ret);
7681 	return ret;
7682 }
7683 
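/* Configure serdes (or IMP-managed PHY) loopback through the firmware. After
 * sending the set command, the same opcode is polled (read) every
 * HCLGE_COMMON_LB_RETRY_MS up to HCLGE_COMMON_LB_RETRY_NUM times until the
 * firmware reports HCLGE_CMD_COMMON_LB_DONE_B; HCLGE_CMD_COMMON_LB_SUCCESS_B
 * then indicates whether the loopback change actually took effect.
 */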
7684 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7685 				     enum hnae3_loop loop_mode)
7686 {
7687 #define HCLGE_COMMON_LB_RETRY_MS	10
7688 #define HCLGE_COMMON_LB_RETRY_NUM	100
7689 
7690 	struct hclge_common_lb_cmd *req;
7691 	struct hclge_desc desc;
7692 	int ret, i = 0;
7693 	u8 loop_mode_b;
7694 
7695 	req = (struct hclge_common_lb_cmd *)desc.data;
7696 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7697 
7698 	switch (loop_mode) {
7699 	case HNAE3_LOOP_SERIAL_SERDES:
7700 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7701 		break;
7702 	case HNAE3_LOOP_PARALLEL_SERDES:
7703 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7704 		break;
7705 	case HNAE3_LOOP_PHY:
7706 		loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7707 		break;
7708 	default:
7709 		dev_err(&hdev->pdev->dev,
7710 			"unsupported common loopback mode %d\n", loop_mode);
7711 		return -ENOTSUPP;
7712 	}
7713 
7714 	if (en) {
7715 		req->enable = loop_mode_b;
7716 		req->mask = loop_mode_b;
7717 	} else {
7718 		req->mask = loop_mode_b;
7719 	}
7720 
7721 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7722 	if (ret) {
7723 		dev_err(&hdev->pdev->dev,
7724 			"common loopback set fail, ret = %d\n", ret);
7725 		return ret;
7726 	}
7727 
7728 	do {
7729 		msleep(HCLGE_COMMON_LB_RETRY_MS);
7730 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7731 					   true);
7732 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7733 		if (ret) {
7734 			dev_err(&hdev->pdev->dev,
7735 				"common loopback get fail, ret = %d\n", ret);
7736 			return ret;
7737 		}
7738 	} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7739 		 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7740 
7741 	if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7742 		dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7743 		return -EBUSY;
7744 	} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7745 		dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7746 		return -EIO;
7747 	}
7748 	return ret;
7749 }
7750 
7751 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7752 				     enum hnae3_loop loop_mode)
7753 {
7754 	int ret;
7755 
7756 	ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7757 	if (ret)
7758 		return ret;
7759 
7760 	hclge_cfg_mac_mode(hdev, en);
7761 
7762 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7763 	if (ret)
7764 		dev_err(&hdev->pdev->dev,
7765 			"serdes loopback config mac mode timeout\n");
7766 
7767 	return ret;
7768 }
7769 
7770 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7771 				     struct phy_device *phydev)
7772 {
7773 	int ret;
7774 
7775 	if (!phydev->suspended) {
7776 		ret = phy_suspend(phydev);
7777 		if (ret)
7778 			return ret;
7779 	}
7780 
7781 	ret = phy_resume(phydev);
7782 	if (ret)
7783 		return ret;
7784 
7785 	return phy_loopback(phydev, true);
7786 }
7787 
7788 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7789 				      struct phy_device *phydev)
7790 {
7791 	int ret;
7792 
7793 	ret = phy_loopback(phydev, false);
7794 	if (ret)
7795 		return ret;
7796 
7797 	return phy_suspend(phydev);
7798 }
7799 
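/* PHY loopback has two flavours: if no phydev is attached but the firmware
 * (IMP) manages the PHY, the request is forwarded as a common loopback of
 * type HNAE3_LOOP_PHY; otherwise the PHY is put through a suspend/resume
 * cycle and switched into loopback via phylib, and the MAC link state is
 * polled until it matches the requested state.
 */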
7800 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7801 {
7802 	struct phy_device *phydev = hdev->hw.mac.phydev;
7803 	int ret;
7804 
7805 	if (!phydev) {
7806 		if (hnae3_dev_phy_imp_supported(hdev))
7807 			return hclge_set_common_loopback(hdev, en,
7808 							 HNAE3_LOOP_PHY);
7809 		return -ENOTSUPP;
7810 	}
7811 
7812 	if (en)
7813 		ret = hclge_enable_phy_loopback(hdev, phydev);
7814 	else
7815 		ret = hclge_disable_phy_loopback(hdev, phydev);
7816 	if (ret) {
7817 		dev_err(&hdev->pdev->dev,
7818 			"set phy loopback fail, ret = %d\n", ret);
7819 		return ret;
7820 	}
7821 
7822 	hclge_cfg_mac_mode(hdev, en);
7823 
7824 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7825 	if (ret)
7826 		dev_err(&hdev->pdev->dev,
7827 			"phy loopback config mac mode timeout\n");
7828 
7829 	return ret;
7830 }
7831 
7832 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7833 				     u16 stream_id, bool enable)
7834 {
7835 	struct hclge_desc desc;
7836 	struct hclge_cfg_com_tqp_queue_cmd *req =
7837 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7838 
7839 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7840 	req->tqp_id = cpu_to_le16(tqp_id);
7841 	req->stream_id = cpu_to_le16(stream_id);
7842 	if (enable)
7843 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7844 
7845 	return hclge_cmd_send(&hdev->hw, &desc, 1);
7846 }
7847 
7848 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7849 {
7850 	struct hclge_vport *vport = hclge_get_vport(handle);
7851 	struct hclge_dev *hdev = vport->back;
7852 	int ret;
7853 	u16 i;
7854 
7855 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
7856 		ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7857 		if (ret)
7858 			return ret;
7859 	}
7860 	return 0;
7861 }
7862 
7863 static int hclge_set_loopback(struct hnae3_handle *handle,
7864 			      enum hnae3_loop loop_mode, bool en)
7865 {
7866 	struct hclge_vport *vport = hclge_get_vport(handle);
7867 	struct hclge_dev *hdev = vport->back;
7868 	int ret;
7869 
7870 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7871 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7872 	 * the same, the packets are looped back in the SSU. If SSU loopback
7873 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7874 	 */
7875 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7876 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7877 
7878 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7879 						HCLGE_SWITCH_ALW_LPBK_MASK);
7880 		if (ret)
7881 			return ret;
7882 	}
7883 
7884 	switch (loop_mode) {
7885 	case HNAE3_LOOP_APP:
7886 		ret = hclge_set_app_loopback(hdev, en);
7887 		break;
7888 	case HNAE3_LOOP_SERIAL_SERDES:
7889 	case HNAE3_LOOP_PARALLEL_SERDES:
7890 		ret = hclge_set_common_loopback(hdev, en, loop_mode);
7891 		break;
7892 	case HNAE3_LOOP_PHY:
7893 		ret = hclge_set_phy_loopback(hdev, en);
7894 		break;
7895 	default:
7896 		ret = -ENOTSUPP;
7897 		dev_err(&hdev->pdev->dev,
7898 			"loop_mode %d is not supported\n", loop_mode);
7899 		break;
7900 	}
7901 
7902 	if (ret)
7903 		return ret;
7904 
7905 	ret = hclge_tqp_enable(handle, en);
7906 	if (ret)
7907 		dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
7908 			en ? "enable" : "disable", ret);
7909 
7910 	return ret;
7911 }
7912 
7913 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7914 {
7915 	int ret;
7916 
7917 	ret = hclge_set_app_loopback(hdev, false);
7918 	if (ret)
7919 		return ret;
7920 
7921 	ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7922 	if (ret)
7923 		return ret;
7924 
7925 	return hclge_cfg_common_loopback(hdev, false,
7926 					 HNAE3_LOOP_PARALLEL_SERDES);
7927 }
7928 
7929 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
7930 {
7931 	struct hclge_vport *vport = hclge_get_vport(handle);
7932 	struct hnae3_knic_private_info *kinfo;
7933 	struct hnae3_queue *queue;
7934 	struct hclge_tqp *tqp;
7935 	int i;
7936 
7937 	kinfo = &vport->nic.kinfo;
7938 	for (i = 0; i < kinfo->num_tqps; i++) {
7939 		queue = handle->kinfo.tqp[i];
7940 		tqp = container_of(queue, struct hclge_tqp, q);
7941 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
7942 	}
7943 }
7944 
7945 static void hclge_flush_link_update(struct hclge_dev *hdev)
7946 {
7947 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
7948 
7949 	unsigned long last = hdev->serv_processed_cnt;
7950 	int i = 0;
7951 
7952 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7953 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7954 	       last == hdev->serv_processed_cnt)
7955 		usleep_range(1, 1);
7956 }
7957 
7958 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7959 {
7960 	struct hclge_vport *vport = hclge_get_vport(handle);
7961 	struct hclge_dev *hdev = vport->back;
7962 
7963 	if (enable) {
7964 		hclge_task_schedule(hdev, 0);
7965 	} else {
7966 		/* Set the DOWN flag here to disable link updating */
7967 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
7968 
7969 		/* flush memory to make sure DOWN is seen by service task */
7970 		smp_mb__before_atomic();
7971 		hclge_flush_link_update(hdev);
7972 	}
7973 }
7974 
7975 static int hclge_ae_start(struct hnae3_handle *handle)
7976 {
7977 	struct hclge_vport *vport = hclge_get_vport(handle);
7978 	struct hclge_dev *hdev = vport->back;
7979 
7980 	/* mac enable */
7981 	hclge_cfg_mac_mode(hdev, true);
7982 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7983 	hdev->hw.mac.link = 0;
7984 
7985 	/* reset tqp stats */
7986 	hclge_reset_tqp_stats(handle);
7987 
7988 	hclge_mac_start_phy(hdev);
7989 
7990 	return 0;
7991 }
7992 
7993 static void hclge_ae_stop(struct hnae3_handle *handle)
7994 {
7995 	struct hclge_vport *vport = hclge_get_vport(handle);
7996 	struct hclge_dev *hdev = vport->back;
7997 
7998 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7999 	spin_lock_bh(&hdev->fd_rule_lock);
8000 	hclge_clear_arfs_rules(hdev);
8001 	spin_unlock_bh(&hdev->fd_rule_lock);
8002 
8003 	/* If it is not a PF (function) reset, the firmware will disable the
8004 	 * MAC, so we only need to stop the phy here.
8005 	 */
8006 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8007 	    hdev->reset_type != HNAE3_FUNC_RESET) {
8008 		hclge_mac_stop_phy(hdev);
8009 		hclge_update_link_status(hdev);
8010 		return;
8011 	}
8012 
8013 	hclge_reset_tqp(handle);
8014 
8015 	hclge_config_mac_tnl_int(hdev, false);
8016 
8017 	/* Mac disable */
8018 	hclge_cfg_mac_mode(hdev, false);
8019 
8020 	hclge_mac_stop_phy(hdev);
8021 
8022 	/* reset tqp stats */
8023 	hclge_reset_tqp_stats(handle);
8024 	hclge_update_link_status(hdev);
8025 }
8026 
8027 int hclge_vport_start(struct hclge_vport *vport)
8028 {
8029 	struct hclge_dev *hdev = vport->back;
8030 
8031 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8032 	vport->last_active_jiffies = jiffies;
8033 
8034 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8035 		if (vport->vport_id) {
8036 			hclge_restore_mac_table_common(vport);
8037 			hclge_restore_vport_vlan_table(vport);
8038 		} else {
8039 			hclge_restore_hw_table(hdev);
8040 		}
8041 	}
8042 
8043 	clear_bit(vport->vport_id, hdev->vport_config_block);
8044 
8045 	return 0;
8046 }
8047 
8048 void hclge_vport_stop(struct hclge_vport *vport)
8049 {
8050 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8051 }
8052 
8053 static int hclge_client_start(struct hnae3_handle *handle)
8054 {
8055 	struct hclge_vport *vport = hclge_get_vport(handle);
8056 
8057 	return hclge_vport_start(vport);
8058 }
8059 
8060 static void hclge_client_stop(struct hnae3_handle *handle)
8061 {
8062 	struct hclge_vport *vport = hclge_get_vport(handle);
8063 
8064 	hclge_vport_stop(vport);
8065 }
8066 
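/* Translate the two-level completion status of a mac_vlan table command into
 * an errno: cmdq_resp is the command queue status (non-zero means the command
 * itself failed), resp_code is the table operation result. For ADD, resp_code
 * 0 or 1 counts as success and the overflow codes map to -ENOSPC; for
 * REMOVE/LKUP, resp_code 1 means the entry was not found (-ENOENT).
 */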
8067 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8068 					 u16 cmdq_resp, u8  resp_code,
8069 					 enum hclge_mac_vlan_tbl_opcode op)
8070 {
8071 	struct hclge_dev *hdev = vport->back;
8072 
8073 	if (cmdq_resp) {
8074 		dev_err(&hdev->pdev->dev,
8075 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8076 			cmdq_resp);
8077 		return -EIO;
8078 	}
8079 
8080 	if (op == HCLGE_MAC_VLAN_ADD) {
8081 		if (!resp_code || resp_code == 1)
8082 			return 0;
8083 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8084 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
8085 			return -ENOSPC;
8086 
8087 		dev_err(&hdev->pdev->dev,
8088 			"add mac addr failed for undefined, code=%u.\n",
8089 			resp_code);
8090 		return -EIO;
8091 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
8092 		if (!resp_code) {
8093 			return 0;
8094 		} else if (resp_code == 1) {
8095 			dev_dbg(&hdev->pdev->dev,
8096 				"remove mac addr failed for miss.\n");
8097 			return -ENOENT;
8098 		}
8099 
8100 		dev_err(&hdev->pdev->dev,
8101 			"remove mac addr failed for undefined, code=%u.\n",
8102 			resp_code);
8103 		return -EIO;
8104 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
8105 		if (!resp_code) {
8106 			return 0;
8107 		} else if (resp_code == 1) {
8108 			dev_dbg(&hdev->pdev->dev,
8109 				"lookup mac addr failed for miss.\n");
8110 			return -ENOENT;
8111 		}
8112 
8113 		dev_err(&hdev->pdev->dev,
8114 			"lookup mac addr failed for undefined, code=%u.\n",
8115 			resp_code);
8116 		return -EIO;
8117 	}
8118 
8119 	dev_err(&hdev->pdev->dev,
8120 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8121 
8122 	return -EINVAL;
8123 }
8124 
8125 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
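/* A multicast mac_vlan entry carries a bitmap of the functions subscribed to
 * it, spread over the command descriptors: functions 0..191 occupy the data
 * words of desc[1], functions 192..255 those of desc[2]. This helper sets
 * (or clears, when clr is true) the bit for vfid.
 */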
8126 {
8127 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8128 
8129 	unsigned int word_num;
8130 	unsigned int bit_num;
8131 
8132 	if (vfid > 255 || vfid < 0)
8133 		return -EIO;
8134 
8135 	if (vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8136 		word_num = vfid / 32;
8137 		bit_num  = vfid % 32;
8138 		if (clr)
8139 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8140 		else
8141 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8142 	} else {
8143 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8144 		bit_num  = vfid % 32;
8145 		if (clr)
8146 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8147 		else
8148 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8149 	}
8150 
8151 	return 0;
8152 }
8153 
8154 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8155 {
8156 #define HCLGE_DESC_NUMBER 3
8157 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8158 	int i, j;
8159 
8160 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8161 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8162 			if (desc[i].data[j])
8163 				return false;
8164 
8165 	return true;
8166 }
8167 
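/* The mac address is packed least-significant-byte-first into the table
 * entry: bytes 0..3 go into mac_addr_hi32 and bytes 4..5 into mac_addr_lo16.
 * For example, aa:bb:cc:dd:ee:ff becomes hi32 = 0xddccbbaa, lo16 = 0xffee.
 * Multicast entries additionally set the entry_type and mc_mac_en bits.
 */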
8168 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8169 				   const u8 *addr, bool is_mc)
8170 {
8171 	const unsigned char *mac_addr = addr;
8172 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8173 		       (mac_addr[0]) | (mac_addr[1] << 8);
8174 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
8175 
8176 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8177 	if (is_mc) {
8178 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8179 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8180 	}
8181 
8182 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8183 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8184 }
8185 
8186 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8187 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
8188 {
8189 	struct hclge_dev *hdev = vport->back;
8190 	struct hclge_desc desc;
8191 	u8 resp_code;
8192 	u16 retval;
8193 	int ret;
8194 
8195 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8196 
8197 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8198 
8199 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8200 	if (ret) {
8201 		dev_err(&hdev->pdev->dev,
8202 			"del mac addr failed for cmd_send, ret =%d.\n",
8203 			ret);
8204 		return ret;
8205 	}
8206 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8207 	retval = le16_to_cpu(desc.retval);
8208 
8209 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8210 					     HCLGE_MAC_VLAN_REMOVE);
8211 }
8212 
8213 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8214 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
8215 				     struct hclge_desc *desc,
8216 				     bool is_mc)
8217 {
8218 	struct hclge_dev *hdev = vport->back;
8219 	u8 resp_code;
8220 	u16 retval;
8221 	int ret;
8222 
8223 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8224 	if (is_mc) {
8225 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8226 		memcpy(desc[0].data,
8227 		       req,
8228 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8229 		hclge_cmd_setup_basic_desc(&desc[1],
8230 					   HCLGE_OPC_MAC_VLAN_ADD,
8231 					   true);
8232 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8233 		hclge_cmd_setup_basic_desc(&desc[2],
8234 					   HCLGE_OPC_MAC_VLAN_ADD,
8235 					   true);
8236 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
8237 	} else {
8238 		memcpy(desc[0].data,
8239 		       req,
8240 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8241 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
8242 	}
8243 	if (ret) {
8244 		dev_err(&hdev->pdev->dev,
8245 			"lookup mac addr failed for cmd_send, ret =%d.\n",
8246 			ret);
8247 		return ret;
8248 	}
8249 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8250 	retval = le16_to_cpu(desc[0].retval);
8251 
8252 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8253 					     HCLGE_MAC_VLAN_LKUP);
8254 }
8255 
8256 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8257 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
8258 				  struct hclge_desc *mc_desc)
8259 {
8260 	struct hclge_dev *hdev = vport->back;
8261 	int cfg_status;
8262 	u8 resp_code;
8263 	u16 retval;
8264 	int ret;
8265 
8266 	if (!mc_desc) {
8267 		struct hclge_desc desc;
8268 
8269 		hclge_cmd_setup_basic_desc(&desc,
8270 					   HCLGE_OPC_MAC_VLAN_ADD,
8271 					   false);
8272 		memcpy(desc.data, req,
8273 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8274 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8275 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8276 		retval = le16_to_cpu(desc.retval);
8277 
8278 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8279 							   resp_code,
8280 							   HCLGE_MAC_VLAN_ADD);
8281 	} else {
8282 		hclge_cmd_reuse_desc(&mc_desc[0], false);
8283 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8284 		hclge_cmd_reuse_desc(&mc_desc[1], false);
8285 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8286 		hclge_cmd_reuse_desc(&mc_desc[2], false);
8287 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8288 		memcpy(mc_desc[0].data, req,
8289 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8290 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8291 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8292 		retval = le16_to_cpu(mc_desc[0].retval);
8293 
8294 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8295 							   resp_code,
8296 							   HCLGE_MAC_VLAN_ADD);
8297 	}
8298 
8299 	if (ret) {
8300 		dev_err(&hdev->pdev->dev,
8301 			"add mac addr failed for cmd_send, ret =%d.\n",
8302 			ret);
8303 		return ret;
8304 	}
8305 
8306 	return cfg_status;
8307 }
8308 
8309 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8310 			       u16 *allocated_size)
8311 {
8312 	struct hclge_umv_spc_alc_cmd *req;
8313 	struct hclge_desc desc;
8314 	int ret;
8315 
8316 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8317 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8318 
8319 	req->space_size = cpu_to_le32(space_size);
8320 
8321 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8322 	if (ret) {
8323 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8324 			ret);
8325 		return ret;
8326 	}
8327 
8328 	*allocated_size = le32_to_cpu(desc.data[1]);
8329 
8330 	return 0;
8331 }
8332 
8333 static int hclge_init_umv_space(struct hclge_dev *hdev)
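/* The unicast mac vlan (UMV) space granted by firmware is divided by
 * (num_alloc_vport + 1): each vport keeps one share as its private quota
 * (priv_umv_size), and the leftover share plus the division remainder forms
 * the shared pool (share_umv_size), which is consumed only once a vport has
 * used up its private quota. E.g. with 256 entries and 3 vports:
 * priv_umv_size = 256 / 4 = 64 and share_umv_size = 64 + 256 % 4 = 64.
 */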
8334 {
8335 	u16 allocated_size = 0;
8336 	int ret;
8337 
8338 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8339 	if (ret)
8340 		return ret;
8341 
8342 	if (allocated_size < hdev->wanted_umv_size)
8343 		dev_warn(&hdev->pdev->dev,
8344 			 "failed to alloc umv space, want %u, get %u\n",
8345 			 hdev->wanted_umv_size, allocated_size);
8346 
8347 	hdev->max_umv_size = allocated_size;
8348 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8349 	hdev->share_umv_size = hdev->priv_umv_size +
8350 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8351 
8352 	return 0;
8353 }
8354 
8355 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8356 {
8357 	struct hclge_vport *vport;
8358 	int i;
8359 
8360 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8361 		vport = &hdev->vport[i];
8362 		vport->used_umv_num = 0;
8363 	}
8364 
8365 	mutex_lock(&hdev->vport_lock);
8366 	hdev->share_umv_size = hdev->priv_umv_size +
8367 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8368 	mutex_unlock(&hdev->vport_lock);
8369 }
8370 
8371 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8372 {
8373 	struct hclge_dev *hdev = vport->back;
8374 	bool is_full;
8375 
8376 	if (need_lock)
8377 		mutex_lock(&hdev->vport_lock);
8378 
8379 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8380 		   hdev->share_umv_size == 0);
8381 
8382 	if (need_lock)
8383 		mutex_unlock(&hdev->vport_lock);
8384 
8385 	return is_full;
8386 }
8387 
8388 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8389 {
8390 	struct hclge_dev *hdev = vport->back;
8391 
8392 	if (is_free) {
8393 		if (vport->used_umv_num > hdev->priv_umv_size)
8394 			hdev->share_umv_size++;
8395 
8396 		if (vport->used_umv_num > 0)
8397 			vport->used_umv_num--;
8398 	} else {
8399 		if (vport->used_umv_num >= hdev->priv_umv_size &&
8400 		    hdev->share_umv_size > 0)
8401 			hdev->share_umv_size--;
8402 		vport->used_umv_num++;
8403 	}
8404 }
8405 
8406 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8407 						  const u8 *mac_addr)
8408 {
8409 	struct hclge_mac_node *mac_node, *tmp;
8410 
8411 	list_for_each_entry_safe(mac_node, tmp, list, node)
8412 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8413 			return mac_node;
8414 
8415 	return NULL;
8416 }
8417 
8418 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8419 				  enum HCLGE_MAC_NODE_STATE state)
8420 {
8421 	switch (state) {
8422 	/* from set_rx_mode or tmp_add_list */
8423 	case HCLGE_MAC_TO_ADD:
8424 		if (mac_node->state == HCLGE_MAC_TO_DEL)
8425 			mac_node->state = HCLGE_MAC_ACTIVE;
8426 		break;
8427 	/* only from set_rx_mode */
8428 	case HCLGE_MAC_TO_DEL:
8429 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
8430 			list_del(&mac_node->node);
8431 			kfree(mac_node);
8432 		} else {
8433 			mac_node->state = HCLGE_MAC_TO_DEL;
8434 		}
8435 		break;
8436 	/* only from tmp_add_list, the mac_node->state won't be
8437 	 * ACTIVE.
8438 	 */
8439 	case HCLGE_MAC_ACTIVE:
8440 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8441 			mac_node->state = HCLGE_MAC_ACTIVE;
8442 
8443 		break;
8444 	}
8445 }
8446 
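/* MAC address changes are not written to hardware directly. The add/remove
 * callbacks (hclge_add_uc_addr() etc.) only record TO_ADD/TO_DEL nodes in the
 * per-vport uc/mc_mac_list under mac_list_lock and set
 * HCLGE_VPORT_STATE_MAC_TBL_CHANGE; the actual hardware programming happens
 * later in hclge_sync_vport_mac_table().
 */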
8447 int hclge_update_mac_list(struct hclge_vport *vport,
8448 			  enum HCLGE_MAC_NODE_STATE state,
8449 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
8450 			  const unsigned char *addr)
8451 {
8452 	struct hclge_dev *hdev = vport->back;
8453 	struct hclge_mac_node *mac_node;
8454 	struct list_head *list;
8455 
8456 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8457 		&vport->uc_mac_list : &vport->mc_mac_list;
8458 
8459 	spin_lock_bh(&vport->mac_list_lock);
8460 
8461 	/* if the mac addr is already in the mac list, there is no need to add
8462 	 * a new node for it; just check the mac addr state and either convert
8463 	 * it to a new state, remove it, or do nothing.
8464 	 */
8465 	mac_node = hclge_find_mac_node(list, addr);
8466 	if (mac_node) {
8467 		hclge_update_mac_node(mac_node, state);
8468 		spin_unlock_bh(&vport->mac_list_lock);
8469 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8470 		return 0;
8471 	}
8472 
8473 	/* if this address was never added, there is no need to delete it */
8474 	if (state == HCLGE_MAC_TO_DEL) {
8475 		spin_unlock_bh(&vport->mac_list_lock);
8476 		dev_err(&hdev->pdev->dev,
8477 			"failed to delete address %pM from mac list\n",
8478 			addr);
8479 		return -ENOENT;
8480 	}
8481 
8482 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8483 	if (!mac_node) {
8484 		spin_unlock_bh(&vport->mac_list_lock);
8485 		return -ENOMEM;
8486 	}
8487 
8488 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8489 
8490 	mac_node->state = state;
8491 	ether_addr_copy(mac_node->mac_addr, addr);
8492 	list_add_tail(&mac_node->node, list);
8493 
8494 	spin_unlock_bh(&vport->mac_list_lock);
8495 
8496 	return 0;
8497 }
8498 
8499 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8500 			     const unsigned char *addr)
8501 {
8502 	struct hclge_vport *vport = hclge_get_vport(handle);
8503 
8504 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8505 				     addr);
8506 }
8507 
8508 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8509 			     const unsigned char *addr)
8510 {
8511 	struct hclge_dev *hdev = vport->back;
8512 	struct hclge_mac_vlan_tbl_entry_cmd req;
8513 	struct hclge_desc desc;
8514 	u16 egress_port = 0;
8515 	int ret;
8516 
8517 	/* mac addr check */
8518 	if (is_zero_ether_addr(addr) ||
8519 	    is_broadcast_ether_addr(addr) ||
8520 	    is_multicast_ether_addr(addr)) {
8521 		dev_err(&hdev->pdev->dev,
8522 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8523 			 addr, is_zero_ether_addr(addr),
8524 			 is_broadcast_ether_addr(addr),
8525 			 is_multicast_ether_addr(addr));
8526 		return -EINVAL;
8527 	}
8528 
8529 	memset(&req, 0, sizeof(req));
8530 
8531 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8532 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8533 
8534 	req.egress_port = cpu_to_le16(egress_port);
8535 
8536 	hclge_prepare_mac_addr(&req, addr, false);
8537 
8538 	/* Look up the mac address in the mac_vlan table, and add
8539 	 * it if the entry does not exist. Duplicate unicast entries
8540 	 * are not allowed in the mac vlan table.
8541 	 */
8542 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8543 	if (ret == -ENOENT) {
8544 		mutex_lock(&hdev->vport_lock);
8545 		if (!hclge_is_umv_space_full(vport, false)) {
8546 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8547 			if (!ret)
8548 				hclge_update_umv_space(vport, false);
8549 			mutex_unlock(&hdev->vport_lock);
8550 			return ret;
8551 		}
8552 		mutex_unlock(&hdev->vport_lock);
8553 
8554 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8555 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8556 				hdev->priv_umv_size);
8557 
8558 		return -ENOSPC;
8559 	}
8560 
8561 	/* check if we just hit the duplicate */
8562 	if (!ret) {
8563 		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8564 			 vport->vport_id, addr);
8565 		return 0;
8566 	}
8567 
8568 	dev_err(&hdev->pdev->dev,
8569 		"PF failed to add unicast entry(%pM) in the MAC table\n",
8570 		addr);
8571 
8572 	return ret;
8573 }
8574 
8575 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8576 			    const unsigned char *addr)
8577 {
8578 	struct hclge_vport *vport = hclge_get_vport(handle);
8579 
8580 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8581 				     addr);
8582 }
8583 
8584 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8585 			    const unsigned char *addr)
8586 {
8587 	struct hclge_dev *hdev = vport->back;
8588 	struct hclge_mac_vlan_tbl_entry_cmd req;
8589 	int ret;
8590 
8591 	/* mac addr check */
8592 	if (is_zero_ether_addr(addr) ||
8593 	    is_broadcast_ether_addr(addr) ||
8594 	    is_multicast_ether_addr(addr)) {
8595 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8596 			addr);
8597 		return -EINVAL;
8598 	}
8599 
8600 	memset(&req, 0, sizeof(req));
8601 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8602 	hclge_prepare_mac_addr(&req, addr, false);
8603 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
8604 	if (!ret) {
8605 		mutex_lock(&hdev->vport_lock);
8606 		hclge_update_umv_space(vport, true);
8607 		mutex_unlock(&hdev->vport_lock);
8608 	} else if (ret == -ENOENT) {
8609 		ret = 0;
8610 	}
8611 
8612 	return ret;
8613 }
8614 
8615 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8616 			     const unsigned char *addr)
8617 {
8618 	struct hclge_vport *vport = hclge_get_vport(handle);
8619 
8620 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8621 				     addr);
8622 }
8623 
8624 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8625 			     const unsigned char *addr)
8626 {
8627 	struct hclge_dev *hdev = vport->back;
8628 	struct hclge_mac_vlan_tbl_entry_cmd req;
8629 	struct hclge_desc desc[3];
8630 	int status;
8631 
8632 	/* mac addr check */
8633 	if (!is_multicast_ether_addr(addr)) {
8634 		dev_err(&hdev->pdev->dev,
8635 			"Add mc mac err! invalid mac:%pM.\n",
8636 			 addr);
8637 		return -EINVAL;
8638 	}
8639 	memset(&req, 0, sizeof(req));
8640 	hclge_prepare_mac_addr(&req, addr, true);
8641 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8642 	if (status) {
8643 		/* This mac addr does not exist, add a new entry for it */
8644 		memset(desc[0].data, 0, sizeof(desc[0].data));
8645 		memset(desc[1].data, 0, sizeof(desc[0].data));
8646 		memset(desc[2].data, 0, sizeof(desc[0].data));
8647 	}
8648 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8649 	if (status)
8650 		return status;
8651 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8652 	/* if the table has already overflowed, do not print the error each time */
8653 	if (status == -ENOSPC &&
8654 	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8655 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8656 
8657 	return status;
8658 }
8659 
8660 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8661 			    const unsigned char *addr)
8662 {
8663 	struct hclge_vport *vport = hclge_get_vport(handle);
8664 
8665 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8666 				     addr);
8667 }
8668 
8669 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8670 			    const unsigned char *addr)
8671 {
8672 	struct hclge_dev *hdev = vport->back;
8673 	struct hclge_mac_vlan_tbl_entry_cmd req;
8674 	enum hclge_cmd_status status;
8675 	struct hclge_desc desc[3];
8676 
8677 	/* mac addr check */
8678 	if (!is_multicast_ether_addr(addr)) {
8679 		dev_dbg(&hdev->pdev->dev,
8680 			"Remove mc mac err! invalid mac:%pM.\n",
8681 			 addr);
8682 		return -EINVAL;
8683 	}
8684 
8685 	memset(&req, 0, sizeof(req));
8686 	hclge_prepare_mac_addr(&req, addr, true);
8687 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8688 	if (!status) {
8689 		/* This mac addr exists, remove this handle's VFID from it */
8690 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8691 		if (status)
8692 			return status;
8693 
8694 		if (hclge_is_all_function_id_zero(desc))
8695 			/* All the vfids are zero, so delete this entry */
8696 			status = hclge_remove_mac_vlan_tbl(vport, &req);
8697 		else
8698 			/* Not all the vfids are zero, just update the vfid bitmap */
8699 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8700 	} else if (status == -ENOENT) {
8701 		status = 0;
8702 	}
8703 
8704 	return status;
8705 }
8706 
8707 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8708 				      struct list_head *list,
8709 				      int (*sync)(struct hclge_vport *,
8710 						  const unsigned char *))
8711 {
8712 	struct hclge_mac_node *mac_node, *tmp;
8713 	int ret;
8714 
8715 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8716 		ret = sync(vport, mac_node->mac_addr);
8717 		if (!ret) {
8718 			mac_node->state = HCLGE_MAC_ACTIVE;
8719 		} else {
8720 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8721 				&vport->state);
8722 			break;
8723 		}
8724 	}
8725 }
8726 
8727 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8728 					struct list_head *list,
8729 					int (*unsync)(struct hclge_vport *,
8730 						      const unsigned char *))
8731 {
8732 	struct hclge_mac_node *mac_node, *tmp;
8733 	int ret;
8734 
8735 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8736 		ret = unsync(vport, mac_node->mac_addr);
8737 		if (!ret || ret == -ENOENT) {
8738 			list_del(&mac_node->node);
8739 			kfree(mac_node);
8740 		} else {
8741 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8742 				&vport->state);
8743 			break;
8744 		}
8745 	}
8746 }
8747 
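/* After the hardware pass, merge tmp_add_list back into the vport list.
 * Entries still present in the vport list are updated in place; entries that
 * are no longer there received a TO_DEL request while the lock was dropped,
 * so an ACTIVE entry is moved back as TO_DEL and a TO_ADD entry is simply
 * freed. Returns true only if every entry made it into hardware (no TO_ADD
 * left).
 */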
8748 static bool hclge_sync_from_add_list(struct list_head *add_list,
8749 				     struct list_head *mac_list)
8750 {
8751 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8752 	bool all_added = true;
8753 
8754 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8755 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8756 			all_added = false;
8757 
8758 		/* if the mac address from tmp_add_list is not in the
8759 		 * uc/mc_mac_list, it means a TO_DEL request was received
8760 		 * during the time window of adding the mac address into the
8761 		 * mac table. If the mac_node state is ACTIVE, change it to
8762 		 * TO_DEL so it will be removed next time. Otherwise it must
8763 		 * be TO_ADD, which means this address has not been added to
8764 		 * the mac table yet, so just remove the mac node.
8765 		 */
8766 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8767 		if (new_node) {
8768 			hclge_update_mac_node(new_node, mac_node->state);
8769 			list_del(&mac_node->node);
8770 			kfree(mac_node);
8771 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8772 			mac_node->state = HCLGE_MAC_TO_DEL;
8773 			list_del(&mac_node->node);
8774 			list_add_tail(&mac_node->node, mac_list);
8775 		} else {
8776 			list_del(&mac_node->node);
8777 			kfree(mac_node);
8778 		}
8779 	}
8780 
8781 	return all_added;
8782 }
8783 
8784 static void hclge_sync_from_del_list(struct list_head *del_list,
8785 				     struct list_head *mac_list)
8786 {
8787 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8788 
8789 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8790 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8791 		if (new_node) {
8792 			/* If the mac addr exists in the mac list, a new TO_ADD
8793 			 * request was received during the time window of
8794 			 * configuring the mac address. Since the mac node
8795 			 * state is TO_ADD and the address is still in the
8796 			 * hardware (because the delete failed), we just need
8797 			 * to change the mac node state to ACTIVE.
8798 			 */
8799 			new_node->state = HCLGE_MAC_ACTIVE;
8800 			list_del(&mac_node->node);
8801 			kfree(mac_node);
8802 		} else {
8803 			list_del(&mac_node->node);
8804 			list_add_tail(&mac_node->node, mac_list);
8805 		}
8806 	}
8807 }
8808 
8809 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8810 					enum HCLGE_MAC_ADDR_TYPE mac_type,
8811 					bool is_all_added)
8812 {
8813 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8814 		if (is_all_added)
8815 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8816 		else
8817 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8818 	} else {
8819 		if (is_all_added)
8820 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8821 		else
8822 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8823 	}
8824 }
8825 
8826 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8827 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
8828 {
8829 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8830 	struct list_head tmp_add_list, tmp_del_list;
8831 	struct list_head *list;
8832 	bool all_added;
8833 
8834 	INIT_LIST_HEAD(&tmp_add_list);
8835 	INIT_LIST_HEAD(&tmp_del_list);
8836 
8837 	/* move the mac addresses to the tmp_add_list and tmp_del_list, so
8838 	 * we can add/delete them outside the spin lock
8839 	 */
8840 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8841 		&vport->uc_mac_list : &vport->mc_mac_list;
8842 
8843 	spin_lock_bh(&vport->mac_list_lock);
8844 
8845 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8846 		switch (mac_node->state) {
8847 		case HCLGE_MAC_TO_DEL:
8848 			list_del(&mac_node->node);
8849 			list_add_tail(&mac_node->node, &tmp_del_list);
8850 			break;
8851 		case HCLGE_MAC_TO_ADD:
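			/* the node must stay on the vport list while it is
			 * still TO_ADD, so queue a copy on tmp_add_list and
			 * program the hardware outside the spin lock
			 */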
8852 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8853 			if (!new_node)
8854 				goto stop_traverse;
8855 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8856 			new_node->state = mac_node->state;
8857 			list_add_tail(&new_node->node, &tmp_add_list);
8858 			break;
8859 		default:
8860 			break;
8861 		}
8862 	}
8863 
8864 stop_traverse:
8865 	spin_unlock_bh(&vport->mac_list_lock);
8866 
8867 	/* delete first, in order to get max mac table space for adding */
8868 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8869 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8870 					    hclge_rm_uc_addr_common);
8871 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8872 					  hclge_add_uc_addr_common);
8873 	} else {
8874 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8875 					    hclge_rm_mc_addr_common);
8876 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8877 					  hclge_add_mc_addr_common);
8878 	}
8879 
8880 	/* if adding/deleting some mac addresses failed, move them back to
8881 	 * the mac_list and retry next time.
8882 	 */
8883 	spin_lock_bh(&vport->mac_list_lock);
8884 
8885 	hclge_sync_from_del_list(&tmp_del_list, list);
8886 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8887 
8888 	spin_unlock_bh(&vport->mac_list_lock);
8889 
8890 	hclge_update_overflow_flags(vport, mac_type, all_added);
8891 }
8892 
8893 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8894 {
8895 	struct hclge_dev *hdev = vport->back;
8896 
8897 	if (test_bit(vport->vport_id, hdev->vport_config_block))
8898 		return false;
8899 
8900 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8901 		return true;
8902 
8903 	return false;
8904 }
8905 
8906 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8907 {
8908 	int i;
8909 
8910 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8911 		struct hclge_vport *vport = &hdev->vport[i];
8912 
8913 		if (!hclge_need_sync_mac_table(vport))
8914 			continue;
8915 
8916 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8917 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8918 	}
8919 }
8920 
8921 static void hclge_build_del_list(struct list_head *list,
8922 				 bool is_del_list,
8923 				 struct list_head *tmp_del_list)
8924 {
8925 	struct hclge_mac_node *mac_cfg, *tmp;
8926 
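	/* TO_DEL and ACTIVE entries are queued for hardware removal; TO_ADD
	 * entries have not been written to hardware yet, so they are simply
	 * freed when the whole list is being deleted
	 */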
8927 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8928 		switch (mac_cfg->state) {
8929 		case HCLGE_MAC_TO_DEL:
8930 		case HCLGE_MAC_ACTIVE:
8931 			list_del(&mac_cfg->node);
8932 			list_add_tail(&mac_cfg->node, tmp_del_list);
8933 			break;
8934 		case HCLGE_MAC_TO_ADD:
8935 			if (is_del_list) {
8936 				list_del(&mac_cfg->node);
8937 				kfree(mac_cfg);
8938 			}
8939 			break;
8940 		}
8941 	}
8942 }
8943 
8944 static void hclge_unsync_del_list(struct hclge_vport *vport,
8945 				  int (*unsync)(struct hclge_vport *vport,
8946 						const unsigned char *addr),
8947 				  bool is_del_list,
8948 				  struct list_head *tmp_del_list)
8949 {
8950 	struct hclge_mac_node *mac_cfg, *tmp;
8951 	int ret;
8952 
8953 	list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
8954 		ret = unsync(vport, mac_cfg->mac_addr);
8955 		if (!ret || ret == -ENOENT) {
8956 			/* clear all mac addresses from hardware, but keep them
8957 			 * in the mac list, and restore them after the vf reset
8958 			 * has finished.
8959 			 */
8960 			if (!is_del_list &&
8961 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
8962 				mac_cfg->state = HCLGE_MAC_TO_ADD;
8963 			} else {
8964 				list_del(&mac_cfg->node);
8965 				kfree(mac_cfg);
8966 			}
8967 		} else if (is_del_list) {
8968 			mac_cfg->state = HCLGE_MAC_TO_DEL;
8969 		}
8970 	}
8971 }
8972 
8973 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
8974 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
8975 {
8976 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8977 	struct hclge_dev *hdev = vport->back;
8978 	struct list_head tmp_del_list, *list;
8979 
8980 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8981 		list = &vport->uc_mac_list;
8982 		unsync = hclge_rm_uc_addr_common;
8983 	} else {
8984 		list = &vport->mc_mac_list;
8985 		unsync = hclge_rm_mc_addr_common;
8986 	}
8987 
8988 	INIT_LIST_HEAD(&tmp_del_list);
8989 
8990 	if (!is_del_list)
8991 		set_bit(vport->vport_id, hdev->vport_config_block);
8992 
8993 	spin_lock_bh(&vport->mac_list_lock);
8994 
8995 	hclge_build_del_list(list, is_del_list, &tmp_del_list);
8996 
8997 	spin_unlock_bh(&vport->mac_list_lock);
8998 
8999 	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9000 
9001 	spin_lock_bh(&vport->mac_list_lock);
9002 
9003 	hclge_sync_from_del_list(&tmp_del_list, list);
9004 
9005 	spin_unlock_bh(&vport->mac_list_lock);
9006 }
9007 
9008 /* remove all mac addresses when uninitializing */
9009 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9010 					enum HCLGE_MAC_ADDR_TYPE mac_type)
9011 {
9012 	struct hclge_mac_node *mac_node, *tmp;
9013 	struct hclge_dev *hdev = vport->back;
9014 	struct list_head tmp_del_list, *list;
9015 
9016 	INIT_LIST_HEAD(&tmp_del_list);
9017 
9018 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9019 		&vport->uc_mac_list : &vport->mc_mac_list;
9020 
9021 	spin_lock_bh(&vport->mac_list_lock);
9022 
9023 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9024 		switch (mac_node->state) {
9025 		case HCLGE_MAC_TO_DEL:
9026 		case HCLGE_MAC_ACTIVE:
9027 			list_del(&mac_node->node);
9028 			list_add_tail(&mac_node->node, &tmp_del_list);
9029 			break;
9030 		case HCLGE_MAC_TO_ADD:
9031 			list_del(&mac_node->node);
9032 			kfree(mac_node);
9033 			break;
9034 		}
9035 	}
9036 
9037 	spin_unlock_bh(&vport->mac_list_lock);
9038 
9039 	if (mac_type == HCLGE_MAC_ADDR_UC)
9040 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9041 					    hclge_rm_uc_addr_common);
9042 	else
9043 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9044 					    hclge_rm_mc_addr_common);
9045 
9046 	if (!list_empty(&tmp_del_list))
9047 		dev_warn(&hdev->pdev->dev,
9048 			 "failed to completely uninit %s mac list for vport %u\n",
9049 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9050 			 vport->vport_id);
9051 
9052 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9053 		list_del(&mac_node->node);
9054 		kfree(mac_node);
9055 	}
9056 }
9057 
9058 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9059 {
9060 	struct hclge_vport *vport;
9061 	int i;
9062 
9063 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9064 		vport = &hdev->vport[i];
9065 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9066 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9067 	}
9068 }
9069 
9070 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9071 					      u16 cmdq_resp, u8 resp_code)
9072 {
9073 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
9074 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
9075 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
9076 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
9077 
9078 	int return_status;
9079 
9080 	if (cmdq_resp) {
9081 		dev_err(&hdev->pdev->dev,
9082 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9083 			cmdq_resp);
9084 		return -EIO;
9085 	}
9086 
9087 	switch (resp_code) {
9088 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
9089 	case HCLGE_ETHERTYPE_ALREADY_ADD:
9090 		return_status = 0;
9091 		break;
9092 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9093 		dev_err(&hdev->pdev->dev,
9094 			"add mac ethertype failed for manager table overflow.\n");
9095 		return_status = -EIO;
9096 		break;
9097 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
9098 		dev_err(&hdev->pdev->dev,
9099 			"add mac ethertype failed for key conflict.\n");
9100 		return_status = -EIO;
9101 		break;
9102 	default:
9103 		dev_err(&hdev->pdev->dev,
9104 			"add mac ethertype failed for undefined, code=%u.\n",
9105 			resp_code);
9106 		return_status = -EIO;
9107 	}
9108 
9109 	return return_status;
9110 }
9111 
9112 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9113 				     u8 *mac_addr)
9114 {
9115 	struct hclge_mac_vlan_tbl_entry_cmd req;
9116 	struct hclge_dev *hdev = vport->back;
9117 	struct hclge_desc desc;
9118 	u16 egress_port = 0;
9119 	int i;
9120 
9121 	if (is_zero_ether_addr(mac_addr))
9122 		return false;
9123 
9124 	memset(&req, 0, sizeof(req));
9125 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9126 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9127 	req.egress_port = cpu_to_le16(egress_port);
9128 	hclge_prepare_mac_addr(&req, mac_addr, false);
9129 
9130 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9131 		return true;
9132 
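	/* also check whether the address collides with the MAC the PF has
	 * assigned to any other VF
	 */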
9133 	vf_idx += HCLGE_VF_VPORT_START_NUM;
9134 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9135 		if (i != vf_idx &&
9136 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9137 			return true;
9138 
9139 	return false;
9140 }
9141 
9142 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9143 			    u8 *mac_addr)
9144 {
9145 	struct hclge_vport *vport = hclge_get_vport(handle);
9146 	struct hclge_dev *hdev = vport->back;
9147 
9148 	vport = hclge_get_vf_vport(hdev, vf);
9149 	if (!vport)
9150 		return -EINVAL;
9151 
9152 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9153 		dev_info(&hdev->pdev->dev,
9154 			 "Specified MAC(=%pM) is same as before, no change committed!\n",
9155 			 mac_addr);
9156 		return 0;
9157 	}
9158 
9159 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9160 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9161 			mac_addr);
9162 		return -EEXIST;
9163 	}
9164 
9165 	ether_addr_copy(vport->vf_info.mac, mac_addr);
9166 
9167 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9168 		dev_info(&hdev->pdev->dev,
9169 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9170 			 vf, mac_addr);
9171 		return hclge_inform_reset_assert_to_vf(vport);
9172 	}
9173 
9174 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9175 		 vf, mac_addr);
9176 	return 0;
9177 }
9178 
9179 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9180 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
9181 {
9182 	struct hclge_desc desc;
9183 	u8 resp_code;
9184 	u16 retval;
9185 	int ret;
9186 
9187 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9188 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9189 
9190 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9191 	if (ret) {
9192 		dev_err(&hdev->pdev->dev,
9193 			"add mac ethertype failed for cmd_send, ret =%d.\n",
9194 			ret);
9195 		return ret;
9196 	}
9197 
9198 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9199 	retval = le16_to_cpu(desc.retval);
9200 
9201 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9202 }
9203 
9204 static int init_mgr_tbl(struct hclge_dev *hdev)
9205 {
9206 	int ret;
9207 	int i;
9208 
9209 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9210 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9211 		if (ret) {
9212 			dev_err(&hdev->pdev->dev,
9213 				"add mac ethertype failed, ret =%d.\n",
9214 				ret);
9215 			return ret;
9216 		}
9217 	}
9218 
9219 	return 0;
9220 }
9221 
9222 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9223 {
9224 	struct hclge_vport *vport = hclge_get_vport(handle);
9225 	struct hclge_dev *hdev = vport->back;
9226 
9227 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
9228 }
9229 
9230 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9231 				       const u8 *old_addr, const u8 *new_addr)
9232 {
9233 	struct list_head *list = &vport->uc_mac_list;
9234 	struct hclge_mac_node *old_node, *new_node;
9235 
9236 	new_node = hclge_find_mac_node(list, new_addr);
9237 	if (!new_node) {
9238 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9239 		if (!new_node)
9240 			return -ENOMEM;
9241 
9242 		new_node->state = HCLGE_MAC_TO_ADD;
9243 		ether_addr_copy(new_node->mac_addr, new_addr);
9244 		list_add(&new_node->node, list);
9245 	} else {
9246 		if (new_node->state == HCLGE_MAC_TO_DEL)
9247 			new_node->state = HCLGE_MAC_ACTIVE;
9248 
9249 		/* make sure the new addr is at the list head, so the dev addr
9250 		 * is not left out of the mac table due to the umv space
9251 		 * limitation after a global/imp reset, which clears the mac
9252 		 * table in hardware.
9253 		 */
9254 		list_move(&new_node->node, list);
9255 	}
9256 
9257 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9258 		old_node = hclge_find_mac_node(list, old_addr);
9259 		if (old_node) {
9260 			if (old_node->state == HCLGE_MAC_TO_ADD) {
9261 				list_del(&old_node->node);
9262 				kfree(old_node);
9263 			} else {
9264 				old_node->state = HCLGE_MAC_TO_DEL;
9265 			}
9266 		}
9267 	}
9268 
9269 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9270 
9271 	return 0;
9272 }
9273 
9274 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9275 			      bool is_first)
9276 {
9277 	const unsigned char *new_addr = (const unsigned char *)p;
9278 	struct hclge_vport *vport = hclge_get_vport(handle);
9279 	struct hclge_dev *hdev = vport->back;
9280 	unsigned char *old_addr = NULL;
9281 	int ret;
9282 
9283 	/* mac addr check */
9284 	if (is_zero_ether_addr(new_addr) ||
9285 	    is_broadcast_ether_addr(new_addr) ||
9286 	    is_multicast_ether_addr(new_addr)) {
9287 		dev_err(&hdev->pdev->dev,
9288 			"change uc mac err! invalid mac: %pM.\n",
9289 			 new_addr);
9290 		return -EINVAL;
9291 	}
9292 
9293 	ret = hclge_pause_addr_cfg(hdev, new_addr);
9294 	if (ret) {
9295 		dev_err(&hdev->pdev->dev,
9296 			"failed to configure mac pause address, ret = %d\n",
9297 			ret);
9298 		return ret;
9299 	}
9300 
9301 	if (!is_first)
9302 		old_addr = hdev->hw.mac.mac_addr;
9303 
9304 	spin_lock_bh(&vport->mac_list_lock);
9305 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9306 	if (ret) {
9307 		dev_err(&hdev->pdev->dev,
9308 			"failed to change the mac addr:%pM, ret = %d\n",
9309 			new_addr, ret);
9310 		spin_unlock_bh(&vport->mac_list_lock);
9311 
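		/* roll the pause address back to the old MAC on failure */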
9312 		if (!is_first)
9313 			hclge_pause_addr_cfg(hdev, old_addr);
9314 
9315 		return ret;
9316 	}
9317 	/* we must update the dev addr under the spin lock to prevent it from
9318 	 * being removed by the set_rx_mode path.
9319 	 */
9320 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9321 	spin_unlock_bh(&vport->mac_list_lock);
9322 
9323 	hclge_task_schedule(hdev, 0);
9324 
9325 	return 0;
9326 }
9327 
9328 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9329 {
9330 	struct mii_ioctl_data *data = if_mii(ifr);
9331 
9332 	if (!hnae3_dev_phy_imp_supported(hdev))
9333 		return -EOPNOTSUPP;
9334 
9335 	switch (cmd) {
9336 	case SIOCGMIIPHY:
9337 		data->phy_id = hdev->hw.mac.phy_addr;
9338 		/* this command reads phy id and register at the same time */
9339 		fallthrough;
9340 	case SIOCGMIIREG:
9341 		data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9342 		return 0;
9343 
9344 	case SIOCSMIIREG:
9345 		return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9346 	default:
9347 		return -EOPNOTSUPP;
9348 	}
9349 }
9350 
9351 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9352 			  int cmd)
9353 {
9354 	struct hclge_vport *vport = hclge_get_vport(handle);
9355 	struct hclge_dev *hdev = vport->back;
9356 
9357 	if (!hdev->hw.mac.phydev)
9358 		return hclge_mii_ioctl(hdev, ifr, cmd);
9359 
9360 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9361 }
9362 
9363 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9364 				      u8 fe_type, bool filter_en, u8 vf_id)
9365 {
9366 	struct hclge_vlan_filter_ctrl_cmd *req;
9367 	struct hclge_desc desc;
9368 	int ret;
9369 
9370 	/* read current vlan filter parameter */
9371 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9372 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9373 	req->vlan_type = vlan_type;
9374 	req->vf_id = vf_id;
9375 
9376 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9377 	if (ret) {
9378 		dev_err(&hdev->pdev->dev,
9379 			"failed to get vlan filter config, ret = %d.\n", ret);
9380 		return ret;
9381 	}
9382 
9383 	/* modify and write new config parameter */
9384 	hclge_cmd_reuse_desc(&desc, false);
9385 	req->vlan_fe = filter_en ?
9386 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9387 
9388 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9389 	if (ret)
9390 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9391 			ret);
9392 
9393 	return ret;
9394 }
9395 
9396 #define HCLGE_FILTER_TYPE_VF		0
9397 #define HCLGE_FILTER_TYPE_PORT		1
9398 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
9399 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
9400 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
9401 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
9402 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
9403 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
9404 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
9405 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
9406 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
9407 
9408 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9409 {
9410 	struct hclge_vport *vport = hclge_get_vport(handle);
9411 	struct hclge_dev *hdev = vport->back;
9412 
9413 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9414 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9415 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
9416 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9417 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
9418 	} else {
9419 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9420 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
9421 					   0);
9422 	}
9423 	if (enable)
9424 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
9425 	else
9426 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
9427 }
9428 
9429 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9430 					bool is_kill, u16 vlan,
9431 					struct hclge_desc *desc)
9432 {
9433 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
9434 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
9435 	u8 vf_byte_val;
9436 	u8 vf_byte_off;
9437 	int ret;
9438 
9439 	hclge_cmd_setup_basic_desc(&desc[0],
9440 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9441 	hclge_cmd_setup_basic_desc(&desc[1],
9442 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9443 
9444 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9445 
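	/* the VF bitmap spans two descriptors: the first carries
	 * HCLGE_MAX_VF_BYTES bytes, VFs beyond that go into the second
	 */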
9446 	vf_byte_off = vfid / 8;
9447 	vf_byte_val = 1 << (vfid % 8);
9448 
9449 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9450 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9451 
9452 	req0->vlan_id  = cpu_to_le16(vlan);
9453 	req0->vlan_cfg = is_kill;
9454 
9455 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9456 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9457 	else
9458 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9459 
9460 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
9461 	if (ret) {
9462 		dev_err(&hdev->pdev->dev,
9463 			"Send vf vlan command fail, ret =%d.\n",
9464 			ret);
9465 		return ret;
9466 	}
9467 
9468 	return 0;
9469 }
9470 
9471 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9472 					  bool is_kill, struct hclge_desc *desc)
9473 {
9474 	struct hclge_vlan_filter_vf_cfg_cmd *req;
9475 
9476 	req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9477 
9478 	if (!is_kill) {
9479 #define HCLGE_VF_VLAN_NO_ENTRY	2
9480 		if (!req->resp_code || req->resp_code == 1)
9481 			return 0;
9482 
9483 		if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9484 			set_bit(vfid, hdev->vf_vlan_full);
9485 			dev_warn(&hdev->pdev->dev,
9486 				 "vf vlan table is full, vf vlan filter is disabled\n");
9487 			return 0;
9488 		}
9489 
9490 		dev_err(&hdev->pdev->dev,
9491 			"Add vf vlan filter fail, ret =%u.\n",
9492 			req->resp_code);
9493 	} else {
9494 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
9495 		if (!req->resp_code)
9496 			return 0;
9497 
9498 		/* vf vlan filter is disabled when the vf vlan table is full,
9499 		 * so a new vlan id will not be added into the vf vlan table.
9500 		 * Just return 0 without warning, to avoid massive verbose
9501 		 * logs when unloading.
9502 		 */
9503 		if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9504 			return 0;
9505 
9506 		dev_err(&hdev->pdev->dev,
9507 			"Kill vf vlan filter fail, ret =%u.\n",
9508 			req->resp_code);
9509 	}
9510 
9511 	return -EIO;
9512 }
9513 
9514 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9515 				    bool is_kill, u16 vlan)
9516 {
9517 	struct hclge_vport *vport = &hdev->vport[vfid];
9518 	struct hclge_desc desc[2];
9519 	int ret;
9520 
9521 	/* if the vf vlan table is full, firmware disables the vf vlan filter,
9522 	 * so it is neither possible nor necessary to add a new vlan id. If
9523 	 * spoof check is enabled and the vf vlan table is full, adding a new
9524 	 * vlan must fail, since tx packets with these vlan ids would be dropped.
9525 	 */
9526 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9527 		if (vport->vf_info.spoofchk && vlan) {
9528 			dev_err(&hdev->pdev->dev,
9529 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
9530 			return -EPERM;
9531 		}
9532 		return 0;
9533 	}
9534 
9535 	ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9536 	if (ret)
9537 		return ret;
9538 
9539 	return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9540 }
9541 
9542 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9543 				      u16 vlan_id, bool is_kill)
9544 {
9545 	struct hclge_vlan_filter_pf_cfg_cmd *req;
9546 	struct hclge_desc desc;
9547 	u8 vlan_offset_byte_val;
9548 	u8 vlan_offset_byte;
9549 	u8 vlan_offset_160;
9550 	int ret;
9551 
9552 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9553 
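	/* the vlan id is split into a block offset (HCLGE_VLAN_ID_OFFSET_STEP
	 * ids per block) and a single bit within the block's byte bitmap
	 */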
9554 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9555 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9556 			   HCLGE_VLAN_BYTE_SIZE;
9557 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9558 
9559 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9560 	req->vlan_offset = vlan_offset_160;
9561 	req->vlan_cfg = is_kill;
9562 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9563 
9564 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9565 	if (ret)
9566 		dev_err(&hdev->pdev->dev,
9567 			"port vlan command, send fail, ret =%d.\n", ret);
9568 	return ret;
9569 }
9570 
9571 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9572 				    u16 vport_id, u16 vlan_id,
9573 				    bool is_kill)
9574 {
9575 	u16 vport_idx, vport_num = 0;
9576 	int ret;
9577 
9578 	if (is_kill && !vlan_id)
9579 		return 0;
9580 
9581 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9582 	if (ret) {
9583 		dev_err(&hdev->pdev->dev,
9584 			"Set %u vport vlan filter config fail, ret =%d.\n",
9585 			vport_id, ret);
9586 		return ret;
9587 	}
9588 
9589 	/* vlan 0 may be added twice when 8021q module is enabled */
9590 	if (!is_kill && !vlan_id &&
9591 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
9592 		return 0;
9593 
9594 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9595 		dev_err(&hdev->pdev->dev,
9596 			"Add port vlan failed, vport %u is already in vlan %u\n",
9597 			vport_id, vlan_id);
9598 		return -EINVAL;
9599 	}
9600 
9601 	if (is_kill &&
9602 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9603 		dev_err(&hdev->pdev->dev,
9604 			"Delete port vlan failed, vport %u is not in vlan %u\n",
9605 			vport_id, vlan_id);
9606 		return -EINVAL;
9607 	}
9608 
9609 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
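	/* only touch the port level filter when the first vport joins the
	 * vlan or the last vport leaves it
	 */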
9610 		vport_num++;
9611 
9612 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9613 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9614 						 is_kill);
9615 
9616 	return ret;
9617 }
9618 
9619 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9620 {
9621 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9622 	struct hclge_vport_vtag_tx_cfg_cmd *req;
9623 	struct hclge_dev *hdev = vport->back;
9624 	struct hclge_desc desc;
9625 	u16 bmap_index;
9626 	int status;
9627 
9628 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9629 
9630 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9631 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9632 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9633 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9634 		      vcfg->accept_tag1 ? 1 : 0);
9635 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9636 		      vcfg->accept_untag1 ? 1 : 0);
9637 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9638 		      vcfg->accept_tag2 ? 1 : 0);
9639 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9640 		      vcfg->accept_untag2 ? 1 : 0);
9641 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9642 		      vcfg->insert_tag1_en ? 1 : 0);
9643 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9644 		      vcfg->insert_tag2_en ? 1 : 0);
9645 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9646 		      vcfg->tag_shift_mode_en ? 1 : 0);
9647 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9648 
9649 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9650 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9651 			HCLGE_VF_NUM_PER_BYTE;
9652 	req->vf_bitmap[bmap_index] =
9653 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9654 
9655 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9656 	if (status)
9657 		dev_err(&hdev->pdev->dev,
9658 			"Send port txvlan cfg command fail, ret =%d\n",
9659 			status);
9660 
9661 	return status;
9662 }
9663 
9664 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9665 {
9666 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9667 	struct hclge_vport_vtag_rx_cfg_cmd *req;
9668 	struct hclge_dev *hdev = vport->back;
9669 	struct hclge_desc desc;
9670 	u16 bmap_index;
9671 	int status;
9672 
9673 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9674 
9675 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9676 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9677 		      vcfg->strip_tag1_en ? 1 : 0);
9678 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9679 		      vcfg->strip_tag2_en ? 1 : 0);
9680 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9681 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
9682 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9683 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
9684 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9685 		      vcfg->strip_tag1_discard_en ? 1 : 0);
9686 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9687 		      vcfg->strip_tag2_discard_en ? 1 : 0);
9688 
9689 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9690 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9691 			HCLGE_VF_NUM_PER_BYTE;
9692 	req->vf_bitmap[bmap_index] =
9693 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9694 
9695 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9696 	if (status)
9697 		dev_err(&hdev->pdev->dev,
9698 			"Send port rxvlan cfg command fail, ret =%d\n",
9699 			status);
9700 
9701 	return status;
9702 }
9703 
9704 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9705 				  u16 port_base_vlan_state,
9706 				  u16 vlan_tag)
9707 {
9708 	int ret;
9709 
9710 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9711 		vport->txvlan_cfg.accept_tag1 = true;
9712 		vport->txvlan_cfg.insert_tag1_en = false;
9713 		vport->txvlan_cfg.default_tag1 = 0;
9714 	} else {
9715 		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9716 
9717 		vport->txvlan_cfg.accept_tag1 =
9718 			ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9719 		vport->txvlan_cfg.insert_tag1_en = true;
9720 		vport->txvlan_cfg.default_tag1 = vlan_tag;
9721 	}
9722 
9723 	vport->txvlan_cfg.accept_untag1 = true;
9724 
9725 	/* accept_tag2 and accept_untag2 are not supported on
9726 	 * pdev revision (0x20); newer revisions support them, but
9727 	 * these two fields cannot be configured by the user.
9728 	 */
9729 	vport->txvlan_cfg.accept_tag2 = true;
9730 	vport->txvlan_cfg.accept_untag2 = true;
9731 	vport->txvlan_cfg.insert_tag2_en = false;
9732 	vport->txvlan_cfg.default_tag2 = 0;
9733 	vport->txvlan_cfg.tag_shift_mode_en = true;
9734 
9735 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9736 		vport->rxvlan_cfg.strip_tag1_en = false;
9737 		vport->rxvlan_cfg.strip_tag2_en =
9738 				vport->rxvlan_cfg.rx_vlan_offload_en;
9739 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
9740 	} else {
9741 		vport->rxvlan_cfg.strip_tag1_en =
9742 				vport->rxvlan_cfg.rx_vlan_offload_en;
9743 		vport->rxvlan_cfg.strip_tag2_en = true;
9744 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
9745 	}
9746 
9747 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
9748 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9749 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9750 
9751 	ret = hclge_set_vlan_tx_offload_cfg(vport);
9752 	if (ret)
9753 		return ret;
9754 
9755 	return hclge_set_vlan_rx_offload_cfg(vport);
9756 }
9757 
9758 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9759 {
9760 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9761 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9762 	struct hclge_desc desc;
9763 	int status;
9764 
9765 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9766 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9767 	rx_req->ot_fst_vlan_type =
9768 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9769 	rx_req->ot_sec_vlan_type =
9770 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9771 	rx_req->in_fst_vlan_type =
9772 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9773 	rx_req->in_sec_vlan_type =
9774 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9775 
9776 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9777 	if (status) {
9778 		dev_err(&hdev->pdev->dev,
9779 			"Send rxvlan protocol type command fail, ret =%d\n",
9780 			status);
9781 		return status;
9782 	}
9783 
9784 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9785 
9786 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9787 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9788 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9789 
9790 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9791 	if (status)
9792 		dev_err(&hdev->pdev->dev,
9793 			"Send txvlan protocol type command fail, ret =%d\n",
9794 			status);
9795 
9796 	return status;
9797 }
9798 
9799 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9800 {
9801 #define HCLGE_DEF_VLAN_TYPE		0x8100
9802 
9803 	struct hnae3_handle *handle = &hdev->vport[0].nic;
9804 	struct hclge_vport *vport;
9805 	int ret;
9806 	int i;
9807 
9808 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9809 		/* for revision 0x21, vf vlan filter is per function */
9810 		for (i = 0; i < hdev->num_alloc_vport; i++) {
9811 			vport = &hdev->vport[i];
9812 			ret = hclge_set_vlan_filter_ctrl(hdev,
9813 							 HCLGE_FILTER_TYPE_VF,
9814 							 HCLGE_FILTER_FE_EGRESS,
9815 							 true,
9816 							 vport->vport_id);
9817 			if (ret)
9818 				return ret;
9819 		}
9820 
9821 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9822 						 HCLGE_FILTER_FE_INGRESS, true,
9823 						 0);
9824 		if (ret)
9825 			return ret;
9826 	} else {
9827 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9828 						 HCLGE_FILTER_FE_EGRESS_V1_B,
9829 						 true, 0);
9830 		if (ret)
9831 			return ret;
9832 	}
9833 
9834 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
9835 
9836 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9837 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9838 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9839 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9840 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9841 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9842 
9843 	ret = hclge_set_vlan_protocol_type(hdev);
9844 	if (ret)
9845 		return ret;
9846 
9847 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9848 		u16 vlan_tag;
9849 
9850 		vport = &hdev->vport[i];
9851 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9852 
9853 		ret = hclge_vlan_offload_cfg(vport,
9854 					     vport->port_base_vlan_cfg.state,
9855 					     vlan_tag);
9856 		if (ret)
9857 			return ret;
9858 	}
9859 
9860 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9861 }
9862 
9863 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9864 				       bool writen_to_tbl)
9865 {
9866 	struct hclge_vport_vlan_cfg *vlan;
9867 
9868 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9869 	if (!vlan)
9870 		return;
9871 
9872 	vlan->hd_tbl_status = writen_to_tbl;
9873 	vlan->vlan_id = vlan_id;
9874 
9875 	list_add_tail(&vlan->node, &vport->vlan_list);
9876 }
9877 
9878 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9879 {
9880 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9881 	struct hclge_dev *hdev = vport->back;
9882 	int ret;
9883 
9884 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9885 		if (!vlan->hd_tbl_status) {
9886 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9887 						       vport->vport_id,
9888 						       vlan->vlan_id, false);
9889 			if (ret) {
9890 				dev_err(&hdev->pdev->dev,
9891 					"restore vport vlan list failed, ret=%d\n",
9892 					ret);
9893 				return ret;
9894 			}
9895 		}
9896 		vlan->hd_tbl_status = true;
9897 	}
9898 
9899 	return 0;
9900 }
9901 
9902 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9903 				      bool is_write_tbl)
9904 {
9905 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9906 	struct hclge_dev *hdev = vport->back;
9907 
9908 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9909 		if (vlan->vlan_id == vlan_id) {
9910 			if (is_write_tbl && vlan->hd_tbl_status)
9911 				hclge_set_vlan_filter_hw(hdev,
9912 							 htons(ETH_P_8021Q),
9913 							 vport->vport_id,
9914 							 vlan_id,
9915 							 true);
9916 
9917 			list_del(&vlan->node);
9918 			kfree(vlan);
9919 			break;
9920 		}
9921 	}
9922 }
9923 
9924 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
9925 {
9926 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9927 	struct hclge_dev *hdev = vport->back;
9928 
9929 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9930 		if (vlan->hd_tbl_status)
9931 			hclge_set_vlan_filter_hw(hdev,
9932 						 htons(ETH_P_8021Q),
9933 						 vport->vport_id,
9934 						 vlan->vlan_id,
9935 						 true);
9936 
9937 		vlan->hd_tbl_status = false;
9938 		if (is_del_list) {
9939 			list_del(&vlan->node);
9940 			kfree(vlan);
9941 		}
9942 	}
9943 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
9944 }
9945 
9946 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
9947 {
9948 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9949 	struct hclge_vport *vport;
9950 	int i;
9951 
9952 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9953 		vport = &hdev->vport[i];
9954 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9955 			list_del(&vlan->node);
9956 			kfree(vlan);
9957 		}
9958 	}
9959 }
9960 
9961 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
9962 {
9963 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9964 	struct hclge_dev *hdev = vport->back;
9965 	u16 vlan_proto;
9966 	u16 vlan_id;
9967 	u16 state;
9968 	int ret;
9969 
9970 	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
9971 	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9972 	state = vport->port_base_vlan_cfg.state;
9973 
9974 	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
9975 		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
9976 		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
9977 					 vport->vport_id, vlan_id,
9978 					 false);
9979 		return;
9980 	}
9981 
9982 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9983 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9984 					       vport->vport_id,
9985 					       vlan->vlan_id, false);
9986 		if (ret)
9987 			break;
9988 		vlan->hd_tbl_status = true;
9989 	}
9990 }
9991 
9992 /* For global reset and imp reset, hardware will clear the mac table,
9993  * so we change the mac address state from ACTIVE to TO_ADD so that they
9994  * can be restored by the service task after the reset completes. Further,
9995  * mac addresses with state TO_DEL or DEL_FAIL do not need to be restored
9996  * after reset, so just remove these mac nodes from the mac_list.
9997  */
9998 static void hclge_mac_node_convert_for_reset(struct list_head *list)
9999 {
10000 	struct hclge_mac_node *mac_node, *tmp;
10001 
10002 	list_for_each_entry_safe(mac_node, tmp, list, node) {
10003 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
10004 			mac_node->state = HCLGE_MAC_TO_ADD;
10005 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10006 			list_del(&mac_node->node);
10007 			kfree(mac_node);
10008 		}
10009 	}
10010 }
10011 
10012 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10013 {
10014 	spin_lock_bh(&vport->mac_list_lock);
10015 
10016 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10017 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10018 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10019 
10020 	spin_unlock_bh(&vport->mac_list_lock);
10021 }
10022 
10023 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10024 {
10025 	struct hclge_vport *vport = &hdev->vport[0];
10026 	struct hnae3_handle *handle = &vport->nic;
10027 
10028 	hclge_restore_mac_table_common(vport);
10029 	hclge_restore_vport_vlan_table(vport);
10030 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
10031 	set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10032 	hclge_restore_fd_entries(handle);
10033 }
10034 
10035 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10036 {
10037 	struct hclge_vport *vport = hclge_get_vport(handle);
10038 
10039 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10040 		vport->rxvlan_cfg.strip_tag1_en = false;
10041 		vport->rxvlan_cfg.strip_tag2_en = enable;
10042 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
10043 	} else {
10044 		vport->rxvlan_cfg.strip_tag1_en = enable;
10045 		vport->rxvlan_cfg.strip_tag2_en = true;
10046 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
10047 	}
10048 
10049 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
10050 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10051 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10052 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10053 
10054 	return hclge_set_vlan_rx_offload_cfg(vport);
10055 }
10056 
10057 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10058 					    u16 port_base_vlan_state,
10059 					    struct hclge_vlan_info *new_info,
10060 					    struct hclge_vlan_info *old_info)
10061 {
10062 	struct hclge_dev *hdev = vport->back;
10063 	int ret;
10064 
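	/* when enabling port base vlan, drop the per-vport vlan entries from
	 * hardware and install the port base vlan as the only filter entry;
	 * when disabling, remove the old port base vlan and restore the
	 * per-vport vlan list
	 */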
10065 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10066 		hclge_rm_vport_all_vlan_table(vport, false);
10067 		return hclge_set_vlan_filter_hw(hdev,
10068 						 htons(new_info->vlan_proto),
10069 						 vport->vport_id,
10070 						 new_info->vlan_tag,
10071 						 false);
10072 	}
10073 
10074 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10075 				       vport->vport_id, old_info->vlan_tag,
10076 				       true);
10077 	if (ret)
10078 		return ret;
10079 
10080 	return hclge_add_vport_all_vlan_table(vport);
10081 }
10082 
10083 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10084 				    struct hclge_vlan_info *vlan_info)
10085 {
10086 	struct hnae3_handle *nic = &vport->nic;
10087 	struct hclge_vlan_info *old_vlan_info;
10088 	struct hclge_dev *hdev = vport->back;
10089 	int ret;
10090 
10091 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10092 
10093 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
10094 	if (ret)
10095 		return ret;
10096 
10097 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10098 		/* add new VLAN tag */
10099 		ret = hclge_set_vlan_filter_hw(hdev,
10100 					       htons(vlan_info->vlan_proto),
10101 					       vport->vport_id,
10102 					       vlan_info->vlan_tag,
10103 					       false);
10104 		if (ret)
10105 			return ret;
10106 
10107 		/* remove old VLAN tag */
10108 		ret = hclge_set_vlan_filter_hw(hdev,
10109 					       htons(old_vlan_info->vlan_proto),
10110 					       vport->vport_id,
10111 					       old_vlan_info->vlan_tag,
10112 					       true);
10113 		if (ret)
10114 			return ret;
10115 
10116 		goto update;
10117 	}
10118 
10119 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10120 					       old_vlan_info);
10121 	if (ret)
10122 		return ret;
10123 
10124 	/* update state only when disabling/enabling port based VLAN */
10125 	vport->port_base_vlan_cfg.state = state;
10126 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10127 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10128 	else
10129 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10130 
10131 update:
10132 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
10133 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
10134 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
10135 
10136 	return 0;
10137 }
10138 
10139 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10140 					  enum hnae3_port_base_vlan_state state,
10141 					  u16 vlan)
10142 {
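	/* derive the action from the current port base vlan state and the
	 * requested vlan: vlan 0 keeps it disabled or disables it, a new
	 * non-zero vlan enables or modifies it, and the same vlan again is
	 * a no-op (NOCHANGE)
	 */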
10143 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10144 		if (!vlan)
10145 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10146 		else
10147 			return HNAE3_PORT_BASE_VLAN_ENABLE;
10148 	} else {
10149 		if (!vlan)
10150 			return HNAE3_PORT_BASE_VLAN_DISABLE;
10151 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
10152 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10153 		else
10154 			return HNAE3_PORT_BASE_VLAN_MODIFY;
10155 	}
10156 }
10157 
10158 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10159 				    u16 vlan, u8 qos, __be16 proto)
10160 {
10161 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10162 	struct hclge_vport *vport = hclge_get_vport(handle);
10163 	struct hclge_dev *hdev = vport->back;
10164 	struct hclge_vlan_info vlan_info;
10165 	u16 state;
10166 	int ret;
10167 
10168 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10169 		return -EOPNOTSUPP;
10170 
10171 	vport = hclge_get_vf_vport(hdev, vfid);
10172 	if (!vport)
10173 		return -EINVAL;
10174 
10175 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
10176 	if (vlan > VLAN_N_VID - 1 || qos > 7)
10177 		return -EINVAL;
10178 	if (proto != htons(ETH_P_8021Q))
10179 		return -EPROTONOSUPPORT;
10180 
10181 	state = hclge_get_port_base_vlan_state(vport,
10182 					       vport->port_base_vlan_cfg.state,
10183 					       vlan);
10184 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10185 		return 0;
10186 
10187 	vlan_info.vlan_tag = vlan;
10188 	vlan_info.qos = qos;
10189 	vlan_info.vlan_proto = ntohs(proto);
10190 
10191 	ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10192 	if (ret) {
10193 		dev_err(&hdev->pdev->dev,
10194 			"failed to update port base vlan for vf %d, ret = %d\n",
10195 			vfid, ret);
10196 		return ret;
10197 	}
10198 
10199 	/* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10200 	 * VLAN state.
10201 	 */
10202 	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10203 	    test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10204 		hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10205 						  vport->vport_id, state,
10206 						  vlan, qos,
10207 						  ntohs(proto));
10208 
10209 	return 0;
10210 }
10211 
10212 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10213 {
10214 	struct hclge_vlan_info *vlan_info;
10215 	struct hclge_vport *vport;
10216 	int ret;
10217 	int vf;
10218 
10219 	/* clear port base vlan for all vf */
10220 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10221 		vport = &hdev->vport[vf];
10222 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10223 
10224 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10225 					       vport->vport_id,
10226 					       vlan_info->vlan_tag, true);
10227 		if (ret)
10228 			dev_err(&hdev->pdev->dev,
10229 				"failed to clear vf vlan for vf%d, ret = %d\n",
10230 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10231 	}
10232 }
10233 
10234 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10235 			  u16 vlan_id, bool is_kill)
10236 {
10237 	struct hclge_vport *vport = hclge_get_vport(handle);
10238 	struct hclge_dev *hdev = vport->back;
10239 	bool writen_to_tbl = false;
10240 	int ret = 0;
10241 
10242 	/* When the device is resetting or the reset has failed, firmware is
10243 	 * unable to handle the mailbox. Just record the vlan id, and remove
10244 	 * it after the reset has finished.
10245 	 */
10246 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10247 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10248 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10249 		return -EBUSY;
10250 	}
10251 
10252 	/* when port base vlan is enabled, we use the port base vlan as the
10253 	 * vlan filter entry. In this case, we don't update the vlan filter
10254 	 * table when the user adds or removes a vlan, we just update the
10255 	 * vport vlan list. The vlan ids in the vlan list will be written
10256 	 * into the vlan filter table once port base vlan is disabled.
10257 	 */
10258 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10259 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10260 					       vlan_id, is_kill);
10261 		writen_to_tbl = true;
10262 	}
10263 
10264 	if (!ret) {
10265 		if (is_kill)
10266 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10267 		else
10268 			hclge_add_vport_vlan_table(vport, vlan_id,
10269 						   writen_to_tbl);
10270 	} else if (is_kill) {
10271 		/* when removing the hw vlan filter failed, record the vlan id
10272 		 * and try to remove it from hw later, to stay consistent
10273 		 * with the stack
10274 		 */
10275 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10276 	}
10277 	return ret;
10278 }
10279 
10280 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10281 {
10282 #define HCLGE_MAX_SYNC_COUNT	60
10283 
10284 	int i, ret, sync_cnt = 0;
10285 	u16 vlan_id;
10286 
10287 	/* start from vport 1 for PF is always alive */
10288 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10289 		struct hclge_vport *vport = &hdev->vport[i];
10290 
10291 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10292 					 VLAN_N_VID);
10293 		while (vlan_id != VLAN_N_VID) {
10294 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10295 						       vport->vport_id, vlan_id,
10296 						       true);
10297 			if (ret && ret != -EINVAL)
10298 				return;
10299 
10300 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10301 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10302 
10303 			sync_cnt++;
10304 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10305 				return;
10306 
10307 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10308 						 VLAN_N_VID);
10309 		}
10310 	}
10311 }
10312 
10313 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10314 {
10315 	struct hclge_config_max_frm_size_cmd *req;
10316 	struct hclge_desc desc;
10317 
10318 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10319 
10320 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10321 	req->max_frm_size = cpu_to_le16(new_mps);
10322 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10323 
10324 	return hclge_cmd_send(&hdev->hw, &desc, 1);
10325 }
10326 
10327 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10328 {
10329 	struct hclge_vport *vport = hclge_get_vport(handle);
10330 
10331 	return hclge_set_vport_mtu(vport, new_mtu);
10332 }
10333 
10334 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10335 {
10336 	struct hclge_dev *hdev = vport->back;
10337 	int i, max_frm_size, ret;
10338 
10339 	/* HW supports 2 layers of vlan */
10340 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10341 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10342 	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10343 		return -EINVAL;
10344 
10345 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10346 	mutex_lock(&hdev->vport_lock);
10347 	/* VF's mps must fit within hdev->mps */
10348 	if (vport->vport_id && max_frm_size > hdev->mps) {
10349 		mutex_unlock(&hdev->vport_lock);
10350 		return -EINVAL;
10351 	} else if (vport->vport_id) {
10352 		vport->mps = max_frm_size;
10353 		mutex_unlock(&hdev->vport_lock);
10354 		return 0;
10355 	}
10356 
10357 	/* PF's mps must be greater than VF's mps */
10358 	for (i = 1; i < hdev->num_alloc_vport; i++)
10359 		if (max_frm_size < hdev->vport[i].mps) {
10360 			mutex_unlock(&hdev->vport_lock);
10361 			return -EINVAL;
10362 		}
10363 
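	/* changing the PF's mps affects the rx buffer allocation, so stop
	 * the client, update the max frame size, reallocate buffers and
	 * then bring the client back up
	 */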
10364 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10365 
10366 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
10367 	if (ret) {
10368 		dev_err(&hdev->pdev->dev,
10369 			"Change mtu fail, ret =%d\n", ret);
10370 		goto out;
10371 	}
10372 
10373 	hdev->mps = max_frm_size;
10374 	vport->mps = max_frm_size;
10375 
10376 	ret = hclge_buffer_alloc(hdev);
10377 	if (ret)
10378 		dev_err(&hdev->pdev->dev,
10379 			"Allocate buffer fail, ret =%d\n", ret);
10380 
10381 out:
10382 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10383 	mutex_unlock(&hdev->vport_lock);
10384 	return ret;
10385 }
10386 
10387 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10388 				    bool enable)
10389 {
10390 	struct hclge_reset_tqp_queue_cmd *req;
10391 	struct hclge_desc desc;
10392 	int ret;
10393 
10394 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10395 
10396 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10397 	req->tqp_id = cpu_to_le16(queue_id);
10398 	if (enable)
10399 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10400 
10401 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10402 	if (ret) {
10403 		dev_err(&hdev->pdev->dev,
10404 			"Send tqp reset cmd error, status =%d\n", ret);
10405 		return ret;
10406 	}
10407 
10408 	return 0;
10409 }
10410 
10411 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
10412 {
10413 	struct hclge_reset_tqp_queue_cmd *req;
10414 	struct hclge_desc desc;
10415 	int ret;
10416 
10417 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10418 
10419 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10420 	req->tqp_id = cpu_to_le16(queue_id);
10421 
10422 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10423 	if (ret) {
10424 		dev_err(&hdev->pdev->dev,
10425 			"Get reset status error, status =%d\n", ret);
10426 		return ret;
10427 	}
10428 
10429 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10430 }
10431 
10432 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10433 {
10434 	struct hnae3_queue *queue;
10435 	struct hclge_tqp *tqp;
10436 
10437 	queue = handle->kinfo.tqp[queue_id];
10438 	tqp = container_of(queue, struct hclge_tqp, q);
10439 
10440 	return tqp->index;
10441 }
10442 
10443 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10444 {
10445 	struct hclge_vport *vport = hclge_get_vport(handle);
10446 	struct hclge_dev *hdev = vport->back;
10447 	u16 reset_try_times = 0;
10448 	int reset_status;
10449 	u16 queue_gid;
10450 	int ret;
10451 	u16 i;
10452 
10453 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
10454 		queue_gid = hclge_covert_handle_qid_global(handle, i);
10455 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10456 		if (ret) {
10457 			dev_err(&hdev->pdev->dev,
10458 				"failed to send reset tqp cmd, ret = %d\n",
10459 				ret);
10460 			return ret;
10461 		}
10462 
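		/* poll the queue reset status until hardware reports done
		 * or the retry limit is hit
		 */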
10463 		while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10464 			reset_status = hclge_get_reset_status(hdev, queue_gid);
10465 			if (reset_status)
10466 				break;
10467 
10468 			/* Wait for tqp hw reset */
10469 			usleep_range(1000, 1200);
10470 		}
10471 
10472 		if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10473 			dev_err(&hdev->pdev->dev,
10474 				"wait for tqp hw reset timeout\n");
10475 			return -ETIME;
10476 		}
10477 
10478 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10479 		if (ret) {
10480 			dev_err(&hdev->pdev->dev,
10481 				"failed to deassert soft reset, ret = %d\n",
10482 				ret);
10483 			return ret;
10484 		}
10485 		reset_try_times = 0;
10486 	}
10487 	return 0;
10488 }
10489 
10490 static int hclge_reset_rcb(struct hnae3_handle *handle)
10491 {
10492 #define HCLGE_RESET_RCB_NOT_SUPPORT	0U
10493 #define HCLGE_RESET_RCB_SUCCESS		1U
10494 
10495 	struct hclge_vport *vport = hclge_get_vport(handle);
10496 	struct hclge_dev *hdev = vport->back;
10497 	struct hclge_reset_cmd *req;
10498 	struct hclge_desc desc;
10499 	u8 return_status;
10500 	u16 queue_gid;
10501 	int ret;
10502 
10503 	queue_gid = hclge_covert_handle_qid_global(handle, 0);
10504 
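	/* request an rcb reset covering all of this handle's queues,
	 * starting at the first global queue id
	 */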
10505 	req = (struct hclge_reset_cmd *)desc.data;
10506 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10507 	hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10508 	req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10509 	req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10510 
10511 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10512 	if (ret) {
10513 		dev_err(&hdev->pdev->dev,
10514 			"failed to send rcb reset cmd, ret = %d\n", ret);
10515 		return ret;
10516 	}
10517 
10518 	return_status = req->fun_reset_rcb_return_status;
10519 	if (return_status == HCLGE_RESET_RCB_SUCCESS)
10520 		return 0;
10521 
10522 	if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10523 		dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10524 			return_status);
10525 		return -EIO;
10526 	}
10527 
10528 	/* if the reset rcb cmd is not supported, fall back to sending the
10529 	 * reset tqp cmd to reset each tqp individually
10530 	 */
10531 	return hclge_reset_tqp_cmd(handle);
10532 }
10533 
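/* Reset all queues of the handle: when called for the PF (vport 0) the TQPs
 * are disabled first, then the RCB reset is triggered.
 */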
10534 int hclge_reset_tqp(struct hnae3_handle *handle)
10535 {
10536 	struct hclge_vport *vport = hclge_get_vport(handle);
10537 	struct hclge_dev *hdev = vport->back;
10538 	int ret;
10539 
10540 	/* only need to disable PF's tqp */
10541 	if (!vport->vport_id) {
10542 		ret = hclge_tqp_enable(handle, false);
10543 		if (ret) {
10544 			dev_err(&hdev->pdev->dev,
10545 				"failed to disable tqp, ret = %d\n", ret);
10546 			return ret;
10547 		}
10548 	}
10549 
10550 	return hclge_reset_rcb(handle);
10551 }
10552 
10553 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10554 {
10555 	struct hclge_vport *vport = hclge_get_vport(handle);
10556 	struct hclge_dev *hdev = vport->back;
10557 
10558 	return hdev->fw_version;
10559 }
10560 
10561 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10562 {
10563 	struct phy_device *phydev = hdev->hw.mac.phydev;
10564 
10565 	if (!phydev)
10566 		return;
10567 
10568 	phy_set_asym_pause(phydev, rx_en, tx_en);
10569 }
10570 
10571 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10572 {
10573 	int ret;
10574 
10575 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10576 		return 0;
10577 
10578 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10579 	if (ret)
10580 		dev_err(&hdev->pdev->dev,
10581 			"configure pauseparam error, ret = %d.\n", ret);
10582 
10583 	return ret;
10584 }
10585 
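/* Resolve the pause mode negotiated by the PHY from the local and link
 * partner advertisements and program it into the MAC. Pause is forced off
 * on half-duplex links.
 */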
10586 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10587 {
10588 	struct phy_device *phydev = hdev->hw.mac.phydev;
10589 	u16 remote_advertising = 0;
10590 	u16 local_advertising;
10591 	u32 rx_pause, tx_pause;
10592 	u8 flowctl;
10593 
10594 	if (!phydev->link || !phydev->autoneg)
10595 		return 0;
10596 
10597 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10598 
10599 	if (phydev->pause)
10600 		remote_advertising = LPA_PAUSE_CAP;
10601 
10602 	if (phydev->asym_pause)
10603 		remote_advertising |= LPA_PAUSE_ASYM;
10604 
10605 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10606 					   remote_advertising);
10607 	tx_pause = flowctl & FLOW_CTRL_TX;
10608 	rx_pause = flowctl & FLOW_CTRL_RX;
10609 
10610 	if (phydev->duplex == HCLGE_MAC_HALF) {
10611 		tx_pause = 0;
10612 		rx_pause = 0;
10613 	}
10614 
10615 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10616 }
10617 
10618 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10619 				 u32 *rx_en, u32 *tx_en)
10620 {
10621 	struct hclge_vport *vport = hclge_get_vport(handle);
10622 	struct hclge_dev *hdev = vport->back;
10623 	u8 media_type = hdev->hw.mac.media_type;
10624 
10625 	*auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10626 		    hclge_get_autoneg(handle) : 0;
10627 
10628 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10629 		*rx_en = 0;
10630 		*tx_en = 0;
10631 		return;
10632 	}
10633 
10634 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10635 		*rx_en = 1;
10636 		*tx_en = 0;
10637 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10638 		*tx_en = 1;
10639 		*rx_en = 0;
10640 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10641 		*rx_en = 1;
10642 		*tx_en = 1;
10643 	} else {
10644 		*rx_en = 0;
10645 		*tx_en = 0;
10646 	}
10647 }
10648 
10649 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10650 					 u32 rx_en, u32 tx_en)
10651 {
10652 	if (rx_en && tx_en)
10653 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
10654 	else if (rx_en && !tx_en)
10655 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10656 	else if (!rx_en && tx_en)
10657 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10658 	else
10659 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
10660 
10661 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10662 }
10663 
10664 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10665 				u32 rx_en, u32 tx_en)
10666 {
10667 	struct hclge_vport *vport = hclge_get_vport(handle);
10668 	struct hclge_dev *hdev = vport->back;
10669 	struct phy_device *phydev = hdev->hw.mac.phydev;
10670 	u32 fc_autoneg;
10671 
10672 	if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10673 		fc_autoneg = hclge_get_autoneg(handle);
10674 		if (auto_neg != fc_autoneg) {
10675 			dev_info(&hdev->pdev->dev,
10676 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10677 			return -EOPNOTSUPP;
10678 		}
10679 	}
10680 
10681 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10682 		dev_info(&hdev->pdev->dev,
10683 			 "Priority flow control enabled. Cannot set link flow control.\n");
10684 		return -EOPNOTSUPP;
10685 	}
10686 
10687 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10688 
10689 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10690 
10691 	if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10692 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10693 
10694 	if (phydev)
10695 		return phy_start_aneg(phydev);
10696 
10697 	return -EOPNOTSUPP;
10698 }
10699 
10700 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10701 					  u8 *auto_neg, u32 *speed, u8 *duplex)
10702 {
10703 	struct hclge_vport *vport = hclge_get_vport(handle);
10704 	struct hclge_dev *hdev = vport->back;
10705 
10706 	if (speed)
10707 		*speed = hdev->hw.mac.speed;
10708 	if (duplex)
10709 		*duplex = hdev->hw.mac.duplex;
10710 	if (auto_neg)
10711 		*auto_neg = hdev->hw.mac.autoneg;
10712 }
10713 
10714 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10715 				 u8 *module_type)
10716 {
10717 	struct hclge_vport *vport = hclge_get_vport(handle);
10718 	struct hclge_dev *hdev = vport->back;
10719 
10720 	/* When the nic is down, the service task does not run and the port
10721 	 * information is not updated periodically. Query the port information
10722 	 * here to ensure the returned media type is up to date.
10723 	 */
10724 	hclge_update_port_info(hdev);
10725 
10726 	if (media_type)
10727 		*media_type = hdev->hw.mac.media_type;
10728 
10729 	if (module_type)
10730 		*module_type = hdev->hw.mac.module_type;
10731 }
10732 
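/* Report the MDI/MDI-X configuration and status of the PHY: switch to the
 * MDIX register page, read the control and status registers, then restore
 * the copper page before translating the result into ETH_TP_MDI* values.
 */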
10733 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10734 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
10735 {
10736 	struct hclge_vport *vport = hclge_get_vport(handle);
10737 	struct hclge_dev *hdev = vport->back;
10738 	struct phy_device *phydev = hdev->hw.mac.phydev;
10739 	int mdix_ctrl, mdix, is_resolved;
10740 	unsigned int retval;
10741 
10742 	if (!phydev) {
10743 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10744 		*tp_mdix = ETH_TP_MDI_INVALID;
10745 		return;
10746 	}
10747 
10748 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10749 
10750 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10751 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10752 				    HCLGE_PHY_MDIX_CTRL_S);
10753 
10754 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10755 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10756 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10757 
10758 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10759 
10760 	switch (mdix_ctrl) {
10761 	case 0x0:
10762 		*tp_mdix_ctrl = ETH_TP_MDI;
10763 		break;
10764 	case 0x1:
10765 		*tp_mdix_ctrl = ETH_TP_MDI_X;
10766 		break;
10767 	case 0x3:
10768 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10769 		break;
10770 	default:
10771 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10772 		break;
10773 	}
10774 
10775 	if (!is_resolved)
10776 		*tp_mdix = ETH_TP_MDI_INVALID;
10777 	else if (mdix)
10778 		*tp_mdix = ETH_TP_MDI_X;
10779 	else
10780 		*tp_mdix = ETH_TP_MDI;
10781 }
10782 
10783 static void hclge_info_show(struct hclge_dev *hdev)
10784 {
10785 	struct device *dev = &hdev->pdev->dev;
10786 
10787 	dev_info(dev, "PF info begin:\n");
10788 
10789 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10790 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10791 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10792 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10793 	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10794 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10795 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10796 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10797 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10798 	dev_info(dev, "This is %s PF\n",
10799 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10800 	dev_info(dev, "DCB %s\n",
10801 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10802 	dev_info(dev, "MQPRIO %s\n",
10803 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10804 
10805 	dev_info(dev, "PF info end.\n");
10806 }
10807 
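/* Initialize the NIC client instance of a vport and enable the nic hw error
 * interrupts. If a reset starts while the client is being initialized, the
 * registration is rolled back and -EBUSY is returned.
 */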
10808 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10809 					  struct hclge_vport *vport)
10810 {
10811 	struct hnae3_client *client = vport->nic.client;
10812 	struct hclge_dev *hdev = ae_dev->priv;
10813 	int rst_cnt = hdev->rst_stats.reset_cnt;
10814 	int ret;
10815 
10816 	ret = client->ops->init_instance(&vport->nic);
10817 	if (ret)
10818 		return ret;
10819 
10820 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10821 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10822 	    rst_cnt != hdev->rst_stats.reset_cnt) {
10823 		ret = -EBUSY;
10824 		goto init_nic_err;
10825 	}
10826 
10827 	/* Enable nic hw error interrupts */
10828 	ret = hclge_config_nic_hw_error(hdev, true);
10829 	if (ret) {
10830 		dev_err(&ae_dev->pdev->dev,
10831 			"fail(%d) to enable hw error interrupts\n", ret);
10832 		goto init_nic_err;
10833 	}
10834 
10835 	hnae3_set_client_init_flag(client, ae_dev, 1);
10836 
10837 	if (netif_msg_drv(&hdev->vport->nic))
10838 		hclge_info_show(hdev);
10839 
10840 	return ret;
10841 
10842 init_nic_err:
10843 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10844 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10845 		msleep(HCLGE_WAIT_RESET_DONE);
10846 
10847 	client->ops->uninit_instance(&vport->nic, 0);
10848 
10849 	return ret;
10850 }
10851 
10852 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10853 					   struct hclge_vport *vport)
10854 {
10855 	struct hclge_dev *hdev = ae_dev->priv;
10856 	struct hnae3_client *client;
10857 	int rst_cnt;
10858 	int ret;
10859 
10860 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10861 	    !hdev->nic_client)
10862 		return 0;
10863 
10864 	client = hdev->roce_client;
10865 	ret = hclge_init_roce_base_info(vport);
10866 	if (ret)
10867 		return ret;
10868 
10869 	rst_cnt = hdev->rst_stats.reset_cnt;
10870 	ret = client->ops->init_instance(&vport->roce);
10871 	if (ret)
10872 		return ret;
10873 
10874 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10875 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10876 	    rst_cnt != hdev->rst_stats.reset_cnt) {
10877 		ret = -EBUSY;
10878 		goto init_roce_err;
10879 	}
10880 
10881 	/* Enable roce ras interrupts */
10882 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
10883 	if (ret) {
10884 		dev_err(&ae_dev->pdev->dev,
10885 			"fail(%d) to enable roce ras interrupts\n", ret);
10886 		goto init_roce_err;
10887 	}
10888 
10889 	hnae3_set_client_init_flag(client, ae_dev, 1);
10890 
10891 	return 0;
10892 
10893 init_roce_err:
10894 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10895 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10896 		msleep(HCLGE_WAIT_RESET_DONE);
10897 
10898 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10899 
10900 	return ret;
10901 }
10902 
10903 static int hclge_init_client_instance(struct hnae3_client *client,
10904 				      struct hnae3_ae_dev *ae_dev)
10905 {
10906 	struct hclge_dev *hdev = ae_dev->priv;
10907 	struct hclge_vport *vport = &hdev->vport[0];
10908 	int ret;
10909 
10910 	switch (client->type) {
10911 	case HNAE3_CLIENT_KNIC:
10912 		hdev->nic_client = client;
10913 		vport->nic.client = client;
10914 		ret = hclge_init_nic_client_instance(ae_dev, vport);
10915 		if (ret)
10916 			goto clear_nic;
10917 
10918 		ret = hclge_init_roce_client_instance(ae_dev, vport);
10919 		if (ret)
10920 			goto clear_roce;
10921 
10922 		break;
10923 	case HNAE3_CLIENT_ROCE:
10924 		if (hnae3_dev_roce_supported(hdev)) {
10925 			hdev->roce_client = client;
10926 			vport->roce.client = client;
10927 		}
10928 
10929 		ret = hclge_init_roce_client_instance(ae_dev, vport);
10930 		if (ret)
10931 			goto clear_roce;
10932 
10933 		break;
10934 	default:
10935 		return -EINVAL;
10936 	}
10937 
10938 	return 0;
10939 
10940 clear_nic:
10941 	hdev->nic_client = NULL;
10942 	vport->nic.client = NULL;
10943 	return ret;
10944 clear_roce:
10945 	hdev->roce_client = NULL;
10946 	vport->roce.client = NULL;
10947 	return ret;
10948 }
10949 
10950 static void hclge_uninit_client_instance(struct hnae3_client *client,
10951 					 struct hnae3_ae_dev *ae_dev)
10952 {
10953 	struct hclge_dev *hdev = ae_dev->priv;
10954 	struct hclge_vport *vport = &hdev->vport[0];
10955 
10956 	if (hdev->roce_client) {
10957 		clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10958 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10959 			msleep(HCLGE_WAIT_RESET_DONE);
10960 
10961 		hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10962 		hdev->roce_client = NULL;
10963 		vport->roce.client = NULL;
10964 	}
10965 	if (client->type == HNAE3_CLIENT_ROCE)
10966 		return;
10967 	if (hdev->nic_client && client->ops->uninit_instance) {
10968 		clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10969 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10970 			msleep(HCLGE_WAIT_RESET_DONE);
10971 
10972 		client->ops->uninit_instance(&vport->nic, 0);
10973 		hdev->nic_client = NULL;
10974 		vport->nic.client = NULL;
10975 	}
10976 }
10977 
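/* Map the optional device memory BAR (BAR 4) with write combining; devices
 * that do not expose this BAR are silently skipped.
 */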
10978 static int hclge_dev_mem_map(struct hclge_dev *hdev)
10979 {
10980 #define HCLGE_MEM_BAR		4
10981 
10982 	struct pci_dev *pdev = hdev->pdev;
10983 	struct hclge_hw *hw = &hdev->hw;
10984 
10985 	/* if the device does not have device memory, return directly */
10986 	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
10987 		return 0;
10988 
10989 	hw->mem_base = devm_ioremap_wc(&pdev->dev,
10990 				       pci_resource_start(pdev, HCLGE_MEM_BAR),
10991 				       pci_resource_len(pdev, HCLGE_MEM_BAR));
10992 	if (!hw->mem_base) {
10993 		dev_err(&pdev->dev, "failed to map device memory\n");
10994 		return -EFAULT;
10995 	}
10996 
10997 	return 0;
10998 }
10999 
11000 static int hclge_pci_init(struct hclge_dev *hdev)
11001 {
11002 	struct pci_dev *pdev = hdev->pdev;
11003 	struct hclge_hw *hw;
11004 	int ret;
11005 
11006 	ret = pci_enable_device(pdev);
11007 	if (ret) {
11008 		dev_err(&pdev->dev, "failed to enable PCI device\n");
11009 		return ret;
11010 	}
11011 
11012 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11013 	if (ret) {
11014 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11015 		if (ret) {
11016 			dev_err(&pdev->dev,
11017 				"can't set consistent PCI DMA");
11018 			goto err_disable_device;
11019 		}
11020 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11021 	}
11022 
11023 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11024 	if (ret) {
11025 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11026 		goto err_disable_device;
11027 	}
11028 
11029 	pci_set_master(pdev);
11030 	hw = &hdev->hw;
11031 	hw->io_base = pcim_iomap(pdev, 2, 0);
11032 	if (!hw->io_base) {
11033 		dev_err(&pdev->dev, "Can't map configuration register space\n");
11034 		ret = -ENOMEM;
11035 		goto err_clr_master;
11036 	}
11037 
11038 	ret = hclge_dev_mem_map(hdev);
11039 	if (ret)
11040 		goto err_unmap_io_base;
11041 
11042 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11043 
11044 	return 0;
11045 
11046 err_unmap_io_base:
11047 	pcim_iounmap(pdev, hdev->hw.io_base);
11048 err_clr_master:
11049 	pci_clear_master(pdev);
11050 	pci_release_regions(pdev);
11051 err_disable_device:
11052 	pci_disable_device(pdev);
11053 
11054 	return ret;
11055 }
11056 
11057 static void hclge_pci_uninit(struct hclge_dev *hdev)
11058 {
11059 	struct pci_dev *pdev = hdev->pdev;
11060 
11061 	if (hdev->hw.mem_base)
11062 		devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11063 
11064 	pcim_iounmap(pdev, hdev->hw.io_base);
11065 	pci_free_irq_vectors(pdev);
11066 	pci_clear_master(pdev);
11067 	pci_release_mem_regions(pdev);
11068 	pci_disable_device(pdev);
11069 }
11070 
11071 static void hclge_state_init(struct hclge_dev *hdev)
11072 {
11073 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11074 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11075 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11076 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11077 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11078 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11079 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11080 }
11081 
11082 static void hclge_state_uninit(struct hclge_dev *hdev)
11083 {
11084 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11085 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11086 
11087 	if (hdev->reset_timer.function)
11088 		del_timer_sync(&hdev->reset_timer);
11089 	if (hdev->service_task.work.func)
11090 		cancel_delayed_work_sync(&hdev->service_task);
11091 }
11092 
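/* Prepare for a reset requested from outside the reset service task (e.g.
 * FLR): take the reset semaphore, run the reset preparation (retrying a few
 * times if it fails or another reset is pending), then mask the misc vector
 * and mark the command queue disabled until hclge_reset_done() rebuilds the
 * device.
 */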
11093 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11094 					enum hnae3_reset_type rst_type)
11095 {
11096 #define HCLGE_RESET_RETRY_WAIT_MS	500
11097 #define HCLGE_RESET_RETRY_CNT	5
11098 
11099 	struct hclge_dev *hdev = ae_dev->priv;
11100 	int retry_cnt = 0;
11101 	int ret;
11102 
11103 retry:
11104 	down(&hdev->reset_sem);
11105 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11106 	hdev->reset_type = rst_type;
11107 	ret = hclge_reset_prepare(hdev);
11108 	if (ret || hdev->reset_pending) {
11109 		dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11110 			ret);
11111 		if (hdev->reset_pending ||
11112 		    retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11113 			dev_err(&hdev->pdev->dev,
11114 				"reset_pending:0x%lx, retry_cnt:%d\n",
11115 				hdev->reset_pending, retry_cnt);
11116 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11117 			up(&hdev->reset_sem);
11118 			msleep(HCLGE_RESET_RETRY_WAIT_MS);
11119 			goto retry;
11120 		}
11121 	}
11122 
11123 	/* disable the misc vector until the reset is done */
11124 	hclge_enable_vector(&hdev->misc_vector, false);
11125 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11126 
11127 	if (hdev->reset_type == HNAE3_FLR_RESET)
11128 		hdev->rst_stats.flr_rst_cnt++;
11129 }
11130 
11131 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11132 {
11133 	struct hclge_dev *hdev = ae_dev->priv;
11134 	int ret;
11135 
11136 	hclge_enable_vector(&hdev->misc_vector, true);
11137 
11138 	ret = hclge_reset_rebuild(hdev);
11139 	if (ret)
11140 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11141 
11142 	hdev->reset_type = HNAE3_NONE_RESET;
11143 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11144 	up(&hdev->reset_sem);
11145 }
11146 
11147 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11148 {
11149 	u16 i;
11150 
11151 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11152 		struct hclge_vport *vport = &hdev->vport[i];
11153 		int ret;
11154 
11155 		/* Send cmd to clear VF's FUNC_RST_ING */
11156 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11157 		if (ret)
11158 			dev_warn(&hdev->pdev->dev,
11159 				 "clear vf(%u) rst failed %d!\n",
11160 				 vport->vport_id, ret);
11161 	}
11162 }
11163 
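/* Probe-time initialization of the PF: map the PCI resources, bring up the
 * command queue, query device capabilities, then initialize MSI, TQPs,
 * vports, MAC, VLAN, TM, RSS and the flow director before enabling the misc
 * vector and scheduling the service task.
 */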
11164 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11165 {
11166 	struct pci_dev *pdev = ae_dev->pdev;
11167 	struct hclge_dev *hdev;
11168 	int ret;
11169 
11170 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11171 	if (!hdev)
11172 		return -ENOMEM;
11173 
11174 	hdev->pdev = pdev;
11175 	hdev->ae_dev = ae_dev;
11176 	hdev->reset_type = HNAE3_NONE_RESET;
11177 	hdev->reset_level = HNAE3_FUNC_RESET;
11178 	ae_dev->priv = hdev;
11179 
11180 	/* HW supports two layers of VLAN tags */
11181 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11182 
11183 	mutex_init(&hdev->vport_lock);
11184 	spin_lock_init(&hdev->fd_rule_lock);
11185 	sema_init(&hdev->reset_sem, 1);
11186 
11187 	ret = hclge_pci_init(hdev);
11188 	if (ret)
11189 		goto out;
11190 
11191 	/* Initialize the firmware command queue */
11192 	ret = hclge_cmd_queue_init(hdev);
11193 	if (ret)
11194 		goto err_pci_uninit;
11195 
11196 	/* Initialize the firmware command interface */
11197 	ret = hclge_cmd_init(hdev);
11198 	if (ret)
11199 		goto err_cmd_uninit;
11200 
11201 	ret = hclge_get_cap(hdev);
11202 	if (ret)
11203 		goto err_cmd_uninit;
11204 
11205 	ret = hclge_query_dev_specs(hdev);
11206 	if (ret) {
11207 		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11208 			ret);
11209 		goto err_cmd_uninit;
11210 	}
11211 
11212 	ret = hclge_configure(hdev);
11213 	if (ret) {
11214 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11215 		goto err_cmd_uninit;
11216 	}
11217 
11218 	ret = hclge_init_msi(hdev);
11219 	if (ret) {
11220 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11221 		goto err_cmd_uninit;
11222 	}
11223 
11224 	ret = hclge_misc_irq_init(hdev);
11225 	if (ret)
11226 		goto err_msi_uninit;
11227 
11228 	ret = hclge_alloc_tqps(hdev);
11229 	if (ret) {
11230 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11231 		goto err_msi_irq_uninit;
11232 	}
11233 
11234 	ret = hclge_alloc_vport(hdev);
11235 	if (ret)
11236 		goto err_msi_irq_uninit;
11237 
11238 	ret = hclge_map_tqp(hdev);
11239 	if (ret)
11240 		goto err_msi_irq_uninit;
11241 
11242 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11243 	    !hnae3_dev_phy_imp_supported(hdev)) {
11244 		ret = hclge_mac_mdio_config(hdev);
11245 		if (ret)
11246 			goto err_msi_irq_uninit;
11247 	}
11248 
11249 	ret = hclge_init_umv_space(hdev);
11250 	if (ret)
11251 		goto err_mdiobus_unreg;
11252 
11253 	ret = hclge_mac_init(hdev);
11254 	if (ret) {
11255 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11256 		goto err_mdiobus_unreg;
11257 	}
11258 
11259 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11260 	if (ret) {
11261 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11262 		goto err_mdiobus_unreg;
11263 	}
11264 
11265 	ret = hclge_config_gro(hdev, true);
11266 	if (ret)
11267 		goto err_mdiobus_unreg;
11268 
11269 	ret = hclge_init_vlan_config(hdev);
11270 	if (ret) {
11271 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11272 		goto err_mdiobus_unreg;
11273 	}
11274 
11275 	ret = hclge_tm_schd_init(hdev);
11276 	if (ret) {
11277 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11278 		goto err_mdiobus_unreg;
11279 	}
11280 
11281 	ret = hclge_rss_init_cfg(hdev);
11282 	if (ret) {
11283 		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11284 		goto err_mdiobus_unreg;
11285 	}
11286 
11287 	ret = hclge_rss_init_hw(hdev);
11288 	if (ret) {
11289 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11290 		goto err_mdiobus_unreg;
11291 	}
11292 
11293 	ret = init_mgr_tbl(hdev);
11294 	if (ret) {
11295 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11296 		goto err_mdiobus_unreg;
11297 	}
11298 
11299 	ret = hclge_init_fd_config(hdev);
11300 	if (ret) {
11301 		dev_err(&pdev->dev,
11302 			"fd table init fail, ret=%d\n", ret);
11303 		goto err_mdiobus_unreg;
11304 	}
11305 
11306 	INIT_KFIFO(hdev->mac_tnl_log);
11307 
11308 	hclge_dcb_ops_set(hdev);
11309 
11310 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11311 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11312 
11313 	/* Set up affinity after the service timer setup because add_timer_on
11314 	 * is called from the affinity notify callback.
11315 	 */
11316 	hclge_misc_affinity_setup(hdev);
11317 
11318 	hclge_clear_all_event_cause(hdev);
11319 	hclge_clear_resetting_state(hdev);
11320 
11321 	/* Log and clear the hw errors that have already occurred */
11322 	hclge_handle_all_hns_hw_errors(ae_dev);
11323 
11324 	/* request a delayed reset for error recovery, because an immediate
11325 	 * global reset on one PF would disturb pending initialization on others
11326 	 */
11327 	if (ae_dev->hw_err_reset_req) {
11328 		enum hnae3_reset_type reset_level;
11329 
11330 		reset_level = hclge_get_reset_level(ae_dev,
11331 						    &ae_dev->hw_err_reset_req);
11332 		hclge_set_def_reset_request(ae_dev, reset_level);
11333 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11334 	}
11335 
11336 	/* Enable MISC vector(vector0) */
11337 	hclge_enable_vector(&hdev->misc_vector, true);
11338 
11339 	hclge_state_init(hdev);
11340 	hdev->last_reset_time = jiffies;
11341 
11342 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11343 		 HCLGE_DRIVER_NAME);
11344 
11345 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11346 
11347 	return 0;
11348 
11349 err_mdiobus_unreg:
11350 	if (hdev->hw.mac.phydev)
11351 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
11352 err_msi_irq_uninit:
11353 	hclge_misc_irq_uninit(hdev);
11354 err_msi_uninit:
11355 	pci_free_irq_vectors(pdev);
11356 err_cmd_uninit:
11357 	hclge_cmd_uninit(hdev);
11358 err_pci_uninit:
11359 	pcim_iounmap(pdev, hdev->hw.io_base);
11360 	pci_clear_master(pdev);
11361 	pci_release_regions(pdev);
11362 	pci_disable_device(pdev);
11363 out:
11364 	mutex_destroy(&hdev->vport_lock);
11365 	return ret;
11366 }
11367 
11368 static void hclge_stats_clear(struct hclge_dev *hdev)
11369 {
11370 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11371 }
11372 
11373 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11374 {
11375 	return hclge_config_switch_param(hdev, vf, enable,
11376 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11377 }
11378 
11379 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11380 {
11381 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11382 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
11383 					  enable, vf);
11384 }
11385 
11386 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11387 {
11388 	int ret;
11389 
11390 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11391 	if (ret) {
11392 		dev_err(&hdev->pdev->dev,
11393 			"Set vf %d mac spoof check %s failed, ret=%d\n",
11394 			vf, enable ? "on" : "off", ret);
11395 		return ret;
11396 	}
11397 
11398 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11399 	if (ret)
11400 		dev_err(&hdev->pdev->dev,
11401 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
11402 			vf, enable ? "on" : "off", ret);
11403 
11404 	return ret;
11405 }
11406 
11407 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11408 				 bool enable)
11409 {
11410 	struct hclge_vport *vport = hclge_get_vport(handle);
11411 	struct hclge_dev *hdev = vport->back;
11412 	u32 new_spoofchk = enable ? 1 : 0;
11413 	int ret;
11414 
11415 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11416 		return -EOPNOTSUPP;
11417 
11418 	vport = hclge_get_vf_vport(hdev, vf);
11419 	if (!vport)
11420 		return -EINVAL;
11421 
11422 	if (vport->vf_info.spoofchk == new_spoofchk)
11423 		return 0;
11424 
11425 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11426 		dev_warn(&hdev->pdev->dev,
11427 			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11428 			 vf);
11429 	else if (enable && hclge_is_umv_space_full(vport, true))
11430 		dev_warn(&hdev->pdev->dev,
11431 			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11432 			 vf);
11433 
11434 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11435 	if (ret)
11436 		return ret;
11437 
11438 	vport->vf_info.spoofchk = new_spoofchk;
11439 	return 0;
11440 }
11441 
11442 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11443 {
11444 	struct hclge_vport *vport = hdev->vport;
11445 	int ret;
11446 	int i;
11447 
11448 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11449 		return 0;
11450 
11451 	/* resume the vf spoof check state after reset */
11452 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11453 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11454 					       vport->vf_info.spoofchk);
11455 		if (ret)
11456 			return ret;
11457 
11458 		vport++;
11459 	}
11460 
11461 	return 0;
11462 }
11463 
11464 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11465 {
11466 	struct hclge_vport *vport = hclge_get_vport(handle);
11467 	struct hclge_dev *hdev = vport->back;
11468 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
11469 	u32 new_trusted = enable ? 1 : 0;
11470 	bool en_bc_pmc;
11471 	int ret;
11472 
11473 	vport = hclge_get_vf_vport(hdev, vf);
11474 	if (!vport)
11475 		return -EINVAL;
11476 
11477 	if (vport->vf_info.trusted == new_trusted)
11478 		return 0;
11479 
11480 	/* Disable promisc mode for VF if it is not trusted any more. */
11481 	if (!enable && vport->vf_info.promisc_enable) {
11482 		en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
11483 		ret = hclge_set_vport_promisc_mode(vport, false, false,
11484 						   en_bc_pmc);
11485 		if (ret)
11486 			return ret;
11487 		vport->vf_info.promisc_enable = 0;
11488 		hclge_inform_vf_promisc_info(vport);
11489 	}
11490 
11491 	vport->vf_info.trusted = new_trusted;
11492 
11493 	return 0;
11494 }
11495 
11496 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11497 {
11498 	int ret;
11499 	int vf;
11500 
11501 	/* reset vf rate to default value */
11502 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11503 		struct hclge_vport *vport = &hdev->vport[vf];
11504 
11505 		vport->vf_info.max_tx_rate = 0;
11506 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11507 		if (ret)
11508 			dev_err(&hdev->pdev->dev,
11509 				"vf%d failed to reset to default, ret=%d\n",
11510 				vf - HCLGE_VF_VPORT_START_NUM, ret);
11511 	}
11512 }
11513 
11514 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11515 				     int min_tx_rate, int max_tx_rate)
11516 {
11517 	if (min_tx_rate != 0 ||
11518 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11519 		dev_err(&hdev->pdev->dev,
11520 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11521 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11522 		return -EINVAL;
11523 	}
11524 
11525 	return 0;
11526 }
11527 
11528 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11529 			     int min_tx_rate, int max_tx_rate, bool force)
11530 {
11531 	struct hclge_vport *vport = hclge_get_vport(handle);
11532 	struct hclge_dev *hdev = vport->back;
11533 	int ret;
11534 
11535 	ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11536 	if (ret)
11537 		return ret;
11538 
11539 	vport = hclge_get_vf_vport(hdev, vf);
11540 	if (!vport)
11541 		return -EINVAL;
11542 
11543 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11544 		return 0;
11545 
11546 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11547 	if (ret)
11548 		return ret;
11549 
11550 	vport->vf_info.max_tx_rate = max_tx_rate;
11551 
11552 	return 0;
11553 }
11554 
11555 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11556 {
11557 	struct hnae3_handle *handle = &hdev->vport->nic;
11558 	struct hclge_vport *vport;
11559 	int ret;
11560 	int vf;
11561 
11562 	/* resume the vf max_tx_rate after reset */
11563 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11564 		vport = hclge_get_vf_vport(hdev, vf);
11565 		if (!vport)
11566 			return -EINVAL;
11567 
11568 		/* zero means max rate; after a reset the firmware has already
11569 		 * restored the max rate, so just continue.
11570 		 */
11571 		if (!vport->vf_info.max_tx_rate)
11572 			continue;
11573 
11574 		ret = hclge_set_vf_rate(handle, vf, 0,
11575 					vport->vf_info.max_tx_rate, true);
11576 		if (ret) {
11577 			dev_err(&hdev->pdev->dev,
11578 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
11579 				vf, vport->vf_info.max_tx_rate, ret);
11580 			return ret;
11581 		}
11582 	}
11583 
11584 	return 0;
11585 }
11586 
11587 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11588 {
11589 	struct hclge_vport *vport = hdev->vport;
11590 	int i;
11591 
11592 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11593 		hclge_vport_stop(vport);
11594 		vport++;
11595 	}
11596 }
11597 
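/* Restore the hardware configuration after a reset: reinitialize the command
 * queue, TQP mapping, MAC, VLAN, TM, RSS, manager and flow director tables,
 * re-enable the hardware error interrupts and restore per-VF state such as
 * spoof check and tx rate. For IMP and global resets the VLAN and UMV tables
 * are cleared first.
 */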
11598 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11599 {
11600 	struct hclge_dev *hdev = ae_dev->priv;
11601 	struct pci_dev *pdev = ae_dev->pdev;
11602 	int ret;
11603 
11604 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11605 
11606 	hclge_stats_clear(hdev);
11607 	/* NOTE: a PF reset does not need to clear or restore the PF and VF
11608 	 * table entries, so do not clean the tables in memory here.
11609 	 */
11610 	if (hdev->reset_type == HNAE3_IMP_RESET ||
11611 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
11612 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11613 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11614 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11615 		hclge_reset_umv_space(hdev);
11616 	}
11617 
11618 	ret = hclge_cmd_init(hdev);
11619 	if (ret) {
11620 		dev_err(&pdev->dev, "Cmd queue init failed\n");
11621 		return ret;
11622 	}
11623 
11624 	ret = hclge_map_tqp(hdev);
11625 	if (ret) {
11626 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11627 		return ret;
11628 	}
11629 
11630 	ret = hclge_mac_init(hdev);
11631 	if (ret) {
11632 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11633 		return ret;
11634 	}
11635 
11636 	ret = hclge_tp_port_init(hdev);
11637 	if (ret) {
11638 		dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11639 			ret);
11640 		return ret;
11641 	}
11642 
11643 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11644 	if (ret) {
11645 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11646 		return ret;
11647 	}
11648 
11649 	ret = hclge_config_gro(hdev, true);
11650 	if (ret)
11651 		return ret;
11652 
11653 	ret = hclge_init_vlan_config(hdev);
11654 	if (ret) {
11655 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11656 		return ret;
11657 	}
11658 
11659 	ret = hclge_tm_init_hw(hdev, true);
11660 	if (ret) {
11661 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11662 		return ret;
11663 	}
11664 
11665 	ret = hclge_rss_init_hw(hdev);
11666 	if (ret) {
11667 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11668 		return ret;
11669 	}
11670 
11671 	ret = init_mgr_tbl(hdev);
11672 	if (ret) {
11673 		dev_err(&pdev->dev,
11674 			"failed to reinit manager table, ret = %d\n", ret);
11675 		return ret;
11676 	}
11677 
11678 	ret = hclge_init_fd_config(hdev);
11679 	if (ret) {
11680 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11681 		return ret;
11682 	}
11683 
11684 	/* Log and clear the hw errors that have already occurred */
11685 	hclge_handle_all_hns_hw_errors(ae_dev);
11686 
11687 	/* Re-enable the hw error interrupts because
11688 	 * the interrupts get disabled on global reset.
11689 	 */
11690 	ret = hclge_config_nic_hw_error(hdev, true);
11691 	if (ret) {
11692 		dev_err(&pdev->dev,
11693 			"fail(%d) to re-enable NIC hw error interrupts\n",
11694 			ret);
11695 		return ret;
11696 	}
11697 
11698 	if (hdev->roce_client) {
11699 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
11700 		if (ret) {
11701 			dev_err(&pdev->dev,
11702 				"fail(%d) to re-enable roce ras interrupts\n",
11703 				ret);
11704 			return ret;
11705 		}
11706 	}
11707 
11708 	hclge_reset_vport_state(hdev);
11709 	ret = hclge_reset_vport_spoofchk(hdev);
11710 	if (ret)
11711 		return ret;
11712 
11713 	ret = hclge_resume_vf_rate(hdev);
11714 	if (ret)
11715 		return ret;
11716 
11717 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11718 		 HCLGE_DRIVER_NAME);
11719 
11720 	return 0;
11721 }
11722 
11723 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11724 {
11725 	struct hclge_dev *hdev = ae_dev->priv;
11726 	struct hclge_mac *mac = &hdev->hw.mac;
11727 
11728 	hclge_reset_vf_rate(hdev);
11729 	hclge_clear_vf_vlan(hdev);
11730 	hclge_misc_affinity_teardown(hdev);
11731 	hclge_state_uninit(hdev);
11732 	hclge_uninit_mac_table(hdev);
11733 	hclge_del_all_fd_entries(hdev);
11734 
11735 	if (mac->phydev)
11736 		mdiobus_unregister(mac->mdio_bus);
11737 
11738 	/* Disable MISC vector(vector0) */
11739 	hclge_enable_vector(&hdev->misc_vector, false);
11740 	synchronize_irq(hdev->misc_vector.vector_irq);
11741 
11742 	/* Disable all hw interrupts */
11743 	hclge_config_mac_tnl_int(hdev, false);
11744 	hclge_config_nic_hw_error(hdev, false);
11745 	hclge_config_rocee_ras_interrupt(hdev, false);
11746 
11747 	hclge_cmd_uninit(hdev);
11748 	hclge_misc_irq_uninit(hdev);
11749 	hclge_pci_uninit(hdev);
11750 	mutex_destroy(&hdev->vport_lock);
11751 	hclge_uninit_vport_vlan_table(hdev);
11752 	ae_dev->priv = NULL;
11753 }
11754 
11755 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11756 {
11757 	struct hclge_vport *vport = hclge_get_vport(handle);
11758 	struct hclge_dev *hdev = vport->back;
11759 
11760 	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11761 }
11762 
11763 static void hclge_get_channels(struct hnae3_handle *handle,
11764 			       struct ethtool_channels *ch)
11765 {
11766 	ch->max_combined = hclge_get_max_channels(handle);
11767 	ch->other_count = 1;
11768 	ch->max_other = 1;
11769 	ch->combined_count = handle->kinfo.rss_size;
11770 }
11771 
11772 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11773 					u16 *alloc_tqps, u16 *max_rss_size)
11774 {
11775 	struct hclge_vport *vport = hclge_get_vport(handle);
11776 	struct hclge_dev *hdev = vport->back;
11777 
11778 	*alloc_tqps = vport->alloc_tqps;
11779 	*max_rss_size = hdev->pf_rss_size_max;
11780 }
11781 
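/* Change the number of enabled queue pairs: update the requested RSS size,
 * remap the vport's TQPs, reprogram the RSS TC mode and, unless the user has
 * configured a custom indirection table, rebuild the table for the new size.
 */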
11782 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11783 			      bool rxfh_configured)
11784 {
11785 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
11786 	struct hclge_vport *vport = hclge_get_vport(handle);
11787 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11788 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11789 	struct hclge_dev *hdev = vport->back;
11790 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11791 	u16 cur_rss_size = kinfo->rss_size;
11792 	u16 cur_tqps = kinfo->num_tqps;
11793 	u16 tc_valid[HCLGE_MAX_TC_NUM];
11794 	u16 roundup_size;
11795 	u32 *rss_indir;
11796 	unsigned int i;
11797 	int ret;
11798 
11799 	kinfo->req_rss_size = new_tqps_num;
11800 
11801 	ret = hclge_tm_vport_map_update(hdev);
11802 	if (ret) {
11803 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
11804 		return ret;
11805 	}
11806 
11807 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
11808 	roundup_size = ilog2(roundup_size);
11809 	/* Set the RSS TC mode according to the new RSS size */
11810 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
11811 		tc_valid[i] = 0;
11812 
11813 		if (!(hdev->hw_tc_map & BIT(i)))
11814 			continue;
11815 
11816 		tc_valid[i] = 1;
11817 		tc_size[i] = roundup_size;
11818 		tc_offset[i] = kinfo->rss_size * i;
11819 	}
11820 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
11821 	if (ret)
11822 		return ret;
11823 
11824 	/* RSS indirection table has been configured by user */
11825 	if (rxfh_configured)
11826 		goto out;
11827 
11828 	/* Reinitialize the RSS indirection table according to the new RSS size */
11829 	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
11830 			    GFP_KERNEL);
11831 	if (!rss_indir)
11832 		return -ENOMEM;
11833 
11834 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
11835 		rss_indir[i] = i % kinfo->rss_size;
11836 
11837 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
11838 	if (ret)
11839 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
11840 			ret);
11841 
11842 	kfree(rss_indir);
11843 
11844 out:
11845 	if (!ret)
11846 		dev_info(&hdev->pdev->dev,
11847 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
11848 			 cur_rss_size, kinfo->rss_size,
11849 			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
11850 
11851 	return ret;
11852 }
11853 
11854 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
11855 			      u32 *regs_num_64_bit)
11856 {
11857 	struct hclge_desc desc;
11858 	u32 total_num;
11859 	int ret;
11860 
11861 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
11862 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11863 	if (ret) {
11864 		dev_err(&hdev->pdev->dev,
11865 			"Query register number cmd failed, ret = %d.\n", ret);
11866 		return ret;
11867 	}
11868 
11869 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
11870 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
11871 
11872 	total_num = *regs_num_32_bit + *regs_num_64_bit;
11873 	if (!total_num)
11874 		return -EINVAL;
11875 
11876 	return 0;
11877 }
11878 
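/* Read regs_num 32-bit registers from firmware into the data buffer. The
 * reply spans several descriptors: the first one carries values only in its
 * data area, while the following descriptors are filled entirely with
 * register values.
 */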
11879 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11880 				 void *data)
11881 {
11882 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
11883 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
11884 
11885 	struct hclge_desc *desc;
11886 	u32 *reg_val = data;
11887 	__le32 *desc_data;
11888 	int nodata_num;
11889 	int cmd_num;
11890 	int i, k, n;
11891 	int ret;
11892 
11893 	if (regs_num == 0)
11894 		return 0;
11895 
11896 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
11897 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
11898 			       HCLGE_32_BIT_REG_RTN_DATANUM);
11899 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11900 	if (!desc)
11901 		return -ENOMEM;
11902 
11903 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
11904 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11905 	if (ret) {
11906 		dev_err(&hdev->pdev->dev,
11907 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
11908 		kfree(desc);
11909 		return ret;
11910 	}
11911 
11912 	for (i = 0; i < cmd_num; i++) {
11913 		if (i == 0) {
11914 			desc_data = (__le32 *)(&desc[i].data[0]);
11915 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
11916 		} else {
11917 			desc_data = (__le32 *)(&desc[i]);
11918 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
11919 		}
11920 		for (k = 0; k < n; k++) {
11921 			*reg_val++ = le32_to_cpu(*desc_data++);
11922 
11923 			regs_num--;
11924 			if (!regs_num)
11925 				break;
11926 		}
11927 	}
11928 
11929 	kfree(desc);
11930 	return 0;
11931 }
11932 
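/* Same as hclge_get_32_bit_regs(), but each register is 64 bits wide, so a
 * descriptor carries fewer values.
 */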
11933 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11934 				 void *data)
11935 {
11936 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
11937 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
11938 
11939 	struct hclge_desc *desc;
11940 	u64 *reg_val = data;
11941 	__le64 *desc_data;
11942 	int nodata_len;
11943 	int cmd_num;
11944 	int i, k, n;
11945 	int ret;
11946 
11947 	if (regs_num == 0)
11948 		return 0;
11949 
11950 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
11951 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
11952 			       HCLGE_64_BIT_REG_RTN_DATANUM);
11953 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11954 	if (!desc)
11955 		return -ENOMEM;
11956 
11957 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
11958 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11959 	if (ret) {
11960 		dev_err(&hdev->pdev->dev,
11961 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
11962 		kfree(desc);
11963 		return ret;
11964 	}
11965 
11966 	for (i = 0; i < cmd_num; i++) {
11967 		if (i == 0) {
11968 			desc_data = (__le64 *)(&desc[i].data[0]);
11969 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
11970 		} else {
11971 			desc_data = (__le64 *)(&desc[i]);
11972 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
11973 		}
11974 		for (k = 0; k < n; k++) {
11975 			*reg_val++ = le64_to_cpu(*desc_data++);
11976 
11977 			regs_num--;
11978 			if (!regs_num)
11979 				break;
11980 		}
11981 	}
11982 
11983 	kfree(desc);
11984 	return 0;
11985 }
11986 
11987 #define MAX_SEPARATE_NUM	4
11988 #define SEPARATOR_VALUE		0xFDFCFBFA
11989 #define REG_NUM_PER_LINE	4
11990 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
11991 #define REG_SEPARATOR_LINE	1
11992 #define REG_NUM_REMAIN_MASK	3
11993 
11994 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
11995 {
11996 	int i;
11997 
11998 	/* initialize all command BDs except the last one */
11999 	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12000 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12001 					   true);
12002 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12003 	}
12004 
12005 	/* initialize the last command BD */
12006 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12007 
12008 	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12009 }
12010 
12011 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12012 				    int *bd_num_list,
12013 				    u32 type_num)
12014 {
12015 	u32 entries_per_desc, desc_index, index, offset, i;
12016 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12017 	int ret;
12018 
12019 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
12020 	if (ret) {
12021 		dev_err(&hdev->pdev->dev,
12022 			"Get dfx bd num fail, status is %d.\n", ret);
12023 		return ret;
12024 	}
12025 
12026 	entries_per_desc = ARRAY_SIZE(desc[0].data);
12027 	for (i = 0; i < type_num; i++) {
12028 		offset = hclge_dfx_bd_offset_list[i];
12029 		index = offset % entries_per_desc;
12030 		desc_index = offset / entries_per_desc;
12031 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12032 	}
12033 
12034 	return ret;
12035 }
12036 
12037 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12038 				  struct hclge_desc *desc_src, int bd_num,
12039 				  enum hclge_opcode_type cmd)
12040 {
12041 	struct hclge_desc *desc = desc_src;
12042 	int i, ret;
12043 
12044 	hclge_cmd_setup_basic_desc(desc, cmd, true);
12045 	for (i = 0; i < bd_num - 1; i++) {
12046 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12047 		desc++;
12048 		hclge_cmd_setup_basic_desc(desc, cmd, true);
12049 	}
12050 
12051 	desc = desc_src;
12052 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12053 	if (ret)
12054 		dev_err(&hdev->pdev->dev,
12055 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12056 			cmd, ret);
12057 
12058 	return ret;
12059 }
12060 
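/* Copy the register values returned in the descriptors into the data buffer
 * and pad the output with separator markers up to the dump line boundary.
 * Returns the number of u32 words written, padding included.
 */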
12061 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12062 				    void *data)
12063 {
12064 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12065 	struct hclge_desc *desc = desc_src;
12066 	u32 *reg = data;
12067 
12068 	entries_per_desc = ARRAY_SIZE(desc->data);
12069 	reg_num = entries_per_desc * bd_num;
12070 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12071 	for (i = 0; i < reg_num; i++) {
12072 		index = i % entries_per_desc;
12073 		desc_index = i / entries_per_desc;
12074 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
12075 	}
12076 	for (i = 0; i < separator_num; i++)
12077 		*reg++ = SEPARATOR_VALUE;
12078 
12079 	return reg_num + separator_num;
12080 }
12081 
12082 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12083 {
12084 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12085 	int data_len_per_desc, bd_num, i;
12086 	int *bd_num_list;
12087 	u32 data_len;
12088 	int ret;
12089 
12090 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12091 	if (!bd_num_list)
12092 		return -ENOMEM;
12093 
12094 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12095 	if (ret) {
12096 		dev_err(&hdev->pdev->dev,
12097 			"Get dfx reg bd num fail, status is %d.\n", ret);
12098 		goto out;
12099 	}
12100 
12101 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
12102 	*len = 0;
12103 	for (i = 0; i < dfx_reg_type_num; i++) {
12104 		bd_num = bd_num_list[i];
12105 		data_len = data_len_per_desc * bd_num;
12106 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12107 	}
12108 
12109 out:
12110 	kfree(bd_num_list);
12111 	return ret;
12112 }
12113 
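/* Dump all DFX register blocks: query the BD count of each block, allocate a
 * descriptor buffer large enough for the biggest one, then read every block
 * and append it (with separator padding) to the data buffer.
 */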
12114 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12115 {
12116 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12117 	int bd_num, bd_num_max, buf_len, i;
12118 	struct hclge_desc *desc_src;
12119 	int *bd_num_list;
12120 	u32 *reg = data;
12121 	int ret;
12122 
12123 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12124 	if (!bd_num_list)
12125 		return -ENOMEM;
12126 
12127 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12128 	if (ret) {
12129 		dev_err(&hdev->pdev->dev,
12130 			"Get dfx reg bd num fail, status is %d.\n", ret);
12131 		goto out;
12132 	}
12133 
12134 	bd_num_max = bd_num_list[0];
12135 	for (i = 1; i < dfx_reg_type_num; i++)
12136 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12137 
12138 	buf_len = sizeof(*desc_src) * bd_num_max;
12139 	desc_src = kzalloc(buf_len, GFP_KERNEL);
12140 	if (!desc_src) {
12141 		ret = -ENOMEM;
12142 		goto out;
12143 	}
12144 
12145 	for (i = 0; i < dfx_reg_type_num; i++) {
12146 		bd_num = bd_num_list[i];
12147 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12148 					     hclge_dfx_reg_opcode_list[i]);
12149 		if (ret) {
12150 			dev_err(&hdev->pdev->dev,
12151 				"Get dfx reg fail, status is %d.\n", ret);
12152 			break;
12153 		}
12154 
12155 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12156 	}
12157 
12158 	kfree(desc_src);
12159 out:
12160 	kfree(bd_num_list);
12161 	return ret;
12162 }
12163 
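/* Read the directly accessible PF registers (cmdq, common, per-ring and
 * per-vector interrupt registers) from the PCIe register space, with
 * separator padding after each group. Returns the number of u32 words
 * written.
 */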
12164 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12165 			      struct hnae3_knic_private_info *kinfo)
12166 {
12167 #define HCLGE_RING_REG_OFFSET		0x200
12168 #define HCLGE_RING_INT_REG_OFFSET	0x4
12169 
12170 	int i, j, reg_num, separator_num;
12171 	int data_num_sum;
12172 	u32 *reg = data;
12173 
12174 	/* fetch per-PF register values from the PF PCIe register space */
12175 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12176 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12177 	for (i = 0; i < reg_num; i++)
12178 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12179 	for (i = 0; i < separator_num; i++)
12180 		*reg++ = SEPARATOR_VALUE;
12181 	data_num_sum = reg_num + separator_num;
12182 
12183 	reg_num = ARRAY_SIZE(common_reg_addr_list);
12184 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12185 	for (i = 0; i < reg_num; i++)
12186 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12187 	for (i = 0; i < separator_num; i++)
12188 		*reg++ = SEPARATOR_VALUE;
12189 	data_num_sum += reg_num + separator_num;
12190 
12191 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
12192 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12193 	for (j = 0; j < kinfo->num_tqps; j++) {
12194 		for (i = 0; i < reg_num; i++)
12195 			*reg++ = hclge_read_dev(&hdev->hw,
12196 						ring_reg_addr_list[i] +
12197 						HCLGE_RING_REG_OFFSET * j);
12198 		for (i = 0; i < separator_num; i++)
12199 			*reg++ = SEPARATOR_VALUE;
12200 	}
12201 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12202 
12203 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12204 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12205 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
12206 		for (i = 0; i < reg_num; i++)
12207 			*reg++ = hclge_read_dev(&hdev->hw,
12208 						tqp_intr_reg_addr_list[i] +
12209 						HCLGE_RING_INT_REG_OFFSET * j);
12210 		for (i = 0; i < separator_num; i++)
12211 			*reg++ = SEPARATOR_VALUE;
12212 	}
12213 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12214 
12215 	return data_num_sum;
12216 }
12217 
12218 static int hclge_get_regs_len(struct hnae3_handle *handle)
12219 {
12220 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12221 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12222 	struct hclge_vport *vport = hclge_get_vport(handle);
12223 	struct hclge_dev *hdev = vport->back;
12224 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12225 	int regs_lines_32_bit, regs_lines_64_bit;
12226 	int ret;
12227 
12228 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12229 	if (ret) {
12230 		dev_err(&hdev->pdev->dev,
12231 			"Get register number failed, ret = %d.\n", ret);
12232 		return ret;
12233 	}
12234 
12235 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12236 	if (ret) {
12237 		dev_err(&hdev->pdev->dev,
12238 			"Get dfx reg len failed, ret = %d.\n", ret);
12239 		return ret;
12240 	}
12241 
12242 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12243 		REG_SEPARATOR_LINE;
12244 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12245 		REG_SEPARATOR_LINE;
12246 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12247 		REG_SEPARATOR_LINE;
12248 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12249 		REG_SEPARATOR_LINE;
12250 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12251 		REG_SEPARATOR_LINE;
12252 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12253 		REG_SEPARATOR_LINE;
12254 
12255 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12256 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12257 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12258 }
12259 
12260 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12261 			   void *data)
12262 {
12263 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12264 	struct hclge_vport *vport = hclge_get_vport(handle);
12265 	struct hclge_dev *hdev = vport->back;
12266 	u32 regs_num_32_bit, regs_num_64_bit;
12267 	int i, reg_num, separator_num, ret;
12268 	u32 *reg = data;
12269 
12270 	*version = hdev->fw_version;
12271 
12272 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12273 	if (ret) {
12274 		dev_err(&hdev->pdev->dev,
12275 			"Get register number failed, ret = %d.\n", ret);
12276 		return;
12277 	}
12278 
12279 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12280 
12281 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12282 	if (ret) {
12283 		dev_err(&hdev->pdev->dev,
12284 			"Get 32 bit register failed, ret = %d.\n", ret);
12285 		return;
12286 	}
12287 	reg_num = regs_num_32_bit;
12288 	reg += reg_num;
12289 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12290 	for (i = 0; i < separator_num; i++)
12291 		*reg++ = SEPARATOR_VALUE;
12292 
12293 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12294 	if (ret) {
12295 		dev_err(&hdev->pdev->dev,
12296 			"Get 64 bit register failed, ret = %d.\n", ret);
12297 		return;
12298 	}
12299 	reg_num = regs_num_64_bit * 2;
12300 	reg += reg_num;
12301 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12302 	for (i = 0; i < separator_num; i++)
12303 		*reg++ = SEPARATOR_VALUE;
12304 
12305 	ret = hclge_get_dfx_reg(hdev, reg);
12306 	if (ret)
12307 		dev_err(&hdev->pdev->dev,
12308 			"Get dfx register failed, ret = %d.\n", ret);
12309 }
12310 
12311 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12312 {
12313 	struct hclge_set_led_state_cmd *req;
12314 	struct hclge_desc desc;
12315 	int ret;
12316 
12317 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12318 
12319 	req = (struct hclge_set_led_state_cmd *)desc.data;
12320 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12321 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12322 
12323 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12324 	if (ret)
12325 		dev_err(&hdev->pdev->dev,
12326 			"Send set led state cmd error, ret =%d\n", ret);
12327 
12328 	return ret;
12329 }
12330 
12331 enum hclge_led_status {
12332 	HCLGE_LED_OFF,
12333 	HCLGE_LED_ON,
12334 	HCLGE_LED_NO_CHANGE = 0xFF,
12335 };
12336 
12337 static int hclge_set_led_id(struct hnae3_handle *handle,
12338 			    enum ethtool_phys_id_state status)
12339 {
12340 	struct hclge_vport *vport = hclge_get_vport(handle);
12341 	struct hclge_dev *hdev = vport->back;
12342 
12343 	switch (status) {
12344 	case ETHTOOL_ID_ACTIVE:
12345 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
12346 	case ETHTOOL_ID_INACTIVE:
12347 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12348 	default:
12349 		return -EINVAL;
12350 	}
12351 }
12352 
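/* copy the MAC's supported and advertised link mode bitmaps to the caller */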
12353 static void hclge_get_link_mode(struct hnae3_handle *handle,
12354 				unsigned long *supported,
12355 				unsigned long *advertising)
12356 {
12357 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12358 	struct hclge_vport *vport = hclge_get_vport(handle);
12359 	struct hclge_dev *hdev = vport->back;
12360 	unsigned int idx;
12361 
12362 	for (idx = 0; idx < size; idx++) {
12363 		supported[idx] = hdev->hw.mac.supported[idx];
12364 		advertising[idx] = hdev->hw.mac.advertising[idx];
12365 	}
12366 }
12367 
12368 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12369 {
12370 	struct hclge_vport *vport = hclge_get_vport(handle);
12371 	struct hclge_dev *hdev = vport->back;
12372 
12373 	return hclge_config_gro(hdev, enable);
12374 }
12375 
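/* re-apply the PF promiscuous mode and VLAN filter state when the
 * overflow promiscuous flags change or a previous update is still pending
 */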
12376 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12377 {
12378 	struct hclge_vport *vport = &hdev->vport[0];
12379 	struct hnae3_handle *handle = &vport->nic;
12380 	u8 tmp_flags;
12381 	int ret;
12382 
12383 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12384 		set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
12385 		vport->last_promisc_flags = vport->overflow_promisc_flags;
12386 	}
12387 
12388 	if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
12389 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12390 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12391 					     tmp_flags & HNAE3_MPE);
12392 		if (!ret) {
12393 			clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
12394 			hclge_enable_vlan_filter(handle,
12395 						 tmp_flags & HNAE3_VLAN_FLTR);
12396 		}
12397 	}
12398 }
12399 
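/* query firmware whether an SFP module is currently plugged in */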
12400 static bool hclge_module_existed(struct hclge_dev *hdev)
12401 {
12402 	struct hclge_desc desc;
12403 	u32 existed;
12404 	int ret;
12405 
12406 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12407 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12408 	if (ret) {
12409 		dev_err(&hdev->pdev->dev,
12410 			"failed to get SFP exist state, ret = %d\n", ret);
12411 		return false;
12412 	}
12413 
12414 	existed = le32_to_cpu(desc.data[0]);
12415 
12416 	return existed != 0;
12417 }
12418 
12419 /* One read needs 6 BDs (140 bytes in total).
12420  * Return the number of bytes actually read; 0 means the read failed.
12421  */
12422 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12423 				     u32 len, u8 *data)
12424 {
12425 	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12426 	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12427 	u16 read_len;
12428 	u16 copy_len;
12429 	int ret;
12430 	int i;
12431 
12432 	/* set up all 6 BDs to read the module eeprom info. */
12433 	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12434 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12435 					   true);
12436 
12437 		/* bd0~bd4 need next flag */
12438 		/* bd0~bd4 need the next flag set to chain the descriptors */
12439 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12440 	}
12441 
12442 	/* set up bd0; this bd contains the offset and read length. */
12443 	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12444 	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12445 	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12446 	sfp_info_bd0->read_len = cpu_to_le16(read_len);
12447 
12448 	ret = hclge_cmd_send(&hdev->hw, desc, i);
12449 	if (ret) {
12450 		dev_err(&hdev->pdev->dev,
12451 			"failed to get SFP eeprom info, ret = %d\n", ret);
12452 		return 0;
12453 	}
12454 
12455 	/* copy sfp info from bd0 to out buffer. */
12456 	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12457 	memcpy(data, sfp_info_bd0->data, copy_len);
12458 	read_len = copy_len;
12459 
12460 	/* copy sfp info from bd1~bd5 to out buffer if needed. */
12461 	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12462 		if (read_len >= len)
12463 			return read_len;
12464 
12465 		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12466 		memcpy(data + read_len, desc[i].data, copy_len);
12467 		read_len += copy_len;
12468 	}
12469 
12470 	return read_len;
12471 }
12472 
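/* read 'len' bytes of module EEPROM data starting at 'offset', looping
 * over firmware reads of at most HCLGE_SFP_INFO_MAX_LEN bytes each
 */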
12473 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12474 				   u32 len, u8 *data)
12475 {
12476 	struct hclge_vport *vport = hclge_get_vport(handle);
12477 	struct hclge_dev *hdev = vport->back;
12478 	u32 read_len = 0;
12479 	u16 data_len;
12480 
12481 	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12482 		return -EOPNOTSUPP;
12483 
12484 	if (!hclge_module_existed(hdev))
12485 		return -ENXIO;
12486 
12487 	while (read_len < len) {
12488 		data_len = hclge_get_sfp_eeprom_info(hdev,
12489 						     offset + read_len,
12490 						     len - read_len,
12491 						     data + read_len);
12492 		if (!data_len)
12493 			return -EIO;
12494 
12495 		read_len += data_len;
12496 	}
12497 
12498 	return 0;
12499 }
12500 
12501 static const struct hnae3_ae_ops hclge_ops = {
12502 	.init_ae_dev = hclge_init_ae_dev,
12503 	.uninit_ae_dev = hclge_uninit_ae_dev,
12504 	.reset_prepare = hclge_reset_prepare_general,
12505 	.reset_done = hclge_reset_done,
12506 	.init_client_instance = hclge_init_client_instance,
12507 	.uninit_client_instance = hclge_uninit_client_instance,
12508 	.map_ring_to_vector = hclge_map_ring_to_vector,
12509 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12510 	.get_vector = hclge_get_vector,
12511 	.put_vector = hclge_put_vector,
12512 	.set_promisc_mode = hclge_set_promisc_mode,
12513 	.request_update_promisc_mode = hclge_request_update_promisc_mode,
12514 	.set_loopback = hclge_set_loopback,
12515 	.start = hclge_ae_start,
12516 	.stop = hclge_ae_stop,
12517 	.client_start = hclge_client_start,
12518 	.client_stop = hclge_client_stop,
12519 	.get_status = hclge_get_status,
12520 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
12521 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12522 	.get_media_type = hclge_get_media_type,
12523 	.check_port_speed = hclge_check_port_speed,
12524 	.get_fec = hclge_get_fec,
12525 	.set_fec = hclge_set_fec,
12526 	.get_rss_key_size = hclge_get_rss_key_size,
12527 	.get_rss = hclge_get_rss,
12528 	.set_rss = hclge_set_rss,
12529 	.set_rss_tuple = hclge_set_rss_tuple,
12530 	.get_rss_tuple = hclge_get_rss_tuple,
12531 	.get_tc_size = hclge_get_tc_size,
12532 	.get_mac_addr = hclge_get_mac_addr,
12533 	.set_mac_addr = hclge_set_mac_addr,
12534 	.do_ioctl = hclge_do_ioctl,
12535 	.add_uc_addr = hclge_add_uc_addr,
12536 	.rm_uc_addr = hclge_rm_uc_addr,
12537 	.add_mc_addr = hclge_add_mc_addr,
12538 	.rm_mc_addr = hclge_rm_mc_addr,
12539 	.set_autoneg = hclge_set_autoneg,
12540 	.get_autoneg = hclge_get_autoneg,
12541 	.restart_autoneg = hclge_restart_autoneg,
12542 	.halt_autoneg = hclge_halt_autoneg,
12543 	.get_pauseparam = hclge_get_pauseparam,
12544 	.set_pauseparam = hclge_set_pauseparam,
12545 	.set_mtu = hclge_set_mtu,
12546 	.reset_queue = hclge_reset_tqp,
12547 	.get_stats = hclge_get_stats,
12548 	.get_mac_stats = hclge_get_mac_stat,
12549 	.update_stats = hclge_update_stats,
12550 	.get_strings = hclge_get_strings,
12551 	.get_sset_count = hclge_get_sset_count,
12552 	.get_fw_version = hclge_get_fw_version,
12553 	.get_mdix_mode = hclge_get_mdix_mode,
12554 	.enable_vlan_filter = hclge_enable_vlan_filter,
12555 	.set_vlan_filter = hclge_set_vlan_filter,
12556 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12557 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12558 	.reset_event = hclge_reset_event,
12559 	.get_reset_level = hclge_get_reset_level,
12560 	.set_default_reset_request = hclge_set_def_reset_request,
12561 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12562 	.set_channels = hclge_set_channels,
12563 	.get_channels = hclge_get_channels,
12564 	.get_regs_len = hclge_get_regs_len,
12565 	.get_regs = hclge_get_regs,
12566 	.set_led_id = hclge_set_led_id,
12567 	.get_link_mode = hclge_get_link_mode,
12568 	.add_fd_entry = hclge_add_fd_entry,
12569 	.del_fd_entry = hclge_del_fd_entry,
12570 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12571 	.get_fd_rule_info = hclge_get_fd_rule_info,
12572 	.get_fd_all_rules = hclge_get_all_rules,
12573 	.enable_fd = hclge_enable_fd,
12574 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
12575 	.dbg_run_cmd = hclge_dbg_run_cmd,
12576 	.dbg_read_cmd = hclge_dbg_read_cmd,
12577 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
12578 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
12579 	.ae_dev_resetting = hclge_ae_dev_resetting,
12580 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12581 	.set_gro_en = hclge_gro_en,
12582 	.get_global_queue_id = hclge_covert_handle_qid_global,
12583 	.set_timer_task = hclge_set_timer_task,
12584 	.mac_connect_phy = hclge_mac_connect_phy,
12585 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
12586 	.get_vf_config = hclge_get_vf_config,
12587 	.set_vf_link_state = hclge_set_vf_link_state,
12588 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
12589 	.set_vf_trust = hclge_set_vf_trust,
12590 	.set_vf_rate = hclge_set_vf_rate,
12591 	.set_vf_mac = hclge_set_vf_mac,
12592 	.get_module_eeprom = hclge_get_module_eeprom,
12593 	.get_cmdq_stat = hclge_get_cmdq_stat,
12594 	.add_cls_flower = hclge_add_cls_flower,
12595 	.del_cls_flower = hclge_del_cls_flower,
12596 	.cls_flower_active = hclge_is_cls_flower_active,
12597 	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
12598 	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
12599 };
12600 
12601 static struct hnae3_ae_algo ae_algo = {
12602 	.ops = &hclge_ops,
12603 	.pdev_id_table = ae_algo_pci_tbl,
12604 };
12605 
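/* module entry: create the driver workqueue and register the ae algorithm
 * with the hnae3 framework
 */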
12606 static int hclge_init(void)
12607 {
12608 	pr_info("%s is initializing\n", HCLGE_NAME);
12609 
12610 	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
12611 	if (!hclge_wq) {
12612 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
12613 		return -ENOMEM;
12614 	}
12615 
12616 	hnae3_register_ae_algo(&ae_algo);
12617 
12618 	return 0;
12619 }
12620 
12621 static void hclge_exit(void)
12622 {
12623 	hnae3_unregister_ae_algo(&ae_algo);
12624 	destroy_workqueue(hclge_wq);
12625 }
12626 module_init(hclge_init);
12627 module_exit(hclge_exit);
12628 
12629 MODULE_LICENSE("GPL");
12630 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12631 MODULE_DESCRIPTION("HCLGE Driver");
12632 MODULE_VERSION(HCLGE_MOD_VERSION);
12633