xref: /linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c (revision 65d2dbb300197839eafc4171cfeb57a14c452724)
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26 
27 #define HCLGE_NAME			"hclge"
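/* read a 64-bit statistic located at a byte offset within a stats structure,
 * and compute such an offset from a hclge_mac_stats field name
 */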
28 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
29 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
30 
31 #define HCLGE_BUF_SIZE_UNIT	256U
32 #define HCLGE_BUF_MUL_BY	2
33 #define HCLGE_BUF_DIV_BY	2
34 #define NEED_RESERVE_TC_NUM	2
35 #define BUF_MAX_PERCENT		100
36 #define BUF_RESERVE_PERCENT	90
37 
38 #define HCLGE_RESET_MAX_FAIL_CNT	5
39 #define HCLGE_RESET_SYNC_TIME		100
40 #define HCLGE_PF_RESET_SYNC_TIME	20
41 #define HCLGE_PF_RESET_SYNC_CNT		1500
42 
43 /* Get DFX BD number offset */
44 #define HCLGE_DFX_BIOS_BD_OFFSET        1
45 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
46 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
47 #define HCLGE_DFX_IGU_BD_OFFSET         4
48 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
49 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
50 #define HCLGE_DFX_NCSI_BD_OFFSET        7
51 #define HCLGE_DFX_RTC_BD_OFFSET         8
52 #define HCLGE_DFX_PPP_BD_OFFSET         9
53 #define HCLGE_DFX_RCB_BD_OFFSET         10
54 #define HCLGE_DFX_TQP_BD_OFFSET         11
55 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
56 
57 #define HCLGE_LINK_STATUS_MS	10
58 
59 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
60 static int hclge_init_vlan_config(struct hclge_dev *hdev);
61 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
62 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
63 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
64 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
65 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
66 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
67 						   unsigned long *addr);
68 static int hclge_set_default_loopback(struct hclge_dev *hdev);
69 
70 static void hclge_sync_mac_table(struct hclge_dev *hdev);
71 static void hclge_restore_hw_table(struct hclge_dev *hdev);
72 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
73 static void hclge_sync_fd_table(struct hclge_dev *hdev);
74 
75 static struct hnae3_ae_algo ae_algo;
76 
77 static struct workqueue_struct *hclge_wq;
78 
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
88 	/* required last entry */
89 	{0, }
90 };
91 
92 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
93 
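/* register address lists, presumably used when dumping registers for debug:
 * command queue, miscellaneous/common, per-ring and per-vector interrupt
 * registers
 */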
94 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95 					 HCLGE_CMDQ_TX_ADDR_H_REG,
96 					 HCLGE_CMDQ_TX_DEPTH_REG,
97 					 HCLGE_CMDQ_TX_TAIL_REG,
98 					 HCLGE_CMDQ_TX_HEAD_REG,
99 					 HCLGE_CMDQ_RX_ADDR_L_REG,
100 					 HCLGE_CMDQ_RX_ADDR_H_REG,
101 					 HCLGE_CMDQ_RX_DEPTH_REG,
102 					 HCLGE_CMDQ_RX_TAIL_REG,
103 					 HCLGE_CMDQ_RX_HEAD_REG,
104 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
105 					 HCLGE_CMDQ_INTR_STS_REG,
106 					 HCLGE_CMDQ_INTR_EN_REG,
107 					 HCLGE_CMDQ_INTR_GEN_REG};
108 
109 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110 					   HCLGE_VECTOR0_OTER_EN_REG,
111 					   HCLGE_MISC_RESET_STS_REG,
112 					   HCLGE_MISC_VECTOR_INT_STS,
113 					   HCLGE_GLOBAL_RESET_REG,
114 					   HCLGE_FUN_RST_ING,
115 					   HCLGE_GRO_EN_REG};
116 
117 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118 					 HCLGE_RING_RX_ADDR_H_REG,
119 					 HCLGE_RING_RX_BD_NUM_REG,
120 					 HCLGE_RING_RX_BD_LENGTH_REG,
121 					 HCLGE_RING_RX_MERGE_EN_REG,
122 					 HCLGE_RING_RX_TAIL_REG,
123 					 HCLGE_RING_RX_HEAD_REG,
124 					 HCLGE_RING_RX_FBD_NUM_REG,
125 					 HCLGE_RING_RX_OFFSET_REG,
126 					 HCLGE_RING_RX_FBD_OFFSET_REG,
127 					 HCLGE_RING_RX_STASH_REG,
128 					 HCLGE_RING_RX_BD_ERR_REG,
129 					 HCLGE_RING_TX_ADDR_L_REG,
130 					 HCLGE_RING_TX_ADDR_H_REG,
131 					 HCLGE_RING_TX_BD_NUM_REG,
132 					 HCLGE_RING_TX_PRIORITY_REG,
133 					 HCLGE_RING_TX_TC_REG,
134 					 HCLGE_RING_TX_MERGE_EN_REG,
135 					 HCLGE_RING_TX_TAIL_REG,
136 					 HCLGE_RING_TX_HEAD_REG,
137 					 HCLGE_RING_TX_FBD_NUM_REG,
138 					 HCLGE_RING_TX_OFFSET_REG,
139 					 HCLGE_RING_TX_EBD_NUM_REG,
140 					 HCLGE_RING_TX_EBD_OFFSET_REG,
141 					 HCLGE_RING_TX_BD_ERR_REG,
142 					 HCLGE_RING_EN_REG};
143 
144 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145 					     HCLGE_TQP_INTR_GL0_REG,
146 					     HCLGE_TQP_INTR_GL1_REG,
147 					     HCLGE_TQP_INTR_GL2_REG,
148 					     HCLGE_TQP_INTR_RL_REG};
149 
150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
151 	"App    Loopback test",
152 	"Serdes serial Loopback test",
153 	"Serdes parallel Loopback test",
154 	"Phy    Loopback test"
155 };
156 
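/* MAC statistic names and their offsets within struct hclge_mac_stats */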
157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158 	{"mac_tx_mac_pause_num",
159 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160 	{"mac_rx_mac_pause_num",
161 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162 	{"mac_tx_control_pkt_num",
163 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164 	{"mac_rx_control_pkt_num",
165 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166 	{"mac_tx_pfc_pkt_num",
167 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
168 	{"mac_tx_pfc_pri0_pkt_num",
169 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
170 	{"mac_tx_pfc_pri1_pkt_num",
171 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
172 	{"mac_tx_pfc_pri2_pkt_num",
173 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
174 	{"mac_tx_pfc_pri3_pkt_num",
175 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
176 	{"mac_tx_pfc_pri4_pkt_num",
177 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
178 	{"mac_tx_pfc_pri5_pkt_num",
179 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
180 	{"mac_tx_pfc_pri6_pkt_num",
181 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
182 	{"mac_tx_pfc_pri7_pkt_num",
183 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
184 	{"mac_rx_pfc_pkt_num",
185 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
186 	{"mac_rx_pfc_pri0_pkt_num",
187 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
188 	{"mac_rx_pfc_pri1_pkt_num",
189 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
190 	{"mac_rx_pfc_pri2_pkt_num",
191 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
192 	{"mac_rx_pfc_pri3_pkt_num",
193 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
194 	{"mac_rx_pfc_pri4_pkt_num",
195 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
196 	{"mac_rx_pfc_pri5_pkt_num",
197 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
198 	{"mac_rx_pfc_pri6_pkt_num",
199 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
200 	{"mac_rx_pfc_pri7_pkt_num",
201 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
202 	{"mac_tx_total_pkt_num",
203 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
204 	{"mac_tx_total_oct_num",
205 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
206 	{"mac_tx_good_pkt_num",
207 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
208 	{"mac_tx_bad_pkt_num",
209 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
210 	{"mac_tx_good_oct_num",
211 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
212 	{"mac_tx_bad_oct_num",
213 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
214 	{"mac_tx_uni_pkt_num",
215 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
216 	{"mac_tx_multi_pkt_num",
217 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
218 	{"mac_tx_broad_pkt_num",
219 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
220 	{"mac_tx_undersize_pkt_num",
221 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
222 	{"mac_tx_oversize_pkt_num",
223 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
224 	{"mac_tx_64_oct_pkt_num",
225 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
226 	{"mac_tx_65_127_oct_pkt_num",
227 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
228 	{"mac_tx_128_255_oct_pkt_num",
229 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
230 	{"mac_tx_256_511_oct_pkt_num",
231 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
232 	{"mac_tx_512_1023_oct_pkt_num",
233 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
234 	{"mac_tx_1024_1518_oct_pkt_num",
235 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
236 	{"mac_tx_1519_2047_oct_pkt_num",
237 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
238 	{"mac_tx_2048_4095_oct_pkt_num",
239 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
240 	{"mac_tx_4096_8191_oct_pkt_num",
241 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
242 	{"mac_tx_8192_9216_oct_pkt_num",
243 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
244 	{"mac_tx_9217_12287_oct_pkt_num",
245 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
246 	{"mac_tx_12288_16383_oct_pkt_num",
247 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
248 	{"mac_tx_1519_max_good_pkt_num",
249 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
250 	{"mac_tx_1519_max_bad_pkt_num",
251 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
252 	{"mac_rx_total_pkt_num",
253 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
254 	{"mac_rx_total_oct_num",
255 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
256 	{"mac_rx_good_pkt_num",
257 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
258 	{"mac_rx_bad_pkt_num",
259 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
260 	{"mac_rx_good_oct_num",
261 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
262 	{"mac_rx_bad_oct_num",
263 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
264 	{"mac_rx_uni_pkt_num",
265 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
266 	{"mac_rx_multi_pkt_num",
267 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
268 	{"mac_rx_broad_pkt_num",
269 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
270 	{"mac_rx_undersize_pkt_num",
271 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
272 	{"mac_rx_oversize_pkt_num",
273 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
274 	{"mac_rx_64_oct_pkt_num",
275 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
276 	{"mac_rx_65_127_oct_pkt_num",
277 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
278 	{"mac_rx_128_255_oct_pkt_num",
279 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
280 	{"mac_rx_256_511_oct_pkt_num",
281 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
282 	{"mac_rx_512_1023_oct_pkt_num",
283 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
284 	{"mac_rx_1024_1518_oct_pkt_num",
285 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
286 	{"mac_rx_1519_2047_oct_pkt_num",
287 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
288 	{"mac_rx_2048_4095_oct_pkt_num",
289 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
290 	{"mac_rx_4096_8191_oct_pkt_num",
291 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
292 	{"mac_rx_8192_9216_oct_pkt_num",
293 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
294 	{"mac_rx_9217_12287_oct_pkt_num",
295 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
296 	{"mac_rx_12288_16383_oct_pkt_num",
297 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
298 	{"mac_rx_1519_max_good_pkt_num",
299 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
300 	{"mac_rx_1519_max_bad_pkt_num",
301 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
302 
303 	{"mac_tx_fragment_pkt_num",
304 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
305 	{"mac_tx_undermin_pkt_num",
306 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
307 	{"mac_tx_jabber_pkt_num",
308 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
309 	{"mac_tx_err_all_pkt_num",
310 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
311 	{"mac_tx_from_app_good_pkt_num",
312 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
313 	{"mac_tx_from_app_bad_pkt_num",
314 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
315 	{"mac_rx_fragment_pkt_num",
316 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
317 	{"mac_rx_undermin_pkt_num",
318 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
319 	{"mac_rx_jabber_pkt_num",
320 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
321 	{"mac_rx_fcs_err_pkt_num",
322 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
323 	{"mac_rx_send_app_good_pkt_num",
324 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
325 	{"mac_rx_send_app_bad_pkt_num",
326 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
327 };
328 
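/* static MAC manager table entry matching LLDP frames (ethertype ETH_P_LLDP,
 * nearest-bridge multicast address 01:80:c2:00:00:0e)
 */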
329 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
330 	{
331 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
332 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
333 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
334 		.i_port_bitmap = 0x1,
335 	},
336 };
337 
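/* default 40-byte RSS hash key (the commonly used default Toeplitz key) */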
338 static const u8 hclge_hash_key[] = {
339 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
344 };
345 
346 static const u32 hclge_dfx_bd_offset_list[] = {
347 	HCLGE_DFX_BIOS_BD_OFFSET,
348 	HCLGE_DFX_SSU_0_BD_OFFSET,
349 	HCLGE_DFX_SSU_1_BD_OFFSET,
350 	HCLGE_DFX_IGU_BD_OFFSET,
351 	HCLGE_DFX_RPU_0_BD_OFFSET,
352 	HCLGE_DFX_RPU_1_BD_OFFSET,
353 	HCLGE_DFX_NCSI_BD_OFFSET,
354 	HCLGE_DFX_RTC_BD_OFFSET,
355 	HCLGE_DFX_PPP_BD_OFFSET,
356 	HCLGE_DFX_RCB_BD_OFFSET,
357 	HCLGE_DFX_TQP_BD_OFFSET,
358 	HCLGE_DFX_SSU_2_BD_OFFSET
359 };
360 
361 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
363 	HCLGE_OPC_DFX_SSU_REG_0,
364 	HCLGE_OPC_DFX_SSU_REG_1,
365 	HCLGE_OPC_DFX_IGU_EGU_REG,
366 	HCLGE_OPC_DFX_RPU_REG_0,
367 	HCLGE_OPC_DFX_RPU_REG_1,
368 	HCLGE_OPC_DFX_NCSI_REG,
369 	HCLGE_OPC_DFX_RTC_REG,
370 	HCLGE_OPC_DFX_PPP_REG,
371 	HCLGE_OPC_DFX_RCB_REG,
372 	HCLGE_OPC_DFX_TQP_REG,
373 	HCLGE_OPC_DFX_SSU_REG_2
374 };
375 
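/* flow director meta data fields and their widths in bits */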
376 static const struct key_info meta_data_key_info[] = {
377 	{ PACKET_TYPE_ID, 6},
378 	{ IP_FRAGEMENT, 1},
379 	{ ROCE_TYPE, 1},
380 	{ NEXT_KEY, 5},
381 	{ VLAN_NUMBER, 2},
382 	{ SRC_VPORT, 12},
383 	{ DST_VPORT, 12},
384 	{ TUNNEL_PACKET, 1},
385 };
386 
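/* flow director tuple fields: bit width, key building option, and the byte
 * offsets of the tuple value and its mask within struct hclge_fd_rule
 * (-1 when a field has no corresponding rule member)
 */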
387 static const struct key_info tuple_key_info[] = {
388 	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
389 	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
390 	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
391 	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
392 	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
393 	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
394 	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
395 	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
396 	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
397 	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
398 	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
399 	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
400 	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
401 	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
402 	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
403 	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
404 	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
405 	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
406 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
407 	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
408 	  offsetof(struct hclge_fd_rule, tuples.src_mac),
409 	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
410 	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
411 	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
412 	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
413 	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
414 	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
415 	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
416 	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
417 	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
418 	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
419 	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
420 	{ INNER_IP_TOS, 8, KEY_OPT_U8,
421 	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
422 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
423 	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
424 	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
425 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
426 	{ INNER_SRC_IP, 32, KEY_OPT_IP,
427 	  offsetof(struct hclge_fd_rule, tuples.src_ip),
428 	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
429 	{ INNER_DST_IP, 32, KEY_OPT_IP,
430 	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
431 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
432 	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
433 	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
434 	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
435 	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
436 	  offsetof(struct hclge_fd_rule, tuples.src_port),
437 	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
438 	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
439 	  offsetof(struct hclge_fd_rule, tuples.dst_port),
440 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
441 	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
442 	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
443 	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
444 };
445 
446 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
447 {
448 #define HCLGE_MAC_CMD_NUM 21
449 
450 	u64 *data = (u64 *)(&hdev->mac_stats);
451 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
452 	__le64 *desc_data;
453 	int i, k, n;
454 	int ret;
455 
456 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
457 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
458 	if (ret) {
459 		dev_err(&hdev->pdev->dev,
460 			"Get MAC pkt stats fail, status = %d.\n", ret);
461 
462 		return ret;
463 	}
464 
465 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
466 		/* for special opcode 0032, only the first desc has the header */
467 		if (unlikely(i == 0)) {
468 			desc_data = (__le64 *)(&desc[i].data[0]);
469 			n = HCLGE_RD_FIRST_STATS_NUM;
470 		} else {
471 			desc_data = (__le64 *)(&desc[i]);
472 			n = HCLGE_RD_OTHER_STATS_NUM;
473 		}
474 
475 		for (k = 0; k < n; k++) {
476 			*data += le64_to_cpu(*desc_data);
477 			data++;
478 			desc_data++;
479 		}
480 	}
481 
482 	return 0;
483 }
484 
485 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
486 {
487 	u64 *data = (u64 *)(&hdev->mac_stats);
488 	struct hclge_desc *desc;
489 	__le64 *desc_data;
490 	u16 i, k, n;
491 	int ret;
492 
493 	/* This may be called inside atomic sections,
494 	 * so GFP_ATOMIC is more suitable here
495 	 */
496 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
497 	if (!desc)
498 		return -ENOMEM;
499 
500 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
501 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
502 	if (ret) {
503 		kfree(desc);
504 		return ret;
505 	}
506 
507 	for (i = 0; i < desc_num; i++) {
508 		/* for special opcode 0034, only the first desc has the header */
509 		if (i == 0) {
510 			desc_data = (__le64 *)(&desc[i].data[0]);
511 			n = HCLGE_RD_FIRST_STATS_NUM;
512 		} else {
513 			desc_data = (__le64 *)(&desc[i]);
514 			n = HCLGE_RD_OTHER_STATS_NUM;
515 		}
516 
517 		for (k = 0; k < n; k++) {
518 			*data += le64_to_cpu(*desc_data);
519 			data++;
520 			desc_data++;
521 		}
522 	}
523 
524 	kfree(desc);
525 
526 	return 0;
527 }
528 
529 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
530 {
531 	struct hclge_desc desc;
532 	__le32 *desc_data;
533 	u32 reg_num;
534 	int ret;
535 
536 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
537 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
538 	if (ret)
539 		return ret;
540 
541 	desc_data = (__le32 *)(&desc.data[0]);
542 	reg_num = le32_to_cpu(*desc_data);
543 
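	/* the formula assumes the first descriptor carries three registers and
	 * each further descriptor carries four, i.e.
	 * desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4)
	 */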
544 	*desc_num = 1 + ((reg_num - 3) >> 2) +
545 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
546 
547 	return 0;
548 }
549 
550 static int hclge_mac_update_stats(struct hclge_dev *hdev)
551 {
552 	u32 desc_num;
553 	int ret;
554 
555 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
556 
557 	/* The firmware supports the new statistics acquisition method */
558 	if (!ret)
559 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
560 	else if (ret == -EOPNOTSUPP)
561 		ret = hclge_mac_update_stats_defective(hdev);
562 	else
563 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
564 
565 	return ret;
566 }
567 
568 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
569 {
570 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
571 	struct hclge_vport *vport = hclge_get_vport(handle);
572 	struct hclge_dev *hdev = vport->back;
573 	struct hnae3_queue *queue;
574 	struct hclge_desc desc[1];
575 	struct hclge_tqp *tqp;
576 	int ret, i;
577 
578 	for (i = 0; i < kinfo->num_tqps; i++) {
579 		queue = handle->kinfo.tqp[i];
580 		tqp = container_of(queue, struct hclge_tqp, q);
581 		/* command : HCLGE_OPC_QUERY_RX_STATS */
582 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
583 					   true);
584 
585 		desc[0].data[0] = cpu_to_le32(tqp->index);
586 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
587 		if (ret) {
588 			dev_err(&hdev->pdev->dev,
589 				"Query tqp stat fail, status = %d, queue = %d\n",
590 				ret, i);
591 			return ret;
592 		}
593 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
594 			le32_to_cpu(desc[0].data[1]);
595 	}
596 
597 	for (i = 0; i < kinfo->num_tqps; i++) {
598 		queue = handle->kinfo.tqp[i];
599 		tqp = container_of(queue, struct hclge_tqp, q);
600 		/* command : HCLGE_OPC_QUERY_TX_STATS */
601 		hclge_cmd_setup_basic_desc(&desc[0],
602 					   HCLGE_OPC_QUERY_TX_STATS,
603 					   true);
604 
605 		desc[0].data[0] = cpu_to_le32(tqp->index);
606 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
607 		if (ret) {
608 			dev_err(&hdev->pdev->dev,
609 				"Query tqp stat fail, status = %d, queue = %d\n",
610 				ret, i);
611 			return ret;
612 		}
613 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
614 			le32_to_cpu(desc[0].data[1]);
615 	}
616 
617 	return 0;
618 }
619 
620 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
621 {
622 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
623 	struct hclge_tqp *tqp;
624 	u64 *buff = data;
625 	int i;
626 
627 	for (i = 0; i < kinfo->num_tqps; i++) {
628 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
629 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
630 	}
631 
632 	for (i = 0; i < kinfo->num_tqps; i++) {
633 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
634 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
635 	}
636 
637 	return buff;
638 }
639 
640 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
641 {
642 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
643 
644 	/* each tqp has one TX queue and one RX queue */
645 	return kinfo->num_tqps * (2);
646 }
647 
648 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
649 {
650 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
651 	u8 *buff = data;
652 	int i;
653 
654 	for (i = 0; i < kinfo->num_tqps; i++) {
655 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
656 			struct hclge_tqp, q);
657 		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
658 			 tqp->index);
659 		buff = buff + ETH_GSTRING_LEN;
660 	}
661 
662 	for (i = 0; i < kinfo->num_tqps; i++) {
663 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
664 			struct hclge_tqp, q);
665 		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
666 			 tqp->index);
667 		buff = buff + ETH_GSTRING_LEN;
668 	}
669 
670 	return buff;
671 }
672 
673 static u64 *hclge_comm_get_stats(const void *comm_stats,
674 				 const struct hclge_comm_stats_str strs[],
675 				 int size, u64 *data)
676 {
677 	u64 *buf = data;
678 	u32 i;
679 
680 	for (i = 0; i < size; i++)
681 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
682 
683 	return buf + size;
684 }
685 
686 static u8 *hclge_comm_get_strings(u32 stringset,
687 				  const struct hclge_comm_stats_str strs[],
688 				  int size, u8 *data)
689 {
690 	char *buff = (char *)data;
691 	u32 i;
692 
693 	if (stringset != ETH_SS_STATS)
694 		return buff;
695 
696 	for (i = 0; i < size; i++) {
697 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
698 		buff = buff + ETH_GSTRING_LEN;
699 	}
700 
701 	return (u8 *)buff;
702 }
703 
704 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
705 {
706 	struct hnae3_handle *handle;
707 	int status;
708 
709 	handle = &hdev->vport[0].nic;
710 	if (handle->client) {
711 		status = hclge_tqps_update_stats(handle);
712 		if (status) {
713 			dev_err(&hdev->pdev->dev,
714 				"Update TQPS stats fail, status = %d.\n",
715 				status);
716 		}
717 	}
718 
719 	status = hclge_mac_update_stats(hdev);
720 	if (status)
721 		dev_err(&hdev->pdev->dev,
722 			"Update MAC stats fail, status = %d.\n", status);
723 }
724 
725 static void hclge_update_stats(struct hnae3_handle *handle,
726 			       struct net_device_stats *net_stats)
727 {
728 	struct hclge_vport *vport = hclge_get_vport(handle);
729 	struct hclge_dev *hdev = vport->back;
730 	int status;
731 
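	/* serialize statistics updates: skip if another update is in progress */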
732 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
733 		return;
734 
735 	status = hclge_mac_update_stats(hdev);
736 	if (status)
737 		dev_err(&hdev->pdev->dev,
738 			"Update MAC stats fail, status = %d.\n",
739 			status);
740 
741 	status = hclge_tqps_update_stats(handle);
742 	if (status)
743 		dev_err(&hdev->pdev->dev,
744 			"Update TQPS stats fail, status = %d.\n",
745 			status);
746 
747 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
748 }
749 
750 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
751 {
752 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
753 		HNAE3_SUPPORT_PHY_LOOPBACK |\
754 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
755 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
756 
757 	struct hclge_vport *vport = hclge_get_vport(handle);
758 	struct hclge_dev *hdev = vport->back;
759 	int count = 0;
760 
761 	/* Loopback test support rules:
762 	 * mac: only GE mode is supported
763 	 * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
764 	 * phy: only supported when a phy device exists on the board
765 	 */
766 	if (stringset == ETH_SS_TEST) {
767 		/* clear loopback bit flags at first */
768 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
769 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
770 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
771 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
772 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
773 			count += 1;
774 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
775 		}
776 
777 		count += 2;
778 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
779 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
780 
781 		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
782 		     hdev->hw.mac.phydev->drv->set_loopback) ||
783 		    hnae3_dev_phy_imp_supported(hdev)) {
784 			count += 1;
785 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
786 		}
787 
788 	} else if (stringset == ETH_SS_STATS) {
789 		count = ARRAY_SIZE(g_mac_stats_string) +
790 			hclge_tqps_get_sset_count(handle, stringset);
791 	}
792 
793 	return count;
794 }
795 
796 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
797 			      u8 *data)
798 {
799 	u8 *p = (u8 *)data;
800 	int size;
801 
802 	if (stringset == ETH_SS_STATS) {
803 		size = ARRAY_SIZE(g_mac_stats_string);
804 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
805 					   size, p);
806 		p = hclge_tqps_get_strings(handle, p);
807 	} else if (stringset == ETH_SS_TEST) {
808 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
809 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
810 			       ETH_GSTRING_LEN);
811 			p += ETH_GSTRING_LEN;
812 		}
813 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
814 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
815 			       ETH_GSTRING_LEN);
816 			p += ETH_GSTRING_LEN;
817 		}
818 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
819 			memcpy(p,
820 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
821 			       ETH_GSTRING_LEN);
822 			p += ETH_GSTRING_LEN;
823 		}
824 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
825 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
826 			       ETH_GSTRING_LEN);
827 			p += ETH_GSTRING_LEN;
828 		}
829 	}
830 }
831 
832 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
833 {
834 	struct hclge_vport *vport = hclge_get_vport(handle);
835 	struct hclge_dev *hdev = vport->back;
836 	u64 *p;
837 
838 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
839 				 ARRAY_SIZE(g_mac_stats_string), data);
840 	p = hclge_tqps_get_stats(handle, p);
841 }
842 
843 static void hclge_get_mac_stat(struct hnae3_handle *handle,
844 			       struct hns3_mac_stats *mac_stats)
845 {
846 	struct hclge_vport *vport = hclge_get_vport(handle);
847 	struct hclge_dev *hdev = vport->back;
848 
849 	hclge_update_stats(handle, NULL);
850 
851 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
852 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
853 }
854 
855 static int hclge_parse_func_status(struct hclge_dev *hdev,
856 				   struct hclge_func_status_cmd *status)
857 {
858 #define HCLGE_MAC_ID_MASK	0xF
859 
860 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
861 		return -EINVAL;
862 
863 	/* Set the pf to main pf */
864 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
865 		hdev->flag |= HCLGE_FLAG_MAIN;
866 	else
867 		hdev->flag &= ~HCLGE_FLAG_MAIN;
868 
869 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
870 	return 0;
871 }
872 
873 static int hclge_query_function_status(struct hclge_dev *hdev)
874 {
875 #define HCLGE_QUERY_MAX_CNT	5
876 
877 	struct hclge_func_status_cmd *req;
878 	struct hclge_desc desc;
879 	int timeout = 0;
880 	int ret;
881 
882 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
883 	req = (struct hclge_func_status_cmd *)desc.data;
884 
885 	do {
886 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
887 		if (ret) {
888 			dev_err(&hdev->pdev->dev,
889 				"query function status failed %d.\n", ret);
890 			return ret;
891 		}
892 
893 		/* Check pf reset is done */
894 		if (req->pf_state)
895 			break;
896 		usleep_range(1000, 2000);
897 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
898 
899 	return hclge_parse_func_status(hdev, req);
900 }
901 
902 static int hclge_query_pf_resource(struct hclge_dev *hdev)
903 {
904 	struct hclge_pf_res_cmd *req;
905 	struct hclge_desc desc;
906 	int ret;
907 
908 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
909 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
910 	if (ret) {
911 		dev_err(&hdev->pdev->dev,
912 			"query pf resource failed %d.\n", ret);
913 		return ret;
914 	}
915 
916 	req = (struct hclge_pf_res_cmd *)desc.data;
917 	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
918 			 le16_to_cpu(req->ext_tqp_num);
919 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
920 
921 	if (req->tx_buf_size)
922 		hdev->tx_buf_size =
923 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
924 	else
925 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
926 
927 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
928 
929 	if (req->dv_buf_size)
930 		hdev->dv_buf_size =
931 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
932 	else
933 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
934 
935 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
936 
937 	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
938 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
939 		dev_err(&hdev->pdev->dev,
940 			"only %u msi resources available, not enough for pf(min:2).\n",
941 			hdev->num_nic_msi);
942 		return -EINVAL;
943 	}
944 
945 	if (hnae3_dev_roce_supported(hdev)) {
946 		hdev->num_roce_msi =
947 			le16_to_cpu(req->pf_intr_vector_number_roce);
948 
949 		/* PF should have both NIC vectors and RoCE vectors;
950 		 * NIC vectors come before RoCE vectors.
951 		 */
952 		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
953 	} else {
954 		hdev->num_msi = hdev->num_nic_msi;
955 	}
956 
957 	return 0;
958 }
959 
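/* translate the speed code reported by the firmware into an
 * HCLGE_MAC_SPEED_* value; note the codes are not in speed order
 */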
960 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
961 {
962 	switch (speed_cmd) {
963 	case 6:
964 		*speed = HCLGE_MAC_SPEED_10M;
965 		break;
966 	case 7:
967 		*speed = HCLGE_MAC_SPEED_100M;
968 		break;
969 	case 0:
970 		*speed = HCLGE_MAC_SPEED_1G;
971 		break;
972 	case 1:
973 		*speed = HCLGE_MAC_SPEED_10G;
974 		break;
975 	case 2:
976 		*speed = HCLGE_MAC_SPEED_25G;
977 		break;
978 	case 3:
979 		*speed = HCLGE_MAC_SPEED_40G;
980 		break;
981 	case 4:
982 		*speed = HCLGE_MAC_SPEED_50G;
983 		break;
984 	case 5:
985 		*speed = HCLGE_MAC_SPEED_100G;
986 		break;
987 	case 8:
988 		*speed = HCLGE_MAC_SPEED_200G;
989 		break;
990 	default:
991 		return -EINVAL;
992 	}
993 
994 	return 0;
995 }
996 
997 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
998 {
999 	struct hclge_vport *vport = hclge_get_vport(handle);
1000 	struct hclge_dev *hdev = vport->back;
1001 	u32 speed_ability = hdev->hw.mac.speed_ability;
1002 	u32 speed_bit = 0;
1003 
1004 	switch (speed) {
1005 	case HCLGE_MAC_SPEED_10M:
1006 		speed_bit = HCLGE_SUPPORT_10M_BIT;
1007 		break;
1008 	case HCLGE_MAC_SPEED_100M:
1009 		speed_bit = HCLGE_SUPPORT_100M_BIT;
1010 		break;
1011 	case HCLGE_MAC_SPEED_1G:
1012 		speed_bit = HCLGE_SUPPORT_1G_BIT;
1013 		break;
1014 	case HCLGE_MAC_SPEED_10G:
1015 		speed_bit = HCLGE_SUPPORT_10G_BIT;
1016 		break;
1017 	case HCLGE_MAC_SPEED_25G:
1018 		speed_bit = HCLGE_SUPPORT_25G_BIT;
1019 		break;
1020 	case HCLGE_MAC_SPEED_40G:
1021 		speed_bit = HCLGE_SUPPORT_40G_BIT;
1022 		break;
1023 	case HCLGE_MAC_SPEED_50G:
1024 		speed_bit = HCLGE_SUPPORT_50G_BIT;
1025 		break;
1026 	case HCLGE_MAC_SPEED_100G:
1027 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1028 		break;
1029 	case HCLGE_MAC_SPEED_200G:
1030 		speed_bit = HCLGE_SUPPORT_200G_BIT;
1031 		break;
1032 	default:
1033 		return -EINVAL;
1034 	}
1035 
1036 	if (speed_bit & speed_ability)
1037 		return 0;
1038 
1039 	return -EINVAL;
1040 }
1041 
1042 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1043 {
1044 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1045 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1046 				 mac->supported);
1047 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1048 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1049 				 mac->supported);
1050 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1051 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1052 				 mac->supported);
1053 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1054 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1055 				 mac->supported);
1056 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1057 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1058 				 mac->supported);
1059 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1060 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1061 				 mac->supported);
1062 }
1063 
1064 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1065 {
1066 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1067 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1068 				 mac->supported);
1069 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1070 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1071 				 mac->supported);
1072 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1073 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1074 				 mac->supported);
1075 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1076 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1077 				 mac->supported);
1078 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1079 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1080 				 mac->supported);
1081 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1082 		linkmode_set_bit(
1083 			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1084 			mac->supported);
1085 }
1086 
1087 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1088 {
1089 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1090 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1091 				 mac->supported);
1092 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1093 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1094 				 mac->supported);
1095 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1096 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1097 				 mac->supported);
1098 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1099 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1100 				 mac->supported);
1101 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1102 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1103 				 mac->supported);
1104 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1105 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1106 				 mac->supported);
1107 }
1108 
1109 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1110 {
1111 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1112 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1113 				 mac->supported);
1114 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1115 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1116 				 mac->supported);
1117 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1118 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1119 				 mac->supported);
1120 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1121 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1122 				 mac->supported);
1123 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1124 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1125 				 mac->supported);
1126 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1127 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1128 				 mac->supported);
1129 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1130 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1131 				 mac->supported);
1132 }
1133 
1134 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1135 {
1136 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1137 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1138 
1139 	switch (mac->speed) {
1140 	case HCLGE_MAC_SPEED_10G:
1141 	case HCLGE_MAC_SPEED_40G:
1142 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1143 				 mac->supported);
1144 		mac->fec_ability =
1145 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1146 		break;
1147 	case HCLGE_MAC_SPEED_25G:
1148 	case HCLGE_MAC_SPEED_50G:
1149 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1150 				 mac->supported);
1151 		mac->fec_ability =
1152 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1153 			BIT(HNAE3_FEC_AUTO);
1154 		break;
1155 	case HCLGE_MAC_SPEED_100G:
1156 	case HCLGE_MAC_SPEED_200G:
1157 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1158 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1159 		break;
1160 	default:
1161 		mac->fec_ability = 0;
1162 		break;
1163 	}
1164 }
1165 
1166 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1167 					u16 speed_ability)
1168 {
1169 	struct hclge_mac *mac = &hdev->hw.mac;
1170 
1171 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1172 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1173 				 mac->supported);
1174 
1175 	hclge_convert_setting_sr(mac, speed_ability);
1176 	hclge_convert_setting_lr(mac, speed_ability);
1177 	hclge_convert_setting_cr(mac, speed_ability);
1178 	if (hnae3_dev_fec_supported(hdev))
1179 		hclge_convert_setting_fec(mac);
1180 
1181 	if (hnae3_dev_pause_supported(hdev))
1182 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1183 
1184 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1185 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1186 }
1187 
1188 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1189 					    u16 speed_ability)
1190 {
1191 	struct hclge_mac *mac = &hdev->hw.mac;
1192 
1193 	hclge_convert_setting_kr(mac, speed_ability);
1194 	if (hnae3_dev_fec_supported(hdev))
1195 		hclge_convert_setting_fec(mac);
1196 
1197 	if (hnae3_dev_pause_supported(hdev))
1198 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1199 
1200 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1201 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1202 }
1203 
1204 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1205 					 u16 speed_ability)
1206 {
1207 	unsigned long *supported = hdev->hw.mac.supported;
1208 
1209 	/* default to supporting all speeds for a GE port */
1210 	if (!speed_ability)
1211 		speed_ability = HCLGE_SUPPORT_GE;
1212 
1213 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1214 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1215 				 supported);
1216 
1217 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1218 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1219 				 supported);
1220 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1221 				 supported);
1222 	}
1223 
1224 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1225 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1226 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1227 	}
1228 
1229 	if (hnae3_dev_pause_supported(hdev)) {
1230 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1231 		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1232 	}
1233 
1234 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1235 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1236 }
1237 
1238 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1239 {
1240 	u8 media_type = hdev->hw.mac.media_type;
1241 
1242 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1243 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1244 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1245 		hclge_parse_copper_link_mode(hdev, speed_ability);
1246 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1247 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1248 }
1249 
1250 static u32 hclge_get_max_speed(u16 speed_ability)
1251 {
1252 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1253 		return HCLGE_MAC_SPEED_200G;
1254 
1255 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1256 		return HCLGE_MAC_SPEED_100G;
1257 
1258 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1259 		return HCLGE_MAC_SPEED_50G;
1260 
1261 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1262 		return HCLGE_MAC_SPEED_40G;
1263 
1264 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1265 		return HCLGE_MAC_SPEED_25G;
1266 
1267 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1268 		return HCLGE_MAC_SPEED_10G;
1269 
1270 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1271 		return HCLGE_MAC_SPEED_1G;
1272 
1273 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1274 		return HCLGE_MAC_SPEED_100M;
1275 
1276 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1277 		return HCLGE_MAC_SPEED_10M;
1278 
1279 	return HCLGE_MAC_SPEED_1G;
1280 }
1281 
1282 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1283 {
1284 #define SPEED_ABILITY_EXT_SHIFT			8
1285 
1286 	struct hclge_cfg_param_cmd *req;
1287 	u64 mac_addr_tmp_high;
1288 	u16 speed_ability_ext;
1289 	u64 mac_addr_tmp;
1290 	unsigned int i;
1291 
1292 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1293 
1294 	/* get the configuration */
1295 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1296 					      HCLGE_CFG_VMDQ_M,
1297 					      HCLGE_CFG_VMDQ_S);
1298 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1299 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1300 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1301 					    HCLGE_CFG_TQP_DESC_N_M,
1302 					    HCLGE_CFG_TQP_DESC_N_S);
1303 
1304 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1305 					HCLGE_CFG_PHY_ADDR_M,
1306 					HCLGE_CFG_PHY_ADDR_S);
1307 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1308 					  HCLGE_CFG_MEDIA_TP_M,
1309 					  HCLGE_CFG_MEDIA_TP_S);
1310 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1311 					  HCLGE_CFG_RX_BUF_LEN_M,
1312 					  HCLGE_CFG_RX_BUF_LEN_S);
1313 	/* get mac_address */
1314 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1315 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1316 					    HCLGE_CFG_MAC_ADDR_H_M,
1317 					    HCLGE_CFG_MAC_ADDR_H_S);
1318 
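	/* merge in the upper 16 bits of the MAC address;
	 * (x << 31) << 1 is equivalent to x << 32 here
	 */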
1319 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1320 
1321 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1322 					     HCLGE_CFG_DEFAULT_SPEED_M,
1323 					     HCLGE_CFG_DEFAULT_SPEED_S);
1324 	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1325 					       HCLGE_CFG_RSS_SIZE_M,
1326 					       HCLGE_CFG_RSS_SIZE_S);
1327 
1328 	for (i = 0; i < ETH_ALEN; i++)
1329 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1330 
1331 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1332 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1333 
1334 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1335 					     HCLGE_CFG_SPEED_ABILITY_M,
1336 					     HCLGE_CFG_SPEED_ABILITY_S);
1337 	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1338 					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
1339 					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
1340 	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1341 
1342 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1343 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1344 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1345 	if (!cfg->umv_space)
1346 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1347 
1348 	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1349 					       HCLGE_CFG_PF_RSS_SIZE_M,
1350 					       HCLGE_CFG_PF_RSS_SIZE_S);
1351 
1352 	/* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power-of-2
1353 	 * exponent rather than the size itself, which is more flexible for
1354 	 * future changes and expansions.
1355 	 * A PF field of 0 is not meaningful on its own; in that case the PF
1356 	 * falls back to the VF max rss size field (HCLGE_CFG_RSS_SIZE_S), so
1357 	 * PF and VF share the same max rss size.
1358 	 */
1359 	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1360 			       1U << cfg->pf_rss_size_max :
1361 			       cfg->vf_rss_size_max;
1362 }
1363 
1364 /* hclge_get_cfg: query the static parameters from flash
1365  * @hdev: pointer to struct hclge_dev
1366  * @hcfg: the config structure to be filled in
1367  */
1368 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1369 {
1370 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1371 	struct hclge_cfg_param_cmd *req;
1372 	unsigned int i;
1373 	int ret;
1374 
1375 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1376 		u32 offset = 0;
1377 
1378 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1379 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1380 					   true);
1381 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1382 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1383 		/* the read length is sent to hardware in units of 4 bytes */
1384 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1385 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1386 		req->offset = cpu_to_le32(offset);
1387 	}
1388 
1389 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1390 	if (ret) {
1391 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1392 		return ret;
1393 	}
1394 
1395 	hclge_parse_cfg(hcfg, desc);
1396 
1397 	return 0;
1398 }
1399 
1400 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1401 {
1402 #define HCLGE_MAX_NON_TSO_BD_NUM			8U
1403 
1404 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1405 
1406 	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1407 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1408 	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1409 	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1410 	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1411 	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1412 	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1413 }
1414 
1415 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1416 				  struct hclge_desc *desc)
1417 {
1418 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1419 	struct hclge_dev_specs_0_cmd *req0;
1420 	struct hclge_dev_specs_1_cmd *req1;
1421 
1422 	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1423 	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1424 
1425 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1426 	ae_dev->dev_specs.rss_ind_tbl_size =
1427 		le16_to_cpu(req0->rss_ind_tbl_size);
1428 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1429 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1430 	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1431 	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1432 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1433 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1434 }
1435 
1436 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1437 {
1438 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1439 
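	/* fall back to the default value for any specification the firmware
	 * leaves at zero
	 */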
1440 	if (!dev_specs->max_non_tso_bd_num)
1441 		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1442 	if (!dev_specs->rss_ind_tbl_size)
1443 		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1444 	if (!dev_specs->rss_key_size)
1445 		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1446 	if (!dev_specs->max_tm_rate)
1447 		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1448 	if (!dev_specs->max_qset_num)
1449 		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1450 	if (!dev_specs->max_int_gl)
1451 		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1452 	if (!dev_specs->max_frm_size)
1453 		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1454 }
1455 
1456 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1457 {
1458 	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1459 	int ret;
1460 	int i;
1461 
1462 	/* set default specifications as devices lower than version V3 do not
1463 	 * support querying specifications from firmware.
1464 	 */
1465 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1466 		hclge_set_default_dev_specs(hdev);
1467 		return 0;
1468 	}
1469 
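	/* chain the query descriptors: all but the last carry the NEXT flag */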
1470 	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1471 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1472 					   true);
1473 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1474 	}
1475 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1476 
1477 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1478 	if (ret)
1479 		return ret;
1480 
1481 	hclge_parse_dev_specs(hdev, desc);
1482 	hclge_check_dev_specs(hdev);
1483 
1484 	return 0;
1485 }
1486 
1487 static int hclge_get_cap(struct hclge_dev *hdev)
1488 {
1489 	int ret;
1490 
1491 	ret = hclge_query_function_status(hdev);
1492 	if (ret) {
1493 		dev_err(&hdev->pdev->dev,
1494 			"query function status error %d.\n", ret);
1495 		return ret;
1496 	}
1497 
1498 	/* get pf resource */
1499 	return hclge_query_pf_resource(hdev);
1500 }
1501 
1502 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1503 {
1504 #define HCLGE_MIN_TX_DESC	64
1505 #define HCLGE_MIN_RX_DESC	64
1506 
1507 	if (!is_kdump_kernel())
1508 		return;
1509 
1510 	dev_info(&hdev->pdev->dev,
1511 		 "Running kdump kernel. Using minimal resources\n");
1512 
1513 	/* the minimal number of queue pairs equals the number of vports */
1514 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1515 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1516 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1517 }
1518 
1519 static int hclge_configure(struct hclge_dev *hdev)
1520 {
1521 	struct hclge_cfg cfg;
1522 	unsigned int i;
1523 	int ret;
1524 
1525 	ret = hclge_get_cfg(hdev, &cfg);
1526 	if (ret)
1527 		return ret;
1528 
1529 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1530 	hdev->base_tqp_pid = 0;
1531 	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1532 	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1533 	hdev->rx_buf_len = cfg.rx_buf_len;
1534 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1535 	hdev->hw.mac.media_type = cfg.media_type;
1536 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1537 	hdev->num_tx_desc = cfg.tqp_desc_num;
1538 	hdev->num_rx_desc = cfg.tqp_desc_num;
1539 	hdev->tm_info.num_pg = 1;
1540 	hdev->tc_max = cfg.tc_num;
1541 	hdev->tm_info.hw_pfc_map = 0;
1542 	hdev->wanted_umv_size = cfg.umv_space;
1543 
1544 	if (hnae3_dev_fd_supported(hdev)) {
1545 		hdev->fd_en = true;
1546 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1547 	}
1548 
1549 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1550 	if (ret) {
1551 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1552 			cfg.default_speed, ret);
1553 		return ret;
1554 	}
1555 
1556 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1557 
1558 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1559 
1560 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1561 	    (hdev->tc_max < 1)) {
1562 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1563 			 hdev->tc_max);
1564 		hdev->tc_max = 1;
1565 	}
1566 
1567 	/* Dev does not support DCB */
1568 	if (!hnae3_dev_dcb_supported(hdev)) {
1569 		hdev->tc_max = 1;
1570 		hdev->pfc_max = 0;
1571 	} else {
1572 		hdev->pfc_max = hdev->tc_max;
1573 	}
1574 
1575 	hdev->tm_info.num_tc = 1;
1576 
1577 	/* Non-contiguous TCs are currently not supported */
1578 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1579 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1580 
1581 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1582 
1583 	hclge_init_kdump_kernel_config(hdev);
1584 
1585 	/* Set the init affinity based on pci func number */
1586 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1587 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1588 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1589 			&hdev->affinity_mask);
1590 
1591 	return ret;
1592 }
1593 
1594 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1595 			    u16 tso_mss_max)
1596 {
1597 	struct hclge_cfg_tso_status_cmd *req;
1598 	struct hclge_desc desc;
1599 
1600 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1601 
1602 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1603 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1604 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1605 
1606 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1607 }
1608 
1609 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1610 {
1611 	struct hclge_cfg_gro_status_cmd *req;
1612 	struct hclge_desc desc;
1613 	int ret;
1614 
1615 	if (!hnae3_dev_gro_supported(hdev))
1616 		return 0;
1617 
1618 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1619 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1620 
1621 	req->gro_en = en ? 1 : 0;
1622 
1623 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1624 	if (ret)
1625 		dev_err(&hdev->pdev->dev,
1626 			"GRO hardware config cmd failed, ret = %d\n", ret);
1627 
1628 	return ret;
1629 }
1630 
1631 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1632 {
1633 	struct hclge_tqp *tqp;
1634 	int i;
1635 
1636 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1637 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1638 	if (!hdev->htqp)
1639 		return -ENOMEM;
1640 
1641 	tqp = hdev->htqp;
1642 
1643 	for (i = 0; i < hdev->num_tqps; i++) {
1644 		tqp->dev = &hdev->pdev->dev;
1645 		tqp->index = i;
1646 
1647 		tqp->q.ae_algo = &ae_algo;
1648 		tqp->q.buf_size = hdev->rx_buf_len;
1649 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1650 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1651 
1652 		/* need an extended offset to configure queues >=
1653 		 * HCLGE_TQP_MAX_SIZE_DEV_V2
1654 		 */
1655 		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1656 			tqp->q.io_base = hdev->hw.io_base +
1657 					 HCLGE_TQP_REG_OFFSET +
1658 					 i * HCLGE_TQP_REG_SIZE;
1659 		else
1660 			tqp->q.io_base = hdev->hw.io_base +
1661 					 HCLGE_TQP_REG_OFFSET +
1662 					 HCLGE_TQP_EXT_REG_OFFSET +
1663 					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1664 					 HCLGE_TQP_REG_SIZE;
1665 
1666 		tqp++;
1667 	}
1668 
1669 	return 0;
1670 }
1671 
1672 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1673 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1674 {
1675 	struct hclge_tqp_map_cmd *req;
1676 	struct hclge_desc desc;
1677 	int ret;
1678 
1679 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1680 
1681 	req = (struct hclge_tqp_map_cmd *)desc.data;
1682 	req->tqp_id = cpu_to_le16(tqp_pid);
1683 	req->tqp_vf = func_id;
1684 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1685 	if (!is_pf)
1686 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1687 	req->tqp_vid = cpu_to_le16(tqp_vid);
1688 
1689 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1690 	if (ret)
1691 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1692 
1693 	return ret;
1694 }
1695 
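/* Assign up to @num_tqps not-yet-allocated hardware TQPs to @vport and
 * derive the vport's rss_size from the number actually assigned, capped
 * by the PF RSS limit and by the NIC MSI vectors available per TC.
 */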
1696 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1697 {
1698 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1699 	struct hclge_dev *hdev = vport->back;
1700 	int i, alloced;
1701 
1702 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1703 	     alloced < num_tqps; i++) {
1704 		if (!hdev->htqp[i].alloced) {
1705 			hdev->htqp[i].q.handle = &vport->nic;
1706 			hdev->htqp[i].q.tqp_index = alloced;
1707 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1708 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1709 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1710 			hdev->htqp[i].alloced = true;
1711 			alloced++;
1712 		}
1713 	}
1714 	vport->alloc_tqps = alloced;
1715 	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1716 				vport->alloc_tqps / hdev->tm_info.num_tc);
1717 
1718 	/* ensure a one-to-one mapping between irq and queue by default */
1719 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1720 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1721 
1722 	return 0;
1723 }
1724 
1725 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1726 			    u16 num_tx_desc, u16 num_rx_desc)
1727 
1728 {
1729 	struct hnae3_handle *nic = &vport->nic;
1730 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1731 	struct hclge_dev *hdev = vport->back;
1732 	int ret;
1733 
1734 	kinfo->num_tx_desc = num_tx_desc;
1735 	kinfo->num_rx_desc = num_rx_desc;
1736 
1737 	kinfo->rx_buf_len = hdev->rx_buf_len;
1738 
1739 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1740 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1741 	if (!kinfo->tqp)
1742 		return -ENOMEM;
1743 
1744 	ret = hclge_assign_tqp(vport, num_tqps);
1745 	if (ret)
1746 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1747 
1748 	return ret;
1749 }
1750 
1751 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1752 				  struct hclge_vport *vport)
1753 {
1754 	struct hnae3_handle *nic = &vport->nic;
1755 	struct hnae3_knic_private_info *kinfo;
1756 	u16 i;
1757 
1758 	kinfo = &nic->kinfo;
1759 	for (i = 0; i < vport->alloc_tqps; i++) {
1760 		struct hclge_tqp *q =
1761 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1762 		bool is_pf;
1763 		int ret;
1764 
1765 		is_pf = !(vport->vport_id);
1766 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1767 					     i, is_pf);
1768 		if (ret)
1769 			return ret;
1770 	}
1771 
1772 	return 0;
1773 }
1774 
1775 static int hclge_map_tqp(struct hclge_dev *hdev)
1776 {
1777 	struct hclge_vport *vport = hdev->vport;
1778 	u16 i, num_vport;
1779 
1780 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1781 	for (i = 0; i < num_vport; i++)	{
1782 		int ret;
1783 
1784 		ret = hclge_map_tqp_to_vport(hdev, vport);
1785 		if (ret)
1786 			return ret;
1787 
1788 		vport++;
1789 	}
1790 
1791 	return 0;
1792 }
1793 
1794 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1795 {
1796 	struct hnae3_handle *nic = &vport->nic;
1797 	struct hclge_dev *hdev = vport->back;
1798 	int ret;
1799 
1800 	nic->pdev = hdev->pdev;
1801 	nic->ae_algo = &ae_algo;
1802 	nic->numa_node_mask = hdev->numa_node_mask;
1803 
1804 	ret = hclge_knic_setup(vport, num_tqps,
1805 			       hdev->num_tx_desc, hdev->num_rx_desc);
1806 	if (ret)
1807 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1808 
1809 	return ret;
1810 }
1811 
1812 static int hclge_alloc_vport(struct hclge_dev *hdev)
1813 {
1814 	struct pci_dev *pdev = hdev->pdev;
1815 	struct hclge_vport *vport;
1816 	u32 tqp_main_vport;
1817 	u32 tqp_per_vport;
1818 	int num_vport, i;
1819 	int ret;
1820 
1821 	/* We need to alloc a vport for the main NIC of the PF */
1822 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1823 
1824 	if (hdev->num_tqps < num_vport) {
1825 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1826 			hdev->num_tqps, num_vport);
1827 		return -EINVAL;
1828 	}
1829 
1830 	/* Alloc the same number of TQPs for every vport */
1831 	tqp_per_vport = hdev->num_tqps / num_vport;
1832 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1833 
1834 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1835 			     GFP_KERNEL);
1836 	if (!vport)
1837 		return -ENOMEM;
1838 
1839 	hdev->vport = vport;
1840 	hdev->num_alloc_vport = num_vport;
1841 
1842 	if (IS_ENABLED(CONFIG_PCI_IOV))
1843 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1844 
1845 	for (i = 0; i < num_vport; i++) {
1846 		vport->back = hdev;
1847 		vport->vport_id = i;
1848 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1849 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1850 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1851 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1852 		INIT_LIST_HEAD(&vport->vlan_list);
1853 		INIT_LIST_HEAD(&vport->uc_mac_list);
1854 		INIT_LIST_HEAD(&vport->mc_mac_list);
1855 		spin_lock_init(&vport->mac_list_lock);
1856 
1857 		if (i == 0)
1858 			ret = hclge_vport_setup(vport, tqp_main_vport);
1859 		else
1860 			ret = hclge_vport_setup(vport, tqp_per_vport);
1861 		if (ret) {
1862 			dev_err(&pdev->dev,
1863 				"vport setup failed for vport %d, %d\n",
1864 				i, ret);
1865 			return ret;
1866 		}
1867 
1868 		vport++;
1869 	}
1870 
1871 	return 0;
1872 }
1873 
1874 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1875 				    struct hclge_pkt_buf_alloc *buf_alloc)
1876 {
1877 /* TX buffer size is in units of 128 bytes */
1878 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1879 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1880 	struct hclge_tx_buff_alloc_cmd *req;
1881 	struct hclge_desc desc;
1882 	int ret;
1883 	u8 i;
1884 
1885 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1886 
1887 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1888 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1889 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1890 
1891 		req->tx_pkt_buff[i] =
1892 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1893 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1894 	}
1895 
1896 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1897 	if (ret)
1898 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1899 			ret);
1900 
1901 	return ret;
1902 }
1903 
1904 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1905 				 struct hclge_pkt_buf_alloc *buf_alloc)
1906 {
1907 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1908 
1909 	if (ret)
1910 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1911 
1912 	return ret;
1913 }
1914 
1915 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1916 {
1917 	unsigned int i;
1918 	u32 cnt = 0;
1919 
1920 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1921 		if (hdev->hw_tc_map & BIT(i))
1922 			cnt++;
1923 	return cnt;
1924 }
1925 
1926 /* Get the number of pfc-enabled TCs which have a private buffer */
1927 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1928 				  struct hclge_pkt_buf_alloc *buf_alloc)
1929 {
1930 	struct hclge_priv_buf *priv;
1931 	unsigned int i;
1932 	int cnt = 0;
1933 
1934 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1935 		priv = &buf_alloc->priv_buf[i];
1936 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1937 		    priv->enable)
1938 			cnt++;
1939 	}
1940 
1941 	return cnt;
1942 }
1943 
1944 /* Get the number of pfc-disabled TCs which have a private buffer */
1945 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1946 				     struct hclge_pkt_buf_alloc *buf_alloc)
1947 {
1948 	struct hclge_priv_buf *priv;
1949 	unsigned int i;
1950 	int cnt = 0;
1951 
1952 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1953 		priv = &buf_alloc->priv_buf[i];
1954 		if (hdev->hw_tc_map & BIT(i) &&
1955 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1956 		    priv->enable)
1957 			cnt++;
1958 	}
1959 
1960 	return cnt;
1961 }
1962 
1963 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1964 {
1965 	struct hclge_priv_buf *priv;
1966 	u32 rx_priv = 0;
1967 	int i;
1968 
1969 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1970 		priv = &buf_alloc->priv_buf[i];
1971 		if (priv->enable)
1972 			rx_priv += priv->buf_size;
1973 	}
1974 	return rx_priv;
1975 }
1976 
1977 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1978 {
1979 	u32 i, total_tx_size = 0;
1980 
1981 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1982 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1983 
1984 	return total_tx_size;
1985 }
1986 
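/* Check whether @rx_all can hold the private buffers already calculated
 * plus a minimum shared buffer. If it can, the remainder becomes the
 * shared buffer, and its waterlines and per-TC thresholds are derived
 * from the aligned MPS and, for DCB-capable devices, the DV buffer size.
 */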
1987 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1988 				struct hclge_pkt_buf_alloc *buf_alloc,
1989 				u32 rx_all)
1990 {
1991 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1992 	u32 tc_num = hclge_get_tc_num(hdev);
1993 	u32 shared_buf, aligned_mps;
1994 	u32 rx_priv;
1995 	int i;
1996 
1997 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1998 
1999 	if (hnae3_dev_dcb_supported(hdev))
2000 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2001 					hdev->dv_buf_size;
2002 	else
2003 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2004 					+ hdev->dv_buf_size;
2005 
2006 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2007 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2008 			     HCLGE_BUF_SIZE_UNIT);
2009 
2010 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2011 	if (rx_all < rx_priv + shared_std)
2012 		return false;
2013 
2014 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2015 	buf_alloc->s_buf.buf_size = shared_buf;
2016 	if (hnae3_dev_dcb_supported(hdev)) {
2017 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2018 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2019 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2020 				  HCLGE_BUF_SIZE_UNIT);
2021 	} else {
2022 		buf_alloc->s_buf.self.high = aligned_mps +
2023 						HCLGE_NON_DCB_ADDITIONAL_BUF;
2024 		buf_alloc->s_buf.self.low = aligned_mps;
2025 	}
2026 
2027 	if (hnae3_dev_dcb_supported(hdev)) {
2028 		hi_thrd = shared_buf - hdev->dv_buf_size;
2029 
2030 		if (tc_num <= NEED_RESERVE_TC_NUM)
2031 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2032 					/ BUF_MAX_PERCENT;
2033 
2034 		if (tc_num)
2035 			hi_thrd = hi_thrd / tc_num;
2036 
2037 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2038 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2039 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2040 	} else {
2041 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2042 		lo_thrd = aligned_mps;
2043 	}
2044 
2045 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2046 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2047 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2048 	}
2049 
2050 	return true;
2051 }
2052 
2053 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2054 				struct hclge_pkt_buf_alloc *buf_alloc)
2055 {
2056 	u32 i, total_size;
2057 
2058 	total_size = hdev->pkt_buf_size;
2059 
2060 	/* alloc a tx buffer for each enabled TC */
2061 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2062 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2063 
2064 		if (hdev->hw_tc_map & BIT(i)) {
2065 			if (total_size < hdev->tx_buf_size)
2066 				return -ENOMEM;
2067 
2068 			priv->tx_buf_size = hdev->tx_buf_size;
2069 		} else {
2070 			priv->tx_buf_size = 0;
2071 		}
2072 
2073 		total_size -= priv->tx_buf_size;
2074 	}
2075 
2076 	return 0;
2077 }
2078 
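/* Calculate the rx private buffer size and waterlines for every enabled
 * TC. When @max is true the larger waterlines are used, otherwise the
 * reduced ones; the result is then validated against the remaining rx
 * buffer by hclge_is_rx_buf_ok().
 */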
2079 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2080 				  struct hclge_pkt_buf_alloc *buf_alloc)
2081 {
2082 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2083 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2084 	unsigned int i;
2085 
2086 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2087 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2088 
2089 		priv->enable = 0;
2090 		priv->wl.low = 0;
2091 		priv->wl.high = 0;
2092 		priv->buf_size = 0;
2093 
2094 		if (!(hdev->hw_tc_map & BIT(i)))
2095 			continue;
2096 
2097 		priv->enable = 1;
2098 
2099 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2100 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2101 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
2102 						HCLGE_BUF_SIZE_UNIT);
2103 		} else {
2104 			priv->wl.low = 0;
2105 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2106 					aligned_mps;
2107 		}
2108 
2109 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2110 	}
2111 
2112 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2113 }
2114 
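/* Starting from the last TC, clear the private buffer of TCs that have
 * no PFC enabled until the remaining rx buffer fits.
 */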
2115 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2116 					  struct hclge_pkt_buf_alloc *buf_alloc)
2117 {
2118 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2119 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2120 	int i;
2121 
2122 	/* let the last TC be cleared first */
2123 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2124 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2125 		unsigned int mask = BIT((unsigned int)i);
2126 
2127 		if (hdev->hw_tc_map & mask &&
2128 		    !(hdev->tm_info.hw_pfc_map & mask)) {
2129 			/* Clear the private buffer of the non-pfc TC */
2130 			priv->wl.low = 0;
2131 			priv->wl.high = 0;
2132 			priv->buf_size = 0;
2133 			priv->enable = 0;
2134 			no_pfc_priv_num--;
2135 		}
2136 
2137 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2138 		    no_pfc_priv_num == 0)
2139 			break;
2140 	}
2141 
2142 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2143 }
2144 
2145 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2146 					struct hclge_pkt_buf_alloc *buf_alloc)
2147 {
2148 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2149 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2150 	int i;
2151 
2152 	/* let the last TC be cleared first */
2153 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2154 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2155 		unsigned int mask = BIT((unsigned int)i);
2156 
2157 		if (hdev->hw_tc_map & mask &&
2158 		    hdev->tm_info.hw_pfc_map & mask) {
2159 			/* Reduce the number of pfc TCs with a private buffer */
2160 			priv->wl.low = 0;
2161 			priv->enable = 0;
2162 			priv->wl.high = 0;
2163 			priv->buf_size = 0;
2164 			pfc_priv_num--;
2165 		}
2166 
2167 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2168 		    pfc_priv_num == 0)
2169 			break;
2170 	}
2171 
2172 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2173 }
2174 
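/* Try to spread the whole remaining rx buffer evenly over the enabled
 * TCs as private buffers, leaving no shared buffer. Returns false when
 * the per-TC share would fall below the required minimum.
 */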
2175 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2176 				      struct hclge_pkt_buf_alloc *buf_alloc)
2177 {
2178 #define COMPENSATE_BUFFER	0x3C00
2179 #define COMPENSATE_HALF_MPS_NUM	5
2180 #define PRIV_WL_GAP		0x1800
2181 
2182 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2183 	u32 tc_num = hclge_get_tc_num(hdev);
2184 	u32 half_mps = hdev->mps >> 1;
2185 	u32 min_rx_priv;
2186 	unsigned int i;
2187 
2188 	if (tc_num)
2189 		rx_priv = rx_priv / tc_num;
2190 
2191 	if (tc_num <= NEED_RESERVE_TC_NUM)
2192 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2193 
2194 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2195 			COMPENSATE_HALF_MPS_NUM * half_mps;
2196 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2197 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2198 
2199 	if (rx_priv < min_rx_priv)
2200 		return false;
2201 
2202 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2203 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2204 
2205 		priv->enable = 0;
2206 		priv->wl.low = 0;
2207 		priv->wl.high = 0;
2208 		priv->buf_size = 0;
2209 
2210 		if (!(hdev->hw_tc_map & BIT(i)))
2211 			continue;
2212 
2213 		priv->enable = 1;
2214 		priv->buf_size = rx_priv;
2215 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2216 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2217 	}
2218 
2219 	buf_alloc->s_buf.buf_size = 0;
2220 
2221 	return true;
2222 }
2223 
2224 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2225  * @hdev: pointer to struct hclge_dev
2226  * @buf_alloc: pointer to buffer calculation data
2227  * @return: 0: calculation successful, negative: fail
2228  */
2229 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2230 				struct hclge_pkt_buf_alloc *buf_alloc)
2231 {
2232 	/* When DCB is not supported, rx private buffer is not allocated. */
2233 	if (!hnae3_dev_dcb_supported(hdev)) {
2234 		u32 rx_all = hdev->pkt_buf_size;
2235 
2236 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2237 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2238 			return -ENOMEM;
2239 
2240 		return 0;
2241 	}
2242 
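	/* Try the allocation strategies below in order and stop at the
	 * first one that fits:
	 * 1. private buffers only, without a shared buffer
	 * 2. private buffers with large waterlines plus a shared buffer
	 * 3. private buffers with reduced waterlines plus a shared buffer
	 * 4. drop private buffers of non-pfc TCs, then of pfc TCs
	 */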
2243 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2244 		return 0;
2245 
2246 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2247 		return 0;
2248 
2249 	/* try to decrease the buffer size */
2250 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2251 		return 0;
2252 
2253 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2254 		return 0;
2255 
2256 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2257 		return 0;
2258 
2259 	return -ENOMEM;
2260 }
2261 
2262 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2263 				   struct hclge_pkt_buf_alloc *buf_alloc)
2264 {
2265 	struct hclge_rx_priv_buff_cmd *req;
2266 	struct hclge_desc desc;
2267 	int ret;
2268 	int i;
2269 
2270 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2271 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2272 
2273 	/* Alloc private buffer TCs */
2274 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2275 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2276 
2277 		req->buf_num[i] =
2278 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2279 		req->buf_num[i] |=
2280 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2281 	}
2282 
2283 	req->shared_buf =
2284 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2285 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2286 
2287 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2288 	if (ret)
2289 		dev_err(&hdev->pdev->dev,
2290 			"rx private buffer alloc cmd failed %d\n", ret);
2291 
2292 	return ret;
2293 }
2294 
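/* Program the per-TC rx private buffer waterlines into hardware. The TCs
 * are split across two command descriptors, HCLGE_TC_NUM_ONE_DESC per
 * descriptor, which are sent together.
 */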
2295 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2296 				   struct hclge_pkt_buf_alloc *buf_alloc)
2297 {
2298 	struct hclge_rx_priv_wl_buf *req;
2299 	struct hclge_priv_buf *priv;
2300 	struct hclge_desc desc[2];
2301 	int i, j;
2302 	int ret;
2303 
2304 	for (i = 0; i < 2; i++) {
2305 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2306 					   false);
2307 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2308 
2309 		/* The first descriptor sets the NEXT bit to 1 */
2310 		if (i == 0)
2311 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2312 		else
2313 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2314 
2315 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2316 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2317 
2318 			priv = &buf_alloc->priv_buf[idx];
2319 			req->tc_wl[j].high =
2320 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2321 			req->tc_wl[j].high |=
2322 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2323 			req->tc_wl[j].low =
2324 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2325 			req->tc_wl[j].low |=
2326 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2327 		}
2328 	}
2329 
2330 	/* Send 2 descriptors at one time */
2331 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2332 	if (ret)
2333 		dev_err(&hdev->pdev->dev,
2334 			"rx private waterline config cmd failed %d\n",
2335 			ret);
2336 	return ret;
2337 }
2338 
2339 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2340 				    struct hclge_pkt_buf_alloc *buf_alloc)
2341 {
2342 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2343 	struct hclge_rx_com_thrd *req;
2344 	struct hclge_desc desc[2];
2345 	struct hclge_tc_thrd *tc;
2346 	int i, j;
2347 	int ret;
2348 
2349 	for (i = 0; i < 2; i++) {
2350 		hclge_cmd_setup_basic_desc(&desc[i],
2351 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2352 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2353 
2354 		/* The first descriptor sets the NEXT bit to 1 */
2355 		if (i == 0)
2356 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2357 		else
2358 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2359 
2360 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2361 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2362 
2363 			req->com_thrd[j].high =
2364 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2365 			req->com_thrd[j].high |=
2366 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2367 			req->com_thrd[j].low =
2368 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2369 			req->com_thrd[j].low |=
2370 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2371 		}
2372 	}
2373 
2374 	/* Send 2 descriptors at one time */
2375 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2376 	if (ret)
2377 		dev_err(&hdev->pdev->dev,
2378 			"common threshold config cmd failed %d\n", ret);
2379 	return ret;
2380 }
2381 
2382 static int hclge_common_wl_config(struct hclge_dev *hdev,
2383 				  struct hclge_pkt_buf_alloc *buf_alloc)
2384 {
2385 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2386 	struct hclge_rx_com_wl *req;
2387 	struct hclge_desc desc;
2388 	int ret;
2389 
2390 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2391 
2392 	req = (struct hclge_rx_com_wl *)desc.data;
2393 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2394 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2395 
2396 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2397 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2398 
2399 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2400 	if (ret)
2401 		dev_err(&hdev->pdev->dev,
2402 			"common waterline config cmd failed %d\n", ret);
2403 
2404 	return ret;
2405 }
2406 
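/* hclge_buffer_alloc: calculate and apply the whole packet buffer layout
 * @hdev: pointer to struct hclge_dev
 * @return: 0: configuration successful, negative: fail
 * The tx buffers are calculated and allocated first, then the rx private
 * buffers; the private waterlines and common thresholds are programmed
 * only when the device supports DCB.
 */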
2407 int hclge_buffer_alloc(struct hclge_dev *hdev)
2408 {
2409 	struct hclge_pkt_buf_alloc *pkt_buf;
2410 	int ret;
2411 
2412 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2413 	if (!pkt_buf)
2414 		return -ENOMEM;
2415 
2416 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2417 	if (ret) {
2418 		dev_err(&hdev->pdev->dev,
2419 			"could not calc tx buffer size for all TCs %d\n", ret);
2420 		goto out;
2421 	}
2422 
2423 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2424 	if (ret) {
2425 		dev_err(&hdev->pdev->dev,
2426 			"could not alloc tx buffers %d\n", ret);
2427 		goto out;
2428 	}
2429 
2430 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2431 	if (ret) {
2432 		dev_err(&hdev->pdev->dev,
2433 			"could not calc rx priv buffer size for all TCs %d\n",
2434 			ret);
2435 		goto out;
2436 	}
2437 
2438 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2439 	if (ret) {
2440 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2441 			ret);
2442 		goto out;
2443 	}
2444 
2445 	if (hnae3_dev_dcb_supported(hdev)) {
2446 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2447 		if (ret) {
2448 			dev_err(&hdev->pdev->dev,
2449 				"could not configure rx private waterline %d\n",
2450 				ret);
2451 			goto out;
2452 		}
2453 
2454 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2455 		if (ret) {
2456 			dev_err(&hdev->pdev->dev,
2457 				"could not configure common threshold %d\n",
2458 				ret);
2459 			goto out;
2460 		}
2461 	}
2462 
2463 	ret = hclge_common_wl_config(hdev, pkt_buf);
2464 	if (ret)
2465 		dev_err(&hdev->pdev->dev,
2466 			"could not configure common waterline %d\n", ret);
2467 
2468 out:
2469 	kfree(pkt_buf);
2470 	return ret;
2471 }
2472 
2473 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2474 {
2475 	struct hnae3_handle *roce = &vport->roce;
2476 	struct hnae3_handle *nic = &vport->nic;
2477 	struct hclge_dev *hdev = vport->back;
2478 
2479 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2480 
2481 	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2482 		return -EINVAL;
2483 
2484 	roce->rinfo.base_vector = hdev->roce_base_vector;
2485 
2486 	roce->rinfo.netdev = nic->kinfo.netdev;
2487 	roce->rinfo.roce_io_base = hdev->hw.io_base;
2488 	roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2489 
2490 	roce->pdev = nic->pdev;
2491 	roce->ae_algo = nic->ae_algo;
2492 	roce->numa_node_mask = nic->numa_node_mask;
2493 
2494 	return 0;
2495 }
2496 
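/* Allocate between HNAE3_MIN_VECTOR_NUM and hdev->num_msi MSI/MSI-X
 * vectors and set up the per-vector status and irq tracking arrays.
 * num_msi is lowered to the number of vectors actually granted.
 */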
2497 static int hclge_init_msi(struct hclge_dev *hdev)
2498 {
2499 	struct pci_dev *pdev = hdev->pdev;
2500 	int vectors;
2501 	int i;
2502 
2503 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2504 					hdev->num_msi,
2505 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2506 	if (vectors < 0) {
2507 		dev_err(&pdev->dev,
2508 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2509 			vectors);
2510 		return vectors;
2511 	}
2512 	if (vectors < hdev->num_msi)
2513 		dev_warn(&hdev->pdev->dev,
2514 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2515 			 hdev->num_msi, vectors);
2516 
2517 	hdev->num_msi = vectors;
2518 	hdev->num_msi_left = vectors;
2519 
2520 	hdev->base_msi_vector = pdev->irq;
2521 	hdev->roce_base_vector = hdev->base_msi_vector +
2522 				hdev->num_nic_msi;
2523 
2524 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2525 					   sizeof(u16), GFP_KERNEL);
2526 	if (!hdev->vector_status) {
2527 		pci_free_irq_vectors(pdev);
2528 		return -ENOMEM;
2529 	}
2530 
2531 	for (i = 0; i < hdev->num_msi; i++)
2532 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2533 
2534 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2535 					sizeof(int), GFP_KERNEL);
2536 	if (!hdev->vector_irq) {
2537 		pci_free_irq_vectors(pdev);
2538 		return -ENOMEM;
2539 	}
2540 
2541 	return 0;
2542 }
2543 
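/* Half duplex is only kept for 10M and 100M; any other speed is forced
 * to full duplex.
 */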
2544 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2545 {
2546 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2547 		duplex = HCLGE_MAC_FULL;
2548 
2549 	return duplex;
2550 }
2551 
2552 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2553 				      u8 duplex)
2554 {
2555 	struct hclge_config_mac_speed_dup_cmd *req;
2556 	struct hclge_desc desc;
2557 	int ret;
2558 
2559 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2560 
2561 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2562 
2563 	if (duplex)
2564 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2565 
2566 	switch (speed) {
2567 	case HCLGE_MAC_SPEED_10M:
2568 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2569 				HCLGE_CFG_SPEED_S, 6);
2570 		break;
2571 	case HCLGE_MAC_SPEED_100M:
2572 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2573 				HCLGE_CFG_SPEED_S, 7);
2574 		break;
2575 	case HCLGE_MAC_SPEED_1G:
2576 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2577 				HCLGE_CFG_SPEED_S, 0);
2578 		break;
2579 	case HCLGE_MAC_SPEED_10G:
2580 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2581 				HCLGE_CFG_SPEED_S, 1);
2582 		break;
2583 	case HCLGE_MAC_SPEED_25G:
2584 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2585 				HCLGE_CFG_SPEED_S, 2);
2586 		break;
2587 	case HCLGE_MAC_SPEED_40G:
2588 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2589 				HCLGE_CFG_SPEED_S, 3);
2590 		break;
2591 	case HCLGE_MAC_SPEED_50G:
2592 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2593 				HCLGE_CFG_SPEED_S, 4);
2594 		break;
2595 	case HCLGE_MAC_SPEED_100G:
2596 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2597 				HCLGE_CFG_SPEED_S, 5);
2598 		break;
2599 	case HCLGE_MAC_SPEED_200G:
2600 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2601 				HCLGE_CFG_SPEED_S, 8);
2602 		break;
2603 	default:
2604 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2605 		return -EINVAL;
2606 	}
2607 
2608 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2609 		      1);
2610 
2611 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2612 	if (ret) {
2613 		dev_err(&hdev->pdev->dev,
2614 			"mac speed/duplex config cmd failed %d.\n", ret);
2615 		return ret;
2616 	}
2617 
2618 	return 0;
2619 }
2620 
2621 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2622 {
2623 	struct hclge_mac *mac = &hdev->hw.mac;
2624 	int ret;
2625 
2626 	duplex = hclge_check_speed_dup(duplex, speed);
2627 	if (!mac->support_autoneg && mac->speed == speed &&
2628 	    mac->duplex == duplex)
2629 		return 0;
2630 
2631 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2632 	if (ret)
2633 		return ret;
2634 
2635 	hdev->hw.mac.speed = speed;
2636 	hdev->hw.mac.duplex = duplex;
2637 
2638 	return 0;
2639 }
2640 
2641 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2642 				     u8 duplex)
2643 {
2644 	struct hclge_vport *vport = hclge_get_vport(handle);
2645 	struct hclge_dev *hdev = vport->back;
2646 
2647 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2648 }
2649 
2650 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2651 {
2652 	struct hclge_config_auto_neg_cmd *req;
2653 	struct hclge_desc desc;
2654 	u32 flag = 0;
2655 	int ret;
2656 
2657 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2658 
2659 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2660 	if (enable)
2661 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2662 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2663 
2664 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2665 	if (ret)
2666 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2667 			ret);
2668 
2669 	return ret;
2670 }
2671 
2672 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2673 {
2674 	struct hclge_vport *vport = hclge_get_vport(handle);
2675 	struct hclge_dev *hdev = vport->back;
2676 
2677 	if (!hdev->hw.mac.support_autoneg) {
2678 		if (enable) {
2679 			dev_err(&hdev->pdev->dev,
2680 				"autoneg is not supported by current port\n");
2681 			return -EOPNOTSUPP;
2682 		} else {
2683 			return 0;
2684 		}
2685 	}
2686 
2687 	return hclge_set_autoneg_en(hdev, enable);
2688 }
2689 
2690 static int hclge_get_autoneg(struct hnae3_handle *handle)
2691 {
2692 	struct hclge_vport *vport = hclge_get_vport(handle);
2693 	struct hclge_dev *hdev = vport->back;
2694 	struct phy_device *phydev = hdev->hw.mac.phydev;
2695 
2696 	if (phydev)
2697 		return phydev->autoneg;
2698 
2699 	return hdev->hw.mac.autoneg;
2700 }
2701 
2702 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2703 {
2704 	struct hclge_vport *vport = hclge_get_vport(handle);
2705 	struct hclge_dev *hdev = vport->back;
2706 	int ret;
2707 
2708 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2709 
2710 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2711 	if (ret)
2712 		return ret;
2713 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2714 }
2715 
2716 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2717 {
2718 	struct hclge_vport *vport = hclge_get_vport(handle);
2719 	struct hclge_dev *hdev = vport->back;
2720 
2721 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2722 		return hclge_set_autoneg_en(hdev, !halt);
2723 
2724 	return 0;
2725 }
2726 
2727 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2728 {
2729 	struct hclge_config_fec_cmd *req;
2730 	struct hclge_desc desc;
2731 	int ret;
2732 
2733 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2734 
2735 	req = (struct hclge_config_fec_cmd *)desc.data;
2736 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2737 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2738 	if (fec_mode & BIT(HNAE3_FEC_RS))
2739 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2740 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2741 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2742 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2743 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2744 
2745 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2746 	if (ret)
2747 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2748 
2749 	return ret;
2750 }
2751 
2752 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2753 {
2754 	struct hclge_vport *vport = hclge_get_vport(handle);
2755 	struct hclge_dev *hdev = vport->back;
2756 	struct hclge_mac *mac = &hdev->hw.mac;
2757 	int ret;
2758 
2759 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2760 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2761 		return -EINVAL;
2762 	}
2763 
2764 	ret = hclge_set_fec_hw(hdev, fec_mode);
2765 	if (ret)
2766 		return ret;
2767 
2768 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2769 	return 0;
2770 }
2771 
2772 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2773 			  u8 *fec_mode)
2774 {
2775 	struct hclge_vport *vport = hclge_get_vport(handle);
2776 	struct hclge_dev *hdev = vport->back;
2777 	struct hclge_mac *mac = &hdev->hw.mac;
2778 
2779 	if (fec_ability)
2780 		*fec_ability = mac->fec_ability;
2781 	if (fec_mode)
2782 		*fec_mode = mac->fec_mode;
2783 }
2784 
2785 static int hclge_mac_init(struct hclge_dev *hdev)
2786 {
2787 	struct hclge_mac *mac = &hdev->hw.mac;
2788 	int ret;
2789 
2790 	hdev->support_sfp_query = true;
2791 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2792 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2793 					 hdev->hw.mac.duplex);
2794 	if (ret)
2795 		return ret;
2796 
2797 	if (hdev->hw.mac.support_autoneg) {
2798 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2799 		if (ret)
2800 			return ret;
2801 	}
2802 
2803 	mac->link = 0;
2804 
2805 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2806 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2807 		if (ret)
2808 			return ret;
2809 	}
2810 
2811 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2812 	if (ret) {
2813 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2814 		return ret;
2815 	}
2816 
2817 	ret = hclge_set_default_loopback(hdev);
2818 	if (ret)
2819 		return ret;
2820 
2821 	ret = hclge_buffer_alloc(hdev);
2822 	if (ret)
2823 		dev_err(&hdev->pdev->dev,
2824 			"failed to allocate buffer, ret=%d\n", ret);
2825 
2826 	return ret;
2827 }
2828 
2829 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2830 {
2831 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2832 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2833 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2834 				    hclge_wq, &hdev->service_task, 0);
2835 }
2836 
2837 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2838 {
2839 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2840 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2841 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2842 				    hclge_wq, &hdev->service_task, 0);
2843 }
2844 
2845 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2846 {
2847 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2848 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2849 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2850 				    hclge_wq, &hdev->service_task,
2851 				    delay_time);
2852 }
2853 
2854 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2855 {
2856 	struct hclge_link_status_cmd *req;
2857 	struct hclge_desc desc;
2858 	int ret;
2859 
2860 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2861 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2862 	if (ret) {
2863 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2864 			ret);
2865 		return ret;
2866 	}
2867 
2868 	req = (struct hclge_link_status_cmd *)desc.data;
2869 	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2870 		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2871 
2872 	return 0;
2873 }
2874 
2875 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2876 {
2877 	struct phy_device *phydev = hdev->hw.mac.phydev;
2878 
2879 	*link_status = HCLGE_LINK_STATUS_DOWN;
2880 
2881 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2882 		return 0;
2883 
2884 	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2885 		return 0;
2886 
2887 	return hclge_get_mac_link_status(hdev, link_status);
2888 }
2889 
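/* Propagate a MAC/PHY link state change to the NIC client and, when
 * registered, to the RoCE client of every vport.
 */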
2890 static void hclge_update_link_status(struct hclge_dev *hdev)
2891 {
2892 	struct hnae3_client *rclient = hdev->roce_client;
2893 	struct hnae3_client *client = hdev->nic_client;
2894 	struct hnae3_handle *rhandle;
2895 	struct hnae3_handle *handle;
2896 	int state;
2897 	int ret;
2898 	int i;
2899 
2900 	if (!client)
2901 		return;
2902 
2903 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2904 		return;
2905 
2906 	ret = hclge_get_mac_phy_link(hdev, &state);
2907 	if (ret) {
2908 		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2909 		return;
2910 	}
2911 
2912 	if (state != hdev->hw.mac.link) {
2913 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2914 			handle = &hdev->vport[i].nic;
2915 			client->ops->link_status_change(handle, state);
2916 			hclge_config_mac_tnl_int(hdev, state);
2917 			rhandle = &hdev->vport[i].roce;
2918 			if (rclient && rclient->ops->link_status_change)
2919 				rclient->ops->link_status_change(rhandle,
2920 								 state);
2921 		}
2922 		hdev->hw.mac.link = state;
2923 	}
2924 
2925 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2926 }
2927 
2928 static void hclge_update_port_capability(struct hclge_dev *hdev,
2929 					 struct hclge_mac *mac)
2930 {
2931 	if (hnae3_dev_fec_supported(hdev))
2932 		/* update fec ability by speed */
2933 		hclge_convert_setting_fec(mac);
2934 
2935 	/* firmware cannot identify the backplane type, so the media type
2936 	 * read from the configuration helps to deal with it
2937 	 */
2938 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2939 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2940 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2941 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2942 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2943 
2944 	if (mac->support_autoneg) {
2945 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2946 		linkmode_copy(mac->advertising, mac->supported);
2947 	} else {
2948 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2949 				   mac->supported);
2950 		linkmode_zero(mac->advertising);
2951 	}
2952 }
2953 
2954 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2955 {
2956 	struct hclge_sfp_info_cmd *resp;
2957 	struct hclge_desc desc;
2958 	int ret;
2959 
2960 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2961 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2962 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2963 	if (ret == -EOPNOTSUPP) {
2964 		dev_warn(&hdev->pdev->dev,
2965 			 "IMP does not support getting SFP speed %d\n", ret);
2966 		return ret;
2967 	} else if (ret) {
2968 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2969 		return ret;
2970 	}
2971 
2972 	*speed = le32_to_cpu(resp->speed);
2973 
2974 	return 0;
2975 }
2976 
2977 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2978 {
2979 	struct hclge_sfp_info_cmd *resp;
2980 	struct hclge_desc desc;
2981 	int ret;
2982 
2983 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2984 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2985 
2986 	resp->query_type = QUERY_ACTIVE_SPEED;
2987 
2988 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2989 	if (ret == -EOPNOTSUPP) {
2990 		dev_warn(&hdev->pdev->dev,
2991 			 "IMP does not support getting SFP info %d\n", ret);
2992 		return ret;
2993 	} else if (ret) {
2994 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2995 		return ret;
2996 	}
2997 
2998 	/* In some cases, the mac speed got from IMP may be 0; it shouldn't
2999 	 * be set to mac->speed.
3000 	 */
3001 	if (!le32_to_cpu(resp->speed))
3002 		return 0;
3003 
3004 	mac->speed = le32_to_cpu(resp->speed);
3005 	/* if resp->speed_ability is 0, it means the firmware is an old
3006 	 * version, so do not update these params
3007 	 */
3008 	if (resp->speed_ability) {
3009 		mac->module_type = le32_to_cpu(resp->module_type);
3010 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
3011 		mac->autoneg = resp->autoneg;
3012 		mac->support_autoneg = resp->autoneg_ability;
3013 		mac->speed_type = QUERY_ACTIVE_SPEED;
3014 		if (!resp->active_fec)
3015 			mac->fec_mode = 0;
3016 		else
3017 			mac->fec_mode = BIT(resp->active_fec);
3018 	} else {
3019 		mac->speed_type = QUERY_SFP_SPEED;
3020 	}
3021 
3022 	return 0;
3023 }
3024 
3025 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3026 					struct ethtool_link_ksettings *cmd)
3027 {
3028 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3029 	struct hclge_vport *vport = hclge_get_vport(handle);
3030 	struct hclge_phy_link_ksetting_0_cmd *req0;
3031 	struct hclge_phy_link_ksetting_1_cmd *req1;
3032 	u32 supported, advertising, lp_advertising;
3033 	struct hclge_dev *hdev = vport->back;
3034 	int ret;
3035 
3036 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3037 				   true);
3038 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3039 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3040 				   true);
3041 
3042 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3043 	if (ret) {
3044 		dev_err(&hdev->pdev->dev,
3045 			"failed to get phy link ksetting, ret = %d.\n", ret);
3046 		return ret;
3047 	}
3048 
3049 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3050 	cmd->base.autoneg = req0->autoneg;
3051 	cmd->base.speed = le32_to_cpu(req0->speed);
3052 	cmd->base.duplex = req0->duplex;
3053 	cmd->base.port = req0->port;
3054 	cmd->base.transceiver = req0->transceiver;
3055 	cmd->base.phy_address = req0->phy_address;
3056 	cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3057 	cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3058 	supported = le32_to_cpu(req0->supported);
3059 	advertising = le32_to_cpu(req0->advertising);
3060 	lp_advertising = le32_to_cpu(req0->lp_advertising);
3061 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3062 						supported);
3063 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3064 						advertising);
3065 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3066 						lp_advertising);
3067 
3068 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3069 	cmd->base.master_slave_cfg = req1->master_slave_cfg;
3070 	cmd->base.master_slave_state = req1->master_slave_state;
3071 
3072 	return 0;
3073 }
3074 
3075 static int
3076 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3077 			     const struct ethtool_link_ksettings *cmd)
3078 {
3079 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3080 	struct hclge_vport *vport = hclge_get_vport(handle);
3081 	struct hclge_phy_link_ksetting_0_cmd *req0;
3082 	struct hclge_phy_link_ksetting_1_cmd *req1;
3083 	struct hclge_dev *hdev = vport->back;
3084 	u32 advertising;
3085 	int ret;
3086 
3087 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
3088 	    ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3089 	     (cmd->base.duplex != DUPLEX_HALF &&
3090 	      cmd->base.duplex != DUPLEX_FULL)))
3091 		return -EINVAL;
3092 
3093 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3094 				   false);
3095 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3096 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3097 				   false);
3098 
3099 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3100 	req0->autoneg = cmd->base.autoneg;
3101 	req0->speed = cpu_to_le32(cmd->base.speed);
3102 	req0->duplex = cmd->base.duplex;
3103 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
3104 						cmd->link_modes.advertising);
3105 	req0->advertising = cpu_to_le32(advertising);
3106 	req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3107 
3108 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3109 	req1->master_slave_cfg = cmd->base.master_slave_cfg;
3110 
3111 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3112 	if (ret) {
3113 		dev_err(&hdev->pdev->dev,
3114 			"failed to set phy link ksettings, ret = %d.\n", ret);
3115 		return ret;
3116 	}
3117 
3118 	hdev->hw.mac.autoneg = cmd->base.autoneg;
3119 	hdev->hw.mac.speed = cmd->base.speed;
3120 	hdev->hw.mac.duplex = cmd->base.duplex;
3121 	linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3122 
3123 	return 0;
3124 }
3125 
3126 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3127 {
3128 	struct ethtool_link_ksettings cmd;
3129 	int ret;
3130 
3131 	if (!hnae3_dev_phy_imp_supported(hdev))
3132 		return 0;
3133 
3134 	ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3135 	if (ret)
3136 		return ret;
3137 
3138 	hdev->hw.mac.autoneg = cmd.base.autoneg;
3139 	hdev->hw.mac.speed = cmd.base.speed;
3140 	hdev->hw.mac.duplex = cmd.base.duplex;
3141 
3142 	return 0;
3143 }
3144 
3145 static int hclge_tp_port_init(struct hclge_dev *hdev)
3146 {
3147 	struct ethtool_link_ksettings cmd;
3148 
3149 	if (!hnae3_dev_phy_imp_supported(hdev))
3150 		return 0;
3151 
3152 	cmd.base.autoneg = hdev->hw.mac.autoneg;
3153 	cmd.base.speed = hdev->hw.mac.speed;
3154 	cmd.base.duplex = hdev->hw.mac.duplex;
3155 	linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3156 
3157 	return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3158 }
3159 
3160 static int hclge_update_port_info(struct hclge_dev *hdev)
3161 {
3162 	struct hclge_mac *mac = &hdev->hw.mac;
3163 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
3164 	int ret;
3165 
3166 	/* get the port info from SFP cmd if not copper port */
3167 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3168 		return hclge_update_tp_port_info(hdev);
3169 
3170 	/* if IMP does not support getting SFP/qSFP info, return directly */
3171 	if (!hdev->support_sfp_query)
3172 		return 0;
3173 
3174 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3175 		ret = hclge_get_sfp_info(hdev, mac);
3176 	else
3177 		ret = hclge_get_sfp_speed(hdev, &speed);
3178 
3179 	if (ret == -EOPNOTSUPP) {
3180 		hdev->support_sfp_query = false;
3181 		return ret;
3182 	} else if (ret) {
3183 		return ret;
3184 	}
3185 
3186 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3187 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3188 			hclge_update_port_capability(hdev, mac);
3189 			return 0;
3190 		}
3191 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3192 					       HCLGE_MAC_FULL);
3193 	} else {
3194 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3195 			return 0; /* do nothing if no SFP */
3196 
3197 		/* must config full duplex for SFP */
3198 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3199 	}
3200 }
3201 
3202 static int hclge_get_status(struct hnae3_handle *handle)
3203 {
3204 	struct hclge_vport *vport = hclge_get_vport(handle);
3205 	struct hclge_dev *hdev = vport->back;
3206 
3207 	hclge_update_link_status(hdev);
3208 
3209 	return hdev->hw.mac.link;
3210 }
3211 
3212 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3213 {
3214 	if (!pci_num_vf(hdev->pdev)) {
3215 		dev_err(&hdev->pdev->dev,
3216 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
3217 		return NULL;
3218 	}
3219 
3220 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3221 		dev_err(&hdev->pdev->dev,
3222 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
3223 			vf, pci_num_vf(hdev->pdev));
3224 		return NULL;
3225 	}
3226 
3227 	/* VFs start from 1 in vport */
3228 	vf += HCLGE_VF_VPORT_START_NUM;
3229 	return &hdev->vport[vf];
3230 }
3231 
3232 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3233 			       struct ifla_vf_info *ivf)
3234 {
3235 	struct hclge_vport *vport = hclge_get_vport(handle);
3236 	struct hclge_dev *hdev = vport->back;
3237 
3238 	vport = hclge_get_vf_vport(hdev, vf);
3239 	if (!vport)
3240 		return -EINVAL;
3241 
3242 	ivf->vf = vf;
3243 	ivf->linkstate = vport->vf_info.link_state;
3244 	ivf->spoofchk = vport->vf_info.spoofchk;
3245 	ivf->trusted = vport->vf_info.trusted;
3246 	ivf->min_tx_rate = 0;
3247 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3248 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3249 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3250 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3251 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
3252 
3253 	return 0;
3254 }
3255 
3256 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3257 				   int link_state)
3258 {
3259 	struct hclge_vport *vport = hclge_get_vport(handle);
3260 	struct hclge_dev *hdev = vport->back;
3261 
3262 	vport = hclge_get_vf_vport(hdev, vf);
3263 	if (!vport)
3264 		return -EINVAL;
3265 
3266 	vport->vf_info.link_state = link_state;
3267 
3268 	return 0;
3269 }
3270 
3271 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3272 {
3273 	u32 cmdq_src_reg, msix_src_reg;
3274 
3275 	/* fetch the events from their corresponding regs */
3276 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3277 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3278 
3279 	/* Assumption: if reset and mailbox events are reported together, we
3280 	 * will only process the reset event in this pass and defer handling
3281 	 * of the mailbox events. Since we would not have cleared the RX CMDQ
3282 	 * event this time, the H/W will raise another interrupt just for the
3283 	 * mailbox.
3284 	 *
3285 	 * check for vector0 reset event sources
3286 	 */
3287 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3288 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3289 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3290 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3291 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3292 		hdev->rst_stats.imp_rst_cnt++;
3293 		return HCLGE_VECTOR0_EVENT_RST;
3294 	}
3295 
3296 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3297 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3298 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3299 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3300 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3301 		hdev->rst_stats.global_rst_cnt++;
3302 		return HCLGE_VECTOR0_EVENT_RST;
3303 	}
3304 
3305 	/* check for vector0 msix event source */
3306 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3307 		*clearval = msix_src_reg;
3308 		return HCLGE_VECTOR0_EVENT_ERR;
3309 	}
3310 
3311 	/* check for vector0 mailbox(=CMDQ RX) event source */
3312 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3313 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3314 		*clearval = cmdq_src_reg;
3315 		return HCLGE_VECTOR0_EVENT_MBX;
3316 	}
3317 
3318 	/* print other vector0 event source */
3319 	dev_info(&hdev->pdev->dev,
3320 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3321 		 cmdq_src_reg, msix_src_reg);
3322 	*clearval = msix_src_reg;
3323 
3324 	return HCLGE_VECTOR0_EVENT_OTHER;
3325 }
3326 
3327 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3328 				    u32 regclr)
3329 {
3330 	switch (event_type) {
3331 	case HCLGE_VECTOR0_EVENT_RST:
3332 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3333 		break;
3334 	case HCLGE_VECTOR0_EVENT_MBX:
3335 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3336 		break;
3337 	default:
3338 		break;
3339 	}
3340 }
3341 
3342 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3343 {
3344 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3345 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3346 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3347 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3348 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3349 }
3350 
3351 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3352 {
3353 	writel(enable ? 1 : 0, vector->addr);
3354 }
3355 
3356 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3357 {
3358 	struct hclge_dev *hdev = data;
3359 	u32 clearval = 0;
3360 	u32 event_cause;
3361 
3362 	hclge_enable_vector(&hdev->misc_vector, false);
3363 	event_cause = hclge_check_event_cause(hdev, &clearval);
3364 
3365 	/* vector 0 interrupt is shared with reset and mailbox source events. */
3366 	switch (event_cause) {
3367 	case HCLGE_VECTOR0_EVENT_ERR:
3368 		/* we do not know what type of reset is required now. This could
3369 		 * only be decided after we fetch the type of errors which
3370 		 * caused this event. Therefore, we will do the below for now:
3371 		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3372 		 *    have deferred the type of reset to be used.
3373 		 * 2. Schedule the reset service task.
3374 		 * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3375 		 *    will fetch the correct type of reset. This would be done
3376 		 *    by first decoding the types of errors.
3377 		 */
3378 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3379 		fallthrough;
3380 	case HCLGE_VECTOR0_EVENT_RST:
3381 		hclge_reset_task_schedule(hdev);
3382 		break;
3383 	case HCLGE_VECTOR0_EVENT_MBX:
3384 		/* If we are here then,
3385 		 * 1. Either we are not handling any mbx task and we are not
3386 		 *    scheduled as well
3387 		 *                        OR
3388 		 * 2. We could be handling a mbx task but nothing more is
3389 		 *    scheduled.
3390 		 * In both cases, we should schedule the mbx task as there are more
3391 		 * mbx messages reported by this interrupt.
3392 		 */
3393 		hclge_mbx_task_schedule(hdev);
3394 		break;
3395 	default:
3396 		dev_warn(&hdev->pdev->dev,
3397 			 "received unknown or unhandled event of vector0\n");
3398 		break;
3399 	}
3400 
3401 	hclge_clear_event_cause(hdev, event_cause, clearval);
3402 
3403 	/* Enable the interrupt if it is not caused by reset. And when
3404 	 * clearval equals 0, it means the interrupt status may have been
3405 	 * cleared by hardware before the driver reads the status register.
3406 	 * In this case, the vector0 interrupt should also be enabled.
3407 	 */
3408 	if (!clearval ||
3409 	    event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3410 		hclge_enable_vector(&hdev->misc_vector, true);
3411 	}
3412 
3413 	return IRQ_HANDLED;
3414 }
3415 
3416 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3417 {
3418 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3419 		dev_warn(&hdev->pdev->dev,
3420 			 "vector(vector_id %d) has been freed.\n", vector_id);
3421 		return;
3422 	}
3423 
3424 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3425 	hdev->num_msi_left += 1;
3426 	hdev->num_msi_used -= 1;
3427 }
3428 
3429 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3430 {
3431 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3432 
3433 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3434 
3435 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3436 	hdev->vector_status[0] = 0;
3437 
3438 	hdev->num_msi_left -= 1;
3439 	hdev->num_msi_used += 1;
3440 }
3441 
3442 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3443 				      const cpumask_t *mask)
3444 {
3445 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3446 					      affinity_notify);
3447 
3448 	cpumask_copy(&hdev->affinity_mask, mask);
3449 }
3450 
3451 static void hclge_irq_affinity_release(struct kref *ref)
3452 {
3453 }
3454 
3455 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3456 {
3457 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3458 			      &hdev->affinity_mask);
3459 
3460 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3461 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3462 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3463 				  &hdev->affinity_notify);
3464 }
3465 
3466 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3467 {
3468 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3469 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3470 }
3471 
3472 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3473 {
3474 	int ret;
3475 
3476 	hclge_get_misc_vector(hdev);
3477 
3478 	/* this would be explicitly freed in the end */
3479 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3480 		 HCLGE_NAME, pci_name(hdev->pdev));
3481 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3482 			  0, hdev->misc_vector.name, hdev);
3483 	if (ret) {
3484 		hclge_free_vector(hdev, 0);
3485 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3486 			hdev->misc_vector.vector_irq);
3487 	}
3488 
3489 	return ret;
3490 }
3491 
3492 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3493 {
3494 	free_irq(hdev->misc_vector.vector_irq, hdev);
3495 	hclge_free_vector(hdev, 0);
3496 }
3497 
3498 int hclge_notify_client(struct hclge_dev *hdev,
3499 			enum hnae3_reset_notify_type type)
3500 {
3501 	struct hnae3_client *client = hdev->nic_client;
3502 	u16 i;
3503 
3504 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3505 		return 0;
3506 
3507 	if (!client->ops->reset_notify)
3508 		return -EOPNOTSUPP;
3509 
3510 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3511 		struct hnae3_handle *handle = &hdev->vport[i].nic;
3512 		int ret;
3513 
3514 		ret = client->ops->reset_notify(handle, type);
3515 		if (ret) {
3516 			dev_err(&hdev->pdev->dev,
3517 				"notify nic client failed %d(%d)\n", type, ret);
3518 			return ret;
3519 		}
3520 	}
3521 
3522 	return 0;
3523 }
3524 
3525 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3526 				    enum hnae3_reset_notify_type type)
3527 {
3528 	struct hnae3_client *client = hdev->roce_client;
3529 	int ret;
3530 	u16 i;
3531 
3532 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3533 		return 0;
3534 
3535 	if (!client->ops->reset_notify)
3536 		return -EOPNOTSUPP;
3537 
3538 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3539 		struct hnae3_handle *handle = &hdev->vport[i].roce;
3540 
3541 		ret = client->ops->reset_notify(handle, type);
3542 		if (ret) {
3543 			dev_err(&hdev->pdev->dev,
3544 				"notify roce client failed %d(%d)",
3545 				type, ret);
3546 			return ret;
3547 		}
3548 	}
3549 
3550 	return ret;
3551 }
3552 
3553 static int hclge_reset_wait(struct hclge_dev *hdev)
3554 {
3555 #define HCLGE_RESET_WAIT_MS	100
3556 #define HCLGE_RESET_WAIT_CNT	350
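/* worst case the loop below polls 350 times with a 100 ms sleep between
 * reads, i.e. roughly 35 s, before giving up with -EBUSY
 */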
3557 
3558 	u32 val, reg, reg_bit;
3559 	u32 cnt = 0;
3560 
3561 	switch (hdev->reset_type) {
3562 	case HNAE3_IMP_RESET:
3563 		reg = HCLGE_GLOBAL_RESET_REG;
3564 		reg_bit = HCLGE_IMP_RESET_BIT;
3565 		break;
3566 	case HNAE3_GLOBAL_RESET:
3567 		reg = HCLGE_GLOBAL_RESET_REG;
3568 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3569 		break;
3570 	case HNAE3_FUNC_RESET:
3571 		reg = HCLGE_FUN_RST_ING;
3572 		reg_bit = HCLGE_FUN_RST_ING_B;
3573 		break;
3574 	default:
3575 		dev_err(&hdev->pdev->dev,
3576 			"Wait for unsupported reset type: %d\n",
3577 			hdev->reset_type);
3578 		return -EINVAL;
3579 	}
3580 
3581 	val = hclge_read_dev(&hdev->hw, reg);
3582 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3583 		msleep(HCLGE_RESET_WATI_MS);
3584 		msleep(HCLGE_RESET_WAIT_MS);
3585 		cnt++;
3586 	}
3587 
3588 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3589 		dev_warn(&hdev->pdev->dev,
3590 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3591 		return -EBUSY;
3592 	}
3593 
3594 	return 0;
3595 }
3596 
3597 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3598 {
3599 	struct hclge_vf_rst_cmd *req;
3600 	struct hclge_desc desc;
3601 
3602 	req = (struct hclge_vf_rst_cmd *)desc.data;
3603 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3604 	req->dest_vfid = func_id;
3605 
3606 	if (reset)
3607 		req->vf_rst = 0x1;
3608 
3609 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3610 }
3611 
3612 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3613 {
3614 	int i;
3615 
3616 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3617 		struct hclge_vport *vport = &hdev->vport[i];
3618 		int ret;
3619 
3620 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3621 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3622 		if (ret) {
3623 			dev_err(&hdev->pdev->dev,
3624 				"set vf(%u) rst failed %d!\n",
3625 				vport->vport_id, ret);
3626 			return ret;
3627 		}
3628 
3629 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3630 			continue;
3631 
3632 		/* Inform VF to process the reset.
3633 		 * hclge_inform_reset_assert_to_vf may fail if VF
3634 		 * driver is not loaded.
3635 		 */
3636 		ret = hclge_inform_reset_assert_to_vf(vport);
3637 		if (ret)
3638 			dev_warn(&hdev->pdev->dev,
3639 				 "inform reset to vf(%u) failed %d!\n",
3640 				 vport->vport_id, ret);
3641 	}
3642 
3643 	return 0;
3644 }
3645 
3646 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3647 {
3648 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3649 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3650 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3651 		return;
3652 
3653 	hclge_mbx_handler(hdev);
3654 
3655 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3656 }
3657 
3658 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3659 {
3660 	struct hclge_pf_rst_sync_cmd *req;
3661 	struct hclge_desc desc;
3662 	int cnt = 0;
3663 	int ret;
3664 
3665 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3666 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3667 
3668 	do {
3669 		/* the VF needs to down its netdev by mbx during PF or FLR reset */
3670 		hclge_mailbox_service_task(hdev);
3671 
3672 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3673 		/* for compatibility with old firmware, wait
3674 		 * 100 ms for the VF to stop IO
3675 		 */
3676 		if (ret == -EOPNOTSUPP) {
3677 			msleep(HCLGE_RESET_SYNC_TIME);
3678 			return;
3679 		} else if (ret) {
3680 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3681 				 ret);
3682 			return;
3683 		} else if (req->all_vf_ready) {
3684 			return;
3685 		}
3686 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3687 		hclge_cmd_reuse_desc(&desc, true);
3688 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3689 
3690 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3691 }
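
/* The sync above bounds how long a PF/FLR reset waits for VFs: at most
 * HCLGE_PF_RESET_SYNC_CNT iterations with an HCLGE_PF_RESET_SYNC_TIME ms
 * sleep between queries, or a single HCLGE_RESET_SYNC_TIME ms grace period
 * when the firmware does not support the query command.
 */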
3692 
3693 void hclge_report_hw_error(struct hclge_dev *hdev,
3694 			   enum hnae3_hw_error_type type)
3695 {
3696 	struct hnae3_client *client = hdev->nic_client;
3697 	u16 i;
3698 
3699 	if (!client || !client->ops->process_hw_error ||
3700 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3701 		return;
3702 
3703 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3704 		client->ops->process_hw_error(&hdev->vport[i].nic, type);
3705 }
3706 
3707 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3708 {
3709 	u32 reg_val;
3710 
3711 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3712 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3713 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3714 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3715 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3716 	}
3717 
3718 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3719 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3720 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3721 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3722 	}
3723 }
3724 
3725 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3726 {
3727 	struct hclge_desc desc;
3728 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3729 	int ret;
3730 
3731 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3732 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3733 	req->fun_reset_vfid = func_id;
3734 
3735 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3736 	if (ret)
3737 		dev_err(&hdev->pdev->dev,
3738 			"send function reset cmd fail, status =%d\n", ret);
3739 
3740 	return ret;
3741 }
3742 
3743 static void hclge_do_reset(struct hclge_dev *hdev)
3744 {
3745 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3746 	struct pci_dev *pdev = hdev->pdev;
3747 	u32 val;
3748 
3749 	if (hclge_get_hw_reset_stat(handle)) {
3750 		dev_info(&pdev->dev, "hardware reset not finish\n");
3751 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3752 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3753 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3754 		return;
3755 	}
3756 
3757 	switch (hdev->reset_type) {
3758 	case HNAE3_GLOBAL_RESET:
3759 		dev_info(&pdev->dev, "global reset requested\n");
3760 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3761 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3762 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3763 		break;
3764 	case HNAE3_FUNC_RESET:
3765 		dev_info(&pdev->dev, "PF reset requested\n");
3766 		/* schedule again to check later */
3767 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3768 		hclge_reset_task_schedule(hdev);
3769 		break;
3770 	default:
3771 		dev_warn(&pdev->dev,
3772 			 "unsupported reset type: %d\n", hdev->reset_type);
3773 		break;
3774 	}
3775 }
3776 
3777 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3778 						   unsigned long *addr)
3779 {
3780 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3781 	struct hclge_dev *hdev = ae_dev->priv;
3782 
3783 	/* first, resolve any unknown reset type to the known type(s) */
3784 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3785 		u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3786 					HCLGE_MISC_VECTOR_INT_STS);
3787 		/* we will intentionally ignore any errors from this function
3788 		 * as we will end up in *some* reset request in any case
3789 		 */
3790 		if (hclge_handle_hw_msix_error(hdev, addr))
3791 			dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3792 				 msix_sts_reg);
3793 
3794 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
3795 		/* We deferred the clearing of the error event which caused
3796 		 * the interrupt, since it was not possible to do that in
3797 		 * interrupt context (and this is the reason we introduced the
3798 		 * new UNKNOWN reset type). Now that the errors have been
3799 		 * handled and cleared in hardware, we can safely enable
3800 		 * interrupts. This is an exception to the norm.
3801 		 */
3802 		hclge_enable_vector(&hdev->misc_vector, true);
3803 	}
3804 
3805 	/* return the highest priority reset level amongst all */
3806 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3807 		rst_level = HNAE3_IMP_RESET;
3808 		clear_bit(HNAE3_IMP_RESET, addr);
3809 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3810 		clear_bit(HNAE3_FUNC_RESET, addr);
3811 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3812 		rst_level = HNAE3_GLOBAL_RESET;
3813 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3814 		clear_bit(HNAE3_FUNC_RESET, addr);
3815 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3816 		rst_level = HNAE3_FUNC_RESET;
3817 		clear_bit(HNAE3_FUNC_RESET, addr);
3818 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3819 		rst_level = HNAE3_FLR_RESET;
3820 		clear_bit(HNAE3_FLR_RESET, addr);
3821 	}
3822 
3823 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3824 	    rst_level < hdev->reset_type)
3825 		return HNAE3_NONE_RESET;
3826 
3827 	return rst_level;
3828 }
3829 
3830 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3831 {
3832 	u32 clearval = 0;
3833 
3834 	switch (hdev->reset_type) {
3835 	case HNAE3_IMP_RESET:
3836 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3837 		break;
3838 	case HNAE3_GLOBAL_RESET:
3839 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3840 		break;
3841 	default:
3842 		break;
3843 	}
3844 
3845 	if (!clearval)
3846 		return;
3847 
3848 	/* For revision 0x20, the reset interrupt source
3849 	 * can only be cleared after the hardware reset is done
3850 	 */
3851 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3852 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3853 				clearval);
3854 
3855 	hclge_enable_vector(&hdev->misc_vector, true);
3856 }
3857 
3858 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3859 {
3860 	u32 reg_val;
3861 
3862 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3863 	if (enable)
3864 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3865 	else
3866 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3867 
3868 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3869 }
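
/* HCLGE_NIC_SW_RST_RDY is the driver side of the reset handshake: it is set
 * in hclge_reset_prepare_wait() (and restored in the failure path) to inform
 * hardware that preparatory work is done, and cleared again in
 * hclge_reset_prepare_up() once re-initialization has finished.
 */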
3870 
3871 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3872 {
3873 	int ret;
3874 
3875 	ret = hclge_set_all_vf_rst(hdev, true);
3876 	if (ret)
3877 		return ret;
3878 
3879 	hclge_func_reset_sync_vf(hdev);
3880 
3881 	return 0;
3882 }
3883 
3884 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3885 {
3886 	u32 reg_val;
3887 	int ret = 0;
3888 
3889 	switch (hdev->reset_type) {
3890 	case HNAE3_FUNC_RESET:
3891 		ret = hclge_func_reset_notify_vf(hdev);
3892 		if (ret)
3893 			return ret;
3894 
3895 		ret = hclge_func_reset_cmd(hdev, 0);
3896 		if (ret) {
3897 			dev_err(&hdev->pdev->dev,
3898 				"asserting function reset fail %d!\n", ret);
3899 			return ret;
3900 		}
3901 
3902 		/* After performing a PF reset, it is not necessary to do any
3903 		 * mailbox handling or send any command to firmware, because
3904 		 * any mailbox handling or command to firmware is only valid
3905 		 * after hclge_cmd_init is called.
3906 		 */
3907 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3908 		hdev->rst_stats.pf_rst_cnt++;
3909 		break;
3910 	case HNAE3_FLR_RESET:
3911 		ret = hclge_func_reset_notify_vf(hdev);
3912 		if (ret)
3913 			return ret;
3914 		break;
3915 	case HNAE3_IMP_RESET:
3916 		hclge_handle_imp_error(hdev);
3917 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3918 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3919 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3920 		break;
3921 	default:
3922 		break;
3923 	}
3924 
3925 	/* inform hardware that preparatory work is done */
3926 	msleep(HCLGE_RESET_SYNC_TIME);
3927 	hclge_reset_handshake(hdev, true);
3928 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3929 
3930 	return ret;
3931 }
3932 
3933 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3934 {
3935 #define MAX_RESET_FAIL_CNT 5
3936 
3937 	if (hdev->reset_pending) {
3938 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3939 			 hdev->reset_pending);
3940 		return true;
3941 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3942 		   HCLGE_RESET_INT_M) {
3943 		dev_info(&hdev->pdev->dev,
3944 			 "reset failed because new reset interrupt\n");
3945 		hclge_clear_reset_cause(hdev);
3946 		return false;
3947 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3948 		hdev->rst_stats.reset_fail_cnt++;
3949 		set_bit(hdev->reset_type, &hdev->reset_pending);
3950 		dev_info(&hdev->pdev->dev,
3951 			 "re-schedule reset task(%u)\n",
3952 			 hdev->rst_stats.reset_fail_cnt);
3953 		return true;
3954 	}
3955 
3956 	hclge_clear_reset_cause(hdev);
3957 
3958 	/* recover the handshake status when reset fail */
3959 	hclge_reset_handshake(hdev, true);
3960 
3961 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3962 
3963 	hclge_dbg_dump_rst_info(hdev);
3964 
3965 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3966 
3967 	return false;
3968 }
3969 
3970 static int hclge_set_rst_done(struct hclge_dev *hdev)
3971 {
3972 	struct hclge_pf_rst_done_cmd *req;
3973 	struct hclge_desc desc;
3974 	int ret;
3975 
3976 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
3977 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3978 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3979 
3980 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3981 	/* To be compatible with the old firmware, which does not support
3982 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3983 	 * return success
3984 	 */
3985 	if (ret == -EOPNOTSUPP) {
3986 		dev_warn(&hdev->pdev->dev,
3987 			 "current firmware does not support command(0x%x)!\n",
3988 			 HCLGE_OPC_PF_RST_DONE);
3989 		return 0;
3990 	} else if (ret) {
3991 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3992 			ret);
3993 	}
3994 
3995 	return ret;
3996 }
3997 
3998 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3999 {
4000 	int ret = 0;
4001 
4002 	switch (hdev->reset_type) {
4003 	case HNAE3_FUNC_RESET:
4004 	case HNAE3_FLR_RESET:
4005 		ret = hclge_set_all_vf_rst(hdev, false);
4006 		break;
4007 	case HNAE3_GLOBAL_RESET:
4008 	case HNAE3_IMP_RESET:
4009 		ret = hclge_set_rst_done(hdev);
4010 		break;
4011 	default:
4012 		break;
4013 	}
4014 
4015 	/* clear the handshake status after re-initialization is done */
4016 	hclge_reset_handshake(hdev, false);
4017 
4018 	return ret;
4019 }
4020 
4021 static int hclge_reset_stack(struct hclge_dev *hdev)
4022 {
4023 	int ret;
4024 
4025 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4026 	if (ret)
4027 		return ret;
4028 
4029 	ret = hclge_reset_ae_dev(hdev->ae_dev);
4030 	if (ret)
4031 		return ret;
4032 
4033 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4034 }
4035 
4036 static int hclge_reset_prepare(struct hclge_dev *hdev)
4037 {
4038 	int ret;
4039 
4040 	hdev->rst_stats.reset_cnt++;
4041 	/* perform reset of the stack & ae device for a client */
4042 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4043 	if (ret)
4044 		return ret;
4045 
4046 	rtnl_lock();
4047 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4048 	rtnl_unlock();
4049 	if (ret)
4050 		return ret;
4051 
4052 	return hclge_reset_prepare_wait(hdev);
4053 }
4054 
4055 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4056 {
4057 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4058 	enum hnae3_reset_type reset_level;
4059 	int ret;
4060 
4061 	hdev->rst_stats.hw_reset_done_cnt++;
4062 
4063 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4064 	if (ret)
4065 		return ret;
4066 
4067 	rtnl_lock();
4068 	ret = hclge_reset_stack(hdev);
4069 	rtnl_unlock();
4070 	if (ret)
4071 		return ret;
4072 
4073 	hclge_clear_reset_cause(hdev);
4074 
4075 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4076 	/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
4077 	 * times
4078 	 */
4079 	if (ret &&
4080 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4081 		return ret;
4082 
4083 	ret = hclge_reset_prepare_up(hdev);
4084 	if (ret)
4085 		return ret;
4086 
4087 	rtnl_lock();
4088 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4089 	rtnl_unlock();
4090 	if (ret)
4091 		return ret;
4092 
4093 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4094 	if (ret)
4095 		return ret;
4096 
4097 	hdev->last_reset_time = jiffies;
4098 	hdev->rst_stats.reset_fail_cnt = 0;
4099 	hdev->rst_stats.reset_done_cnt++;
4100 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4101 
4102 	/* if default_reset_request has a higher level reset request,
4103 	 * it should be handled as soon as possible, since some errors
4104 	 * need this kind of reset to be fixed.
4105 	 */
4106 	reset_level = hclge_get_reset_level(ae_dev,
4107 					    &hdev->default_reset_request);
4108 	if (reset_level != HNAE3_NONE_RESET)
4109 		set_bit(reset_level, &hdev->reset_request);
4110 
4111 	return 0;
4112 }
4113 
4114 static void hclge_reset(struct hclge_dev *hdev)
4115 {
4116 	if (hclge_reset_prepare(hdev))
4117 		goto err_reset;
4118 
4119 	if (hclge_reset_wait(hdev))
4120 		goto err_reset;
4121 
4122 	if (hclge_reset_rebuild(hdev))
4123 		goto err_reset;
4124 
4125 	return;
4126 
4127 err_reset:
4128 	if (hclge_reset_err_handle(hdev))
4129 		hclge_reset_task_schedule(hdev);
4130 }
4131 
4132 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4133 {
4134 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4135 	struct hclge_dev *hdev = ae_dev->priv;
4136 
4137 	/* We might end up getting called broadly because of the 2 cases below:
4138 	 * 1. A recoverable error was conveyed through APEI and the only way
4139 	 *    to bring back normalcy is to reset.
4140 	 * 2. A new reset request from the stack due to timeout.
4141 	 *
4142 	 * For the first case, the error event might not have an ae handle
4143 	 * available. Check if this is a new reset request and we are not here
4144 	 * just because the last reset attempt did not succeed and the
4145 	 * watchdog hit us again. We will know this if the last reset request
4146 	 * did not occur very recently (watchdog timer = 5*HZ, so check after
4147 	 * a sufficiently large time, say 4*5*HZ). For a new request we reset
4148 	 * the "reset level" to PF reset. If it is a repeat of the most recent
4149 	 * request, we want to throttle it, so we will not allow it again
4150 	 * before 3*HZ has elapsed.
4151 	 */
4152 	if (!handle)
4153 		handle = &hdev->vport[0].nic;
4154 
4155 	if (time_before(jiffies, (hdev->last_reset_time +
4156 				  HCLGE_RESET_INTERVAL))) {
4157 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4158 		return;
4159 	} else if (hdev->default_reset_request) {
4160 		hdev->reset_level =
4161 			hclge_get_reset_level(ae_dev,
4162 					      &hdev->default_reset_request);
4163 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4164 		hdev->reset_level = HNAE3_FUNC_RESET;
4165 	}
4166 
4167 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4168 		 hdev->reset_level);
4169 
4170 	/* request reset & schedule reset task */
4171 	set_bit(hdev->reset_level, &hdev->reset_request);
4172 	hclge_reset_task_schedule(hdev);
4173 
4174 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4175 		hdev->reset_level++;
4176 }
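
/* Timing example for the throttling above: if the previous reset finished at
 * time T, an event arriving before T + HCLGE_RESET_INTERVAL is only re-armed
 * via reset_timer. Otherwise, a pending default_reset_request decides the
 * level; failing that, an event later than T + 4 * 5 * HZ (four watchdog
 * periods) is treated as brand new and starts again at HNAE3_FUNC_RESET,
 * while anything earlier reuses the previously escalated reset_level. The
 * level is then bumped one step, up to HNAE3_GLOBAL_RESET, for the next
 * occurrence.
 */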
4177 
4178 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4179 					enum hnae3_reset_type rst_type)
4180 {
4181 	struct hclge_dev *hdev = ae_dev->priv;
4182 
4183 	set_bit(rst_type, &hdev->default_reset_request);
4184 }
4185 
4186 static void hclge_reset_timer(struct timer_list *t)
4187 {
4188 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4189 
4190 	/* if default_reset_request has no value, it means that this reset
4191 	 * request has already been handled, so just return here
4192 	 */
4193 	if (!hdev->default_reset_request)
4194 		return;
4195 
4196 	dev_info(&hdev->pdev->dev,
4197 		 "triggering reset in reset timer\n");
4198 	hclge_reset_event(hdev->pdev, NULL);
4199 }
4200 
4201 static void hclge_reset_subtask(struct hclge_dev *hdev)
4202 {
4203 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4204 
4205 	/* Check if there is any ongoing reset in the hardware. This status
4206 	 * can be checked from reset_pending. If there is, we need to wait
4207 	 * for the hardware to complete the reset.
4208 	 *    a. If we are able to figure out in reasonable time that the
4209 	 *       hardware has fully reset, then we can proceed with the
4210 	 *       driver and client reset.
4211 	 *    b. Else, we can come back later to check this status, so
4212 	 *       re-schedule now.
4213 	 */
4214 	hdev->last_reset_time = jiffies;
4215 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4216 	if (hdev->reset_type != HNAE3_NONE_RESET)
4217 		hclge_reset(hdev);
4218 
4219 	/* check if we got any *new* reset requests to be honored */
4220 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4221 	if (hdev->reset_type != HNAE3_NONE_RESET)
4222 		hclge_do_reset(hdev);
4223 
4224 	hdev->reset_type = HNAE3_NONE_RESET;
4225 }
4226 
4227 static void hclge_reset_service_task(struct hclge_dev *hdev)
4228 {
4229 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4230 		return;
4231 
4232 	down(&hdev->reset_sem);
4233 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4234 
4235 	hclge_reset_subtask(hdev);
4236 
4237 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4238 	up(&hdev->reset_sem);
4239 }
4240 
4241 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4242 {
4243 	int i;
4244 
4245 	/* start from vport 1, since the PF (vport 0) is always alive */
4246 	for (i = 1; i < hdev->num_alloc_vport; i++) {
4247 		struct hclge_vport *vport = &hdev->vport[i];
4248 
4249 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4250 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4251 
4252 		/* If vf is not alive, set to default value */
4253 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4254 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4255 	}
4256 }
4257 
4258 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4259 {
4260 	unsigned long delta = round_jiffies_relative(HZ);
4261 
4262 	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4263 		return;
4264 
4265 	/* Always handle the link updating to make sure link state is
4266 	 * updated when it is triggered by mbx.
4267 	 */
4268 	hclge_update_link_status(hdev);
4269 	hclge_sync_mac_table(hdev);
4270 	hclge_sync_promisc_mode(hdev);
4271 	hclge_sync_fd_table(hdev);
4272 
4273 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4274 		delta = jiffies - hdev->last_serv_processed;
4275 
4276 		if (delta < round_jiffies_relative(HZ)) {
4277 			delta = round_jiffies_relative(HZ) - delta;
4278 			goto out;
4279 		}
4280 	}
4281 
4282 	hdev->serv_processed_cnt++;
4283 	hclge_update_vport_alive(hdev);
4284 
4285 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4286 		hdev->last_serv_processed = jiffies;
4287 		goto out;
4288 	}
4289 
4290 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4291 		hclge_update_stats_for_all(hdev);
4292 
4293 	hclge_update_port_info(hdev);
4294 	hclge_sync_vlan_filter(hdev);
4295 
4296 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4297 		hclge_rfs_filter_expire(hdev);
4298 
4299 	hdev->last_serv_processed = jiffies;
4300 
4301 out:
4302 	hclge_task_schedule(hdev, delta);
4303 }
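
/* Example of the delta handling above: if the previous pass ran 300 ms ago,
 * the heavier periodic work is skipped and the task is re-armed for the
 * remaining ~700 ms; once a full second has elapsed the work runs and the
 * task is re-armed for a rounded one-second period again.
 */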
4304 
4305 static void hclge_service_task(struct work_struct *work)
4306 {
4307 	struct hclge_dev *hdev =
4308 		container_of(work, struct hclge_dev, service_task.work);
4309 
4310 	hclge_reset_service_task(hdev);
4311 	hclge_mailbox_service_task(hdev);
4312 	hclge_periodic_service_task(hdev);
4313 
4314 	/* Handle reset and mbx again in case periodical task delays the
4315 	 * handling by calling hclge_task_schedule() in
4316 	 * hclge_periodic_service_task().
4317 	 */
4318 	hclge_reset_service_task(hdev);
4319 	hclge_mailbox_service_task(hdev);
4320 }
4321 
4322 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4323 {
4324 	/* VF handle has no client */
4325 	if (!handle->client)
4326 		return container_of(handle, struct hclge_vport, nic);
4327 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4328 		return container_of(handle, struct hclge_vport, roce);
4329 	else
4330 		return container_of(handle, struct hclge_vport, nic);
4331 }
4332 
4333 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4334 				  struct hnae3_vector_info *vector_info)
4335 {
4336 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64
4337 
4338 	vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4339 
4340 	/* need an extended offset to config vectors >= 64 */
4341 	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4342 		vector_info->io_addr = hdev->hw.io_base +
4343 				HCLGE_VECTOR_REG_BASE +
4344 				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4345 	else
4346 		vector_info->io_addr = hdev->hw.io_base +
4347 				HCLGE_VECTOR_EXT_REG_BASE +
4348 				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4349 				HCLGE_VECTOR_REG_OFFSET_H +
4350 				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4351 				HCLGE_VECTOR_REG_OFFSET;
4352 
4353 	hdev->vector_status[idx] = hdev->vport[0].vport_id;
4354 	hdev->vector_irq[idx] = vector_info->vector;
4355 }
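
/* Address example for the split above: ring vector idx 64 still falls in the
 * base window (HCLGE_VECTOR_REG_BASE + 63 * HCLGE_VECTOR_REG_OFFSET), while
 * idx 65 moves to the extended window at HCLGE_VECTOR_EXT_REG_BASE +
 * 1 * HCLGE_VECTOR_REG_OFFSET_H + 0 * HCLGE_VECTOR_REG_OFFSET, since
 * (65 - 1) / 64 = 1 and (65 - 1) % 64 = 0.
 */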
4356 
4357 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4358 			    struct hnae3_vector_info *vector_info)
4359 {
4360 	struct hclge_vport *vport = hclge_get_vport(handle);
4361 	struct hnae3_vector_info *vector = vector_info;
4362 	struct hclge_dev *hdev = vport->back;
4363 	int alloc = 0;
4364 	u16 i = 0;
4365 	u16 j;
4366 
4367 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4368 	vector_num = min(hdev->num_msi_left, vector_num);
4369 
4370 	for (j = 0; j < vector_num; j++) {
4371 		while (++i < hdev->num_nic_msi) {
4372 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4373 				hclge_get_vector_info(hdev, i, vector);
4374 				vector++;
4375 				alloc++;
4376 
4377 				break;
4378 			}
4379 		}
4380 	}
4381 	hdev->num_msi_left -= alloc;
4382 	hdev->num_msi_used += alloc;
4383 
4384 	return alloc;
4385 }
4386 
4387 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4388 {
4389 	int i;
4390 
4391 	for (i = 0; i < hdev->num_msi; i++)
4392 		if (vector == hdev->vector_irq[i])
4393 			return i;
4394 
4395 	return -EINVAL;
4396 }
4397 
4398 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4399 {
4400 	struct hclge_vport *vport = hclge_get_vport(handle);
4401 	struct hclge_dev *hdev = vport->back;
4402 	int vector_id;
4403 
4404 	vector_id = hclge_get_vector_index(hdev, vector);
4405 	if (vector_id < 0) {
4406 		dev_err(&hdev->pdev->dev,
4407 			"Get vector index fail. vector = %d\n", vector);
4408 		return vector_id;
4409 	}
4410 
4411 	hclge_free_vector(hdev, vector_id);
4412 
4413 	return 0;
4414 }
4415 
4416 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4417 {
4418 	return HCLGE_RSS_KEY_SIZE;
4419 }
4420 
4421 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4422 				  const u8 hfunc, const u8 *key)
4423 {
4424 	struct hclge_rss_config_cmd *req;
4425 	unsigned int key_offset = 0;
4426 	struct hclge_desc desc;
4427 	int key_counts;
4428 	int key_size;
4429 	int ret;
4430 
4431 	key_counts = HCLGE_RSS_KEY_SIZE;
4432 	req = (struct hclge_rss_config_cmd *)desc.data;
4433 
4434 	while (key_counts) {
4435 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4436 					   false);
4437 
4438 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4439 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4440 
4441 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4442 		memcpy(req->hash_key,
4443 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4444 
4445 		key_counts -= key_size;
4446 		key_offset++;
4447 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4448 		if (ret) {
4449 			dev_err(&hdev->pdev->dev,
4450 				"Configure RSS config fail, status = %d\n",
4451 				ret);
4452 			return ret;
4453 		}
4454 	}
4455 	return 0;
4456 }
4457 
4458 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4459 {
4460 	struct hclge_rss_indirection_table_cmd *req;
4461 	struct hclge_desc desc;
4462 	int rss_cfg_tbl_num;
4463 	u8 rss_msb_oft;
4464 	u8 rss_msb_val;
4465 	int ret;
4466 	u16 qid;
4467 	int i;
4468 	u32 j;
4469 
4470 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4471 	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4472 			  HCLGE_RSS_CFG_TBL_SIZE;
4473 
4474 	for (i = 0; i < rss_cfg_tbl_num; i++) {
4475 		hclge_cmd_setup_basic_desc
4476 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4477 
4478 		req->start_table_index =
4479 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4480 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4481 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4482 			qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4483 			req->rss_qid_l[j] = qid & 0xff;
4484 			rss_msb_oft =
4485 				j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4486 			rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4487 				(j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4488 			req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4489 		}
4490 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4491 		if (ret) {
4492 			dev_err(&hdev->pdev->dev,
4493 				"Configure rss indir table fail, status = %d\n",
4494 				ret);
4495 			return ret;
4496 		}
4497 	}
4498 	return 0;
4499 }
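
/* Each indirection entry is split across two arrays above: the low byte of
 * the queue id goes into rss_qid_l[j], while the single bit above
 * HCLGE_RSS_CFG_TBL_BW_L is OR-ed into rss_qid_h[], packed
 * HCLGE_RSS_CFG_TBL_BW_H bits per entry, with rss_msb_oft selecting the byte
 * and the modulo selecting the bit position within it.
 */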
4500 
4501 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4502 				 u16 *tc_size, u16 *tc_offset)
4503 {
4504 	struct hclge_rss_tc_mode_cmd *req;
4505 	struct hclge_desc desc;
4506 	int ret;
4507 	int i;
4508 
4509 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4510 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4511 
4512 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4513 		u16 mode = 0;
4514 
4515 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4516 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4517 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4518 		hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4519 			      tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4520 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4521 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4522 
4523 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4524 	}
4525 
4526 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4527 	if (ret)
4528 		dev_err(&hdev->pdev->dev,
4529 			"Configure rss tc mode fail, status = %d\n", ret);
4530 
4531 	return ret;
4532 }
4533 
4534 static void hclge_get_rss_type(struct hclge_vport *vport)
4535 {
4536 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4537 	    vport->rss_tuple_sets.ipv4_udp_en ||
4538 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4539 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4540 	    vport->rss_tuple_sets.ipv6_udp_en ||
4541 	    vport->rss_tuple_sets.ipv6_sctp_en)
4542 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4543 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4544 		 vport->rss_tuple_sets.ipv6_fragment_en)
4545 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4546 	else
4547 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4548 }
4549 
4550 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4551 {
4552 	struct hclge_rss_input_tuple_cmd *req;
4553 	struct hclge_desc desc;
4554 	int ret;
4555 
4556 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4557 
4558 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4559 
4560 	/* Get the tuple cfg from pf */
4561 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4562 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4563 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4564 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4565 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4566 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4567 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4568 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4569 	hclge_get_rss_type(&hdev->vport[0]);
4570 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4571 	if (ret)
4572 		dev_err(&hdev->pdev->dev,
4573 			"Configure rss input fail, status = %d\n", ret);
4574 	return ret;
4575 }
4576 
4577 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4578 			 u8 *key, u8 *hfunc)
4579 {
4580 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4581 	struct hclge_vport *vport = hclge_get_vport(handle);
4582 	int i;
4583 
4584 	/* Get hash algorithm */
4585 	if (hfunc) {
4586 		switch (vport->rss_algo) {
4587 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4588 			*hfunc = ETH_RSS_HASH_TOP;
4589 			break;
4590 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4591 			*hfunc = ETH_RSS_HASH_XOR;
4592 			break;
4593 		default:
4594 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4595 			break;
4596 		}
4597 	}
4598 
4599 	/* Get the RSS Key required by the user */
4600 	if (key)
4601 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4602 
4603 	/* Get indirect table */
4604 	if (indir)
4605 		for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4606 			indir[i] =  vport->rss_indirection_tbl[i];
4607 
4608 	return 0;
4609 }
4610 
4611 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4612 			 const  u8 *key, const  u8 hfunc)
4613 {
4614 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4615 	struct hclge_vport *vport = hclge_get_vport(handle);
4616 	struct hclge_dev *hdev = vport->back;
4617 	u8 hash_algo;
4618 	int ret, i;
4619 
4620 	/* Set the RSS Hash Key if specified by the user */
4621 	if (key) {
4622 		switch (hfunc) {
4623 		case ETH_RSS_HASH_TOP:
4624 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4625 			break;
4626 		case ETH_RSS_HASH_XOR:
4627 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4628 			break;
4629 		case ETH_RSS_HASH_NO_CHANGE:
4630 			hash_algo = vport->rss_algo;
4631 			break;
4632 		default:
4633 			return -EINVAL;
4634 		}
4635 
4636 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4637 		if (ret)
4638 			return ret;
4639 
4640 		/* Update the shadow RSS key with the user specified key */
4641 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4642 		vport->rss_algo = hash_algo;
4643 	}
4644 
4645 	/* Update the shadow RSS table with user specified qids */
4646 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4647 		vport->rss_indirection_tbl[i] = indir[i];
4648 
4649 	/* Update the hardware */
4650 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4651 }
4652 
4653 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4654 {
4655 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4656 
4657 	if (nfc->data & RXH_L4_B_2_3)
4658 		hash_sets |= HCLGE_D_PORT_BIT;
4659 	else
4660 		hash_sets &= ~HCLGE_D_PORT_BIT;
4661 
4662 	if (nfc->data & RXH_IP_SRC)
4663 		hash_sets |= HCLGE_S_IP_BIT;
4664 	else
4665 		hash_sets &= ~HCLGE_S_IP_BIT;
4666 
4667 	if (nfc->data & RXH_IP_DST)
4668 		hash_sets |= HCLGE_D_IP_BIT;
4669 	else
4670 		hash_sets &= ~HCLGE_D_IP_BIT;
4671 
4672 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4673 		hash_sets |= HCLGE_V_TAG_BIT;
4674 
4675 	return hash_sets;
4676 }
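
/* Example: an ethtool request of RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
 * RXH_L4_B_2_3 for TCP_V4_FLOW translates to HCLGE_S_IP_BIT |
 * HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT; SCTP flows
 * additionally get HCLGE_V_TAG_BIT (presumably the SCTP verification tag).
 */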
4677 
4678 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4679 				    struct ethtool_rxnfc *nfc,
4680 				    struct hclge_rss_input_tuple_cmd *req)
4681 {
4682 	struct hclge_dev *hdev = vport->back;
4683 	u8 tuple_sets;
4684 
4685 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4686 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4687 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4688 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4689 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4690 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4691 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4692 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4693 
4694 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4695 	switch (nfc->flow_type) {
4696 	case TCP_V4_FLOW:
4697 		req->ipv4_tcp_en = tuple_sets;
4698 		break;
4699 	case TCP_V6_FLOW:
4700 		req->ipv6_tcp_en = tuple_sets;
4701 		break;
4702 	case UDP_V4_FLOW:
4703 		req->ipv4_udp_en = tuple_sets;
4704 		break;
4705 	case UDP_V6_FLOW:
4706 		req->ipv6_udp_en = tuple_sets;
4707 		break;
4708 	case SCTP_V4_FLOW:
4709 		req->ipv4_sctp_en = tuple_sets;
4710 		break;
4711 	case SCTP_V6_FLOW:
4712 		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4713 		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4714 			return -EINVAL;
4715 
4716 		req->ipv6_sctp_en = tuple_sets;
4717 		break;
4718 	case IPV4_FLOW:
4719 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4720 		break;
4721 	case IPV6_FLOW:
4722 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4723 		break;
4724 	default:
4725 		return -EINVAL;
4726 	}
4727 
4728 	return 0;
4729 }
4730 
4731 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4732 			       struct ethtool_rxnfc *nfc)
4733 {
4734 	struct hclge_vport *vport = hclge_get_vport(handle);
4735 	struct hclge_dev *hdev = vport->back;
4736 	struct hclge_rss_input_tuple_cmd *req;
4737 	struct hclge_desc desc;
4738 	int ret;
4739 
4740 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4741 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4742 		return -EINVAL;
4743 
4744 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4745 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4746 
4747 	ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4748 	if (ret) {
4749 		dev_err(&hdev->pdev->dev,
4750 			"failed to init rss tuple cmd, ret = %d\n", ret);
4751 		return ret;
4752 	}
4753 
4754 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4755 	if (ret) {
4756 		dev_err(&hdev->pdev->dev,
4757 			"Set rss tuple fail, status = %d\n", ret);
4758 		return ret;
4759 	}
4760 
4761 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4762 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4763 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4764 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4765 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4766 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4767 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4768 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4769 	hclge_get_rss_type(vport);
4770 	return 0;
4771 }
4772 
4773 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4774 				     u8 *tuple_sets)
4775 {
4776 	switch (flow_type) {
4777 	case TCP_V4_FLOW:
4778 		*tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4779 		break;
4780 	case UDP_V4_FLOW:
4781 		*tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4782 		break;
4783 	case TCP_V6_FLOW:
4784 		*tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4785 		break;
4786 	case UDP_V6_FLOW:
4787 		*tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4788 		break;
4789 	case SCTP_V4_FLOW:
4790 		*tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4791 		break;
4792 	case SCTP_V6_FLOW:
4793 		*tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4794 		break;
4795 	case IPV4_FLOW:
4796 	case IPV6_FLOW:
4797 		*tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4798 		break;
4799 	default:
4800 		return -EINVAL;
4801 	}
4802 
4803 	return 0;
4804 }
4805 
4806 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4807 {
4808 	u64 tuple_data = 0;
4809 
4810 	if (tuple_sets & HCLGE_D_PORT_BIT)
4811 		tuple_data |= RXH_L4_B_2_3;
4812 	if (tuple_sets & HCLGE_S_PORT_BIT)
4813 		tuple_data |= RXH_L4_B_0_1;
4814 	if (tuple_sets & HCLGE_D_IP_BIT)
4815 		tuple_data |= RXH_IP_DST;
4816 	if (tuple_sets & HCLGE_S_IP_BIT)
4817 		tuple_data |= RXH_IP_SRC;
4818 
4819 	return tuple_data;
4820 }
4821 
4822 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4823 			       struct ethtool_rxnfc *nfc)
4824 {
4825 	struct hclge_vport *vport = hclge_get_vport(handle);
4826 	u8 tuple_sets;
4827 	int ret;
4828 
4829 	nfc->data = 0;
4830 
4831 	ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4832 	if (ret || !tuple_sets)
4833 		return ret;
4834 
4835 	nfc->data = hclge_convert_rss_tuple(tuple_sets);
4836 
4837 	return 0;
4838 }
4839 
4840 static int hclge_get_tc_size(struct hnae3_handle *handle)
4841 {
4842 	struct hclge_vport *vport = hclge_get_vport(handle);
4843 	struct hclge_dev *hdev = vport->back;
4844 
4845 	return hdev->pf_rss_size_max;
4846 }
4847 
4848 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4849 {
4850 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4851 	struct hclge_vport *vport = hdev->vport;
4852 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4853 	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4854 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4855 	struct hnae3_tc_info *tc_info;
4856 	u16 roundup_size;
4857 	u16 rss_size;
4858 	int i;
4859 
4860 	tc_info = &vport->nic.kinfo.tc_info;
4861 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4862 		rss_size = tc_info->tqp_count[i];
4863 		tc_valid[i] = 0;
4864 
4865 		if (!(hdev->hw_tc_map & BIT(i)))
4866 			continue;
4867 
4868 		/* The tc_size set to hardware is the log2 of the roundup
4869 		 * power of two of rss_size; the actual queue size is limited
4870 		 * by the indirection table.
4871 		 */
4872 		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4873 		    rss_size == 0) {
4874 			dev_err(&hdev->pdev->dev,
4875 				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
4876 				rss_size);
4877 			return -EINVAL;
4878 		}
4879 
4880 		roundup_size = roundup_pow_of_two(rss_size);
4881 		roundup_size = ilog2(roundup_size);
4882 
4883 		tc_valid[i] = 1;
4884 		tc_size[i] = roundup_size;
4885 		tc_offset[i] = tc_info->tqp_offset[i];
4886 	}
4887 
4888 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4889 }
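
/* Example of the tc_size encoding above: a TC with tqp_count = 10 gives
 * roundup_pow_of_two(10) = 16 and tc_size = ilog2(16) = 4, so the hardware
 * hashes that TC over 16 slots while the indirection table keeps the
 * resulting queues within the 10 actually allocated.
 */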
4890 
4891 int hclge_rss_init_hw(struct hclge_dev *hdev)
4892 {
4893 	struct hclge_vport *vport = hdev->vport;
4894 	u16 *rss_indir = vport[0].rss_indirection_tbl;
4895 	u8 *key = vport[0].rss_hash_key;
4896 	u8 hfunc = vport[0].rss_algo;
4897 	int ret;
4898 
4899 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4900 	if (ret)
4901 		return ret;
4902 
4903 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4904 	if (ret)
4905 		return ret;
4906 
4907 	ret = hclge_set_rss_input_tuple(hdev);
4908 	if (ret)
4909 		return ret;
4910 
4911 	return hclge_init_rss_tc_mode(hdev);
4912 }
4913 
4914 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4915 {
4916 	struct hclge_vport *vport = hdev->vport;
4917 	int i, j;
4918 
4919 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4920 		for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
4921 			vport[j].rss_indirection_tbl[i] =
4922 				i % vport[j].alloc_rss_size;
4923 	}
4924 }
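
/* With alloc_rss_size = 16, for example, each vport's table becomes
 * 0, 1, ..., 15, 0, 1, ... repeated until rss_ind_tbl_size entries are
 * filled, spreading the hash evenly over the allocated RSS queues.
 */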
4925 
4926 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
4927 {
4928 	u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
4929 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4930 	struct hclge_vport *vport = hdev->vport;
4931 
4932 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4933 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4934 
4935 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4936 		u16 *rss_ind_tbl;
4937 
4938 		vport[i].rss_tuple_sets.ipv4_tcp_en =
4939 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4940 		vport[i].rss_tuple_sets.ipv4_udp_en =
4941 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4942 		vport[i].rss_tuple_sets.ipv4_sctp_en =
4943 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4944 		vport[i].rss_tuple_sets.ipv4_fragment_en =
4945 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4946 		vport[i].rss_tuple_sets.ipv6_tcp_en =
4947 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4948 		vport[i].rss_tuple_sets.ipv6_udp_en =
4949 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4950 		vport[i].rss_tuple_sets.ipv6_sctp_en =
4951 			hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4952 			HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4953 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4954 		vport[i].rss_tuple_sets.ipv6_fragment_en =
4955 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4956 
4957 		vport[i].rss_algo = rss_algo;
4958 
4959 		rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
4960 					   sizeof(*rss_ind_tbl), GFP_KERNEL);
4961 		if (!rss_ind_tbl)
4962 			return -ENOMEM;
4963 
4964 		vport[i].rss_indirection_tbl = rss_ind_tbl;
4965 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
4966 		       HCLGE_RSS_KEY_SIZE);
4967 	}
4968 
4969 	hclge_rss_indir_init_cfg(hdev);
4970 
4971 	return 0;
4972 }
4973 
4974 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4975 				int vector_id, bool en,
4976 				struct hnae3_ring_chain_node *ring_chain)
4977 {
4978 	struct hclge_dev *hdev = vport->back;
4979 	struct hnae3_ring_chain_node *node;
4980 	struct hclge_desc desc;
4981 	struct hclge_ctrl_vector_chain_cmd *req =
4982 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
4983 	enum hclge_cmd_status status;
4984 	enum hclge_opcode_type op;
4985 	u16 tqp_type_and_id;
4986 	int i;
4987 
4988 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4989 	hclge_cmd_setup_basic_desc(&desc, op, false);
4990 	req->int_vector_id_l = hnae3_get_field(vector_id,
4991 					       HCLGE_VECTOR_ID_L_M,
4992 					       HCLGE_VECTOR_ID_L_S);
4993 	req->int_vector_id_h = hnae3_get_field(vector_id,
4994 					       HCLGE_VECTOR_ID_H_M,
4995 					       HCLGE_VECTOR_ID_H_S);
4996 
4997 	i = 0;
4998 	for (node = ring_chain; node; node = node->next) {
4999 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5000 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
5001 				HCLGE_INT_TYPE_S,
5002 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5003 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5004 				HCLGE_TQP_ID_S, node->tqp_index);
5005 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5006 				HCLGE_INT_GL_IDX_S,
5007 				hnae3_get_field(node->int_gl_idx,
5008 						HNAE3_RING_GL_IDX_M,
5009 						HNAE3_RING_GL_IDX_S));
5010 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5011 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5012 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5013 			req->vfid = vport->vport_id;
5014 
5015 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
5016 			if (status) {
5017 				dev_err(&hdev->pdev->dev,
5018 					"Map TQP fail, status is %d.\n",
5019 					status);
5020 				return -EIO;
5021 			}
5022 			i = 0;
5023 
5024 			hclge_cmd_setup_basic_desc(&desc,
5025 						   op,
5026 						   false);
5027 			req->int_vector_id_l =
5028 				hnae3_get_field(vector_id,
5029 						HCLGE_VECTOR_ID_L_M,
5030 						HCLGE_VECTOR_ID_L_S);
5031 			req->int_vector_id_h =
5032 				hnae3_get_field(vector_id,
5033 						HCLGE_VECTOR_ID_H_M,
5034 						HCLGE_VECTOR_ID_H_S);
5035 		}
5036 	}
5037 
5038 	if (i > 0) {
5039 		req->int_cause_num = i;
5040 		req->vfid = vport->vport_id;
5041 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
5042 		if (status) {
5043 			dev_err(&hdev->pdev->dev,
5044 				"Map TQP fail, status is %d.\n", status);
5045 			return -EIO;
5046 		}
5047 	}
5048 
5049 	return 0;
5050 }
5051 
5052 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5053 				    struct hnae3_ring_chain_node *ring_chain)
5054 {
5055 	struct hclge_vport *vport = hclge_get_vport(handle);
5056 	struct hclge_dev *hdev = vport->back;
5057 	int vector_id;
5058 
5059 	vector_id = hclge_get_vector_index(hdev, vector);
5060 	if (vector_id < 0) {
5061 		dev_err(&hdev->pdev->dev,
5062 			"failed to get vector index. vector=%d\n", vector);
5063 		return vector_id;
5064 	}
5065 
5066 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5067 }
5068 
5069 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5070 				       struct hnae3_ring_chain_node *ring_chain)
5071 {
5072 	struct hclge_vport *vport = hclge_get_vport(handle);
5073 	struct hclge_dev *hdev = vport->back;
5074 	int vector_id, ret;
5075 
5076 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5077 		return 0;
5078 
5079 	vector_id = hclge_get_vector_index(hdev, vector);
5080 	if (vector_id < 0) {
5081 		dev_err(&handle->pdev->dev,
5082 			"Get vector index fail. ret =%d\n", vector_id);
5083 		return vector_id;
5084 	}
5085 
5086 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5087 	if (ret)
5088 		dev_err(&handle->pdev->dev,
5089 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5090 			vector_id, ret);
5091 
5092 	return ret;
5093 }
5094 
5095 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5096 				      bool en_uc, bool en_mc, bool en_bc)
5097 {
5098 	struct hclge_vport *vport = &hdev->vport[vf_id];
5099 	struct hnae3_handle *handle = &vport->nic;
5100 	struct hclge_promisc_cfg_cmd *req;
5101 	struct hclge_desc desc;
5102 	bool uc_tx_en = en_uc;
5103 	u8 promisc_cfg = 0;
5104 	int ret;
5105 
5106 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5107 
5108 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
5109 	req->vf_id = vf_id;
5110 
5111 	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5112 		uc_tx_en = false;
5113 
5114 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5115 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5116 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5117 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5118 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5119 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5120 	req->extend_promisc = promisc_cfg;
5121 
5122 	/* to be compatible with DEVICE_VERSION_V1/2 */
5123 	promisc_cfg = 0;
5124 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5125 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5126 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5127 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5128 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5129 	req->promisc = promisc_cfg;
5130 
5131 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5132 	if (ret)
5133 		dev_err(&hdev->pdev->dev,
5134 			"failed to set vport %u promisc mode, ret = %d.\n",
5135 			vf_id, ret);
5136 
5137 	return ret;
5138 }
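
/* Example: en_uc = true, en_mc = false, en_bc = true with the
 * HNAE3_PFLAG_LIMIT_PROMISC flag set yields extend_promisc with UC/BC RX
 * enabled and BC TX enabled but UC TX suppressed, while the legacy promisc
 * byte still sets EN_UC, EN_BC and the unconditional TX_EN/RX_EN bits for
 * DEVICE_VERSION_V1/2 firmware.
 */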
5139 
5140 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5141 				 bool en_mc_pmc, bool en_bc_pmc)
5142 {
5143 	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5144 					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
5145 }
5146 
5147 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5148 				  bool en_mc_pmc)
5149 {
5150 	struct hclge_vport *vport = hclge_get_vport(handle);
5151 	struct hclge_dev *hdev = vport->back;
5152 	bool en_bc_pmc = true;
5153 
5154 	/* For devices whose version is below V2, if broadcast promisc is
5155 	 * enabled, the vlan filter is always bypassed. So broadcast promisc
5156 	 * should be disabled until the user enables promisc mode.
5157 	 */
5158 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5159 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5160 
5161 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5162 					    en_bc_pmc);
5163 }
5164 
5165 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5166 {
5167 	struct hclge_vport *vport = hclge_get_vport(handle);
5168 	struct hclge_dev *hdev = vport->back;
5169 
5170 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
5171 }
5172 
5173 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5174 {
5175 	if (hlist_empty(&hdev->fd_rule_list))
5176 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5177 }
5178 
5179 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5180 {
5181 	if (!test_bit(location, hdev->fd_bmap)) {
5182 		set_bit(location, hdev->fd_bmap);
5183 		hdev->hclge_fd_rule_num++;
5184 	}
5185 }
5186 
5187 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5188 {
5189 	if (test_bit(location, hdev->fd_bmap)) {
5190 		clear_bit(location, hdev->fd_bmap);
5191 		hdev->hclge_fd_rule_num--;
5192 	}
5193 }
5194 
5195 static void hclge_fd_free_node(struct hclge_dev *hdev,
5196 			       struct hclge_fd_rule *rule)
5197 {
5198 	hlist_del(&rule->rule_node);
5199 	kfree(rule);
5200 	hclge_sync_fd_state(hdev);
5201 }
5202 
5203 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5204 				      struct hclge_fd_rule *old_rule,
5205 				      struct hclge_fd_rule *new_rule,
5206 				      enum HCLGE_FD_NODE_STATE state)
5207 {
5208 	switch (state) {
5209 	case HCLGE_FD_TO_ADD:
5210 	case HCLGE_FD_ACTIVE:
5211 		/* 1) if the new state is TO_ADD, just replace the old rule
5212 		 * with the same location, no matter its state, because the
5213 		 * new rule will be configured to the hardware.
5214 		 * 2) if the new state is ACTIVE, it means the new rule
5215 		 * has been configured to the hardware, so just replace
5216 		 * the old rule node with the same location.
5217 		 * 3) neither case adds a new node to the list, so it is
5218 		 * unnecessary to update the rule number and fd_bmap.
5219 		 */
5220 		new_rule->rule_node.next = old_rule->rule_node.next;
5221 		new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5222 		memcpy(old_rule, new_rule, sizeof(*old_rule));
5223 		kfree(new_rule);
5224 		break;
5225 	case HCLGE_FD_DELETED:
5226 		hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5227 		hclge_fd_free_node(hdev, old_rule);
5228 		break;
5229 	case HCLGE_FD_TO_DEL:
5230 		/* if the new request is TO_DEL and the old rule exists:
5231 		 * 1) if the old rule's state is TO_DEL, do nothing, because
5232 		 * rules are deleted by location, so the rest of the rule
5233 		 * content is irrelevant.
5234 		 * 2) if the old rule's state is ACTIVE, change it to TO_DEL,
5235 		 * so the rule will be deleted when the periodic task is
5236 		 * scheduled.
5237 		 * 3) if the old rule's state is TO_ADD, the rule hasn't been
5238 		 * added to hardware yet, so just delete the rule node from
5239 		 * fd_rule_list directly.
5240 		 */
5241 		if (old_rule->state == HCLGE_FD_TO_ADD) {
5242 			hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5243 			hclge_fd_free_node(hdev, old_rule);
5244 			return;
5245 		}
5246 		old_rule->state = HCLGE_FD_TO_DEL;
5247 		break;
5248 	}
5249 }
5250 
5251 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5252 						u16 location,
5253 						struct hclge_fd_rule **parent)
5254 {
5255 	struct hclge_fd_rule *rule;
5256 	struct hlist_node *node;
5257 
5258 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5259 		if (rule->location == location)
5260 			return rule;
5261 		else if (rule->location > location)
5262 			return NULL;
5263 		/* record the parent node, used to keep the nodes in
5264 		 * fd_rule_list in ascending order.
5265 		 */
5266 		*parent = rule;
5267 	}
5268 
5269 	return NULL;
5270 }
5271 
5272 /* insert fd rule node in ascending order according to rule->location */
5273 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5274 				      struct hclge_fd_rule *rule,
5275 				      struct hclge_fd_rule *parent)
5276 {
5277 	INIT_HLIST_NODE(&rule->rule_node);
5278 
5279 	if (parent)
5280 		hlist_add_behind(&rule->rule_node, &parent->rule_node);
5281 	else
5282 		hlist_add_head(&rule->rule_node, hlist);
5283 }
5284 
5285 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5286 				     struct hclge_fd_user_def_cfg *cfg)
5287 {
5288 	struct hclge_fd_user_def_cfg_cmd *req;
5289 	struct hclge_desc desc;
5290 	u16 data = 0;
5291 	int ret;
5292 
5293 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5294 
5295 	req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5296 
5297 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5298 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5299 			HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5300 	req->ol2_cfg = cpu_to_le16(data);
5301 
5302 	data = 0;
5303 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5304 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5305 			HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5306 	req->ol3_cfg = cpu_to_le16(data);
5307 
5308 	data = 0;
5309 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5310 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5311 			HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5312 	req->ol4_cfg = cpu_to_le16(data);
5313 
5314 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5315 	if (ret)
5316 		dev_err(&hdev->pdev->dev,
5317 			"failed to set fd user def data, ret= %d\n", ret);
5318 	return ret;
5319 }
5320 
5321 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5322 {
5323 	int ret;
5324 
5325 	if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5326 		return;
5327 
5328 	if (!locked)
5329 		spin_lock_bh(&hdev->fd_rule_lock);
5330 
5331 	ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5332 	if (ret)
5333 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5334 
5335 	if (!locked)
5336 		spin_unlock_bh(&hdev->fd_rule_lock);
5337 }
5338 
5339 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5340 					  struct hclge_fd_rule *rule)
5341 {
5342 	struct hlist_head *hlist = &hdev->fd_rule_list;
5343 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5344 	struct hclge_fd_user_def_info *info, *old_info;
5345 	struct hclge_fd_user_def_cfg *cfg;
5346 
5347 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5348 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5349 		return 0;
5350 
5351 	/* valid layers start from 1, so subtract 1 to get the cfg index */
5352 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5353 	info = &rule->ep.user_def;
5354 
5355 	if (!cfg->ref_cnt || cfg->offset == info->offset)
5356 		return 0;
5357 
5358 	if (cfg->ref_cnt > 1)
5359 		goto error;
5360 
5361 	fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5362 	if (fd_rule) {
5363 		old_info = &fd_rule->ep.user_def;
5364 		if (info->layer == old_info->layer)
5365 			return 0;
5366 	}
5367 
5368 error:
5369 	dev_err(&hdev->pdev->dev,
5370 		"No available offset for layer%d fd rule, each layer only supports one user def offset.\n",
5371 		info->layer + 1);
5372 	return -ENOSPC;
5373 }
5374 
5375 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5376 					 struct hclge_fd_rule *rule)
5377 {
5378 	struct hclge_fd_user_def_cfg *cfg;
5379 
5380 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5381 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5382 		return;
5383 
5384 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5385 	if (!cfg->ref_cnt) {
5386 		cfg->offset = rule->ep.user_def.offset;
5387 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5388 	}
5389 	cfg->ref_cnt++;
5390 }
5391 
5392 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5393 					 struct hclge_fd_rule *rule)
5394 {
5395 	struct hclge_fd_user_def_cfg *cfg;
5396 
5397 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5398 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5399 		return;
5400 
5401 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5402 	if (!cfg->ref_cnt)
5403 		return;
5404 
5405 	cfg->ref_cnt--;
5406 	if (!cfg->ref_cnt) {
5407 		cfg->offset = 0;
5408 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5409 	}
5410 }
5411 
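/* Update the sorted fd_rule_list for @location: if a node with this location
 * already exists, replace it or change its state according to @state;
 * otherwise insert a new node and, for HCLGE_FD_TO_ADD, schedule the periodic
 * task to push the rule to hardware later.
 */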
5412 static void hclge_update_fd_list(struct hclge_dev *hdev,
5413 				 enum HCLGE_FD_NODE_STATE state, u16 location,
5414 				 struct hclge_fd_rule *new_rule)
5415 {
5416 	struct hlist_head *hlist = &hdev->fd_rule_list;
5417 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5418 
5419 	fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5420 	if (fd_rule) {
5421 		hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5422 		if (state == HCLGE_FD_ACTIVE)
5423 			hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5424 		hclge_sync_fd_user_def_cfg(hdev, true);
5425 
5426 		hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5427 		return;
5428 	}
5429 
5430 	/* it's unlikely to fail here, because we have already checked that
5431 	 * the rule exists.
5432 	 */
5433 	if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5434 		dev_warn(&hdev->pdev->dev,
5435 			 "failed to delete fd rule %u, it's inexistent\n",
5436 			 location);
5437 		return;
5438 	}
5439 
5440 	hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5441 	hclge_sync_fd_user_def_cfg(hdev, true);
5442 
5443 	hclge_fd_insert_rule_node(hlist, new_rule, parent);
5444 	hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5445 
5446 	if (state == HCLGE_FD_TO_ADD) {
5447 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5448 		hclge_task_schedule(hdev, 0);
5449 	}
5450 }
5451 
5452 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5453 {
5454 	struct hclge_get_fd_mode_cmd *req;
5455 	struct hclge_desc desc;
5456 	int ret;
5457 
5458 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5459 
5460 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
5461 
5462 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5463 	if (ret) {
5464 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5465 		return ret;
5466 	}
5467 
5468 	*fd_mode = req->mode;
5469 
5470 	return ret;
5471 }
5472 
5473 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5474 				   u32 *stage1_entry_num,
5475 				   u32 *stage2_entry_num,
5476 				   u16 *stage1_counter_num,
5477 				   u16 *stage2_counter_num)
5478 {
5479 	struct hclge_get_fd_allocation_cmd *req;
5480 	struct hclge_desc desc;
5481 	int ret;
5482 
5483 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5484 
5485 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5486 
5487 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5488 	if (ret) {
5489 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5490 			ret);
5491 		return ret;
5492 	}
5493 
5494 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5495 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5496 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5497 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5498 
5499 	return ret;
5500 }
5501 
5502 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5503 				   enum HCLGE_FD_STAGE stage_num)
5504 {
5505 	struct hclge_set_fd_key_config_cmd *req;
5506 	struct hclge_fd_key_cfg *stage;
5507 	struct hclge_desc desc;
5508 	int ret;
5509 
5510 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5511 
5512 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5513 	stage = &hdev->fd_cfg.key_cfg[stage_num];
5514 	req->stage = stage_num;
5515 	req->key_select = stage->key_sel;
5516 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5517 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5518 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5519 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5520 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5521 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5522 
5523 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5524 	if (ret)
5525 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5526 
5527 	return ret;
5528 }
5529 
5530 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5531 {
5532 	struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5533 
5534 	spin_lock_bh(&hdev->fd_rule_lock);
5535 	memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5536 	spin_unlock_bh(&hdev->fd_rule_lock);
5537 
5538 	hclge_fd_set_user_def_cmd(hdev, cfg);
5539 }
5540 
5541 static int hclge_init_fd_config(struct hclge_dev *hdev)
5542 {
5543 #define LOW_2_WORDS		0x03
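/* LOW_2_WORDS presumably selects the two low 32-bit words of an IPv6
 * address (bits 0 and 1 set) when used as the sipv6/dipv6 word enable
 * fields below.
 */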
5544 	struct hclge_fd_key_cfg *key_cfg;
5545 	int ret;
5546 
5547 	if (!hnae3_dev_fd_supported(hdev))
5548 		return 0;
5549 
5550 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5551 	if (ret)
5552 		return ret;
5553 
5554 	switch (hdev->fd_cfg.fd_mode) {
5555 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5556 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5557 		break;
5558 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5559 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5560 		break;
5561 	default:
5562 		dev_err(&hdev->pdev->dev,
5563 			"Unsupported flow director mode %u\n",
5564 			hdev->fd_cfg.fd_mode);
5565 		return -EOPNOTSUPP;
5566 	}
5567 
5568 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5569 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5570 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5571 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5572 	key_cfg->outer_sipv6_word_en = 0;
5573 	key_cfg->outer_dipv6_word_en = 0;
5574 
5575 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5576 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5577 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5578 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5579 
5580 	/* The max 400-bit key also supports tuples for ether type flows */
5581 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5582 		key_cfg->tuple_active |=
5583 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5584 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5585 			key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5586 	}
5587 
5588 	/* roce_type is used to filter roce frames
5589 	 * dst_vport is used to specify the rule
5590 	 */
5591 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5592 
5593 	ret = hclge_get_fd_allocation(hdev,
5594 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5595 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5596 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5597 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5598 	if (ret)
5599 		return ret;
5600 
5601 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5602 }
5603 
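/* Write one flow director TCAM entry. The entry spans three command
 * descriptors; @sel_x selects whether the X or Y half of the key is written,
 * and the entry is only marked valid on the X write when @is_add is true.
 */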
5604 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5605 				int loc, u8 *key, bool is_add)
5606 {
5607 	struct hclge_fd_tcam_config_1_cmd *req1;
5608 	struct hclge_fd_tcam_config_2_cmd *req2;
5609 	struct hclge_fd_tcam_config_3_cmd *req3;
5610 	struct hclge_desc desc[3];
5611 	int ret;
5612 
5613 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5614 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5615 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5616 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5617 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5618 
5619 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5620 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5621 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5622 
5623 	req1->stage = stage;
5624 	req1->xy_sel = sel_x ? 1 : 0;
5625 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5626 	req1->index = cpu_to_le32(loc);
5627 	req1->entry_vld = sel_x ? is_add : 0;
5628 
5629 	if (key) {
5630 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5631 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5632 		       sizeof(req2->tcam_data));
5633 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5634 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5635 	}
5636 
5637 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
5638 	if (ret)
5639 		dev_err(&hdev->pdev->dev,
5640 			"config tcam key fail, ret=%d\n",
5641 			ret);
5642 
5643 	return ret;
5644 }
5645 
5646 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5647 			      struct hclge_fd_ad_data *action)
5648 {
5649 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5650 	struct hclge_fd_ad_config_cmd *req;
5651 	struct hclge_desc desc;
5652 	u64 ad_data = 0;
5653 	int ret;
5654 
5655 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5656 
5657 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
5658 	req->index = cpu_to_le32(loc);
5659 	req->stage = stage;
5660 
5661 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5662 		      action->write_rule_id_to_bd);
5663 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5664 			action->rule_id);
5665 	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5666 		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5667 			      action->override_tc);
5668 		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5669 				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5670 	}
5671 	ad_data <<= 32;
5672 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5673 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5674 		      action->forward_to_direct_queue);
5675 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5676 			action->queue_id);
5677 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5678 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5679 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5680 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5681 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5682 			action->counter_id);
5683 
5684 	req->ad_data = cpu_to_le64(ad_data);
5685 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5686 	if (ret)
5687 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5688 
5689 	return ret;
5690 }
5691 
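/* Convert one tuple of @rule into the TCAM X/Y key pair at @key_x/@key_y.
 * calc_x()/calc_y() combine the tuple value with its mask; a tuple listed in
 * rule->unused_tuple keeps its key bytes zeroed. Returns false if the tuple
 * is not handled here, so the caller does not advance the key pointers.
 */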
5692 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5693 				   struct hclge_fd_rule *rule)
5694 {
5695 	int offset, moffset, ip_offset;
5696 	enum HCLGE_FD_KEY_OPT key_opt;
5697 	u16 tmp_x_s, tmp_y_s;
5698 	u32 tmp_x_l, tmp_y_l;
5699 	u8 *p = (u8 *)rule;
5700 	int i;
5701 
5702 	if (rule->unused_tuple & BIT(tuple_bit))
5703 		return true;
5704 
5705 	key_opt = tuple_key_info[tuple_bit].key_opt;
5706 	offset = tuple_key_info[tuple_bit].offset;
5707 	moffset = tuple_key_info[tuple_bit].moffset;
5708 
5709 	switch (key_opt) {
5710 	case KEY_OPT_U8:
5711 		calc_x(*key_x, p[offset], p[moffset]);
5712 		calc_y(*key_y, p[offset], p[moffset]);
5713 
5714 		return true;
5715 	case KEY_OPT_LE16:
5716 		calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5717 		calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5718 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5719 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5720 
5721 		return true;
5722 	case KEY_OPT_LE32:
5723 		calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5724 		calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5725 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5726 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5727 
5728 		return true;
5729 	case KEY_OPT_MAC:
5730 		for (i = 0; i < ETH_ALEN; i++) {
5731 			calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5732 			       p[moffset + i]);
5733 			calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5734 			       p[moffset + i]);
5735 		}
5736 
5737 		return true;
5738 	case KEY_OPT_IP:
5739 		ip_offset = IPV4_INDEX * sizeof(u32);
5740 		calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5741 		       *(u32 *)(&p[moffset + ip_offset]));
5742 		calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5743 		       *(u32 *)(&p[moffset + ip_offset]));
5744 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5745 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5746 
5747 		return true;
5748 	default:
5749 		return false;
5750 	}
5751 }
5752 
5753 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5754 				 u8 vf_id, u8 network_port_id)
5755 {
5756 	u32 port_number = 0;
5757 
5758 	if (port_type == HOST_PORT) {
5759 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5760 				pf_id);
5761 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5762 				vf_id);
5763 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5764 	} else {
5765 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5766 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5767 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5768 	}
5769 
5770 	return port_number;
5771 }
5772 
5773 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5774 				       __le32 *key_x, __le32 *key_y,
5775 				       struct hclge_fd_rule *rule)
5776 {
5777 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5778 	u8 cur_pos = 0, tuple_size, shift_bits;
5779 	unsigned int i;
5780 
5781 	for (i = 0; i < MAX_META_DATA; i++) {
5782 		tuple_size = meta_data_key_info[i].key_length;
5783 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5784 
5785 		switch (tuple_bit) {
5786 		case BIT(ROCE_TYPE):
5787 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5788 			cur_pos += tuple_size;
5789 			break;
5790 		case BIT(DST_VPORT):
5791 			port_number = hclge_get_port_number(HOST_PORT, 0,
5792 							    rule->vf_id, 0);
5793 			hnae3_set_field(meta_data,
5794 					GENMASK(cur_pos + tuple_size, cur_pos),
5795 					cur_pos, port_number);
5796 			cur_pos += tuple_size;
5797 			break;
5798 		default:
5799 			break;
5800 		}
5801 	}
5802 
5803 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5804 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5805 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5806 
5807 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5808 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5809 }
5810 
5811 /* A complete key consists of a meta data key and a tuple key.
5812  * The meta data key is stored in the MSB region, the tuple key in the
5813  * LSB region, and unused bits are filled with 0.
5814  */
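/* As a rough sketch (not to scale), for the 400-bit key mode the key built
 * by hclge_config_key() looks like:
 *
 *   MSB                                                     LSB
 *   +---------------+------------+---------------------------+
 *   | meta data key |  zero pad  |   enabled tuple fields    |
 *   +---------------+------------+---------------------------+
 *
 * where the meta data region starts at max_key_length / 8 -
 * MAX_META_DATA_LENGTH / 8 bytes from the beginning of the key buffer.
 */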
5815 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5816 			    struct hclge_fd_rule *rule)
5817 {
5818 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5819 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5820 	u8 *cur_key_x, *cur_key_y;
5821 	u8 meta_data_region;
5822 	u8 tuple_size;
5823 	int ret;
5824 	u32 i;
5825 
5826 	memset(key_x, 0, sizeof(key_x));
5827 	memset(key_y, 0, sizeof(key_y));
5828 	cur_key_x = key_x;
5829 	cur_key_y = key_y;
5830 
5831 	for (i = 0; i < MAX_TUPLE; i++) {
5832 		bool tuple_valid;
5833 
5834 		tuple_size = tuple_key_info[i].key_length / 8;
5835 		if (!(key_cfg->tuple_active & BIT(i)))
5836 			continue;
5837 
5838 		tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5839 						     cur_key_y, rule);
5840 		if (tuple_valid) {
5841 			cur_key_x += tuple_size;
5842 			cur_key_y += tuple_size;
5843 		}
5844 	}
5845 
5846 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5847 			MAX_META_DATA_LENGTH / 8;
5848 
5849 	hclge_fd_convert_meta_data(key_cfg,
5850 				   (__le32 *)(key_x + meta_data_region),
5851 				   (__le32 *)(key_y + meta_data_region),
5852 				   rule);
5853 
5854 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5855 				   true);
5856 	if (ret) {
5857 		dev_err(&hdev->pdev->dev,
5858 			"fd key_y config fail, loc=%u, ret=%d\n",
5859 			rule->location, ret);
5860 		return ret;
5861 	}
5862 
5863 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5864 				   true);
5865 	if (ret)
5866 		dev_err(&hdev->pdev->dev,
5867 			"fd key_x config fail, loc=%u, ret=%d\n",
5868 			rule->location, ret);
5869 	return ret;
5870 }
5871 
5872 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5873 			       struct hclge_fd_rule *rule)
5874 {
5875 	struct hclge_vport *vport = hdev->vport;
5876 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5877 	struct hclge_fd_ad_data ad_data;
5878 
5879 	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5880 	ad_data.ad_id = rule->location;
5881 
5882 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5883 		ad_data.drop_packet = true;
5884 	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5885 		ad_data.override_tc = true;
5886 		ad_data.queue_id =
5887 			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5888 		ad_data.tc_size =
5889 			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5890 	} else {
5891 		ad_data.forward_to_direct_queue = true;
5892 		ad_data.queue_id = rule->queue_id;
5893 	}
5894 
5895 	ad_data.use_counter = false;
5896 	ad_data.counter_id = 0;
5897 
5898 	ad_data.use_next_stage = false;
5899 	ad_data.next_input_key = 0;
5900 
5901 	ad_data.write_rule_id_to_bd = true;
5902 	ad_data.rule_id = rule->location;
5903 
5904 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5905 }
5906 
5907 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5908 				       u32 *unused_tuple)
5909 {
5910 	if (!spec || !unused_tuple)
5911 		return -EINVAL;
5912 
5913 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5914 
5915 	if (!spec->ip4src)
5916 		*unused_tuple |= BIT(INNER_SRC_IP);
5917 
5918 	if (!spec->ip4dst)
5919 		*unused_tuple |= BIT(INNER_DST_IP);
5920 
5921 	if (!spec->psrc)
5922 		*unused_tuple |= BIT(INNER_SRC_PORT);
5923 
5924 	if (!spec->pdst)
5925 		*unused_tuple |= BIT(INNER_DST_PORT);
5926 
5927 	if (!spec->tos)
5928 		*unused_tuple |= BIT(INNER_IP_TOS);
5929 
5930 	return 0;
5931 }
5932 
5933 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5934 				    u32 *unused_tuple)
5935 {
5936 	if (!spec || !unused_tuple)
5937 		return -EINVAL;
5938 
5939 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5940 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5941 
5942 	if (!spec->ip4src)
5943 		*unused_tuple |= BIT(INNER_SRC_IP);
5944 
5945 	if (!spec->ip4dst)
5946 		*unused_tuple |= BIT(INNER_DST_IP);
5947 
5948 	if (!spec->tos)
5949 		*unused_tuple |= BIT(INNER_IP_TOS);
5950 
5951 	if (!spec->proto)
5952 		*unused_tuple |= BIT(INNER_IP_PROTO);
5953 
5954 	if (spec->l4_4_bytes)
5955 		return -EOPNOTSUPP;
5956 
5957 	if (spec->ip_ver != ETH_RX_NFC_IP4)
5958 		return -EOPNOTSUPP;
5959 
5960 	return 0;
5961 }
5962 
5963 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5964 				       u32 *unused_tuple)
5965 {
5966 	if (!spec || !unused_tuple)
5967 		return -EINVAL;
5968 
5969 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5970 
5971 	/* check whether the src/dst ip address is used */
5972 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5973 		*unused_tuple |= BIT(INNER_SRC_IP);
5974 
5975 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5976 		*unused_tuple |= BIT(INNER_DST_IP);
5977 
5978 	if (!spec->psrc)
5979 		*unused_tuple |= BIT(INNER_SRC_PORT);
5980 
5981 	if (!spec->pdst)
5982 		*unused_tuple |= BIT(INNER_DST_PORT);
5983 
5984 	if (!spec->tclass)
5985 		*unused_tuple |= BIT(INNER_IP_TOS);
5986 
5987 	return 0;
5988 }
5989 
5990 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5991 				    u32 *unused_tuple)
5992 {
5993 	if (!spec || !unused_tuple)
5994 		return -EINVAL;
5995 
5996 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5997 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5998 
5999 	/* check whether the src/dst ip address is used */
6000 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6001 		*unused_tuple |= BIT(INNER_SRC_IP);
6002 
6003 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6004 		*unused_tuple |= BIT(INNER_DST_IP);
6005 
6006 	if (!spec->l4_proto)
6007 		*unused_tuple |= BIT(INNER_IP_PROTO);
6008 
6009 	if (!spec->tclass)
6010 		*unused_tuple |= BIT(INNER_IP_TOS);
6011 
6012 	if (spec->l4_4_bytes)
6013 		return -EOPNOTSUPP;
6014 
6015 	return 0;
6016 }
6017 
6018 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6019 {
6020 	if (!spec || !unused_tuple)
6021 		return -EINVAL;
6022 
6023 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6024 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6025 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6026 
6027 	if (is_zero_ether_addr(spec->h_source))
6028 		*unused_tuple |= BIT(INNER_SRC_MAC);
6029 
6030 	if (is_zero_ether_addr(spec->h_dest))
6031 		*unused_tuple |= BIT(INNER_DST_MAC);
6032 
6033 	if (!spec->h_proto)
6034 		*unused_tuple |= BIT(INNER_ETH_TYPE);
6035 
6036 	return 0;
6037 }
6038 
6039 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6040 				    struct ethtool_rx_flow_spec *fs,
6041 				    u32 *unused_tuple)
6042 {
6043 	if (fs->flow_type & FLOW_EXT) {
6044 		if (fs->h_ext.vlan_etype) {
6045 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6046 			return -EOPNOTSUPP;
6047 		}
6048 
6049 		if (!fs->h_ext.vlan_tci)
6050 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6051 
6052 		if (fs->m_ext.vlan_tci &&
6053 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6054 			dev_err(&hdev->pdev->dev,
6055 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6056 				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6057 			return -EINVAL;
6058 		}
6059 	} else {
6060 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6061 	}
6062 
6063 	if (fs->flow_type & FLOW_MAC_EXT) {
6064 		if (hdev->fd_cfg.fd_mode !=
6065 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6066 			dev_err(&hdev->pdev->dev,
6067 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
6068 			return -EOPNOTSUPP;
6069 		}
6070 
6071 		if (is_zero_ether_addr(fs->h_ext.h_dest))
6072 			*unused_tuple |= BIT(INNER_DST_MAC);
6073 		else
6074 			*unused_tuple &= ~BIT(INNER_DST_MAC);
6075 	}
6076 
6077 	return 0;
6078 }
6079 
6080 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6081 				       struct hclge_fd_user_def_info *info)
6082 {
6083 	switch (flow_type) {
6084 	case ETHER_FLOW:
6085 		info->layer = HCLGE_FD_USER_DEF_L2;
6086 		*unused_tuple &= ~BIT(INNER_L2_RSV);
6087 		break;
6088 	case IP_USER_FLOW:
6089 	case IPV6_USER_FLOW:
6090 		info->layer = HCLGE_FD_USER_DEF_L3;
6091 		*unused_tuple &= ~BIT(INNER_L3_RSV);
6092 		break;
6093 	case TCP_V4_FLOW:
6094 	case UDP_V4_FLOW:
6095 	case TCP_V6_FLOW:
6096 	case UDP_V6_FLOW:
6097 		info->layer = HCLGE_FD_USER_DEF_L4;
6098 		*unused_tuple &= ~BIT(INNER_L4_RSV);
6099 		break;
6100 	default:
6101 		return -EOPNOTSUPP;
6102 	}
6103 
6104 	return 0;
6105 }
6106 
6107 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6108 {
6109 	return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6110 }
6111 
6112 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6113 					 struct ethtool_rx_flow_spec *fs,
6114 					 u32 *unused_tuple,
6115 					 struct hclge_fd_user_def_info *info)
6116 {
6117 	u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6118 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6119 	u16 data, offset, data_mask, offset_mask;
6120 	int ret;
6121 
6122 	info->layer = HCLGE_FD_USER_DEF_NONE;
6123 	*unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6124 
6125 	if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6126 		return 0;
6127 
6128 	/* the user-def data from ethtool is a 64-bit value; bits 0~15 are
6129 	 * used for the data and bits 32~47 for the offset.
6130 	 */
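	/* For example (assuming the standard ethtool ntuple user-def syntax),
	 * a 64-bit user-def value of 0x00220000f00d requests offset 0x22
	 * (bits 32~47) with match data 0xf00d (bits 0~15).
	 */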
6131 	data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6132 	data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6133 	offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6134 	offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6135 
6136 	if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6137 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6138 		return -EOPNOTSUPP;
6139 	}
6140 
6141 	if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6142 		dev_err(&hdev->pdev->dev,
6143 			"user-def offset[%u] should be no more than %u\n",
6144 			offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6145 		return -EINVAL;
6146 	}
6147 
6148 	if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6149 		dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6150 		return -EINVAL;
6151 	}
6152 
6153 	ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6154 	if (ret) {
6155 		dev_err(&hdev->pdev->dev,
6156 			"unsupported flow type for user-def bytes, ret = %d\n",
6157 			ret);
6158 		return ret;
6159 	}
6160 
6161 	info->data = data;
6162 	info->data_mask = data_mask;
6163 	info->offset = offset;
6164 
6165 	return 0;
6166 }
6167 
6168 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6169 			       struct ethtool_rx_flow_spec *fs,
6170 			       u32 *unused_tuple,
6171 			       struct hclge_fd_user_def_info *info)
6172 {
6173 	u32 flow_type;
6174 	int ret;
6175 
6176 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6177 		dev_err(&hdev->pdev->dev,
6178 			"failed to config fd rules, invalid rule location: %u, max is %u.\n",
6179 			fs->location,
6180 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6181 		return -EINVAL;
6182 	}
6183 
6184 	ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6185 	if (ret)
6186 		return ret;
6187 
6188 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6189 	switch (flow_type) {
6190 	case SCTP_V4_FLOW:
6191 	case TCP_V4_FLOW:
6192 	case UDP_V4_FLOW:
6193 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6194 						  unused_tuple);
6195 		break;
6196 	case IP_USER_FLOW:
6197 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6198 					       unused_tuple);
6199 		break;
6200 	case SCTP_V6_FLOW:
6201 	case TCP_V6_FLOW:
6202 	case UDP_V6_FLOW:
6203 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6204 						  unused_tuple);
6205 		break;
6206 	case IPV6_USER_FLOW:
6207 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6208 					       unused_tuple);
6209 		break;
6210 	case ETHER_FLOW:
6211 		if (hdev->fd_cfg.fd_mode !=
6212 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6213 			dev_err(&hdev->pdev->dev,
6214 				"ETHER_FLOW is not supported in current fd mode!\n");
6215 			return -EOPNOTSUPP;
6216 		}
6217 
6218 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6219 						 unused_tuple);
6220 		break;
6221 	default:
6222 		dev_err(&hdev->pdev->dev,
6223 			"unsupported protocol type, protocol type = %#x\n",
6224 			flow_type);
6225 		return -EOPNOTSUPP;
6226 	}
6227 
6228 	if (ret) {
6229 		dev_err(&hdev->pdev->dev,
6230 			"failed to check flow union tuple, ret = %d\n",
6231 			ret);
6232 		return ret;
6233 	}
6234 
6235 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6236 }
6237 
6238 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6239 				      struct ethtool_rx_flow_spec *fs,
6240 				      struct hclge_fd_rule *rule, u8 ip_proto)
6241 {
6242 	rule->tuples.src_ip[IPV4_INDEX] =
6243 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6244 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6245 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6246 
6247 	rule->tuples.dst_ip[IPV4_INDEX] =
6248 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6249 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6250 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6251 
6252 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6253 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6254 
6255 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6256 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6257 
6258 	rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6259 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6260 
6261 	rule->tuples.ether_proto = ETH_P_IP;
6262 	rule->tuples_mask.ether_proto = 0xFFFF;
6263 
6264 	rule->tuples.ip_proto = ip_proto;
6265 	rule->tuples_mask.ip_proto = 0xFF;
6266 }
6267 
6268 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6269 				   struct ethtool_rx_flow_spec *fs,
6270 				   struct hclge_fd_rule *rule)
6271 {
6272 	rule->tuples.src_ip[IPV4_INDEX] =
6273 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6274 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6275 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6276 
6277 	rule->tuples.dst_ip[IPV4_INDEX] =
6278 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6279 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6280 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6281 
6282 	rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6283 	rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6284 
6285 	rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6286 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6287 
6288 	rule->tuples.ether_proto = ETH_P_IP;
6289 	rule->tuples_mask.ether_proto = 0xFFFF;
6290 }
6291 
6292 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6293 				      struct ethtool_rx_flow_spec *fs,
6294 				      struct hclge_fd_rule *rule, u8 ip_proto)
6295 {
6296 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6297 			  IPV6_SIZE);
6298 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6299 			  IPV6_SIZE);
6300 
6301 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6302 			  IPV6_SIZE);
6303 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6304 			  IPV6_SIZE);
6305 
6306 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6307 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6308 
6309 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6310 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6311 
6312 	rule->tuples.ether_proto = ETH_P_IPV6;
6313 	rule->tuples_mask.ether_proto = 0xFFFF;
6314 
6315 	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6316 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6317 
6318 	rule->tuples.ip_proto = ip_proto;
6319 	rule->tuples_mask.ip_proto = 0xFF;
6320 }
6321 
6322 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6323 				   struct ethtool_rx_flow_spec *fs,
6324 				   struct hclge_fd_rule *rule)
6325 {
6326 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6327 			  IPV6_SIZE);
6328 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6329 			  IPV6_SIZE);
6330 
6331 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6332 			  IPV6_SIZE);
6333 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6334 			  IPV6_SIZE);
6335 
6336 	rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6337 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6338 
6339 	rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
6340 	rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6341 
6342 	rule->tuples.ether_proto = ETH_P_IPV6;
6343 	rule->tuples_mask.ether_proto = 0xFFFF;
6344 }
6345 
6346 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6347 				     struct ethtool_rx_flow_spec *fs,
6348 				     struct hclge_fd_rule *rule)
6349 {
6350 	ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6351 	ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6352 
6353 	ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6354 	ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6355 
6356 	rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6357 	rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6358 }
6359 
6360 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6361 					struct hclge_fd_rule *rule)
6362 {
6363 	switch (info->layer) {
6364 	case HCLGE_FD_USER_DEF_L2:
6365 		rule->tuples.l2_user_def = info->data;
6366 		rule->tuples_mask.l2_user_def = info->data_mask;
6367 		break;
6368 	case HCLGE_FD_USER_DEF_L3:
6369 		rule->tuples.l3_user_def = info->data;
6370 		rule->tuples_mask.l3_user_def = info->data_mask;
6371 		break;
6372 	case HCLGE_FD_USER_DEF_L4:
6373 		rule->tuples.l4_user_def = (u32)info->data << 16;
6374 		rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6375 		break;
6376 	default:
6377 		break;
6378 	}
6379 
6380 	rule->ep.user_def = *info;
6381 }
6382 
6383 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6384 			      struct ethtool_rx_flow_spec *fs,
6385 			      struct hclge_fd_rule *rule,
6386 			      struct hclge_fd_user_def_info *info)
6387 {
6388 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6389 
6390 	switch (flow_type) {
6391 	case SCTP_V4_FLOW:
6392 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6393 		break;
6394 	case TCP_V4_FLOW:
6395 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6396 		break;
6397 	case UDP_V4_FLOW:
6398 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6399 		break;
6400 	case IP_USER_FLOW:
6401 		hclge_fd_get_ip4_tuple(hdev, fs, rule);
6402 		break;
6403 	case SCTP_V6_FLOW:
6404 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6405 		break;
6406 	case TCP_V6_FLOW:
6407 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6408 		break;
6409 	case UDP_V6_FLOW:
6410 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6411 		break;
6412 	case IPV6_USER_FLOW:
6413 		hclge_fd_get_ip6_tuple(hdev, fs, rule);
6414 		break;
6415 	case ETHER_FLOW:
6416 		hclge_fd_get_ether_tuple(hdev, fs, rule);
6417 		break;
6418 	default:
6419 		return -EOPNOTSUPP;
6420 	}
6421 
6422 	if (fs->flow_type & FLOW_EXT) {
6423 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6424 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6425 		hclge_fd_get_user_def_tuple(info, rule);
6426 	}
6427 
6428 	if (fs->flow_type & FLOW_MAC_EXT) {
6429 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6430 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6431 	}
6432 
6433 	return 0;
6434 }
6435 
6436 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6437 				struct hclge_fd_rule *rule)
6438 {
6439 	int ret;
6440 
6441 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6442 	if (ret)
6443 		return ret;
6444 
6445 	return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6446 }
6447 
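/* Common path for adding an fd rule from ethtool or tc flower: reject a rule
 * type that conflicts with the currently active type, validate the user-def
 * refcount, flush any aRFS rules, program the rule to hardware and then
 * record it in fd_rule_list as ACTIVE.
 */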
6448 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6449 				     struct hclge_fd_rule *rule)
6450 {
6451 	int ret;
6452 
6453 	spin_lock_bh(&hdev->fd_rule_lock);
6454 
6455 	if (hdev->fd_active_type != rule->rule_type &&
6456 	    (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6457 	     hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6458 		dev_err(&hdev->pdev->dev,
6459 			"mode conflict (new type %d, active type %d), please delete existing rules first\n",
6460 			rule->rule_type, hdev->fd_active_type);
6461 		spin_unlock_bh(&hdev->fd_rule_lock);
6462 		return -EINVAL;
6463 	}
6464 
6465 	ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6466 	if (ret)
6467 		goto out;
6468 
6469 	ret = hclge_clear_arfs_rules(hdev);
6470 	if (ret)
6471 		goto out;
6472 
6473 	ret = hclge_fd_config_rule(hdev, rule);
6474 	if (ret)
6475 		goto out;
6476 
6477 	hclge_update_fd_list(hdev, HCLGE_FD_ACTIVE, rule->location, rule);
6478 	hdev->fd_active_type = rule->rule_type;
6479 
6480 out:
6481 	spin_unlock_bh(&hdev->fd_rule_lock);
6482 	return ret;
6483 }
6484 
6485 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6486 {
6487 	struct hclge_vport *vport = hclge_get_vport(handle);
6488 	struct hclge_dev *hdev = vport->back;
6489 
6490 	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6491 }
6492 
6493 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
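/* Decode the ethtool ring_cookie into an fd action: RX_CLS_FLOW_DISC maps to
 * the drop action, otherwise the cookie selects a VF (0 means the PF itself)
 * and a queue index, which are validated against the VF count and its tqp
 * number.
 */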
6494 				      u16 *vport_id, u8 *action, u16 *queue_id)
6495 {
6496 	struct hclge_vport *vport = hdev->vport;
6497 
6498 	if (ring_cookie == RX_CLS_FLOW_DISC) {
6499 		*action = HCLGE_FD_ACTION_DROP_PACKET;
6500 	} else {
6501 		u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6502 		u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6503 		u16 tqps;
6504 
6505 		if (vf > hdev->num_req_vfs) {
6506 			dev_err(&hdev->pdev->dev,
6507 				"Error: vf id (%u) > max vf num (%u)\n",
6508 				vf, hdev->num_req_vfs);
6509 			return -EINVAL;
6510 		}
6511 
6512 		*vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6513 		tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6514 
6515 		if (ring >= tqps) {
6516 			dev_err(&hdev->pdev->dev,
6517 				"Error: queue id (%u) > max tqp num (%u)\n",
6518 				ring, tqps - 1);
6519 			return -EINVAL;
6520 		}
6521 
6522 		*action = HCLGE_FD_ACTION_SELECT_QUEUE;
6523 		*queue_id = ring;
6524 	}
6525 
6526 	return 0;
6527 }
6528 
6529 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6530 			      struct ethtool_rxnfc *cmd)
6531 {
6532 	struct hclge_vport *vport = hclge_get_vport(handle);
6533 	struct hclge_dev *hdev = vport->back;
6534 	struct hclge_fd_user_def_info info;
6535 	u16 dst_vport_id = 0, q_index = 0;
6536 	struct ethtool_rx_flow_spec *fs;
6537 	struct hclge_fd_rule *rule;
6538 	u32 unused = 0;
6539 	u8 action;
6540 	int ret;
6541 
6542 	if (!hnae3_dev_fd_supported(hdev)) {
6543 		dev_err(&hdev->pdev->dev,
6544 			"flow table director is not supported\n");
6545 		return -EOPNOTSUPP;
6546 	}
6547 
6548 	if (!hdev->fd_en) {
6549 		dev_err(&hdev->pdev->dev,
6550 			"please enable flow director first\n");
6551 		return -EOPNOTSUPP;
6552 	}
6553 
6554 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6555 
6556 	ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6557 	if (ret)
6558 		return ret;
6559 
6560 	ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6561 					 &action, &q_index);
6562 	if (ret)
6563 		return ret;
6564 
6565 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6566 	if (!rule)
6567 		return -ENOMEM;
6568 
6569 	ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6570 	if (ret) {
6571 		kfree(rule);
6572 		return ret;
6573 	}
6574 
6575 	rule->flow_type = fs->flow_type;
6576 	rule->location = fs->location;
6577 	rule->unused_tuple = unused;
6578 	rule->vf_id = dst_vport_id;
6579 	rule->queue_id = q_index;
6580 	rule->action = action;
6581 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
6582 
6583 	ret = hclge_add_fd_entry_common(hdev, rule);
6584 	if (ret)
6585 		kfree(rule);
6586 
6587 	return ret;
6588 }
6589 
6590 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6591 			      struct ethtool_rxnfc *cmd)
6592 {
6593 	struct hclge_vport *vport = hclge_get_vport(handle);
6594 	struct hclge_dev *hdev = vport->back;
6595 	struct ethtool_rx_flow_spec *fs;
6596 	int ret;
6597 
6598 	if (!hnae3_dev_fd_supported(hdev))
6599 		return -EOPNOTSUPP;
6600 
6601 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6602 
6603 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6604 		return -EINVAL;
6605 
6606 	spin_lock_bh(&hdev->fd_rule_lock);
6607 	if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6608 	    !test_bit(fs->location, hdev->fd_bmap)) {
6609 		dev_err(&hdev->pdev->dev,
6610 			"Delete fail, rule %u is inexistent\n", fs->location);
6611 		spin_unlock_bh(&hdev->fd_rule_lock);
6612 		return -ENOENT;
6613 	}
6614 
6615 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6616 				   NULL, false);
6617 	if (ret)
6618 		goto out;
6619 
6620 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6621 
6622 out:
6623 	spin_unlock_bh(&hdev->fd_rule_lock);
6624 	return ret;
6625 }
6626 
6627 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6628 					 bool clear_list)
6629 {
6630 	struct hclge_fd_rule *rule;
6631 	struct hlist_node *node;
6632 	u16 location;
6633 
6634 	if (!hnae3_dev_fd_supported(hdev))
6635 		return;
6636 
6637 	spin_lock_bh(&hdev->fd_rule_lock);
6638 
6639 	for_each_set_bit(location, hdev->fd_bmap,
6640 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6641 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6642 				     NULL, false);
6643 
6644 	if (clear_list) {
6645 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6646 					  rule_node) {
6647 			hlist_del(&rule->rule_node);
6648 			kfree(rule);
6649 		}
6650 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6651 		hdev->hclge_fd_rule_num = 0;
6652 		bitmap_zero(hdev->fd_bmap,
6653 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6654 	}
6655 
6656 	spin_unlock_bh(&hdev->fd_rule_lock);
6657 }
6658 
6659 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6660 {
6661 	hclge_clear_fd_rules_in_list(hdev, true);
6662 	hclge_fd_disable_user_def(hdev);
6663 }
6664 
6665 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6666 {
6667 	struct hclge_vport *vport = hclge_get_vport(handle);
6668 	struct hclge_dev *hdev = vport->back;
6669 	struct hclge_fd_rule *rule;
6670 	struct hlist_node *node;
6671 
6672 	/* Return ok here, because reset error handling will check this
6673 	 * return value. If an error is returned here, the reset process
6674 	 * will fail.
6675 	 */
6676 	if (!hnae3_dev_fd_supported(hdev))
6677 		return 0;
6678 
6679 	/* if fd is disabled, the rules should not be restored during reset */
6680 	if (!hdev->fd_en)
6681 		return 0;
6682 
6683 	spin_lock_bh(&hdev->fd_rule_lock);
6684 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6685 		if (rule->state == HCLGE_FD_ACTIVE)
6686 			rule->state = HCLGE_FD_TO_ADD;
6687 	}
6688 	spin_unlock_bh(&hdev->fd_rule_lock);
6689 	set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6690 
6691 	return 0;
6692 }
6693 
6694 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6695 				 struct ethtool_rxnfc *cmd)
6696 {
6697 	struct hclge_vport *vport = hclge_get_vport(handle);
6698 	struct hclge_dev *hdev = vport->back;
6699 
6700 	if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6701 		return -EOPNOTSUPP;
6702 
6703 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
6704 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6705 
6706 	return 0;
6707 }
6708 
6709 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6710 				     struct ethtool_tcpip4_spec *spec,
6711 				     struct ethtool_tcpip4_spec *spec_mask)
6712 {
6713 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6714 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6715 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6716 
6717 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6718 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6719 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6720 
6721 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6722 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6723 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6724 
6725 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6726 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6727 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6728 
6729 	spec->tos = rule->tuples.ip_tos;
6730 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6731 			0 : rule->tuples_mask.ip_tos;
6732 }
6733 
6734 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6735 				  struct ethtool_usrip4_spec *spec,
6736 				  struct ethtool_usrip4_spec *spec_mask)
6737 {
6738 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6739 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6740 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6741 
6742 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6743 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6744 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6745 
6746 	spec->tos = rule->tuples.ip_tos;
6747 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6748 			0 : rule->tuples_mask.ip_tos;
6749 
6750 	spec->proto = rule->tuples.ip_proto;
6751 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6752 			0 : rule->tuples_mask.ip_proto;
6753 
6754 	spec->ip_ver = ETH_RX_NFC_IP4;
6755 }
6756 
6757 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6758 				     struct ethtool_tcpip6_spec *spec,
6759 				     struct ethtool_tcpip6_spec *spec_mask)
6760 {
6761 	cpu_to_be32_array(spec->ip6src,
6762 			  rule->tuples.src_ip, IPV6_SIZE);
6763 	cpu_to_be32_array(spec->ip6dst,
6764 			  rule->tuples.dst_ip, IPV6_SIZE);
6765 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6766 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6767 	else
6768 		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6769 				  IPV6_SIZE);
6770 
6771 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6772 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6773 	else
6774 		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6775 				  IPV6_SIZE);
6776 
6777 	spec->tclass = rule->tuples.ip_tos;
6778 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6779 			0 : rule->tuples_mask.ip_tos;
6780 
6781 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6782 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6783 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6784 
6785 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6786 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6787 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6788 }
6789 
6790 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6791 				  struct ethtool_usrip6_spec *spec,
6792 				  struct ethtool_usrip6_spec *spec_mask)
6793 {
6794 	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6795 	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6796 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6797 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6798 	else
6799 		cpu_to_be32_array(spec_mask->ip6src,
6800 				  rule->tuples_mask.src_ip, IPV6_SIZE);
6801 
6802 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6803 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6804 	else
6805 		cpu_to_be32_array(spec_mask->ip6dst,
6806 				  rule->tuples_mask.dst_ip, IPV6_SIZE);
6807 
6808 	spec->tclass = rule->tuples.ip_tos;
6809 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6810 			0 : rule->tuples_mask.ip_tos;
6811 
6812 	spec->l4_proto = rule->tuples.ip_proto;
6813 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6814 			0 : rule->tuples_mask.ip_proto;
6815 }
6816 
6817 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6818 				    struct ethhdr *spec,
6819 				    struct ethhdr *spec_mask)
6820 {
6821 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6822 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6823 
6824 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6825 		eth_zero_addr(spec_mask->h_source);
6826 	else
6827 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6828 
6829 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
6830 		eth_zero_addr(spec_mask->h_dest);
6831 	else
6832 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6833 
6834 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6835 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6836 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6837 }
6838 
6839 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6840 				       struct hclge_fd_rule *rule)
6841 {
6842 	if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6843 	    HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6844 		fs->h_ext.data[0] = 0;
6845 		fs->h_ext.data[1] = 0;
6846 		fs->m_ext.data[0] = 0;
6847 		fs->m_ext.data[1] = 0;
6848 	} else {
6849 		fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6850 		fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6851 		fs->m_ext.data[0] =
6852 				cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6853 		fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6854 	}
6855 }
6856 
6857 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6858 				  struct hclge_fd_rule *rule)
6859 {
6860 	if (fs->flow_type & FLOW_EXT) {
6861 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6862 		fs->m_ext.vlan_tci =
6863 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6864 				0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6865 
6866 		hclge_fd_get_user_def_info(fs, rule);
6867 	}
6868 
6869 	if (fs->flow_type & FLOW_MAC_EXT) {
6870 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6871 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6872 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6873 		else
6874 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
6875 					rule->tuples_mask.dst_mac);
6876 	}
6877 }
6878 
6879 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6880 				  struct ethtool_rxnfc *cmd)
6881 {
6882 	struct hclge_vport *vport = hclge_get_vport(handle);
6883 	struct hclge_fd_rule *rule = NULL;
6884 	struct hclge_dev *hdev = vport->back;
6885 	struct ethtool_rx_flow_spec *fs;
6886 	struct hlist_node *node2;
6887 
6888 	if (!hnae3_dev_fd_supported(hdev))
6889 		return -EOPNOTSUPP;
6890 
6891 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6892 
6893 	spin_lock_bh(&hdev->fd_rule_lock);
6894 
6895 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6896 		if (rule->location >= fs->location)
6897 			break;
6898 	}
6899 
6900 	if (!rule || fs->location != rule->location) {
6901 		spin_unlock_bh(&hdev->fd_rule_lock);
6902 
6903 		return -ENOENT;
6904 	}
6905 
6906 	fs->flow_type = rule->flow_type;
6907 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6908 	case SCTP_V4_FLOW:
6909 	case TCP_V4_FLOW:
6910 	case UDP_V4_FLOW:
6911 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6912 					 &fs->m_u.tcp_ip4_spec);
6913 		break;
6914 	case IP_USER_FLOW:
6915 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6916 				      &fs->m_u.usr_ip4_spec);
6917 		break;
6918 	case SCTP_V6_FLOW:
6919 	case TCP_V6_FLOW:
6920 	case UDP_V6_FLOW:
6921 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6922 					 &fs->m_u.tcp_ip6_spec);
6923 		break;
6924 	case IPV6_USER_FLOW:
6925 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6926 				      &fs->m_u.usr_ip6_spec);
6927 		break;
6928 	/* The flow type of the fd rule has been checked before it was added
6929 	 * to the rule list. As the other flow types have been handled above,
6930 	 * the default case must be ETHER_FLOW.
6931 	 */
6932 	default:
6933 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6934 					&fs->m_u.ether_spec);
6935 		break;
6936 	}
6937 
6938 	hclge_fd_get_ext_info(fs, rule);
6939 
6940 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6941 		fs->ring_cookie = RX_CLS_FLOW_DISC;
6942 	} else {
6943 		u64 vf_id;
6944 
6945 		fs->ring_cookie = rule->queue_id;
6946 		vf_id = rule->vf_id;
6947 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6948 		fs->ring_cookie |= vf_id;
6949 	}
6950 
6951 	spin_unlock_bh(&hdev->fd_rule_lock);
6952 
6953 	return 0;
6954 }
6955 
6956 static int hclge_get_all_rules(struct hnae3_handle *handle,
6957 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
6958 {
6959 	struct hclge_vport *vport = hclge_get_vport(handle);
6960 	struct hclge_dev *hdev = vport->back;
6961 	struct hclge_fd_rule *rule;
6962 	struct hlist_node *node2;
6963 	int cnt = 0;
6964 
6965 	if (!hnae3_dev_fd_supported(hdev))
6966 		return -EOPNOTSUPP;
6967 
6968 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6969 
6970 	spin_lock_bh(&hdev->fd_rule_lock);
6971 	hlist_for_each_entry_safe(rule, node2,
6972 				  &hdev->fd_rule_list, rule_node) {
6973 		if (cnt == cmd->rule_cnt) {
6974 			spin_unlock_bh(&hdev->fd_rule_lock);
6975 			return -EMSGSIZE;
6976 		}
6977 
6978 		if (rule->state == HCLGE_FD_TO_DEL)
6979 			continue;
6980 
6981 		rule_locs[cnt] = rule->location;
6982 		cnt++;
6983 	}
6984 
6985 	spin_unlock_bh(&hdev->fd_rule_lock);
6986 
6987 	cmd->rule_cnt = cnt;
6988 
6989 	return 0;
6990 }
6991 
6992 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6993 				     struct hclge_fd_rule_tuples *tuples)
6994 {
6995 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6996 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6997 
6998 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6999 	tuples->ip_proto = fkeys->basic.ip_proto;
7000 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7001 
7002 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7003 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7004 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7005 	} else {
7006 		int i;
7007 
7008 		for (i = 0; i < IPV6_SIZE; i++) {
7009 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7010 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7011 		}
7012 	}
7013 }
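/* Illustrative sketch (not part of the driver): how the 4-word src_ip/
 * dst_ip arrays are filled by hclge_fd_get_flow_tuples() above. An IPv4
 * address only occupies the last word, while an IPv6 address fills all
 * IPV6_SIZE words in CPU byte order. The addresses below are hypothetical
 * examples.
 */
#if 0
	/* IPv4 192.0.2.1 -> be32_to_cpu() yields 0xc0000201 */
	tuples->src_ip[0] = 0;
	tuples->src_ip[1] = 0;
	tuples->src_ip[2] = 0;
	tuples->src_ip[3] = 0xc0000201;

	/* IPv6 2001:db8::1 -> one 32-bit word per array element */
	tuples->src_ip[0] = 0x20010db8;
	tuples->src_ip[1] = 0x00000000;
	tuples->src_ip[2] = 0x00000000;
	tuples->src_ip[3] = 0x00000001;
#endif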
7014 
7015 /* traverse all rules, check whether an existing rule has the same tuples */
7016 static struct hclge_fd_rule *
7017 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7018 			  const struct hclge_fd_rule_tuples *tuples)
7019 {
7020 	struct hclge_fd_rule *rule = NULL;
7021 	struct hlist_node *node;
7022 
7023 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7024 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7025 			return rule;
7026 	}
7027 
7028 	return NULL;
7029 }
7030 
7031 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7032 				     struct hclge_fd_rule *rule)
7033 {
7034 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7035 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7036 			     BIT(INNER_SRC_PORT);
7037 	rule->action = 0;
7038 	rule->vf_id = 0;
7039 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7040 	if (tuples->ether_proto == ETH_P_IP) {
7041 		if (tuples->ip_proto == IPPROTO_TCP)
7042 			rule->flow_type = TCP_V4_FLOW;
7043 		else
7044 			rule->flow_type = UDP_V4_FLOW;
7045 	} else {
7046 		if (tuples->ip_proto == IPPROTO_TCP)
7047 			rule->flow_type = TCP_V6_FLOW;
7048 		else
7049 			rule->flow_type = UDP_V6_FLOW;
7050 	}
7051 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7052 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7053 }
7054 
7055 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7056 				      u16 flow_id, struct flow_keys *fkeys)
7057 {
7058 	struct hclge_vport *vport = hclge_get_vport(handle);
7059 	struct hclge_fd_rule_tuples new_tuples = {};
7060 	struct hclge_dev *hdev = vport->back;
7061 	struct hclge_fd_rule *rule;
7062 	u16 bit_id;
7063 
7064 	if (!hnae3_dev_fd_supported(hdev))
7065 		return -EOPNOTSUPP;
7066 
7067 	/* when a flow director rule has already been added by the user,
7068 	 * arfs should not work
7069 	 */
7070 	spin_lock_bh(&hdev->fd_rule_lock);
7071 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7072 	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7073 		spin_unlock_bh(&hdev->fd_rule_lock);
7074 		return -EOPNOTSUPP;
7075 	}
7076 
7077 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7078 
7079 	/* check whether a flow director filter already exists for this flow:
7080 	 * if not, create a new filter for it;
7081 	 * if a filter exists with a different queue id, modify the filter;
7082 	 * if a filter exists with the same queue id, do nothing
7083 	 */
7084 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7085 	if (!rule) {
7086 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7087 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7088 			spin_unlock_bh(&hdev->fd_rule_lock);
7089 			return -ENOSPC;
7090 		}
7091 
7092 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7093 		if (!rule) {
7094 			spin_unlock_bh(&hdev->fd_rule_lock);
7095 			return -ENOMEM;
7096 		}
7097 
7098 		rule->location = bit_id;
7099 		rule->arfs.flow_id = flow_id;
7100 		rule->queue_id = queue_id;
7101 		hclge_fd_build_arfs_rule(&new_tuples, rule);
7102 		hclge_update_fd_list(hdev, HCLGE_FD_TO_ADD, rule->location,
7103 				     rule);
7104 		hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7105 	} else if (rule->queue_id != queue_id) {
7106 		rule->queue_id = queue_id;
7107 		rule->state = HCLGE_FD_TO_ADD;
7108 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7109 		hclge_task_schedule(hdev, 0);
7110 	}
7111 	spin_unlock_bh(&hdev->fd_rule_lock);
7112 	return rule->location;
7113 }
7114 
7115 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7116 {
7117 #ifdef CONFIG_RFS_ACCEL
7118 	struct hnae3_handle *handle = &hdev->vport[0].nic;
7119 	struct hclge_fd_rule *rule;
7120 	struct hlist_node *node;
7121 
7122 	spin_lock_bh(&hdev->fd_rule_lock);
7123 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7124 		spin_unlock_bh(&hdev->fd_rule_lock);
7125 		return;
7126 	}
7127 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7128 		if (rule->state != HCLGE_FD_ACTIVE)
7129 			continue;
7130 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7131 					rule->arfs.flow_id, rule->location)) {
7132 			rule->state = HCLGE_FD_TO_DEL;
7133 			set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7134 		}
7135 	}
7136 	spin_unlock_bh(&hdev->fd_rule_lock);
7137 #endif
7138 }
7139 
7140 /* must be called with fd_rule_lock held */
7141 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7142 {
7143 #ifdef CONFIG_RFS_ACCEL
7144 	struct hclge_fd_rule *rule;
7145 	struct hlist_node *node;
7146 	int ret;
7147 
7148 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7149 		return 0;
7150 
7151 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7152 		switch (rule->state) {
7153 		case HCLGE_FD_TO_DEL:
7154 		case HCLGE_FD_ACTIVE:
7155 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7156 						   rule->location, NULL, false);
7157 			if (ret)
7158 				return ret;
7159 			fallthrough;
7160 		case HCLGE_FD_TO_ADD:
7161 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7162 			hlist_del(&rule->rule_node);
7163 			kfree(rule);
7164 			break;
7165 		default:
7166 			break;
7167 		}
7168 	}
7169 	hclge_sync_fd_state(hdev);
7170 
7171 	return 0;
7172 #endif
7173 }
7174 
7175 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7176 				    struct hclge_fd_rule *rule)
7177 {
7178 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7179 		struct flow_match_basic match;
7180 		u16 ethtype_key, ethtype_mask;
7181 
7182 		flow_rule_match_basic(flow, &match);
7183 		ethtype_key = ntohs(match.key->n_proto);
7184 		ethtype_mask = ntohs(match.mask->n_proto);
7185 
7186 		if (ethtype_key == ETH_P_ALL) {
7187 			ethtype_key = 0;
7188 			ethtype_mask = 0;
7189 		}
7190 		rule->tuples.ether_proto = ethtype_key;
7191 		rule->tuples_mask.ether_proto = ethtype_mask;
7192 		rule->tuples.ip_proto = match.key->ip_proto;
7193 		rule->tuples_mask.ip_proto = match.mask->ip_proto;
7194 	} else {
7195 		rule->unused_tuple |= BIT(INNER_IP_PROTO);
7196 		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7197 	}
7198 }
7199 
7200 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7201 				  struct hclge_fd_rule *rule)
7202 {
7203 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7204 		struct flow_match_eth_addrs match;
7205 
7206 		flow_rule_match_eth_addrs(flow, &match);
7207 		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7208 		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7209 		ether_addr_copy(rule->tuples.src_mac, match.key->src);
7210 		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7211 	} else {
7212 		rule->unused_tuple |= BIT(INNER_DST_MAC);
7213 		rule->unused_tuple |= BIT(INNER_SRC_MAC);
7214 	}
7215 }
7216 
7217 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7218 				   struct hclge_fd_rule *rule)
7219 {
7220 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7221 		struct flow_match_vlan match;
7222 
7223 		flow_rule_match_vlan(flow, &match);
7224 		rule->tuples.vlan_tag1 = match.key->vlan_id |
7225 				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
7226 		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7227 				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7228 	} else {
7229 		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7230 	}
7231 }
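/* Illustrative sketch (not part of the driver): vlan_tag1 above packs the
 * VLAN ID into bits 11:0 and the priority (PCP) above VLAN_PRIO_SHIFT (13),
 * mirroring the VLAN TCI layout. Hypothetical example values:
 */
#if 0
	/* vlan_id = 100 (0x064), vlan_priority = 5 */
	rule->tuples.vlan_tag1 = 100 | (5 << VLAN_PRIO_SHIFT);	/* 0xa064 */
#endif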
7232 
7233 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7234 				 struct hclge_fd_rule *rule)
7235 {
7236 	u16 addr_type = 0;
7237 
7238 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7239 		struct flow_match_control match;
7240 
7241 		flow_rule_match_control(flow, &match);
7242 		addr_type = match.key->addr_type;
7243 	}
7244 
7245 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7246 		struct flow_match_ipv4_addrs match;
7247 
7248 		flow_rule_match_ipv4_addrs(flow, &match);
7249 		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7250 		rule->tuples_mask.src_ip[IPV4_INDEX] =
7251 						be32_to_cpu(match.mask->src);
7252 		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7253 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
7254 						be32_to_cpu(match.mask->dst);
7255 	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7256 		struct flow_match_ipv6_addrs match;
7257 
7258 		flow_rule_match_ipv6_addrs(flow, &match);
7259 		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7260 				  IPV6_SIZE);
7261 		be32_to_cpu_array(rule->tuples_mask.src_ip,
7262 				  match.mask->src.s6_addr32, IPV6_SIZE);
7263 		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7264 				  IPV6_SIZE);
7265 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
7266 				  match.mask->dst.s6_addr32, IPV6_SIZE);
7267 	} else {
7268 		rule->unused_tuple |= BIT(INNER_SRC_IP);
7269 		rule->unused_tuple |= BIT(INNER_DST_IP);
7270 	}
7271 }
7272 
7273 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7274 				   struct hclge_fd_rule *rule)
7275 {
7276 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7277 		struct flow_match_ports match;
7278 
7279 		flow_rule_match_ports(flow, &match);
7280 
7281 		rule->tuples.src_port = be16_to_cpu(match.key->src);
7282 		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7283 		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7284 		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7285 	} else {
7286 		rule->unused_tuple |= BIT(INNER_SRC_PORT);
7287 		rule->unused_tuple |= BIT(INNER_DST_PORT);
7288 	}
7289 }
7290 
7291 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7292 				  struct flow_cls_offload *cls_flower,
7293 				  struct hclge_fd_rule *rule)
7294 {
7295 	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7296 	struct flow_dissector *dissector = flow->match.dissector;
7297 
7298 	if (dissector->used_keys &
7299 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7300 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
7301 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7302 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
7303 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7304 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7305 	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7306 		dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7307 			dissector->used_keys);
7308 		return -EOPNOTSUPP;
7309 	}
7310 
7311 	hclge_get_cls_key_basic(flow, rule);
7312 	hclge_get_cls_key_mac(flow, rule);
7313 	hclge_get_cls_key_vlan(flow, rule);
7314 	hclge_get_cls_key_ip(flow, rule);
7315 	hclge_get_cls_key_port(flow, rule);
7316 
7317 	return 0;
7318 }
7319 
7320 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7321 				  struct flow_cls_offload *cls_flower, int tc)
7322 {
7323 	u32 prio = cls_flower->common.prio;
7324 
7325 	if (tc < 0 || tc > hdev->tc_max) {
7326 		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7327 		return -EINVAL;
7328 	}
7329 
7330 	if (prio == 0 ||
7331 	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7332 		dev_err(&hdev->pdev->dev,
7333 			"prio %u should be in range[1, %u]\n",
7334 			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7335 		return -EINVAL;
7336 	}
7337 
7338 	if (test_bit(prio - 1, hdev->fd_bmap)) {
7339 		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7340 		return -EINVAL;
7341 	}
7342 	return 0;
7343 }
7344 
7345 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7346 				struct flow_cls_offload *cls_flower,
7347 				int tc)
7348 {
7349 	struct hclge_vport *vport = hclge_get_vport(handle);
7350 	struct hclge_dev *hdev = vport->back;
7351 	struct hclge_fd_rule *rule;
7352 	int ret;
7353 
7354 	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7355 	if (ret) {
7356 		dev_err(&hdev->pdev->dev,
7357 			"failed to check cls flower params, ret = %d\n", ret);
7358 		return ret;
7359 	}
7360 
7361 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7362 	if (!rule)
7363 		return -ENOMEM;
7364 
7365 	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7366 	if (ret) {
7367 		kfree(rule);
7368 		return ret;
7369 	}
7370 
7371 	rule->action = HCLGE_FD_ACTION_SELECT_TC;
7372 	rule->cls_flower.tc = tc;
7373 	rule->location = cls_flower->common.prio - 1;
7374 	rule->vf_id = 0;
7375 	rule->cls_flower.cookie = cls_flower->cookie;
7376 	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7377 
7378 	ret = hclge_add_fd_entry_common(hdev, rule);
7379 	if (ret)
7380 		kfree(rule);
7381 
7382 	return ret;
7383 }
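/* Note (descriptive, not part of the driver): a tc flower rule with
 * priority N is stored at flow director stage-1 location N - 1, e.g.
 * prio 1 -> location 0. hclge_check_cls_flower() therefore requires
 * prio to be in [1, rule_num] and rejects a prio whose location bit is
 * already set in fd_bmap, so each priority owns exactly one TCAM entry.
 */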
7384 
7385 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7386 						   unsigned long cookie)
7387 {
7388 	struct hclge_fd_rule *rule;
7389 	struct hlist_node *node;
7390 
7391 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7392 		if (rule->cls_flower.cookie == cookie)
7393 			return rule;
7394 	}
7395 
7396 	return NULL;
7397 }
7398 
7399 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7400 				struct flow_cls_offload *cls_flower)
7401 {
7402 	struct hclge_vport *vport = hclge_get_vport(handle);
7403 	struct hclge_dev *hdev = vport->back;
7404 	struct hclge_fd_rule *rule;
7405 	int ret;
7406 
7407 	spin_lock_bh(&hdev->fd_rule_lock);
7408 
7409 	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7410 	if (!rule) {
7411 		spin_unlock_bh(&hdev->fd_rule_lock);
7412 		return -EINVAL;
7413 	}
7414 
7415 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7416 				   NULL, false);
7417 	if (ret) {
7418 		spin_unlock_bh(&hdev->fd_rule_lock);
7419 		return ret;
7420 	}
7421 
7422 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7423 	spin_unlock_bh(&hdev->fd_rule_lock);
7424 
7425 	return 0;
7426 }
7427 
7428 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7429 {
7430 	struct hclge_fd_rule *rule;
7431 	struct hlist_node *node;
7432 	int ret = 0;
7433 
7434 	if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7435 		return;
7436 
7437 	spin_lock_bh(&hdev->fd_rule_lock);
7438 
7439 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7440 		switch (rule->state) {
7441 		case HCLGE_FD_TO_ADD:
7442 			ret = hclge_fd_config_rule(hdev, rule);
7443 			if (ret)
7444 				goto out;
7445 			rule->state = HCLGE_FD_ACTIVE;
7446 			break;
7447 		case HCLGE_FD_TO_DEL:
7448 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7449 						   rule->location, NULL, false);
7450 			if (ret)
7451 				goto out;
7452 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7453 			hclge_fd_free_node(hdev, rule);
7454 			break;
7455 		default:
7456 			break;
7457 		}
7458 	}
7459 
7460 out:
7461 	if (ret)
7462 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7463 
7464 	spin_unlock_bh(&hdev->fd_rule_lock);
7465 }
7466 
7467 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7468 {
7469 	if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7470 		bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7471 
7472 		hclge_clear_fd_rules_in_list(hdev, clear_list);
7473 	}
7474 
7475 	hclge_sync_fd_user_def_cfg(hdev, false);
7476 
7477 	hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7478 }
7479 
7480 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7481 {
7482 	struct hclge_vport *vport = hclge_get_vport(handle);
7483 	struct hclge_dev *hdev = vport->back;
7484 
7485 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7486 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7487 }
7488 
7489 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7490 {
7491 	struct hclge_vport *vport = hclge_get_vport(handle);
7492 	struct hclge_dev *hdev = vport->back;
7493 
7494 	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7495 }
7496 
7497 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7498 {
7499 	struct hclge_vport *vport = hclge_get_vport(handle);
7500 	struct hclge_dev *hdev = vport->back;
7501 
7502 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7503 }
7504 
7505 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7506 {
7507 	struct hclge_vport *vport = hclge_get_vport(handle);
7508 	struct hclge_dev *hdev = vport->back;
7509 
7510 	return hdev->rst_stats.hw_reset_done_cnt;
7511 }
7512 
7513 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7514 {
7515 	struct hclge_vport *vport = hclge_get_vport(handle);
7516 	struct hclge_dev *hdev = vport->back;
7517 
7518 	hdev->fd_en = enable;
7519 
7520 	if (!enable)
7521 		set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7522 	else
7523 		hclge_restore_fd_entries(handle);
7524 
7525 	hclge_task_schedule(hdev, 0);
7526 }
7527 
7528 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7529 {
7530 	struct hclge_desc desc;
7531 	struct hclge_config_mac_mode_cmd *req =
7532 		(struct hclge_config_mac_mode_cmd *)desc.data;
7533 	u32 loop_en = 0;
7534 	int ret;
7535 
7536 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7537 
7538 	if (enable) {
7539 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7540 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7541 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7542 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7543 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7544 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7545 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7546 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7547 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7548 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7549 	}
7550 
7551 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7552 
7553 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7554 	if (ret)
7555 		dev_err(&hdev->pdev->dev,
7556 			"mac enable fail, ret =%d.\n", ret);
7557 }
7558 
7559 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7560 				     u8 switch_param, u8 param_mask)
7561 {
7562 	struct hclge_mac_vlan_switch_cmd *req;
7563 	struct hclge_desc desc;
7564 	u32 func_id;
7565 	int ret;
7566 
7567 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7568 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7569 
7570 	/* read current config parameter */
7571 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7572 				   true);
7573 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7574 	req->func_id = cpu_to_le32(func_id);
7575 
7576 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7577 	if (ret) {
7578 		dev_err(&hdev->pdev->dev,
7579 			"read mac vlan switch parameter fail, ret = %d\n", ret);
7580 		return ret;
7581 	}
7582 
7583 	/* modify and write new config parameter */
7584 	hclge_cmd_reuse_desc(&desc, false);
7585 	req->switch_param = (req->switch_param & param_mask) | switch_param;
7586 	req->param_mask = param_mask;
7587 
7588 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7589 	if (ret)
7590 		dev_err(&hdev->pdev->dev,
7591 			"set mac vlan switch parameter fail, ret = %d\n", ret);
7592 	return ret;
7593 }
7594 
7595 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7596 				       int link_ret)
7597 {
7598 #define HCLGE_PHY_LINK_STATUS_NUM  200
7599 
7600 	struct phy_device *phydev = hdev->hw.mac.phydev;
7601 	int i = 0;
7602 	int ret;
7603 
7604 	do {
7605 		ret = phy_read_status(phydev);
7606 		if (ret) {
7607 			dev_err(&hdev->pdev->dev,
7608 				"phy update link status fail, ret = %d\n", ret);
7609 			return;
7610 		}
7611 
7612 		if (phydev->link == link_ret)
7613 			break;
7614 
7615 		msleep(HCLGE_LINK_STATUS_MS);
7616 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7617 }
7618 
7619 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7620 {
7621 #define HCLGE_MAC_LINK_STATUS_NUM  100
7622 
7623 	int link_status;
7624 	int i = 0;
7625 	int ret;
7626 
7627 	do {
7628 		ret = hclge_get_mac_link_status(hdev, &link_status);
7629 		if (ret)
7630 			return ret;
7631 		if (link_status == link_ret)
7632 			return 0;
7633 
7634 		msleep(HCLGE_LINK_STATUS_MS);
7635 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7636 	return -EBUSY;
7637 }
7638 
7639 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7640 					  bool is_phy)
7641 {
7642 	int link_ret;
7643 
7644 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7645 
7646 	if (is_phy)
7647 		hclge_phy_link_status_wait(hdev, link_ret);
7648 
7649 	return hclge_mac_link_status_wait(hdev, link_ret);
7650 }
7651 
7652 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7653 {
7654 	struct hclge_config_mac_mode_cmd *req;
7655 	struct hclge_desc desc;
7656 	u32 loop_en;
7657 	int ret;
7658 
7659 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7660 	/* 1 Read out the MAC mode config at first */
7661 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7662 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7663 	if (ret) {
7664 		dev_err(&hdev->pdev->dev,
7665 			"mac loopback get fail, ret =%d.\n", ret);
7666 		return ret;
7667 	}
7668 
7669 	/* 2 Then setup the loopback flag */
7670 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7671 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7672 
7673 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7674 
7675 	/* 3 Config mac work mode with the loopback flag
7676 	 * and its original configuration parameters
7677 	 */
7678 	hclge_cmd_reuse_desc(&desc, false);
7679 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7680 	if (ret)
7681 		dev_err(&hdev->pdev->dev,
7682 			"mac loopback set fail, ret =%d.\n", ret);
7683 	return ret;
7684 }
7685 
7686 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7687 				     enum hnae3_loop loop_mode)
7688 {
7689 #define HCLGE_COMMON_LB_RETRY_MS	10
7690 #define HCLGE_COMMON_LB_RETRY_NUM	100
7691 
7692 	struct hclge_common_lb_cmd *req;
7693 	struct hclge_desc desc;
7694 	int ret, i = 0;
7695 	u8 loop_mode_b;
7696 
7697 	req = (struct hclge_common_lb_cmd *)desc.data;
7698 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7699 
7700 	switch (loop_mode) {
7701 	case HNAE3_LOOP_SERIAL_SERDES:
7702 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7703 		break;
7704 	case HNAE3_LOOP_PARALLEL_SERDES:
7705 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7706 		break;
7707 	case HNAE3_LOOP_PHY:
7708 		loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7709 		break;
7710 	default:
7711 		dev_err(&hdev->pdev->dev,
7712 			"unsupported common loopback mode %d\n", loop_mode);
7713 		return -ENOTSUPP;
7714 	}
7715 
7716 	if (en) {
7717 		req->enable = loop_mode_b;
7718 		req->mask = loop_mode_b;
7719 	} else {
7720 		req->mask = loop_mode_b;
7721 	}
7722 
7723 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7724 	if (ret) {
7725 		dev_err(&hdev->pdev->dev,
7726 			"common loopback set fail, ret = %d\n", ret);
7727 		return ret;
7728 	}
7729 
7730 	do {
7731 		msleep(HCLGE_COMMON_LB_RETRY_MS);
7732 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7733 					   true);
7734 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7735 		if (ret) {
7736 			dev_err(&hdev->pdev->dev,
7737 				"common loopback get, ret = %d\n", ret);
7738 			return ret;
7739 		}
7740 	} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7741 		 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7742 
7743 	if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7744 		dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7745 		return -EBUSY;
7746 	} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7747 		dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7748 		return -EIO;
7749 	}
7750 	return ret;
7751 }
7752 
7753 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7754 				     enum hnae3_loop loop_mode)
7755 {
7756 	int ret;
7757 
7758 	ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7759 	if (ret)
7760 		return ret;
7761 
7762 	hclge_cfg_mac_mode(hdev, en);
7763 
7764 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7765 	if (ret)
7766 		dev_err(&hdev->pdev->dev,
7767 			"serdes loopback config mac mode timeout\n");
7768 
7769 	return ret;
7770 }
7771 
7772 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7773 				     struct phy_device *phydev)
7774 {
7775 	int ret;
7776 
7777 	if (!phydev->suspended) {
7778 		ret = phy_suspend(phydev);
7779 		if (ret)
7780 			return ret;
7781 	}
7782 
7783 	ret = phy_resume(phydev);
7784 	if (ret)
7785 		return ret;
7786 
7787 	return phy_loopback(phydev, true);
7788 }
7789 
7790 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7791 				      struct phy_device *phydev)
7792 {
7793 	int ret;
7794 
7795 	ret = phy_loopback(phydev, false);
7796 	if (ret)
7797 		return ret;
7798 
7799 	return phy_suspend(phydev);
7800 }
7801 
7802 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7803 {
7804 	struct phy_device *phydev = hdev->hw.mac.phydev;
7805 	int ret;
7806 
7807 	if (!phydev) {
7808 		if (hnae3_dev_phy_imp_supported(hdev))
7809 			return hclge_set_common_loopback(hdev, en,
7810 							 HNAE3_LOOP_PHY);
7811 		return -ENOTSUPP;
7812 	}
7813 
7814 	if (en)
7815 		ret = hclge_enable_phy_loopback(hdev, phydev);
7816 	else
7817 		ret = hclge_disable_phy_loopback(hdev, phydev);
7818 	if (ret) {
7819 		dev_err(&hdev->pdev->dev,
7820 			"set phy loopback fail, ret = %d\n", ret);
7821 		return ret;
7822 	}
7823 
7824 	hclge_cfg_mac_mode(hdev, en);
7825 
7826 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7827 	if (ret)
7828 		dev_err(&hdev->pdev->dev,
7829 			"phy loopback config mac mode timeout\n");
7830 
7831 	return ret;
7832 }
7833 
7834 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
7835 			    int stream_id, bool enable)
7836 {
7837 	struct hclge_desc desc;
7838 	struct hclge_cfg_com_tqp_queue_cmd *req =
7839 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7840 	int ret;
7841 
7842 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7843 	req->tqp_id = cpu_to_le16(tqp_id);
7844 	req->stream_id = cpu_to_le16(stream_id);
7845 	if (enable)
7846 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7847 
7848 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7849 	if (ret)
7850 		dev_err(&hdev->pdev->dev,
7851 			"Tqp enable fail, status =%d.\n", ret);
7852 	return ret;
7853 }
7854 
7855 static int hclge_set_loopback(struct hnae3_handle *handle,
7856 			      enum hnae3_loop loop_mode, bool en)
7857 {
7858 	struct hclge_vport *vport = hclge_get_vport(handle);
7859 	struct hnae3_knic_private_info *kinfo;
7860 	struct hclge_dev *hdev = vport->back;
7861 	int i, ret;
7862 
7863 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7864 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7865 	 * the same, the packets are looped back in the SSU. If SSU loopback
7866 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7867 	 */
7868 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7869 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7870 
7871 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7872 						HCLGE_SWITCH_ALW_LPBK_MASK);
7873 		if (ret)
7874 			return ret;
7875 	}
7876 
7877 	switch (loop_mode) {
7878 	case HNAE3_LOOP_APP:
7879 		ret = hclge_set_app_loopback(hdev, en);
7880 		break;
7881 	case HNAE3_LOOP_SERIAL_SERDES:
7882 	case HNAE3_LOOP_PARALLEL_SERDES:
7883 		ret = hclge_set_common_loopback(hdev, en, loop_mode);
7884 		break;
7885 	case HNAE3_LOOP_PHY:
7886 		ret = hclge_set_phy_loopback(hdev, en);
7887 		break;
7888 	default:
7889 		ret = -ENOTSUPP;
7890 		dev_err(&hdev->pdev->dev,
7891 			"loop_mode %d is not supported\n", loop_mode);
7892 		break;
7893 	}
7894 
7895 	if (ret)
7896 		return ret;
7897 
7898 	kinfo = &vport->nic.kinfo;
7899 	for (i = 0; i < kinfo->num_tqps; i++) {
7900 		ret = hclge_tqp_enable(hdev, i, 0, en);
7901 		if (ret)
7902 			return ret;
7903 	}
7904 
7905 	return 0;
7906 }
7907 
7908 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7909 {
7910 	int ret;
7911 
7912 	ret = hclge_set_app_loopback(hdev, false);
7913 	if (ret)
7914 		return ret;
7915 
7916 	ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7917 	if (ret)
7918 		return ret;
7919 
7920 	return hclge_cfg_common_loopback(hdev, false,
7921 					 HNAE3_LOOP_PARALLEL_SERDES);
7922 }
7923 
7924 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
7925 {
7926 	struct hclge_vport *vport = hclge_get_vport(handle);
7927 	struct hnae3_knic_private_info *kinfo;
7928 	struct hnae3_queue *queue;
7929 	struct hclge_tqp *tqp;
7930 	int i;
7931 
7932 	kinfo = &vport->nic.kinfo;
7933 	for (i = 0; i < kinfo->num_tqps; i++) {
7934 		queue = handle->kinfo.tqp[i];
7935 		tqp = container_of(queue, struct hclge_tqp, q);
7936 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
7937 	}
7938 }
7939 
7940 static void hclge_flush_link_update(struct hclge_dev *hdev)
7941 {
7942 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
7943 
7944 	unsigned long last = hdev->serv_processed_cnt;
7945 	int i = 0;
7946 
7947 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7948 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7949 	       last == hdev->serv_processed_cnt)
7950 		usleep_range(1, 1);
7951 }
7952 
7953 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7954 {
7955 	struct hclge_vport *vport = hclge_get_vport(handle);
7956 	struct hclge_dev *hdev = vport->back;
7957 
7958 	if (enable) {
7959 		hclge_task_schedule(hdev, 0);
7960 	} else {
7961 		/* Set the DOWN flag here to disable link updating */
7962 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
7963 
7964 		/* flush memory to make sure DOWN is seen by service task */
7965 		smp_mb__before_atomic();
7966 		hclge_flush_link_update(hdev);
7967 	}
7968 }
7969 
7970 static int hclge_ae_start(struct hnae3_handle *handle)
7971 {
7972 	struct hclge_vport *vport = hclge_get_vport(handle);
7973 	struct hclge_dev *hdev = vport->back;
7974 
7975 	/* mac enable */
7976 	hclge_cfg_mac_mode(hdev, true);
7977 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7978 	hdev->hw.mac.link = 0;
7979 
7980 	/* reset tqp stats */
7981 	hclge_reset_tqp_stats(handle);
7982 
7983 	hclge_mac_start_phy(hdev);
7984 
7985 	return 0;
7986 }
7987 
7988 static void hclge_ae_stop(struct hnae3_handle *handle)
7989 {
7990 	struct hclge_vport *vport = hclge_get_vport(handle);
7991 	struct hclge_dev *hdev = vport->back;
7992 	int i;
7993 
7994 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7995 	spin_lock_bh(&hdev->fd_rule_lock);
7996 	hclge_clear_arfs_rules(hdev);
7997 	spin_unlock_bh(&hdev->fd_rule_lock);
7998 
7999 	/* If it is not a PF reset, the firmware will disable the MAC,
8000 	 * so it only needs to stop the PHY here.
8001 	 */
8002 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8003 	    hdev->reset_type != HNAE3_FUNC_RESET) {
8004 		hclge_mac_stop_phy(hdev);
8005 		hclge_update_link_status(hdev);
8006 		return;
8007 	}
8008 
8009 	for (i = 0; i < handle->kinfo.num_tqps; i++)
8010 		hclge_reset_tqp(handle, i);
8011 
8012 	hclge_config_mac_tnl_int(hdev, false);
8013 
8014 	/* Mac disable */
8015 	hclge_cfg_mac_mode(hdev, false);
8016 
8017 	hclge_mac_stop_phy(hdev);
8018 
8019 	/* reset tqp stats */
8020 	hclge_reset_tqp_stats(handle);
8021 	hclge_update_link_status(hdev);
8022 }
8023 
8024 int hclge_vport_start(struct hclge_vport *vport)
8025 {
8026 	struct hclge_dev *hdev = vport->back;
8027 
8028 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8029 	vport->last_active_jiffies = jiffies;
8030 
8031 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8032 		if (vport->vport_id) {
8033 			hclge_restore_mac_table_common(vport);
8034 			hclge_restore_vport_vlan_table(vport);
8035 		} else {
8036 			hclge_restore_hw_table(hdev);
8037 		}
8038 	}
8039 
8040 	clear_bit(vport->vport_id, hdev->vport_config_block);
8041 
8042 	return 0;
8043 }
8044 
8045 void hclge_vport_stop(struct hclge_vport *vport)
8046 {
8047 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8048 }
8049 
8050 static int hclge_client_start(struct hnae3_handle *handle)
8051 {
8052 	struct hclge_vport *vport = hclge_get_vport(handle);
8053 
8054 	return hclge_vport_start(vport);
8055 }
8056 
8057 static void hclge_client_stop(struct hnae3_handle *handle)
8058 {
8059 	struct hclge_vport *vport = hclge_get_vport(handle);
8060 
8061 	hclge_vport_stop(vport);
8062 }
8063 
8064 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8065 					 u16 cmdq_resp, u8  resp_code,
8066 					 enum hclge_mac_vlan_tbl_opcode op)
8067 {
8068 	struct hclge_dev *hdev = vport->back;
8069 
8070 	if (cmdq_resp) {
8071 		dev_err(&hdev->pdev->dev,
8072 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8073 			cmdq_resp);
8074 		return -EIO;
8075 	}
8076 
8077 	if (op == HCLGE_MAC_VLAN_ADD) {
8078 		if (!resp_code || resp_code == 1)
8079 			return 0;
8080 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8081 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
8082 			return -ENOSPC;
8083 
8084 		dev_err(&hdev->pdev->dev,
8085 			"add mac addr failed for undefined, code=%u.\n",
8086 			resp_code);
8087 		return -EIO;
8088 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
8089 		if (!resp_code) {
8090 			return 0;
8091 		} else if (resp_code == 1) {
8092 			dev_dbg(&hdev->pdev->dev,
8093 				"remove mac addr failed for miss.\n");
8094 			return -ENOENT;
8095 		}
8096 
8097 		dev_err(&hdev->pdev->dev,
8098 			"remove mac addr failed for undefined, code=%u.\n",
8099 			resp_code);
8100 		return -EIO;
8101 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
8102 		if (!resp_code) {
8103 			return 0;
8104 		} else if (resp_code == 1) {
8105 			dev_dbg(&hdev->pdev->dev,
8106 				"lookup mac addr failed for miss.\n");
8107 			return -ENOENT;
8108 		}
8109 
8110 		dev_err(&hdev->pdev->dev,
8111 			"lookup mac addr failed for undefined, code=%u.\n",
8112 			resp_code);
8113 		return -EIO;
8114 	}
8115 
8116 	dev_err(&hdev->pdev->dev,
8117 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8118 
8119 	return -EINVAL;
8120 }
8121 
8122 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8123 {
8124 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8125 
8126 	unsigned int word_num;
8127 	unsigned int bit_num;
8128 
8129 	if (vfid > 255 || vfid < 0)
8130 		return -EIO;
8131 
8132 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8133 		word_num = vfid / 32;
8134 		bit_num  = vfid % 32;
8135 		if (clr)
8136 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8137 		else
8138 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8139 	} else {
8140 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8141 		bit_num  = vfid % 32;
8142 		if (clr)
8143 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8144 		else
8145 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8146 	}
8147 
8148 	return 0;
8149 }
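/* Worked example (descriptive, not part of the driver): vfids 0..191 are
 * mapped into desc[1] (6 words * 32 bits = 192 bits) and vfids 192..255
 * into desc[2]. For vfid = 200: word_num = (200 - 192) / 32 = 0 and
 * bit_num = 200 % 32 = 8, so bit 8 of desc[2].data[0] is set (add) or
 * cleared (clr).
 */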
8150 
8151 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8152 {
8153 #define HCLGE_DESC_NUMBER 3
8154 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8155 	int i, j;
8156 
8157 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8158 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8159 			if (desc[i].data[j])
8160 				return false;
8161 
8162 	return true;
8163 }
8164 
8165 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8166 				   const u8 *addr, bool is_mc)
8167 {
8168 	const unsigned char *mac_addr = addr;
8169 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8170 		       (mac_addr[0]) | (mac_addr[1] << 8);
8171 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
8172 
8173 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8174 	if (is_mc) {
8175 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8176 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8177 	}
8178 
8179 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8180 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8181 }
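/* Worked example (descriptive, not part of the driver): for the
 * hypothetical address 00:11:22:33:44:55, hclge_prepare_mac_addr() packs
 * high_val = 0x22 << 16 | 0x33 << 24 | 0x00 | 0x11 << 8 = 0x33221100 and
 * low_val = 0x44 | 0x55 << 8 = 0x5544 before the cpu_to_le32()/
 * cpu_to_le16() conversion, i.e. MAC bytes 0..3 land in mac_addr_hi32
 * with byte 0 as the least significant byte, and bytes 4..5 in
 * mac_addr_lo16.
 */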
8182 
8183 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8184 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
8185 {
8186 	struct hclge_dev *hdev = vport->back;
8187 	struct hclge_desc desc;
8188 	u8 resp_code;
8189 	u16 retval;
8190 	int ret;
8191 
8192 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8193 
8194 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8195 
8196 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8197 	if (ret) {
8198 		dev_err(&hdev->pdev->dev,
8199 			"del mac addr failed for cmd_send, ret =%d.\n",
8200 			ret);
8201 		return ret;
8202 	}
8203 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8204 	retval = le16_to_cpu(desc.retval);
8205 
8206 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8207 					     HCLGE_MAC_VLAN_REMOVE);
8208 }
8209 
8210 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8211 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
8212 				     struct hclge_desc *desc,
8213 				     bool is_mc)
8214 {
8215 	struct hclge_dev *hdev = vport->back;
8216 	u8 resp_code;
8217 	u16 retval;
8218 	int ret;
8219 
8220 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8221 	if (is_mc) {
8222 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8223 		memcpy(desc[0].data,
8224 		       req,
8225 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8226 		hclge_cmd_setup_basic_desc(&desc[1],
8227 					   HCLGE_OPC_MAC_VLAN_ADD,
8228 					   true);
8229 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8230 		hclge_cmd_setup_basic_desc(&desc[2],
8231 					   HCLGE_OPC_MAC_VLAN_ADD,
8232 					   true);
8233 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
8234 	} else {
8235 		memcpy(desc[0].data,
8236 		       req,
8237 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8238 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
8239 	}
8240 	if (ret) {
8241 		dev_err(&hdev->pdev->dev,
8242 			"lookup mac addr failed for cmd_send, ret =%d.\n",
8243 			ret);
8244 		return ret;
8245 	}
8246 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8247 	retval = le16_to_cpu(desc[0].retval);
8248 
8249 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8250 					     HCLGE_MAC_VLAN_LKUP);
8251 }
8252 
8253 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8254 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
8255 				  struct hclge_desc *mc_desc)
8256 {
8257 	struct hclge_dev *hdev = vport->back;
8258 	int cfg_status;
8259 	u8 resp_code;
8260 	u16 retval;
8261 	int ret;
8262 
8263 	if (!mc_desc) {
8264 		struct hclge_desc desc;
8265 
8266 		hclge_cmd_setup_basic_desc(&desc,
8267 					   HCLGE_OPC_MAC_VLAN_ADD,
8268 					   false);
8269 		memcpy(desc.data, req,
8270 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8271 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8272 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8273 		retval = le16_to_cpu(desc.retval);
8274 
8275 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8276 							   resp_code,
8277 							   HCLGE_MAC_VLAN_ADD);
8278 	} else {
8279 		hclge_cmd_reuse_desc(&mc_desc[0], false);
8280 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8281 		hclge_cmd_reuse_desc(&mc_desc[1], false);
8282 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8283 		hclge_cmd_reuse_desc(&mc_desc[2], false);
8284 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8285 		memcpy(mc_desc[0].data, req,
8286 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8287 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8288 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8289 		retval = le16_to_cpu(mc_desc[0].retval);
8290 
8291 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8292 							   resp_code,
8293 							   HCLGE_MAC_VLAN_ADD);
8294 	}
8295 
8296 	if (ret) {
8297 		dev_err(&hdev->pdev->dev,
8298 			"add mac addr failed for cmd_send, ret =%d.\n",
8299 			ret);
8300 		return ret;
8301 	}
8302 
8303 	return cfg_status;
8304 }
8305 
8306 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8307 			       u16 *allocated_size)
8308 {
8309 	struct hclge_umv_spc_alc_cmd *req;
8310 	struct hclge_desc desc;
8311 	int ret;
8312 
8313 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8314 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8315 
8316 	req->space_size = cpu_to_le32(space_size);
8317 
8318 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8319 	if (ret) {
8320 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8321 			ret);
8322 		return ret;
8323 	}
8324 
8325 	*allocated_size = le32_to_cpu(desc.data[1]);
8326 
8327 	return 0;
8328 }
8329 
8330 static int hclge_init_umv_space(struct hclge_dev *hdev)
8331 {
8332 	u16 allocated_size = 0;
8333 	int ret;
8334 
8335 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8336 	if (ret)
8337 		return ret;
8338 
8339 	if (allocated_size < hdev->wanted_umv_size)
8340 		dev_warn(&hdev->pdev->dev,
8341 			 "failed to alloc umv space, want %u, get %u\n",
8342 			 hdev->wanted_umv_size, allocated_size);
8343 
8344 	hdev->max_umv_size = allocated_size;
8345 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8346 	hdev->share_umv_size = hdev->priv_umv_size +
8347 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8348 
8349 	return 0;
8350 }
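/* Worked example (descriptive, not part of the driver) with hypothetical
 * numbers: if the firmware grants allocated_size = 300 UMV entries and
 * num_alloc_vport = 7, the space is split into 7 + 1 = 8 shares: a
 * private quota of 300 / 8 = 37 entries per vport, plus a shared pool of
 * 37 + 300 % 8 = 41 entries, which is drawn from only after a vport has
 * used up its private quota.
 */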
8351 
8352 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8353 {
8354 	struct hclge_vport *vport;
8355 	int i;
8356 
8357 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8358 		vport = &hdev->vport[i];
8359 		vport->used_umv_num = 0;
8360 	}
8361 
8362 	mutex_lock(&hdev->vport_lock);
8363 	hdev->share_umv_size = hdev->priv_umv_size +
8364 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8365 	mutex_unlock(&hdev->vport_lock);
8366 }
8367 
8368 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8369 {
8370 	struct hclge_dev *hdev = vport->back;
8371 	bool is_full;
8372 
8373 	if (need_lock)
8374 		mutex_lock(&hdev->vport_lock);
8375 
8376 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8377 		   hdev->share_umv_size == 0);
8378 
8379 	if (need_lock)
8380 		mutex_unlock(&hdev->vport_lock);
8381 
8382 	return is_full;
8383 }
8384 
8385 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8386 {
8387 	struct hclge_dev *hdev = vport->back;
8388 
8389 	if (is_free) {
8390 		if (vport->used_umv_num > hdev->priv_umv_size)
8391 			hdev->share_umv_size++;
8392 
8393 		if (vport->used_umv_num > 0)
8394 			vport->used_umv_num--;
8395 	} else {
8396 		if (vport->used_umv_num >= hdev->priv_umv_size &&
8397 		    hdev->share_umv_size > 0)
8398 			hdev->share_umv_size--;
8399 		vport->used_umv_num++;
8400 	}
8401 }
8402 
8403 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8404 						  const u8 *mac_addr)
8405 {
8406 	struct hclge_mac_node *mac_node, *tmp;
8407 
8408 	list_for_each_entry_safe(mac_node, tmp, list, node)
8409 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8410 			return mac_node;
8411 
8412 	return NULL;
8413 }
8414 
8415 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8416 				  enum HCLGE_MAC_NODE_STATE state)
8417 {
8418 	switch (state) {
8419 	/* from set_rx_mode or tmp_add_list */
8420 	case HCLGE_MAC_TO_ADD:
8421 		if (mac_node->state == HCLGE_MAC_TO_DEL)
8422 			mac_node->state = HCLGE_MAC_ACTIVE;
8423 		break;
8424 	/* only from set_rx_mode */
8425 	case HCLGE_MAC_TO_DEL:
8426 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
8427 			list_del(&mac_node->node);
8428 			kfree(mac_node);
8429 		} else {
8430 			mac_node->state = HCLGE_MAC_TO_DEL;
8431 		}
8432 		break;
8433 	/* only from tmp_add_list, where the mac_node->state won't be
8434 	 * ACTIVE.
8435 	 */
8436 	case HCLGE_MAC_ACTIVE:
8437 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8438 			mac_node->state = HCLGE_MAC_ACTIVE;
8439 
8440 		break;
8441 	}
8442 }
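/* Summary (descriptive, not part of the driver) of the transitions
 * handled above:
 *   TO_ADD request:  TO_DEL    -> ACTIVE  (cancel the pending delete)
 *   TO_DEL request:  TO_ADD    -> node freed (never written to hardware)
 *                    otherwise -> TO_DEL
 *   ACTIVE update:   TO_ADD    -> ACTIVE  (hardware write has completed)
 */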
8443 
8444 int hclge_update_mac_list(struct hclge_vport *vport,
8445 			  enum HCLGE_MAC_NODE_STATE state,
8446 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
8447 			  const unsigned char *addr)
8448 {
8449 	struct hclge_dev *hdev = vport->back;
8450 	struct hclge_mac_node *mac_node;
8451 	struct list_head *list;
8452 
8453 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8454 		&vport->uc_mac_list : &vport->mc_mac_list;
8455 
8456 	spin_lock_bh(&vport->mac_list_lock);
8457 
8458 	/* if the mac addr is already in the mac list, no need to add a new
8459 	 * one into it; just check the mac addr state and convert it to a
8460 	 * new state, remove it, or do nothing.
8461 	 */
8462 	mac_node = hclge_find_mac_node(list, addr);
8463 	if (mac_node) {
8464 		hclge_update_mac_node(mac_node, state);
8465 		spin_unlock_bh(&vport->mac_list_lock);
8466 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8467 		return 0;
8468 	}
8469 
8470 	/* if this address is never added, unnecessary to delete */
8471 	if (state == HCLGE_MAC_TO_DEL) {
8472 		spin_unlock_bh(&vport->mac_list_lock);
8473 		dev_err(&hdev->pdev->dev,
8474 			"failed to delete address %pM from mac list\n",
8475 			addr);
8476 		return -ENOENT;
8477 	}
8478 
8479 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8480 	if (!mac_node) {
8481 		spin_unlock_bh(&vport->mac_list_lock);
8482 		return -ENOMEM;
8483 	}
8484 
8485 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8486 
8487 	mac_node->state = state;
8488 	ether_addr_copy(mac_node->mac_addr, addr);
8489 	list_add_tail(&mac_node->node, list);
8490 
8491 	spin_unlock_bh(&vport->mac_list_lock);
8492 
8493 	return 0;
8494 }
8495 
8496 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8497 			     const unsigned char *addr)
8498 {
8499 	struct hclge_vport *vport = hclge_get_vport(handle);
8500 
8501 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8502 				     addr);
8503 }
8504 
8505 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8506 			     const unsigned char *addr)
8507 {
8508 	struct hclge_dev *hdev = vport->back;
8509 	struct hclge_mac_vlan_tbl_entry_cmd req;
8510 	struct hclge_desc desc;
8511 	u16 egress_port = 0;
8512 	int ret;
8513 
8514 	/* mac addr check */
8515 	if (is_zero_ether_addr(addr) ||
8516 	    is_broadcast_ether_addr(addr) ||
8517 	    is_multicast_ether_addr(addr)) {
8518 		dev_err(&hdev->pdev->dev,
8519 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8520 			 addr, is_zero_ether_addr(addr),
8521 			 is_broadcast_ether_addr(addr),
8522 			 is_multicast_ether_addr(addr));
8523 		return -EINVAL;
8524 	}
8525 
8526 	memset(&req, 0, sizeof(req));
8527 
8528 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8529 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8530 
8531 	req.egress_port = cpu_to_le16(egress_port);
8532 
8533 	hclge_prepare_mac_addr(&req, addr, false);
8534 
8535 	/* Look up the mac address in the mac_vlan table, and add
8536 	 * it if the entry does not exist. Duplicate unicast entries
8537 	 * are not allowed in the mac vlan table.
8538 	 */
8539 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8540 	if (ret == -ENOENT) {
8541 		mutex_lock(&hdev->vport_lock);
8542 		if (!hclge_is_umv_space_full(vport, false)) {
8543 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8544 			if (!ret)
8545 				hclge_update_umv_space(vport, false);
8546 			mutex_unlock(&hdev->vport_lock);
8547 			return ret;
8548 		}
8549 		mutex_unlock(&hdev->vport_lock);
8550 
8551 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8552 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8553 				hdev->priv_umv_size);
8554 
8555 		return -ENOSPC;
8556 	}
8557 
8558 	/* check if we just hit the duplicate */
8559 	if (!ret) {
8560 		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8561 			 vport->vport_id, addr);
8562 		return 0;
8563 	}
8564 
8565 	dev_err(&hdev->pdev->dev,
8566 		"PF failed to add unicast entry(%pM) in the MAC table\n",
8567 		addr);
8568 
8569 	return ret;
8570 }
8571 
8572 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8573 			    const unsigned char *addr)
8574 {
8575 	struct hclge_vport *vport = hclge_get_vport(handle);
8576 
8577 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8578 				     addr);
8579 }
8580 
8581 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8582 			    const unsigned char *addr)
8583 {
8584 	struct hclge_dev *hdev = vport->back;
8585 	struct hclge_mac_vlan_tbl_entry_cmd req;
8586 	int ret;
8587 
8588 	/* mac addr check */
8589 	if (is_zero_ether_addr(addr) ||
8590 	    is_broadcast_ether_addr(addr) ||
8591 	    is_multicast_ether_addr(addr)) {
8592 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8593 			addr);
8594 		return -EINVAL;
8595 	}
8596 
8597 	memset(&req, 0, sizeof(req));
8598 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8599 	hclge_prepare_mac_addr(&req, addr, false);
8600 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
8601 	if (!ret) {
8602 		mutex_lock(&hdev->vport_lock);
8603 		hclge_update_umv_space(vport, true);
8604 		mutex_unlock(&hdev->vport_lock);
8605 	} else if (ret == -ENOENT) {
8606 		ret = 0;
8607 	}
8608 
8609 	return ret;
8610 }
8611 
8612 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8613 			     const unsigned char *addr)
8614 {
8615 	struct hclge_vport *vport = hclge_get_vport(handle);
8616 
8617 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8618 				     addr);
8619 }
8620 
8621 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8622 			     const unsigned char *addr)
8623 {
8624 	struct hclge_dev *hdev = vport->back;
8625 	struct hclge_mac_vlan_tbl_entry_cmd req;
8626 	struct hclge_desc desc[3];
8627 	int status;
8628 
8629 	/* mac addr check */
8630 	if (!is_multicast_ether_addr(addr)) {
8631 		dev_err(&hdev->pdev->dev,
8632 			"Add mc mac err! invalid mac:%pM.\n",
8633 			 addr);
8634 		return -EINVAL;
8635 	}
8636 	memset(&req, 0, sizeof(req));
8637 	hclge_prepare_mac_addr(&req, addr, true);
8638 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8639 	if (status) {
8640 		/* This mac addr does not exist, add a new entry for it */
8641 		memset(desc[0].data, 0, sizeof(desc[0].data));
8642 		memset(desc[1].data, 0, sizeof(desc[0].data));
8643 		memset(desc[2].data, 0, sizeof(desc[0].data));
8644 	}
8645 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8646 	if (status)
8647 		return status;
8648 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8649 
8650 	/* if already overflow, not to print each time */
8651 	if (status == -ENOSPC &&
8652 	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8653 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8654 
8655 	return status;
8656 }
8657 
8658 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8659 			    const unsigned char *addr)
8660 {
8661 	struct hclge_vport *vport = hclge_get_vport(handle);
8662 
8663 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8664 				     addr);
8665 }
8666 
8667 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8668 			    const unsigned char *addr)
8669 {
8670 	struct hclge_dev *hdev = vport->back;
8671 	struct hclge_mac_vlan_tbl_entry_cmd req;
8672 	enum hclge_cmd_status status;
8673 	struct hclge_desc desc[3];
8674 
8675 	/* mac addr check */
8676 	if (!is_multicast_ether_addr(addr)) {
8677 		dev_dbg(&hdev->pdev->dev,
8678 			"Remove mc mac err! invalid mac:%pM.\n",
8679 			 addr);
8680 		return -EINVAL;
8681 	}
8682 
8683 	memset(&req, 0, sizeof(req));
8684 	hclge_prepare_mac_addr(&req, addr, true);
8685 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8686 	if (!status) {
8687 		/* This mac addr exist, remove this handle's VFID for it */
8688 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8689 		if (status)
8690 			return status;
8691 
8692 		if (hclge_is_all_function_id_zero(desc))
8693 			/* All the vfids are zero, so delete this entry */
8694 			status = hclge_remove_mac_vlan_tbl(vport, &req);
8695 		else
8696 			/* Not all the vfids are zero, update the vfid */
8697 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8698 
8699 	} else if (status == -ENOENT) {
8700 		status = 0;
8701 	}
8702 
8703 	return status;
8704 }
8705 
8706 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8707 				      struct list_head *list,
8708 				      int (*sync)(struct hclge_vport *,
8709 						  const unsigned char *))
8710 {
8711 	struct hclge_mac_node *mac_node, *tmp;
8712 	int ret;
8713 
8714 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8715 		ret = sync(vport, mac_node->mac_addr);
8716 		if (!ret) {
8717 			mac_node->state = HCLGE_MAC_ACTIVE;
8718 		} else {
8719 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8720 				&vport->state);
8721 			break;
8722 		}
8723 	}
8724 }
8725 
8726 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8727 					struct list_head *list,
8728 					int (*unsync)(struct hclge_vport *,
8729 						      const unsigned char *))
8730 {
8731 	struct hclge_mac_node *mac_node, *tmp;
8732 	int ret;
8733 
8734 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8735 		ret = unsync(vport, mac_node->mac_addr);
8736 		if (!ret || ret == -ENOENT) {
8737 			list_del(&mac_node->node);
8738 			kfree(mac_node);
8739 		} else {
8740 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8741 				&vport->state);
8742 			break;
8743 		}
8744 	}
8745 }
8746 
8747 static bool hclge_sync_from_add_list(struct list_head *add_list,
8748 				     struct list_head *mac_list)
8749 {
8750 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8751 	bool all_added = true;
8752 
8753 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8754 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8755 			all_added = false;
8756 
8757 		/* if the mac address from tmp_add_list is not in the
8758 		 * uc/mc_mac_list, a TO_DEL request was received during the
8759 		 * time window of adding the mac address into the mac table.
8760 		 * If the mac_node state is ACTIVE, change it to TO_DEL so it
8761 		 * will be removed next time; otherwise it must be TO_ADD,
8762 		 * which means this address hasn't been added into the mac
8763 		 * table yet, so just remove the mac node.
8764 		 */
8765 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8766 		if (new_node) {
8767 			hclge_update_mac_node(new_node, mac_node->state);
8768 			list_del(&mac_node->node);
8769 			kfree(mac_node);
8770 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8771 			mac_node->state = HCLGE_MAC_TO_DEL;
8772 			list_del(&mac_node->node);
8773 			list_add_tail(&mac_node->node, mac_list);
8774 		} else {
8775 			list_del(&mac_node->node);
8776 			kfree(mac_node);
8777 		}
8778 	}
8779 
8780 	return all_added;
8781 }
8782 
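/* Put the entries left on the temporary delete list back into the vport
 * mac list (caller holds mac_list_lock), reconciling them with any add
 * requests that arrived while the hardware was being updated.
 */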
8783 static void hclge_sync_from_del_list(struct list_head *del_list,
8784 				     struct list_head *mac_list)
8785 {
8786 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8787 
8788 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8789 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8790 		if (new_node) {
8791 			/* If the mac addr exists in the mac list, a new TO_ADD
8792 			 * request was received during the time window of
8793 			 * configuring the mac address. Since the mac node state
8794 			 * is TO_ADD and the address is already in the hardware
8795 			 * (because the delete failed), we just need to change
8796 			 * the mac node state to ACTIVE.
8797 			 */
8798 			new_node->state = HCLGE_MAC_ACTIVE;
8799 			list_del(&mac_node->node);
8800 			kfree(mac_node);
8801 		} else {
8802 			list_del(&mac_node->node);
8803 			list_add_tail(&mac_node->node, mac_list);
8804 		}
8805 	}
8806 }
8807 
8808 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8809 					enum HCLGE_MAC_ADDR_TYPE mac_type,
8810 					bool is_all_added)
8811 {
8812 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8813 		if (is_all_added)
8814 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8815 		else
8816 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8817 	} else {
8818 		if (is_all_added)
8819 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8820 		else
8821 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8822 	}
8823 }
8824 
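/* Synchronize one vport's unicast or multicast list with hardware in three
 * steps: snapshot the TO_ADD/TO_DEL nodes into temporary lists under
 * mac_list_lock, program the hardware without holding the lock (deletes
 * first, to free table space), then merge the results back under the lock
 * and update the overflow promiscuous flags.
 */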
8825 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8826 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
8827 {
8828 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8829 	struct list_head tmp_add_list, tmp_del_list;
8830 	struct list_head *list;
8831 	bool all_added;
8832 
8833 	INIT_LIST_HEAD(&tmp_add_list);
8834 	INIT_LIST_HEAD(&tmp_del_list);
8835 
8836 	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
8837 	 * we can add/delete these mac addrs outside the spin lock
8838 	 */
8839 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8840 		&vport->uc_mac_list : &vport->mc_mac_list;
8841 
8842 	spin_lock_bh(&vport->mac_list_lock);
8843 
8844 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8845 		switch (mac_node->state) {
8846 		case HCLGE_MAC_TO_DEL:
8847 			list_del(&mac_node->node);
8848 			list_add_tail(&mac_node->node, &tmp_del_list);
8849 			break;
8850 		case HCLGE_MAC_TO_ADD:
8851 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8852 			if (!new_node)
8853 				goto stop_traverse;
8854 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8855 			new_node->state = mac_node->state;
8856 			list_add_tail(&new_node->node, &tmp_add_list);
8857 			break;
8858 		default:
8859 			break;
8860 		}
8861 	}
8862 
8863 stop_traverse:
8864 	spin_unlock_bh(&vport->mac_list_lock);
8865 
8866 	/* delete first, in order to get max mac table space for adding */
8867 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8868 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8869 					    hclge_rm_uc_addr_common);
8870 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8871 					  hclge_add_uc_addr_common);
8872 	} else {
8873 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8874 					    hclge_rm_mc_addr_common);
8875 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8876 					  hclge_add_mc_addr_common);
8877 	}
8878 
8879 	/* if adding/deleting some mac addresses failed, move them back to
8880 	 * the mac_list and retry next time.
8881 	 */
8882 	spin_lock_bh(&vport->mac_list_lock);
8883 
8884 	hclge_sync_from_del_list(&tmp_del_list, list);
8885 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8886 
8887 	spin_unlock_bh(&vport->mac_list_lock);
8888 
8889 	hclge_update_overflow_flags(vport, mac_type, all_added);
8890 }
8891 
8892 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8893 {
8894 	struct hclge_dev *hdev = vport->back;
8895 
8896 	if (test_bit(vport->vport_id, hdev->vport_config_block))
8897 		return false;
8898 
8899 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8900 		return true;
8901 
8902 	return false;
8903 }
8904 
8905 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8906 {
8907 	int i;
8908 
8909 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8910 		struct hclge_vport *vport = &hdev->vport[i];
8911 
8912 		if (!hclge_need_sync_mac_table(vport))
8913 			continue;
8914 
8915 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8916 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8917 	}
8918 }
8919 
8920 static void hclge_build_del_list(struct list_head *list,
8921 				 bool is_del_list,
8922 				 struct list_head *tmp_del_list)
8923 {
8924 	struct hclge_mac_node *mac_cfg, *tmp;
8925 
8926 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8927 		switch (mac_cfg->state) {
8928 		case HCLGE_MAC_TO_DEL:
8929 		case HCLGE_MAC_ACTIVE:
8930 			list_del(&mac_cfg->node);
8931 			list_add_tail(&mac_cfg->node, tmp_del_list);
8932 			break;
8933 		case HCLGE_MAC_TO_ADD:
8934 			if (is_del_list) {
8935 				list_del(&mac_cfg->node);
8936 				kfree(mac_cfg);
8937 			}
8938 			break;
8939 		}
8940 	}
8941 }
8942 
8943 static void hclge_unsync_del_list(struct hclge_vport *vport,
8944 				  int (*unsync)(struct hclge_vport *vport,
8945 						const unsigned char *addr),
8946 				  bool is_del_list,
8947 				  struct list_head *tmp_del_list)
8948 {
8949 	struct hclge_mac_node *mac_cfg, *tmp;
8950 	int ret;
8951 
8952 	list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
8953 		ret = unsync(vport, mac_cfg->mac_addr);
8954 		if (!ret || ret == -ENOENT) {
8955 			/* clear all mac addrs from hardware, but keep them in
8956 			 * the mac list so they can be restored after the vf
8957 			 * reset finishes.
8958 			 */
8959 			if (!is_del_list &&
8960 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
8961 				mac_cfg->state = HCLGE_MAC_TO_ADD;
8962 			} else {
8963 				list_del(&mac_cfg->node);
8964 				kfree(mac_cfg);
8965 			}
8966 		} else if (is_del_list) {
8967 			mac_cfg->state = HCLGE_MAC_TO_DEL;
8968 		}
8969 	}
8970 }
8971 
8972 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
8973 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
8974 {
8975 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8976 	struct hclge_dev *hdev = vport->back;
8977 	struct list_head tmp_del_list, *list;
8978 
8979 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8980 		list = &vport->uc_mac_list;
8981 		unsync = hclge_rm_uc_addr_common;
8982 	} else {
8983 		list = &vport->mc_mac_list;
8984 		unsync = hclge_rm_mc_addr_common;
8985 	}
8986 
8987 	INIT_LIST_HEAD(&tmp_del_list);
8988 
8989 	if (!is_del_list)
8990 		set_bit(vport->vport_id, hdev->vport_config_block);
8991 
8992 	spin_lock_bh(&vport->mac_list_lock);
8993 
8994 	hclge_build_del_list(list, is_del_list, &tmp_del_list);
8995 
8996 	spin_unlock_bh(&vport->mac_list_lock);
8997 
8998 	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
8999 
9000 	spin_lock_bh(&vport->mac_list_lock);
9001 
9002 	hclge_sync_from_del_list(&tmp_del_list, list);
9003 
9004 	spin_unlock_bh(&vport->mac_list_lock);
9005 }
9006 
9007 /* remove all mac addresses when uninitializing */
9008 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9009 					enum HCLGE_MAC_ADDR_TYPE mac_type)
9010 {
9011 	struct hclge_mac_node *mac_node, *tmp;
9012 	struct hclge_dev *hdev = vport->back;
9013 	struct list_head tmp_del_list, *list;
9014 
9015 	INIT_LIST_HEAD(&tmp_del_list);
9016 
9017 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9018 		&vport->uc_mac_list : &vport->mc_mac_list;
9019 
9020 	spin_lock_bh(&vport->mac_list_lock);
9021 
9022 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9023 		switch (mac_node->state) {
9024 		case HCLGE_MAC_TO_DEL:
9025 		case HCLGE_MAC_ACTIVE:
9026 			list_del(&mac_node->node);
9027 			list_add_tail(&mac_node->node, &tmp_del_list);
9028 			break;
9029 		case HCLGE_MAC_TO_ADD:
9030 			list_del(&mac_node->node);
9031 			kfree(mac_node);
9032 			break;
9033 		}
9034 	}
9035 
9036 	spin_unlock_bh(&vport->mac_list_lock);
9037 
9038 	if (mac_type == HCLGE_MAC_ADDR_UC)
9039 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9040 					    hclge_rm_uc_addr_common);
9041 	else
9042 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9043 					    hclge_rm_mc_addr_common);
9044 
9045 	if (!list_empty(&tmp_del_list))
9046 		dev_warn(&hdev->pdev->dev,
9047 			 "failed to completely uninit %s mac list for vport %u\n",
9048 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9049 			 vport->vport_id);
9050 
9051 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9052 		list_del(&mac_node->node);
9053 		kfree(mac_node);
9054 	}
9055 }
9056 
9057 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9058 {
9059 	struct hclge_vport *vport;
9060 	int i;
9061 
9062 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9063 		vport = &hdev->vport[i];
9064 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9065 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9066 	}
9067 }
9068 
9069 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9070 					      u16 cmdq_resp, u8 resp_code)
9071 {
9072 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
9073 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
9074 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
9075 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
9076 
9077 	int return_status;
9078 
9079 	if (cmdq_resp) {
9080 		dev_err(&hdev->pdev->dev,
9081 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9082 			cmdq_resp);
9083 		return -EIO;
9084 	}
9085 
9086 	switch (resp_code) {
9087 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
9088 	case HCLGE_ETHERTYPE_ALREADY_ADD:
9089 		return_status = 0;
9090 		break;
9091 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9092 		dev_err(&hdev->pdev->dev,
9093 			"add mac ethertype failed for manager table overflow.\n");
9094 		return_status = -EIO;
9095 		break;
9096 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
9097 		dev_err(&hdev->pdev->dev,
9098 			"add mac ethertype failed for key conflict.\n");
9099 		return_status = -EIO;
9100 		break;
9101 	default:
9102 		dev_err(&hdev->pdev->dev,
9103 			"add mac ethertype failed for undefined, code=%u.\n",
9104 			resp_code);
9105 		return_status = -EIO;
9106 	}
9107 
9108 	return return_status;
9109 }
9110 
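/* Return true if @mac_addr is already in use: either found in the hardware
 * MAC-VLAN table for this vport, or already assigned to another VF in
 * vf_info.
 */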
9111 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9112 				     u8 *mac_addr)
9113 {
9114 	struct hclge_mac_vlan_tbl_entry_cmd req;
9115 	struct hclge_dev *hdev = vport->back;
9116 	struct hclge_desc desc;
9117 	u16 egress_port = 0;
9118 	int i;
9119 
9120 	if (is_zero_ether_addr(mac_addr))
9121 		return false;
9122 
9123 	memset(&req, 0, sizeof(req));
9124 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9125 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9126 	req.egress_port = cpu_to_le16(egress_port);
9127 	hclge_prepare_mac_addr(&req, mac_addr, false);
9128 
9129 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9130 		return true;
9131 
9132 	vf_idx += HCLGE_VF_VPORT_START_NUM;
9133 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
9134 		if (i != vf_idx &&
9135 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9136 			return true;
9137 
9138 	return false;
9139 }
9140 
9141 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9142 			    u8 *mac_addr)
9143 {
9144 	struct hclge_vport *vport = hclge_get_vport(handle);
9145 	struct hclge_dev *hdev = vport->back;
9146 
9147 	vport = hclge_get_vf_vport(hdev, vf);
9148 	if (!vport)
9149 		return -EINVAL;
9150 
9151 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9152 		dev_info(&hdev->pdev->dev,
9153 			 "Specified MAC(=%pM) is same as before, no change committed!\n",
9154 			 mac_addr);
9155 		return 0;
9156 	}
9157 
9158 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9159 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9160 			mac_addr);
9161 		return -EEXIST;
9162 	}
9163 
9164 	ether_addr_copy(vport->vf_info.mac, mac_addr);
9165 
9166 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9167 		dev_info(&hdev->pdev->dev,
9168 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9169 			 vf, mac_addr);
9170 		return hclge_inform_reset_assert_to_vf(vport);
9171 	}
9172 
9173 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9174 		 vf, mac_addr);
9175 	return 0;
9176 }
9177 
9178 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9179 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
9180 {
9181 	struct hclge_desc desc;
9182 	u8 resp_code;
9183 	u16 retval;
9184 	int ret;
9185 
9186 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9187 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9188 
9189 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9190 	if (ret) {
9191 		dev_err(&hdev->pdev->dev,
9192 			"add mac ethertype failed for cmd_send, ret =%d.\n",
9193 			ret);
9194 		return ret;
9195 	}
9196 
9197 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9198 	retval = le16_to_cpu(desc.retval);
9199 
9200 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9201 }
9202 
9203 static int init_mgr_tbl(struct hclge_dev *hdev)
9204 {
9205 	int ret;
9206 	int i;
9207 
9208 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9209 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9210 		if (ret) {
9211 			dev_err(&hdev->pdev->dev,
9212 				"add mac ethertype failed, ret =%d.\n",
9213 				ret);
9214 			return ret;
9215 		}
9216 	}
9217 
9218 	return 0;
9219 }
9220 
9221 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9222 {
9223 	struct hclge_vport *vport = hclge_get_vport(handle);
9224 	struct hclge_dev *hdev = vport->back;
9225 
9226 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
9227 }
9228 
9229 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9230 				       const u8 *old_addr, const u8 *new_addr)
9231 {
9232 	struct list_head *list = &vport->uc_mac_list;
9233 	struct hclge_mac_node *old_node, *new_node;
9234 
9235 	new_node = hclge_find_mac_node(list, new_addr);
9236 	if (!new_node) {
9237 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9238 		if (!new_node)
9239 			return -ENOMEM;
9240 
9241 		new_node->state = HCLGE_MAC_TO_ADD;
9242 		ether_addr_copy(new_node->mac_addr, new_addr);
9243 		list_add(&new_node->node, list);
9244 	} else {
9245 		if (new_node->state == HCLGE_MAC_TO_DEL)
9246 			new_node->state = HCLGE_MAC_ACTIVE;
9247 
9248 		/* make sure the new addr is at the list head, so the dev
9249 		 * addr won't fail to be re-added into the mac table due to
9250 		 * the umv space limitation after a global/imp reset, which
9251 		 * clears the hardware mac table.
9252 		 */
9253 		list_move(&new_node->node, list);
9254 	}
9255 
9256 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9257 		old_node = hclge_find_mac_node(list, old_addr);
9258 		if (old_node) {
9259 			if (old_node->state == HCLGE_MAC_TO_ADD) {
9260 				list_del(&old_node->node);
9261 				kfree(old_node);
9262 			} else {
9263 				old_node->state = HCLGE_MAC_TO_DEL;
9264 			}
9265 		}
9266 	}
9267 
9268 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9269 
9270 	return 0;
9271 }
9272 
9273 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9274 			      bool is_first)
9275 {
9276 	const unsigned char *new_addr = (const unsigned char *)p;
9277 	struct hclge_vport *vport = hclge_get_vport(handle);
9278 	struct hclge_dev *hdev = vport->back;
9279 	unsigned char *old_addr = NULL;
9280 	int ret;
9281 
9282 	/* mac addr check */
9283 	if (is_zero_ether_addr(new_addr) ||
9284 	    is_broadcast_ether_addr(new_addr) ||
9285 	    is_multicast_ether_addr(new_addr)) {
9286 		dev_err(&hdev->pdev->dev,
9287 			"change uc mac err! invalid mac: %pM.\n",
9288 			 new_addr);
9289 		return -EINVAL;
9290 	}
9291 
9292 	ret = hclge_pause_addr_cfg(hdev, new_addr);
9293 	if (ret) {
9294 		dev_err(&hdev->pdev->dev,
9295 			"failed to configure mac pause address, ret = %d\n",
9296 			ret);
9297 		return ret;
9298 	}
9299 
9300 	if (!is_first)
9301 		old_addr = hdev->hw.mac.mac_addr;
9302 
9303 	spin_lock_bh(&vport->mac_list_lock);
9304 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9305 	if (ret) {
9306 		dev_err(&hdev->pdev->dev,
9307 			"failed to change the mac addr:%pM, ret = %d\n",
9308 			new_addr, ret);
9309 		spin_unlock_bh(&vport->mac_list_lock);
9310 
9311 		if (!is_first)
9312 			hclge_pause_addr_cfg(hdev, old_addr);
9313 
9314 		return ret;
9315 	}
9316 	/* we must update the dev addr under the spin lock to prevent it from
9317 	 * being removed by the set_rx_mode path.
9318 	 */
9319 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9320 	spin_unlock_bh(&vport->mac_list_lock);
9321 
9322 	hclge_task_schedule(hdev, 0);
9323 
9324 	return 0;
9325 }
9326 
9327 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9328 {
9329 	struct mii_ioctl_data *data = if_mii(ifr);
9330 
9331 	if (!hnae3_dev_phy_imp_supported(hdev))
9332 		return -EOPNOTSUPP;
9333 
9334 	switch (cmd) {
9335 	case SIOCGMIIPHY:
9336 		data->phy_id = hdev->hw.mac.phy_addr;
9337 		/* this command reads phy id and register at the same time */
9338 		fallthrough;
9339 	case SIOCGMIIREG:
9340 		data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9341 		return 0;
9342 
9343 	case SIOCSMIIREG:
9344 		return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9345 	default:
9346 		return -EOPNOTSUPP;
9347 	}
9348 }
9349 
9350 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9351 			  int cmd)
9352 {
9353 	struct hclge_vport *vport = hclge_get_vport(handle);
9354 	struct hclge_dev *hdev = vport->back;
9355 
9356 	if (!hdev->hw.mac.phydev)
9357 		return hclge_mii_ioctl(hdev, ifr, cmd);
9358 
9359 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9360 }
9361 
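/* Read-modify-write of the VLAN filter control setting: read the current
 * config from firmware for the given vlan_type/vf_id, then set or clear
 * the fe_type bits in vlan_fe according to @filter_en and write it back.
 */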
9362 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9363 				      u8 fe_type, bool filter_en, u8 vf_id)
9364 {
9365 	struct hclge_vlan_filter_ctrl_cmd *req;
9366 	struct hclge_desc desc;
9367 	int ret;
9368 
9369 	/* read current vlan filter parameter */
9370 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9371 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9372 	req->vlan_type = vlan_type;
9373 	req->vf_id = vf_id;
9374 
9375 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9376 	if (ret) {
9377 		dev_err(&hdev->pdev->dev,
9378 			"failed to get vlan filter config, ret = %d.\n", ret);
9379 		return ret;
9380 	}
9381 
9382 	/* modify and write new config parameter */
9383 	hclge_cmd_reuse_desc(&desc, false);
9384 	req->vlan_fe = filter_en ?
9385 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9386 
9387 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9388 	if (ret)
9389 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9390 			ret);
9391 
9392 	return ret;
9393 }
9394 
9395 #define HCLGE_FILTER_TYPE_VF		0
9396 #define HCLGE_FILTER_TYPE_PORT		1
9397 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
9398 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
9399 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
9400 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
9401 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
9402 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
9403 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
9404 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
9405 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
9406 
9407 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9408 {
9409 	struct hclge_vport *vport = hclge_get_vport(handle);
9410 	struct hclge_dev *hdev = vport->back;
9411 
9412 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9413 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9414 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
9415 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9416 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
9417 	} else {
9418 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9419 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
9420 					   0);
9421 	}
9422 	if (enable)
9423 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
9424 	else
9425 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
9426 }
9427 
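/* Build and send the two-descriptor VF VLAN filter command. The VF bitmap
 * spans both descriptors, so the bit for @vfid is placed in descriptor 0
 * or descriptor 1 depending on which byte of the bitmap it falls into.
 */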
9428 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9429 					bool is_kill, u16 vlan,
9430 					struct hclge_desc *desc)
9431 {
9432 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
9433 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
9434 	u8 vf_byte_val;
9435 	u8 vf_byte_off;
9436 	int ret;
9437 
9438 	hclge_cmd_setup_basic_desc(&desc[0],
9439 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9440 	hclge_cmd_setup_basic_desc(&desc[1],
9441 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9442 
9443 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9444 
9445 	vf_byte_off = vfid / 8;
9446 	vf_byte_val = 1 << (vfid % 8);
9447 
9448 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9449 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9450 
9451 	req0->vlan_id  = cpu_to_le16(vlan);
9452 	req0->vlan_cfg = is_kill;
9453 
9454 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9455 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9456 	else
9457 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9458 
9459 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
9460 	if (ret) {
9461 		dev_err(&hdev->pdev->dev,
9462 			"Send vf vlan command fail, ret =%d.\n",
9463 			ret);
9464 		return ret;
9465 	}
9466 
9467 	return 0;
9468 }
9469 
9470 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9471 					  bool is_kill, struct hclge_desc *desc)
9472 {
9473 	struct hclge_vlan_filter_vf_cfg_cmd *req;
9474 
9475 	req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9476 
9477 	if (!is_kill) {
9478 #define HCLGE_VF_VLAN_NO_ENTRY	2
9479 		if (!req->resp_code || req->resp_code == 1)
9480 			return 0;
9481 
9482 		if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9483 			set_bit(vfid, hdev->vf_vlan_full);
9484 			dev_warn(&hdev->pdev->dev,
9485 				 "vf vlan table is full, vf vlan filter is disabled\n");
9486 			return 0;
9487 		}
9488 
9489 		dev_err(&hdev->pdev->dev,
9490 			"Add vf vlan filter fail, ret =%u.\n",
9491 			req->resp_code);
9492 	} else {
9493 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
9494 		if (!req->resp_code)
9495 			return 0;
9496 
9497 		/* The vf vlan filter is disabled when the vf vlan table is
9498 		 * full, so new vlan ids are never added to the vf vlan table.
9499 		 * Just return 0 without a warning, to avoid massive verbose
9500 		 * logs when unloading.
9501 		 */
9502 		if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9503 			return 0;
9504 
9505 		dev_err(&hdev->pdev->dev,
9506 			"Kill vf vlan filter fail, ret =%u.\n",
9507 			req->resp_code);
9508 	}
9509 
9510 	return -EIO;
9511 }
9512 
9513 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9514 				    bool is_kill, u16 vlan,
9515 				    __be16 proto)
9516 {
9517 	struct hclge_vport *vport = &hdev->vport[vfid];
9518 	struct hclge_desc desc[2];
9519 	int ret;
9520 
9521 	/* If the vf vlan table is full, firmware disables the vf vlan filter,
9522 	 * so it is neither possible nor necessary to add a new vlan id to it.
9523 	 * If spoof check is enabled and the vf vlan table is full, adding a
9524 	 * new vlan must be refused: tx packets with that vlan id would be dropped.
9525 	 */
9526 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9527 		if (vport->vf_info.spoofchk && vlan) {
9528 			dev_err(&hdev->pdev->dev,
9529 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
9530 			return -EPERM;
9531 		}
9532 		return 0;
9533 	}
9534 
9535 	ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9536 	if (ret)
9537 		return ret;
9538 
9539 	return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9540 }
9541 
9542 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9543 				      u16 vlan_id, bool is_kill)
9544 {
9545 	struct hclge_vlan_filter_pf_cfg_cmd *req;
9546 	struct hclge_desc desc;
9547 	u8 vlan_offset_byte_val;
9548 	u8 vlan_offset_byte;
9549 	u8 vlan_offset_160;
9550 	int ret;
9551 
9552 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9553 
9554 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9555 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9556 			   HCLGE_VLAN_BYTE_SIZE;
9557 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9558 
9559 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9560 	req->vlan_offset = vlan_offset_160;
9561 	req->vlan_cfg = is_kill;
9562 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9563 
9564 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9565 	if (ret)
9566 		dev_err(&hdev->pdev->dev,
9567 			"port vlan command, send fail, ret =%d.\n", ret);
9568 	return ret;
9569 }
9570 
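/* Update the per-VF VLAN filter first, then use the per-vport bitmap in
 * hdev->vlan_table to decide whether the port-level filter also needs
 * updating: only the first vport to add a vlan id or the last one to
 * remove it touches the port VLAN filter.
 */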
9571 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9572 				    u16 vport_id, u16 vlan_id,
9573 				    bool is_kill)
9574 {
9575 	u16 vport_idx, vport_num = 0;
9576 	int ret;
9577 
9578 	if (is_kill && !vlan_id)
9579 		return 0;
9580 
9581 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
9582 				       proto);
9583 	if (ret) {
9584 		dev_err(&hdev->pdev->dev,
9585 			"Set %u vport vlan filter config fail, ret =%d.\n",
9586 			vport_id, ret);
9587 		return ret;
9588 	}
9589 
9590 	/* vlan 0 may be added twice when 8021q module is enabled */
9591 	if (!is_kill && !vlan_id &&
9592 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
9593 		return 0;
9594 
9595 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9596 		dev_err(&hdev->pdev->dev,
9597 			"Add port vlan failed, vport %u is already in vlan %u\n",
9598 			vport_id, vlan_id);
9599 		return -EINVAL;
9600 	}
9601 
9602 	if (is_kill &&
9603 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9604 		dev_err(&hdev->pdev->dev,
9605 			"Delete port vlan failed, vport %u is not in vlan %u\n",
9606 			vport_id, vlan_id);
9607 		return -EINVAL;
9608 	}
9609 
9610 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9611 		vport_num++;
9612 
9613 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9614 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9615 						 is_kill);
9616 
9617 	return ret;
9618 }
9619 
9620 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9621 {
9622 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9623 	struct hclge_vport_vtag_tx_cfg_cmd *req;
9624 	struct hclge_dev *hdev = vport->back;
9625 	struct hclge_desc desc;
9626 	u16 bmap_index;
9627 	int status;
9628 
9629 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9630 
9631 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9632 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9633 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9634 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9635 		      vcfg->accept_tag1 ? 1 : 0);
9636 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9637 		      vcfg->accept_untag1 ? 1 : 0);
9638 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9639 		      vcfg->accept_tag2 ? 1 : 0);
9640 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9641 		      vcfg->accept_untag2 ? 1 : 0);
9642 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9643 		      vcfg->insert_tag1_en ? 1 : 0);
9644 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9645 		      vcfg->insert_tag2_en ? 1 : 0);
9646 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9647 		      vcfg->tag_shift_mode_en ? 1 : 0);
9648 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9649 
9650 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9651 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9652 			HCLGE_VF_NUM_PER_BYTE;
9653 	req->vf_bitmap[bmap_index] =
9654 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9655 
9656 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9657 	if (status)
9658 		dev_err(&hdev->pdev->dev,
9659 			"Send port txvlan cfg command fail, ret =%d\n",
9660 			status);
9661 
9662 	return status;
9663 }
9664 
9665 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9666 {
9667 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9668 	struct hclge_vport_vtag_rx_cfg_cmd *req;
9669 	struct hclge_dev *hdev = vport->back;
9670 	struct hclge_desc desc;
9671 	u16 bmap_index;
9672 	int status;
9673 
9674 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9675 
9676 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9677 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9678 		      vcfg->strip_tag1_en ? 1 : 0);
9679 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9680 		      vcfg->strip_tag2_en ? 1 : 0);
9681 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9682 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
9683 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9684 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
9685 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9686 		      vcfg->strip_tag1_discard_en ? 1 : 0);
9687 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9688 		      vcfg->strip_tag2_discard_en ? 1 : 0);
9689 
9690 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9691 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9692 			HCLGE_VF_NUM_PER_BYTE;
9693 	req->vf_bitmap[bmap_index] =
9694 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9695 
9696 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9697 	if (status)
9698 		dev_err(&hdev->pdev->dev,
9699 			"Send port rxvlan cfg command fail, ret =%d\n",
9700 			status);
9701 
9702 	return status;
9703 }
9704 
9705 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9706 				  u16 port_base_vlan_state,
9707 				  u16 vlan_tag)
9708 {
9709 	int ret;
9710 
9711 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9712 		vport->txvlan_cfg.accept_tag1 = true;
9713 		vport->txvlan_cfg.insert_tag1_en = false;
9714 		vport->txvlan_cfg.default_tag1 = 0;
9715 	} else {
9716 		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9717 
9718 		vport->txvlan_cfg.accept_tag1 =
9719 			ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9720 		vport->txvlan_cfg.insert_tag1_en = true;
9721 		vport->txvlan_cfg.default_tag1 = vlan_tag;
9722 	}
9723 
9724 	vport->txvlan_cfg.accept_untag1 = true;
9725 
9726 	/* accept_tag2 and accept_untag2 are not supported on
9727 	 * pdev revision 0x20; newer revisions support them, but
9728 	 * these two fields cannot be configured by the user.
9729 	 */
9730 	vport->txvlan_cfg.accept_tag2 = true;
9731 	vport->txvlan_cfg.accept_untag2 = true;
9732 	vport->txvlan_cfg.insert_tag2_en = false;
9733 	vport->txvlan_cfg.default_tag2 = 0;
9734 	vport->txvlan_cfg.tag_shift_mode_en = true;
9735 
9736 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9737 		vport->rxvlan_cfg.strip_tag1_en = false;
9738 		vport->rxvlan_cfg.strip_tag2_en =
9739 				vport->rxvlan_cfg.rx_vlan_offload_en;
9740 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
9741 	} else {
9742 		vport->rxvlan_cfg.strip_tag1_en =
9743 				vport->rxvlan_cfg.rx_vlan_offload_en;
9744 		vport->rxvlan_cfg.strip_tag2_en = true;
9745 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
9746 	}
9747 
9748 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
9749 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9750 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9751 
9752 	ret = hclge_set_vlan_tx_offload_cfg(vport);
9753 	if (ret)
9754 		return ret;
9755 
9756 	return hclge_set_vlan_rx_offload_cfg(vport);
9757 }
9758 
9759 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9760 {
9761 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9762 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9763 	struct hclge_desc desc;
9764 	int status;
9765 
9766 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9767 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9768 	rx_req->ot_fst_vlan_type =
9769 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9770 	rx_req->ot_sec_vlan_type =
9771 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9772 	rx_req->in_fst_vlan_type =
9773 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9774 	rx_req->in_sec_vlan_type =
9775 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9776 
9777 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9778 	if (status) {
9779 		dev_err(&hdev->pdev->dev,
9780 			"Send rxvlan protocol type command fail, ret =%d\n",
9781 			status);
9782 		return status;
9783 	}
9784 
9785 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9786 
9787 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9788 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9789 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9790 
9791 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9792 	if (status)
9793 		dev_err(&hdev->pdev->dev,
9794 			"Send txvlan protocol type command fail, ret =%d\n",
9795 			status);
9796 
9797 	return status;
9798 }
9799 
9800 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9801 {
9802 #define HCLGE_DEF_VLAN_TYPE		0x8100
9803 
9804 	struct hnae3_handle *handle = &hdev->vport[0].nic;
9805 	struct hclge_vport *vport;
9806 	int ret;
9807 	int i;
9808 
9809 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9810 		/* for revision 0x21, vf vlan filter is per function */
9811 		for (i = 0; i < hdev->num_alloc_vport; i++) {
9812 			vport = &hdev->vport[i];
9813 			ret = hclge_set_vlan_filter_ctrl(hdev,
9814 							 HCLGE_FILTER_TYPE_VF,
9815 							 HCLGE_FILTER_FE_EGRESS,
9816 							 true,
9817 							 vport->vport_id);
9818 			if (ret)
9819 				return ret;
9820 		}
9821 
9822 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9823 						 HCLGE_FILTER_FE_INGRESS, true,
9824 						 0);
9825 		if (ret)
9826 			return ret;
9827 	} else {
9828 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9829 						 HCLGE_FILTER_FE_EGRESS_V1_B,
9830 						 true, 0);
9831 		if (ret)
9832 			return ret;
9833 	}
9834 
9835 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
9836 
9837 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9838 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9839 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9840 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9841 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9842 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9843 
9844 	ret = hclge_set_vlan_protocol_type(hdev);
9845 	if (ret)
9846 		return ret;
9847 
9848 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9849 		u16 vlan_tag;
9850 
9851 		vport = &hdev->vport[i];
9852 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9853 
9854 		ret = hclge_vlan_offload_cfg(vport,
9855 					     vport->port_base_vlan_cfg.state,
9856 					     vlan_tag);
9857 		if (ret)
9858 			return ret;
9859 	}
9860 
9861 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9862 }
9863 
9864 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9865 				       bool writen_to_tbl)
9866 {
9867 	struct hclge_vport_vlan_cfg *vlan;
9868 
9869 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9870 	if (!vlan)
9871 		return;
9872 
9873 	vlan->hd_tbl_status = writen_to_tbl;
9874 	vlan->vlan_id = vlan_id;
9875 
9876 	list_add_tail(&vlan->node, &vport->vlan_list);
9877 }
9878 
9879 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9880 {
9881 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9882 	struct hclge_dev *hdev = vport->back;
9883 	int ret;
9884 
9885 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9886 		if (!vlan->hd_tbl_status) {
9887 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9888 						       vport->vport_id,
9889 						       vlan->vlan_id, false);
9890 			if (ret) {
9891 				dev_err(&hdev->pdev->dev,
9892 					"restore vport vlan list failed, ret=%d\n",
9893 					ret);
9894 				return ret;
9895 			}
9896 		}
9897 		vlan->hd_tbl_status = true;
9898 	}
9899 
9900 	return 0;
9901 }
9902 
9903 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9904 				      bool is_write_tbl)
9905 {
9906 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9907 	struct hclge_dev *hdev = vport->back;
9908 
9909 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9910 		if (vlan->vlan_id == vlan_id) {
9911 			if (is_write_tbl && vlan->hd_tbl_status)
9912 				hclge_set_vlan_filter_hw(hdev,
9913 							 htons(ETH_P_8021Q),
9914 							 vport->vport_id,
9915 							 vlan_id,
9916 							 true);
9917 
9918 			list_del(&vlan->node);
9919 			kfree(vlan);
9920 			break;
9921 		}
9922 	}
9923 }
9924 
9925 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
9926 {
9927 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9928 	struct hclge_dev *hdev = vport->back;
9929 
9930 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9931 		if (vlan->hd_tbl_status)
9932 			hclge_set_vlan_filter_hw(hdev,
9933 						 htons(ETH_P_8021Q),
9934 						 vport->vport_id,
9935 						 vlan->vlan_id,
9936 						 true);
9937 
9938 		vlan->hd_tbl_status = false;
9939 		if (is_del_list) {
9940 			list_del(&vlan->node);
9941 			kfree(vlan);
9942 		}
9943 	}
9944 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
9945 }
9946 
9947 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
9948 {
9949 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9950 	struct hclge_vport *vport;
9951 	int i;
9952 
9953 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9954 		vport = &hdev->vport[i];
9955 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9956 			list_del(&vlan->node);
9957 			kfree(vlan);
9958 		}
9959 	}
9960 }
9961 
9962 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
9963 {
9964 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9965 	struct hclge_dev *hdev = vport->back;
9966 	u16 vlan_proto;
9967 	u16 vlan_id;
9968 	u16 state;
9969 	int ret;
9970 
9971 	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
9972 	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9973 	state = vport->port_base_vlan_cfg.state;
9974 
9975 	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
9976 		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
9977 		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
9978 					 vport->vport_id, vlan_id,
9979 					 false);
9980 		return;
9981 	}
9982 
9983 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9984 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9985 					       vport->vport_id,
9986 					       vlan->vlan_id, false);
9987 		if (ret)
9988 			break;
9989 		vlan->hd_tbl_status = true;
9990 	}
9991 }
9992 
9993 /* For global reset and imp reset, hardware will clear the mac table,
9994  * so we change the mac address state from ACTIVE to TO_ADD; then they
9995  * can be restored in the service task after the reset completes.
9996  * Furthermore, mac addresses with state TO_DEL or DEL_FAIL do not need
9997  * to be restored after reset, so just remove these mac nodes from mac_list.
9998  */
9999 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10000 {
10001 	struct hclge_mac_node *mac_node, *tmp;
10002 
10003 	list_for_each_entry_safe(mac_node, tmp, list, node) {
10004 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
10005 			mac_node->state = HCLGE_MAC_TO_ADD;
10006 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10007 			list_del(&mac_node->node);
10008 			kfree(mac_node);
10009 		}
10010 	}
10011 }
10012 
10013 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10014 {
10015 	spin_lock_bh(&vport->mac_list_lock);
10016 
10017 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10018 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10019 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10020 
10021 	spin_unlock_bh(&vport->mac_list_lock);
10022 }
10023 
10024 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10025 {
10026 	struct hclge_vport *vport = &hdev->vport[0];
10027 	struct hnae3_handle *handle = &vport->nic;
10028 
10029 	hclge_restore_mac_table_common(vport);
10030 	hclge_restore_vport_vlan_table(vport);
10031 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
10032 	set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10033 	hclge_restore_fd_entries(handle);
10034 }
10035 
10036 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10037 {
10038 	struct hclge_vport *vport = hclge_get_vport(handle);
10039 
10040 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10041 		vport->rxvlan_cfg.strip_tag1_en = false;
10042 		vport->rxvlan_cfg.strip_tag2_en = enable;
10043 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
10044 	} else {
10045 		vport->rxvlan_cfg.strip_tag1_en = enable;
10046 		vport->rxvlan_cfg.strip_tag2_en = true;
10047 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
10048 	}
10049 
10050 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
10051 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10052 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10053 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10054 
10055 	return hclge_set_vlan_rx_offload_cfg(vport);
10056 }
10057 
10058 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10059 					    u16 port_base_vlan_state,
10060 					    struct hclge_vlan_info *new_info,
10061 					    struct hclge_vlan_info *old_info)
10062 {
10063 	struct hclge_dev *hdev = vport->back;
10064 	int ret;
10065 
10066 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10067 		hclge_rm_vport_all_vlan_table(vport, false);
10068 		return hclge_set_vlan_filter_hw(hdev,
10069 						 htons(new_info->vlan_proto),
10070 						 vport->vport_id,
10071 						 new_info->vlan_tag,
10072 						 false);
10073 	}
10074 
10075 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10076 				       vport->vport_id, old_info->vlan_tag,
10077 				       true);
10078 	if (ret)
10079 		return ret;
10080 
10081 	return hclge_add_vport_all_vlan_table(vport);
10082 }
10083 
10084 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10085 				    struct hclge_vlan_info *vlan_info)
10086 {
10087 	struct hnae3_handle *nic = &vport->nic;
10088 	struct hclge_vlan_info *old_vlan_info;
10089 	struct hclge_dev *hdev = vport->back;
10090 	int ret;
10091 
10092 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10093 
10094 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
10095 	if (ret)
10096 		return ret;
10097 
10098 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10099 		/* add new VLAN tag */
10100 		ret = hclge_set_vlan_filter_hw(hdev,
10101 					       htons(vlan_info->vlan_proto),
10102 					       vport->vport_id,
10103 					       vlan_info->vlan_tag,
10104 					       false);
10105 		if (ret)
10106 			return ret;
10107 
10108 		/* remove old VLAN tag */
10109 		ret = hclge_set_vlan_filter_hw(hdev,
10110 					       htons(old_vlan_info->vlan_proto),
10111 					       vport->vport_id,
10112 					       old_vlan_info->vlan_tag,
10113 					       true);
10114 		if (ret)
10115 			return ret;
10116 
10117 		goto update;
10118 	}
10119 
10120 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10121 					       old_vlan_info);
10122 	if (ret)
10123 		return ret;
10124 
10125 	/* update state only when disabling/enabling port based VLAN */
10126 	vport->port_base_vlan_cfg.state = state;
10127 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10128 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10129 	else
10130 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10131 
10132 update:
10133 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
10134 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
10135 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
10136 
10137 	return 0;
10138 }
10139 
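/* Map the requested vlan against the current port based VLAN state:
 * setting a non-zero vlan while disabled -> ENABLE, clearing the vlan
 * while enabled -> DISABLE, changing to a different vlan -> MODIFY, and
 * anything else -> NOCHANGE.
 */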
10140 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10141 					  enum hnae3_port_base_vlan_state state,
10142 					  u16 vlan)
10143 {
10144 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10145 		if (!vlan)
10146 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10147 		else
10148 			return HNAE3_PORT_BASE_VLAN_ENABLE;
10149 	} else {
10150 		if (!vlan)
10151 			return HNAE3_PORT_BASE_VLAN_DISABLE;
10152 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
10153 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10154 		else
10155 			return HNAE3_PORT_BASE_VLAN_MODIFY;
10156 	}
10157 }
10158 
10159 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10160 				    u16 vlan, u8 qos, __be16 proto)
10161 {
10162 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10163 	struct hclge_vport *vport = hclge_get_vport(handle);
10164 	struct hclge_dev *hdev = vport->back;
10165 	struct hclge_vlan_info vlan_info;
10166 	u16 state;
10167 	int ret;
10168 
10169 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10170 		return -EOPNOTSUPP;
10171 
10172 	vport = hclge_get_vf_vport(hdev, vfid);
10173 	if (!vport)
10174 		return -EINVAL;
10175 
10176 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
10177 	if (vlan > VLAN_N_VID - 1 || qos > 7)
10178 		return -EINVAL;
10179 	if (proto != htons(ETH_P_8021Q))
10180 		return -EPROTONOSUPPORT;
10181 
10182 	state = hclge_get_port_base_vlan_state(vport,
10183 					       vport->port_base_vlan_cfg.state,
10184 					       vlan);
10185 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10186 		return 0;
10187 
10188 	vlan_info.vlan_tag = vlan;
10189 	vlan_info.qos = qos;
10190 	vlan_info.vlan_proto = ntohs(proto);
10191 
10192 	ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10193 	if (ret) {
10194 		dev_err(&hdev->pdev->dev,
10195 			"failed to update port base vlan for vf %d, ret = %d\n",
10196 			vfid, ret);
10197 		return ret;
10198 	}
10199 
10200 	/* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10201 	 * VLAN state.
10202 	 */
10203 	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10204 	    test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10205 		hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10206 						  vport->vport_id, state,
10207 						  vlan, qos,
10208 						  ntohs(proto));
10209 
10210 	return 0;
10211 }
10212 
10213 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10214 {
10215 	struct hclge_vlan_info *vlan_info;
10216 	struct hclge_vport *vport;
10217 	int ret;
10218 	int vf;
10219 
10220 	/* clear port based vlan for all vfs */
10221 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10222 		vport = &hdev->vport[vf];
10223 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10224 
10225 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10226 					       vport->vport_id,
10227 					       vlan_info->vlan_tag, true);
10228 		if (ret)
10229 			dev_err(&hdev->pdev->dev,
10230 				"failed to clear vf vlan for vf%d, ret = %d\n",
10231 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10232 	}
10233 }
10234 
10235 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10236 			  u16 vlan_id, bool is_kill)
10237 {
10238 	struct hclge_vport *vport = hclge_get_vport(handle);
10239 	struct hclge_dev *hdev = vport->back;
10240 	bool writen_to_tbl = false;
10241 	int ret = 0;
10242 
10243 	/* When the device is resetting or reset has failed, firmware is
10244 	 * unable to handle mailbox messages. Just record the vlan id, and
10245 	 * remove it after the reset finishes.
10246 	 */
10247 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10248 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10249 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10250 		return -EBUSY;
10251 	}
10252 
10253 	/* When port based vlan is enabled, we use the port based vlan as the
10254 	 * vlan filter entry. In this case, we don't update the vlan filter
10255 	 * table when the user adds or removes a vlan; we only update the
10256 	 * vport vlan list. The vlan ids in the vlan list are not written to
10257 	 * the vlan filter table until port based vlan is disabled.
10258 	 */
10259 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10260 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10261 					       vlan_id, is_kill);
10262 		writen_to_tbl = true;
10263 	}
10264 
10265 	if (!ret) {
10266 		if (is_kill)
10267 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10268 		else
10269 			hclge_add_vport_vlan_table(vport, vlan_id,
10270 						   writen_to_tbl);
10271 	} else if (is_kill) {
10272 		/* when removing the hw vlan filter failed, record the vlan
10273 		 * id, and try to remove it from hw later, to stay consistent
10274 		 * with the stack
10275 		 */
10276 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10277 	}
10278 	return ret;
10279 }
10280 
10281 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10282 {
10283 #define HCLGE_MAX_SYNC_COUNT	60
10284 
10285 	int i, ret, sync_cnt = 0;
10286 	u16 vlan_id;
10287 
10288 	/* start from vport 1 for PF is always alive */
10289 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10290 		struct hclge_vport *vport = &hdev->vport[i];
10291 
10292 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10293 					 VLAN_N_VID);
10294 		while (vlan_id != VLAN_N_VID) {
10295 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10296 						       vport->vport_id, vlan_id,
10297 						       true);
10298 			if (ret && ret != -EINVAL)
10299 				return;
10300 
10301 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10302 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10303 
10304 			sync_cnt++;
10305 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10306 				return;
10307 
10308 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10309 						 VLAN_N_VID);
10310 		}
10311 	}
10312 }
10313 
10314 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10315 {
10316 	struct hclge_config_max_frm_size_cmd *req;
10317 	struct hclge_desc desc;
10318 
10319 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10320 
10321 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10322 	req->max_frm_size = cpu_to_le16(new_mps);
10323 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10324 
10325 	return hclge_cmd_send(&hdev->hw, &desc, 1);
10326 }
10327 
10328 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10329 {
10330 	struct hclge_vport *vport = hclge_get_vport(handle);
10331 
10332 	return hclge_set_vport_mtu(vport, new_mtu);
10333 }
10334 
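/* Change the MPS (max packet size) for a vport. The MTU is converted to a
 * max frame size including the Ethernet header, FCS and two VLAN tags. For
 * a VF only vport->mps is updated (it must not exceed the PF's mps); for
 * the PF the MAC max frame size is reconfigured and the packet buffers are
 * reallocated while the client is paused.
 */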
10335 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10336 {
10337 	struct hclge_dev *hdev = vport->back;
10338 	int i, max_frm_size, ret;
10339 
10340 	/* HW supports 2 layers of vlan */
10341 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10342 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10343 	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10344 		return -EINVAL;
10345 
10346 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10347 	mutex_lock(&hdev->vport_lock);
10348 	/* VF's mps must fit within hdev->mps */
10349 	if (vport->vport_id && max_frm_size > hdev->mps) {
10350 		mutex_unlock(&hdev->vport_lock);
10351 		return -EINVAL;
10352 	} else if (vport->vport_id) {
10353 		vport->mps = max_frm_size;
10354 		mutex_unlock(&hdev->vport_lock);
10355 		return 0;
10356 	}
10357 
10358 	/* PF's mps must be greater than the VF's mps */
10359 	for (i = 1; i < hdev->num_alloc_vport; i++)
10360 		if (max_frm_size < hdev->vport[i].mps) {
10361 			mutex_unlock(&hdev->vport_lock);
10362 			return -EINVAL;
10363 		}
10364 
10365 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10366 
10367 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
10368 	if (ret) {
10369 		dev_err(&hdev->pdev->dev,
10370 			"Change mtu fail, ret =%d\n", ret);
10371 		goto out;
10372 	}
10373 
10374 	hdev->mps = max_frm_size;
10375 	vport->mps = max_frm_size;
10376 
10377 	ret = hclge_buffer_alloc(hdev);
10378 	if (ret)
10379 		dev_err(&hdev->pdev->dev,
10380 			"Allocate buffer fail, ret =%d\n", ret);
10381 
10382 out:
10383 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10384 	mutex_unlock(&hdev->vport_lock);
10385 	return ret;
10386 }
10387 
10388 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
10389 				    bool enable)
10390 {
10391 	struct hclge_reset_tqp_queue_cmd *req;
10392 	struct hclge_desc desc;
10393 	int ret;
10394 
10395 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10396 
10397 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10398 	req->tqp_id = cpu_to_le16(queue_id);
10399 	if (enable)
10400 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10401 
10402 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10403 	if (ret) {
10404 		dev_err(&hdev->pdev->dev,
10405 			"Send tqp reset cmd error, status =%d\n", ret);
10406 		return ret;
10407 	}
10408 
10409 	return 0;
10410 }
10411 
10412 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
10413 {
10414 	struct hclge_reset_tqp_queue_cmd *req;
10415 	struct hclge_desc desc;
10416 	int ret;
10417 
10418 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10419 
10420 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10421 	req->tqp_id = cpu_to_le16(queue_id);
10422 
10423 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10424 	if (ret) {
10425 		dev_err(&hdev->pdev->dev,
10426 			"Get reset status error, status =%d\n", ret);
10427 		return ret;
10428 	}
10429 
10430 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10431 }
10432 
10433 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10434 {
10435 	struct hnae3_queue *queue;
10436 	struct hclge_tqp *tqp;
10437 
10438 	queue = handle->kinfo.tqp[queue_id];
10439 	tqp = container_of(queue, struct hclge_tqp, q);
10440 
10441 	return tqp->index;
10442 }
10443 
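/* Reset a single TQP: disable it, request the queue reset via firmware,
 * poll the ready_to_reset status up to HCLGE_TQP_RESET_TRY_TIMES with a
 * ~1ms sleep between polls, then deassert the reset.
 */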
10444 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
10445 {
10446 	struct hclge_vport *vport = hclge_get_vport(handle);
10447 	struct hclge_dev *hdev = vport->back;
10448 	int reset_try_times = 0;
10449 	int reset_status;
10450 	u16 queue_gid;
10451 	int ret;
10452 
10453 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
10454 
10455 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
10456 	if (ret) {
10457 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
10458 		return ret;
10459 	}
10460 
10461 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
10462 	if (ret) {
10463 		dev_err(&hdev->pdev->dev,
10464 			"Send reset tqp cmd fail, ret = %d\n", ret);
10465 		return ret;
10466 	}
10467 
10468 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10469 		reset_status = hclge_get_reset_status(hdev, queue_gid);
10470 		if (reset_status)
10471 			break;
10472 
10473 		/* Wait for tqp hw reset */
10474 		usleep_range(1000, 1200);
10475 	}
10476 
10477 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10478 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
10479 		return -ETIME;
10480 	}
10481 
10482 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
10483 	if (ret)
10484 		dev_err(&hdev->pdev->dev,
10485 			"Deassert the soft reset fail, ret = %d\n", ret);
10486 
10487 	return ret;
10488 }
10489 
10490 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
10491 {
10492 	struct hnae3_handle *handle = &vport->nic;
10493 	struct hclge_dev *hdev = vport->back;
10494 	int reset_try_times = 0;
10495 	int reset_status;
10496 	u16 queue_gid;
10497 	int ret;
10498 
10499 	if (queue_id >= handle->kinfo.num_tqps) {
10500 		dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
10501 			 queue_id);
10502 		return;
10503 	}
10504 
10505 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
10506 
10507 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
10508 	if (ret) {
10509 		dev_warn(&hdev->pdev->dev,
10510 			 "Send reset tqp cmd fail, ret = %d\n", ret);
10511 		return;
10512 	}
10513 
10514 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10515 		reset_status = hclge_get_reset_status(hdev, queue_gid);
10516 		if (reset_status)
10517 			break;
10518 
10519 		/* Wait for tqp hw reset */
10520 		usleep_range(1000, 1200);
10521 	}
10522 
10523 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10524 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
10525 		return;
10526 	}
10527 
10528 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
10529 	if (ret)
10530 		dev_warn(&hdev->pdev->dev,
10531 			 "Deassert the soft reset fail, ret = %d\n", ret);
10532 }
10533 
10534 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10535 {
10536 	struct hclge_vport *vport = hclge_get_vport(handle);
10537 	struct hclge_dev *hdev = vport->back;
10538 
10539 	return hdev->fw_version;
10540 }
10541 
10542 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10543 {
10544 	struct phy_device *phydev = hdev->hw.mac.phydev;
10545 
10546 	if (!phydev)
10547 		return;
10548 
10549 	phy_set_asym_pause(phydev, rx_en, tx_en);
10550 }
10551 
10552 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10553 {
10554 	int ret;
10555 
10556 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10557 		return 0;
10558 
10559 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10560 	if (ret)
10561 		dev_err(&hdev->pdev->dev,
10562 			"configure pauseparam error, ret = %d.\n", ret);
10563 
10564 	return ret;
10565 }
10566 
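/* Resolve the pause configuration negotiated by the PHY. The local
 * advertisement comes from phydev->advertising, the link partner's from the
 * pause/asym_pause bits, and mii_resolve_flowctrl_fdx() applies the standard
 * IEEE 802.3 pause resolution to yield FLOW_CTRL_TX/FLOW_CTRL_RX. Pause is
 * only meaningful in full duplex, hence the HCLGE_MAC_HALF override below.
 */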
10567 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10568 {
10569 	struct phy_device *phydev = hdev->hw.mac.phydev;
10570 	u16 remote_advertising = 0;
10571 	u16 local_advertising;
10572 	u32 rx_pause, tx_pause;
10573 	u8 flowctl;
10574 
10575 	if (!phydev->link || !phydev->autoneg)
10576 		return 0;
10577 
10578 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10579 
10580 	if (phydev->pause)
10581 		remote_advertising = LPA_PAUSE_CAP;
10582 
10583 	if (phydev->asym_pause)
10584 		remote_advertising |= LPA_PAUSE_ASYM;
10585 
10586 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10587 					   remote_advertising);
10588 	tx_pause = flowctl & FLOW_CTRL_TX;
10589 	rx_pause = flowctl & FLOW_CTRL_RX;
10590 
10591 	if (phydev->duplex == HCLGE_MAC_HALF) {
10592 		tx_pause = 0;
10593 		rx_pause = 0;
10594 	}
10595 
10596 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10597 }
10598 
10599 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10600 				 u32 *rx_en, u32 *tx_en)
10601 {
10602 	struct hclge_vport *vport = hclge_get_vport(handle);
10603 	struct hclge_dev *hdev = vport->back;
10604 	u8 media_type = hdev->hw.mac.media_type;
10605 
10606 	*auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10607 		    hclge_get_autoneg(handle) : 0;
10608 
10609 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10610 		*rx_en = 0;
10611 		*tx_en = 0;
10612 		return;
10613 	}
10614 
10615 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10616 		*rx_en = 1;
10617 		*tx_en = 0;
10618 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10619 		*tx_en = 1;
10620 		*rx_en = 0;
10621 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10622 		*rx_en = 1;
10623 		*tx_en = 1;
10624 	} else {
10625 		*rx_en = 0;
10626 		*tx_en = 0;
10627 	}
10628 }
10629 
10630 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10631 					 u32 rx_en, u32 tx_en)
10632 {
10633 	if (rx_en && tx_en)
10634 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
10635 	else if (rx_en && !tx_en)
10636 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10637 	else if (!rx_en && tx_en)
10638 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10639 	else
10640 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
10641 
10642 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10643 }
10644 
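/* ethtool pause handler, reached via "ethtool -A <dev> rx on|off tx on|off"
 * (illustrative invocation). Changing the autoneg flag here is rejected;
 * pause autoneg follows the link autoneg state set with "ethtool -s <dev>
 * autoneg on|off". When PFC is active, link-level flow control cannot be
 * configured at all.
 */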
10645 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10646 				u32 rx_en, u32 tx_en)
10647 {
10648 	struct hclge_vport *vport = hclge_get_vport(handle);
10649 	struct hclge_dev *hdev = vport->back;
10650 	struct phy_device *phydev = hdev->hw.mac.phydev;
10651 	u32 fc_autoneg;
10652 
10653 	if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10654 		fc_autoneg = hclge_get_autoneg(handle);
10655 		if (auto_neg != fc_autoneg) {
10656 			dev_info(&hdev->pdev->dev,
10657 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10658 			return -EOPNOTSUPP;
10659 		}
10660 	}
10661 
10662 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10663 		dev_info(&hdev->pdev->dev,
10664 			 "Priority flow control enabled. Cannot set link flow control.\n");
10665 		return -EOPNOTSUPP;
10666 	}
10667 
10668 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10669 
10670 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10671 
10672 	if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10673 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10674 
10675 	if (phydev)
10676 		return phy_start_aneg(phydev);
10677 
10678 	return -EOPNOTSUPP;
10679 }
10680 
10681 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10682 					  u8 *auto_neg, u32 *speed, u8 *duplex)
10683 {
10684 	struct hclge_vport *vport = hclge_get_vport(handle);
10685 	struct hclge_dev *hdev = vport->back;
10686 
10687 	if (speed)
10688 		*speed = hdev->hw.mac.speed;
10689 	if (duplex)
10690 		*duplex = hdev->hw.mac.duplex;
10691 	if (auto_neg)
10692 		*auto_neg = hdev->hw.mac.autoneg;
10693 }
10694 
10695 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10696 				 u8 *module_type)
10697 {
10698 	struct hclge_vport *vport = hclge_get_vport(handle);
10699 	struct hclge_dev *hdev = vport->back;
10700 
10701 	/* When the nic is down, the service task is stopped and does not
10702 	 * update the port information periodically. Query the port information
10703 	 * before returning the media type to ensure it is up to date.
10704 	 */
10705 	hclge_update_port_info(hdev);
10706 
10707 	if (media_type)
10708 		*media_type = hdev->hw.mac.media_type;
10709 
10710 	if (module_type)
10711 		*module_type = hdev->hw.mac.module_type;
10712 }
10713 
10714 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10715 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
10716 {
10717 	struct hclge_vport *vport = hclge_get_vport(handle);
10718 	struct hclge_dev *hdev = vport->back;
10719 	struct phy_device *phydev = hdev->hw.mac.phydev;
10720 	int mdix_ctrl, mdix, is_resolved;
10721 	unsigned int retval;
10722 
10723 	if (!phydev) {
10724 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10725 		*tp_mdix = ETH_TP_MDI_INVALID;
10726 		return;
10727 	}
10728 
10729 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10730 
10731 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10732 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10733 				    HCLGE_PHY_MDIX_CTRL_S);
10734 
10735 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10736 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10737 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10738 
10739 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10740 
10741 	switch (mdix_ctrl) {
10742 	case 0x0:
10743 		*tp_mdix_ctrl = ETH_TP_MDI;
10744 		break;
10745 	case 0x1:
10746 		*tp_mdix_ctrl = ETH_TP_MDI_X;
10747 		break;
10748 	case 0x3:
10749 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10750 		break;
10751 	default:
10752 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10753 		break;
10754 	}
10755 
10756 	if (!is_resolved)
10757 		*tp_mdix = ETH_TP_MDI_INVALID;
10758 	else if (mdix)
10759 		*tp_mdix = ETH_TP_MDI_X;
10760 	else
10761 		*tp_mdix = ETH_TP_MDI;
10762 }
10763 
10764 static void hclge_info_show(struct hclge_dev *hdev)
10765 {
10766 	struct device *dev = &hdev->pdev->dev;
10767 
10768 	dev_info(dev, "PF info begin:\n");
10769 
10770 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10771 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10772 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10773 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10774 	dev_info(dev, "Numbers of vmdq vports: %u\n", hdev->num_vmdq_vport);
10775 	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10776 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10777 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10778 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10779 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10780 	dev_info(dev, "This is %s PF\n",
10781 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10782 	dev_info(dev, "DCB %s\n",
10783 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10784 	dev_info(dev, "MQPRIO %s\n",
10785 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10786 
10787 	dev_info(dev, "PF info end.\n");
10788 }
10789 
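/* Register the NIC (KNIC) client instance on a vport. reset_cnt is sampled
 * before init_instance() and compared afterwards: if a reset started or
 * completed in between, the freshly initialized instance is torn down again
 * and -EBUSY is returned so the registration can be retried once the reset
 * settles. The RoCE variant below uses the same pattern.
 */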
10790 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10791 					  struct hclge_vport *vport)
10792 {
10793 	struct hnae3_client *client = vport->nic.client;
10794 	struct hclge_dev *hdev = ae_dev->priv;
10795 	int rst_cnt = hdev->rst_stats.reset_cnt;
10796 	int ret;
10797 
10798 	ret = client->ops->init_instance(&vport->nic);
10799 	if (ret)
10800 		return ret;
10801 
10802 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10803 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10804 	    rst_cnt != hdev->rst_stats.reset_cnt) {
10805 		ret = -EBUSY;
10806 		goto init_nic_err;
10807 	}
10808 
10809 	/* Enable nic hw error interrupts */
10810 	ret = hclge_config_nic_hw_error(hdev, true);
10811 	if (ret) {
10812 		dev_err(&ae_dev->pdev->dev,
10813 			"fail(%d) to enable hw error interrupts\n", ret);
10814 		goto init_nic_err;
10815 	}
10816 
10817 	hnae3_set_client_init_flag(client, ae_dev, 1);
10818 
10819 	if (netif_msg_drv(&hdev->vport->nic))
10820 		hclge_info_show(hdev);
10821 
10822 	return ret;
10823 
10824 init_nic_err:
10825 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10826 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10827 		msleep(HCLGE_WAIT_RESET_DONE);
10828 
10829 	client->ops->uninit_instance(&vport->nic, 0);
10830 
10831 	return ret;
10832 }
10833 
10834 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10835 					   struct hclge_vport *vport)
10836 {
10837 	struct hclge_dev *hdev = ae_dev->priv;
10838 	struct hnae3_client *client;
10839 	int rst_cnt;
10840 	int ret;
10841 
10842 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10843 	    !hdev->nic_client)
10844 		return 0;
10845 
10846 	client = hdev->roce_client;
10847 	ret = hclge_init_roce_base_info(vport);
10848 	if (ret)
10849 		return ret;
10850 
10851 	rst_cnt = hdev->rst_stats.reset_cnt;
10852 	ret = client->ops->init_instance(&vport->roce);
10853 	if (ret)
10854 		return ret;
10855 
10856 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10857 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10858 	    rst_cnt != hdev->rst_stats.reset_cnt) {
10859 		ret = -EBUSY;
10860 		goto init_roce_err;
10861 	}
10862 
10863 	/* Enable roce ras interrupts */
10864 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
10865 	if (ret) {
10866 		dev_err(&ae_dev->pdev->dev,
10867 			"fail(%d) to enable roce ras interrupts\n", ret);
10868 		goto init_roce_err;
10869 	}
10870 
10871 	hnae3_set_client_init_flag(client, ae_dev, 1);
10872 
10873 	return 0;
10874 
10875 init_roce_err:
10876 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10877 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10878 		msleep(HCLGE_WAIT_RESET_DONE);
10879 
10880 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10881 
10882 	return ret;
10883 }
10884 
10885 static int hclge_init_client_instance(struct hnae3_client *client,
10886 				      struct hnae3_ae_dev *ae_dev)
10887 {
10888 	struct hclge_dev *hdev = ae_dev->priv;
10889 	struct hclge_vport *vport;
10890 	int i, ret;
10891 
10892 	for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
10893 		vport = &hdev->vport[i];
10894 
10895 		switch (client->type) {
10896 		case HNAE3_CLIENT_KNIC:
10897 			hdev->nic_client = client;
10898 			vport->nic.client = client;
10899 			ret = hclge_init_nic_client_instance(ae_dev, vport);
10900 			if (ret)
10901 				goto clear_nic;
10902 
10903 			ret = hclge_init_roce_client_instance(ae_dev, vport);
10904 			if (ret)
10905 				goto clear_roce;
10906 
10907 			break;
10908 		case HNAE3_CLIENT_ROCE:
10909 			if (hnae3_dev_roce_supported(hdev)) {
10910 				hdev->roce_client = client;
10911 				vport->roce.client = client;
10912 			}
10913 
10914 			ret = hclge_init_roce_client_instance(ae_dev, vport);
10915 			if (ret)
10916 				goto clear_roce;
10917 
10918 			break;
10919 		default:
10920 			return -EINVAL;
10921 		}
10922 	}
10923 
10924 	return 0;
10925 
10926 clear_nic:
10927 	hdev->nic_client = NULL;
10928 	vport->nic.client = NULL;
10929 	return ret;
10930 clear_roce:
10931 	hdev->roce_client = NULL;
10932 	vport->roce.client = NULL;
10933 	return ret;
10934 }
10935 
10936 static void hclge_uninit_client_instance(struct hnae3_client *client,
10937 					 struct hnae3_ae_dev *ae_dev)
10938 {
10939 	struct hclge_dev *hdev = ae_dev->priv;
10940 	struct hclge_vport *vport;
10941 	int i;
10942 
10943 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10944 		vport = &hdev->vport[i];
10945 		if (hdev->roce_client) {
10946 			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10947 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10948 				msleep(HCLGE_WAIT_RESET_DONE);
10949 
10950 			hdev->roce_client->ops->uninit_instance(&vport->roce,
10951 								0);
10952 			hdev->roce_client = NULL;
10953 			vport->roce.client = NULL;
10954 		}
10955 		if (client->type == HNAE3_CLIENT_ROCE)
10956 			return;
10957 		if (hdev->nic_client && client->ops->uninit_instance) {
10958 			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10959 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10960 				msleep(HCLGE_WAIT_RESET_DONE);
10961 
10962 			client->ops->uninit_instance(&vport->nic, 0);
10963 			hdev->nic_client = NULL;
10964 			vport->nic.client = NULL;
10965 		}
10966 	}
10967 }
10968 
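/* Map the optional device-memory BAR (BAR 4) with write-combining. Devices
 * without this BAR skip the mapping, leaving hw->mem_base NULL, and
 * hclge_pci_uninit() only unmaps it when it was actually set up.
 */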
10969 static int hclge_dev_mem_map(struct hclge_dev *hdev)
10970 {
10971 #define HCLGE_MEM_BAR		4
10972 
10973 	struct pci_dev *pdev = hdev->pdev;
10974 	struct hclge_hw *hw = &hdev->hw;
10975 
10976 	/* if the device does not have device memory, return directly */
10977 	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
10978 		return 0;
10979 
10980 	hw->mem_base = devm_ioremap_wc(&pdev->dev,
10981 				       pci_resource_start(pdev, HCLGE_MEM_BAR),
10982 				       pci_resource_len(pdev, HCLGE_MEM_BAR));
10983 	if (!hw->mem_base) {
10984 		dev_err(&pdev->dev, "failed to map device memory\n");
10985 		return -EFAULT;
10986 	}
10987 
10988 	return 0;
10989 }
10990 
10991 static int hclge_pci_init(struct hclge_dev *hdev)
10992 {
10993 	struct pci_dev *pdev = hdev->pdev;
10994 	struct hclge_hw *hw;
10995 	int ret;
10996 
10997 	ret = pci_enable_device(pdev);
10998 	if (ret) {
10999 		dev_err(&pdev->dev, "failed to enable PCI device\n");
11000 		return ret;
11001 	}
11002 
11003 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11004 	if (ret) {
11005 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11006 		if (ret) {
11007 			dev_err(&pdev->dev,
11008 				"can't set consistent PCI DMA");
11009 			goto err_disable_device;
11010 		}
11011 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11012 	}
11013 
11014 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11015 	if (ret) {
11016 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11017 		goto err_disable_device;
11018 	}
11019 
11020 	pci_set_master(pdev);
11021 	hw = &hdev->hw;
11022 	hw->io_base = pcim_iomap(pdev, 2, 0);
11023 	if (!hw->io_base) {
11024 		dev_err(&pdev->dev, "Can't map configuration register space\n");
11025 		ret = -ENOMEM;
11026 		goto err_clr_master;
11027 	}
11028 
11029 	ret = hclge_dev_mem_map(hdev);
11030 	if (ret)
11031 		goto err_unmap_io_base;
11032 
11033 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11034 
11035 	return 0;
11036 
11037 err_unmap_io_base:
11038 	pcim_iounmap(pdev, hdev->hw.io_base);
11039 err_clr_master:
11040 	pci_clear_master(pdev);
11041 	pci_release_regions(pdev);
11042 err_disable_device:
11043 	pci_disable_device(pdev);
11044 
11045 	return ret;
11046 }
11047 
11048 static void hclge_pci_uninit(struct hclge_dev *hdev)
11049 {
11050 	struct pci_dev *pdev = hdev->pdev;
11051 
11052 	if (hdev->hw.mem_base)
11053 		devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11054 
11055 	pcim_iounmap(pdev, hdev->hw.io_base);
11056 	pci_free_irq_vectors(pdev);
11057 	pci_clear_master(pdev);
11058 	pci_release_mem_regions(pdev);
11059 	pci_disable_device(pdev);
11060 }
11061 
11062 static void hclge_state_init(struct hclge_dev *hdev)
11063 {
11064 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11065 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11066 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11067 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11068 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11069 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11070 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11071 }
11072 
11073 static void hclge_state_uninit(struct hclge_dev *hdev)
11074 {
11075 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11076 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11077 
11078 	if (hdev->reset_timer.function)
11079 		del_timer_sync(&hdev->reset_timer);
11080 	if (hdev->service_task.work.func)
11081 		cancel_delayed_work_sync(&hdev->service_task);
11082 }
11083 
11084 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
11085 {
11086 #define HCLGE_FLR_RETRY_WAIT_MS	500
11087 #define HCLGE_FLR_RETRY_CNT	5
11088 
11089 	struct hclge_dev *hdev = ae_dev->priv;
11090 	int retry_cnt = 0;
11091 	int ret;
11092 
11093 retry:
11094 	down(&hdev->reset_sem);
11095 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11096 	hdev->reset_type = HNAE3_FLR_RESET;
11097 	ret = hclge_reset_prepare(hdev);
11098 	if (ret || hdev->reset_pending) {
11099 		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
11100 			ret);
11101 		if (hdev->reset_pending ||
11102 		    retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
11103 			dev_err(&hdev->pdev->dev,
11104 				"reset_pending:0x%lx, retry_cnt:%d\n",
11105 				hdev->reset_pending, retry_cnt);
11106 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11107 			up(&hdev->reset_sem);
11108 			msleep(HCLGE_FLR_RETRY_WAIT_MS);
11109 			goto retry;
11110 		}
11111 	}
11112 
11113 	/* disable misc vector before FLR done */
11114 	hclge_enable_vector(&hdev->misc_vector, false);
11115 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11116 	hdev->rst_stats.flr_rst_cnt++;
11117 }
11118 
11119 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
11120 {
11121 	struct hclge_dev *hdev = ae_dev->priv;
11122 	int ret;
11123 
11124 	hclge_enable_vector(&hdev->misc_vector, true);
11125 
11126 	ret = hclge_reset_rebuild(hdev);
11127 	if (ret)
11128 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11129 
11130 	hdev->reset_type = HNAE3_NONE_RESET;
11131 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11132 	up(&hdev->reset_sem);
11133 }
11134 
11135 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11136 {
11137 	u16 i;
11138 
11139 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11140 		struct hclge_vport *vport = &hdev->vport[i];
11141 		int ret;
11142 
11143 		/* Send cmd to clear VF's FUNC_RST_ING */
11144 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11145 		if (ret)
11146 			dev_warn(&hdev->pdev->dev,
11147 				 "clear vf(%u) rst failed %d!\n",
11148 				 vport->vport_id, ret);
11149 	}
11150 }
11151 
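/* Bring-up path for one PF (ae_dev). Roughly: PCI/DMA setup, command queue
 * and firmware handshake, capability and spec query, MSI-X plus misc IRQ,
 * TQP and vport allocation, optional MDIO bus, then MAC/TSO/GRO/VLAN/TM/RSS/
 * flow-director configuration, and finally timers, the service task and the
 * misc vector. Each error label unwinds exactly the steps completed before
 * it, in reverse order.
 */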
11152 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11153 {
11154 	struct pci_dev *pdev = ae_dev->pdev;
11155 	struct hclge_dev *hdev;
11156 	int ret;
11157 
11158 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11159 	if (!hdev)
11160 		return -ENOMEM;
11161 
11162 	hdev->pdev = pdev;
11163 	hdev->ae_dev = ae_dev;
11164 	hdev->reset_type = HNAE3_NONE_RESET;
11165 	hdev->reset_level = HNAE3_FUNC_RESET;
11166 	ae_dev->priv = hdev;
11167 
11168 	/* HW supports 2 layers of VLAN tags */
11169 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11170 
11171 	mutex_init(&hdev->vport_lock);
11172 	spin_lock_init(&hdev->fd_rule_lock);
11173 	sema_init(&hdev->reset_sem, 1);
11174 
11175 	ret = hclge_pci_init(hdev);
11176 	if (ret)
11177 		goto out;
11178 
11179 	/* Firmware command queue initialize */
11180 	ret = hclge_cmd_queue_init(hdev);
11181 	if (ret)
11182 		goto err_pci_uninit;
11183 
11184 	/* Firmware command initialize */
11185 	ret = hclge_cmd_init(hdev);
11186 	if (ret)
11187 		goto err_cmd_uninit;
11188 
11189 	ret = hclge_get_cap(hdev);
11190 	if (ret)
11191 		goto err_cmd_uninit;
11192 
11193 	ret = hclge_query_dev_specs(hdev);
11194 	if (ret) {
11195 		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11196 			ret);
11197 		goto err_cmd_uninit;
11198 	}
11199 
11200 	ret = hclge_configure(hdev);
11201 	if (ret) {
11202 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11203 		goto err_cmd_uninit;
11204 	}
11205 
11206 	ret = hclge_init_msi(hdev);
11207 	if (ret) {
11208 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11209 		goto err_cmd_uninit;
11210 	}
11211 
11212 	ret = hclge_misc_irq_init(hdev);
11213 	if (ret)
11214 		goto err_msi_uninit;
11215 
11216 	ret = hclge_alloc_tqps(hdev);
11217 	if (ret) {
11218 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11219 		goto err_msi_irq_uninit;
11220 	}
11221 
11222 	ret = hclge_alloc_vport(hdev);
11223 	if (ret)
11224 		goto err_msi_irq_uninit;
11225 
11226 	ret = hclge_map_tqp(hdev);
11227 	if (ret)
11228 		goto err_msi_irq_uninit;
11229 
11230 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11231 	    !hnae3_dev_phy_imp_supported(hdev)) {
11232 		ret = hclge_mac_mdio_config(hdev);
11233 		if (ret)
11234 			goto err_msi_irq_uninit;
11235 	}
11236 
11237 	ret = hclge_init_umv_space(hdev);
11238 	if (ret)
11239 		goto err_mdiobus_unreg;
11240 
11241 	ret = hclge_mac_init(hdev);
11242 	if (ret) {
11243 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11244 		goto err_mdiobus_unreg;
11245 	}
11246 
11247 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11248 	if (ret) {
11249 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11250 		goto err_mdiobus_unreg;
11251 	}
11252 
11253 	ret = hclge_config_gro(hdev, true);
11254 	if (ret)
11255 		goto err_mdiobus_unreg;
11256 
11257 	ret = hclge_init_vlan_config(hdev);
11258 	if (ret) {
11259 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11260 		goto err_mdiobus_unreg;
11261 	}
11262 
11263 	ret = hclge_tm_schd_init(hdev);
11264 	if (ret) {
11265 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11266 		goto err_mdiobus_unreg;
11267 	}
11268 
11269 	ret = hclge_rss_init_cfg(hdev);
11270 	if (ret) {
11271 		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11272 		goto err_mdiobus_unreg;
11273 	}
11274 
11275 	ret = hclge_rss_init_hw(hdev);
11276 	if (ret) {
11277 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11278 		goto err_mdiobus_unreg;
11279 	}
11280 
11281 	ret = init_mgr_tbl(hdev);
11282 	if (ret) {
11283 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11284 		goto err_mdiobus_unreg;
11285 	}
11286 
11287 	ret = hclge_init_fd_config(hdev);
11288 	if (ret) {
11289 		dev_err(&pdev->dev,
11290 			"fd table init fail, ret=%d\n", ret);
11291 		goto err_mdiobus_unreg;
11292 	}
11293 
11294 	INIT_KFIFO(hdev->mac_tnl_log);
11295 
11296 	hclge_dcb_ops_set(hdev);
11297 
11298 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11299 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11300 
11301 	/* Set up affinity after the service timer setup because add_timer_on
11302 	 * is called from the affinity notify callback.
11303 	 */
11304 	hclge_misc_affinity_setup(hdev);
11305 
11306 	hclge_clear_all_event_cause(hdev);
11307 	hclge_clear_resetting_state(hdev);
11308 
11309 	/* Log and clear the hw errors that have already occurred */
11310 	hclge_handle_all_hns_hw_errors(ae_dev);
11311 
11312 	/* Request a delayed reset for error recovery, since an immediate global
11313 	 * reset on one PF would affect the pending initialization of other PFs.
11314 	 */
11315 	if (ae_dev->hw_err_reset_req) {
11316 		enum hnae3_reset_type reset_level;
11317 
11318 		reset_level = hclge_get_reset_level(ae_dev,
11319 						    &ae_dev->hw_err_reset_req);
11320 		hclge_set_def_reset_request(ae_dev, reset_level);
11321 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11322 	}
11323 
11324 	/* Enable MISC vector(vector0) */
11325 	hclge_enable_vector(&hdev->misc_vector, true);
11326 
11327 	hclge_state_init(hdev);
11328 	hdev->last_reset_time = jiffies;
11329 
11330 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11331 		 HCLGE_DRIVER_NAME);
11332 
11333 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11334 
11335 	return 0;
11336 
11337 err_mdiobus_unreg:
11338 	if (hdev->hw.mac.phydev)
11339 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
11340 err_msi_irq_uninit:
11341 	hclge_misc_irq_uninit(hdev);
11342 err_msi_uninit:
11343 	pci_free_irq_vectors(pdev);
11344 err_cmd_uninit:
11345 	hclge_cmd_uninit(hdev);
11346 err_pci_uninit:
11347 	pcim_iounmap(pdev, hdev->hw.io_base);
11348 	pci_clear_master(pdev);
11349 	pci_release_regions(pdev);
11350 	pci_disable_device(pdev);
11351 out:
11352 	mutex_destroy(&hdev->vport_lock);
11353 	return ret;
11354 }
11355 
11356 static void hclge_stats_clear(struct hclge_dev *hdev)
11357 {
11358 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11359 }
11360 
11361 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11362 {
11363 	return hclge_config_switch_param(hdev, vf, enable,
11364 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11365 }
11366 
11367 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11368 {
11369 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11370 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
11371 					  enable, vf);
11372 }
11373 
11374 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11375 {
11376 	int ret;
11377 
11378 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11379 	if (ret) {
11380 		dev_err(&hdev->pdev->dev,
11381 			"Set vf %d mac spoof check %s failed, ret=%d\n",
11382 			vf, enable ? "on" : "off", ret);
11383 		return ret;
11384 	}
11385 
11386 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11387 	if (ret)
11388 		dev_err(&hdev->pdev->dev,
11389 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
11390 			vf, enable ? "on" : "off", ret);
11391 
11392 	return ret;
11393 }
11394 
11395 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11396 				 bool enable)
11397 {
11398 	struct hclge_vport *vport = hclge_get_vport(handle);
11399 	struct hclge_dev *hdev = vport->back;
11400 	u32 new_spoofchk = enable ? 1 : 0;
11401 	int ret;
11402 
11403 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11404 		return -EOPNOTSUPP;
11405 
11406 	vport = hclge_get_vf_vport(hdev, vf);
11407 	if (!vport)
11408 		return -EINVAL;
11409 
11410 	if (vport->vf_info.spoofchk == new_spoofchk)
11411 		return 0;
11412 
11413 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11414 		dev_warn(&hdev->pdev->dev,
11415 			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11416 			 vf);
11417 	else if (enable && hclge_is_umv_space_full(vport, true))
11418 		dev_warn(&hdev->pdev->dev,
11419 			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11420 			 vf);
11421 
11422 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11423 	if (ret)
11424 		return ret;
11425 
11426 	vport->vf_info.spoofchk = new_spoofchk;
11427 	return 0;
11428 }
11429 
11430 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11431 {
11432 	struct hclge_vport *vport = hdev->vport;
11433 	int ret;
11434 	int i;
11435 
11436 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11437 		return 0;
11438 
11439 	/* resume the vf spoof check state after reset */
11440 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11441 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11442 					       vport->vf_info.spoofchk);
11443 		if (ret)
11444 			return ret;
11445 
11446 		vport++;
11447 	}
11448 
11449 	return 0;
11450 }
11451 
11452 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11453 {
11454 	struct hclge_vport *vport = hclge_get_vport(handle);
11455 	struct hclge_dev *hdev = vport->back;
11456 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
11457 	u32 new_trusted = enable ? 1 : 0;
11458 	bool en_bc_pmc;
11459 	int ret;
11460 
11461 	vport = hclge_get_vf_vport(hdev, vf);
11462 	if (!vport)
11463 		return -EINVAL;
11464 
11465 	if (vport->vf_info.trusted == new_trusted)
11466 		return 0;
11467 
11468 	/* Disable promisc mode for VF if it is not trusted any more. */
11469 	if (!enable && vport->vf_info.promisc_enable) {
11470 		en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
11471 		ret = hclge_set_vport_promisc_mode(vport, false, false,
11472 						   en_bc_pmc);
11473 		if (ret)
11474 			return ret;
11475 		vport->vf_info.promisc_enable = 0;
11476 		hclge_inform_vf_promisc_info(vport);
11477 	}
11478 
11479 	vport->vf_info.trusted = new_trusted;
11480 
11481 	return 0;
11482 }
11483 
11484 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11485 {
11486 	int ret;
11487 	int vf;
11488 
11489 	/* reset vf rate to default value */
11490 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11491 		struct hclge_vport *vport = &hdev->vport[vf];
11492 
11493 		vport->vf_info.max_tx_rate = 0;
11494 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11495 		if (ret)
11496 			dev_err(&hdev->pdev->dev,
11497 				"vf%d failed to reset to default, ret=%d\n",
11498 				vf - HCLGE_VF_VPORT_START_NUM, ret);
11499 	}
11500 }
11501 
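/* Validate an ndo_set_vf_rate request: a minimum TX rate is not supported
 * (it must be 0) and the maximum must lie within [0, mac.max_speed], where 0
 * means "no limit". For example, "ip link set <dev> vf 1 max_tx_rate 1000"
 * (illustrative) arrives here with min_tx_rate = 0 and max_tx_rate = 1000.
 */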
11502 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11503 				     int min_tx_rate, int max_tx_rate)
11504 {
11505 	if (min_tx_rate != 0 ||
11506 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11507 		dev_err(&hdev->pdev->dev,
11508 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11509 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11510 		return -EINVAL;
11511 	}
11512 
11513 	return 0;
11514 }
11515 
11516 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11517 			     int min_tx_rate, int max_tx_rate, bool force)
11518 {
11519 	struct hclge_vport *vport = hclge_get_vport(handle);
11520 	struct hclge_dev *hdev = vport->back;
11521 	int ret;
11522 
11523 	ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11524 	if (ret)
11525 		return ret;
11526 
11527 	vport = hclge_get_vf_vport(hdev, vf);
11528 	if (!vport)
11529 		return -EINVAL;
11530 
11531 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11532 		return 0;
11533 
11534 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11535 	if (ret)
11536 		return ret;
11537 
11538 	vport->vf_info.max_tx_rate = max_tx_rate;
11539 
11540 	return 0;
11541 }
11542 
11543 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11544 {
11545 	struct hnae3_handle *handle = &hdev->vport->nic;
11546 	struct hclge_vport *vport;
11547 	int ret;
11548 	int vf;
11549 
11550 	/* resume the vf max_tx_rate after reset */
11551 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11552 		vport = hclge_get_vf_vport(hdev, vf);
11553 		if (!vport)
11554 			return -EINVAL;
11555 
11556 		/* zero means max rate; after a reset the firmware has already
11557 		 * set it to max rate, so just continue.
11558 		 */
11559 		if (!vport->vf_info.max_tx_rate)
11560 			continue;
11561 
11562 		ret = hclge_set_vf_rate(handle, vf, 0,
11563 					vport->vf_info.max_tx_rate, true);
11564 		if (ret) {
11565 			dev_err(&hdev->pdev->dev,
11566 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
11567 				vf, vport->vf_info.max_tx_rate, ret);
11568 			return ret;
11569 		}
11570 	}
11571 
11572 	return 0;
11573 }
11574 
11575 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11576 {
11577 	struct hclge_vport *vport = hdev->vport;
11578 	int i;
11579 
11580 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11581 		hclge_vport_stop(vport);
11582 		vport++;
11583 	}
11584 }
11585 
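/* Re-initialize the hardware after a reset. Unlike hclge_init_ae_dev() this
 * keeps the software state and only replays the hardware configuration; for
 * IMP or global resets, which wipe the whole chip, the shadow VLAN tables are
 * cleared, every vport is marked for reconfiguration and the UMV space is
 * reset first.
 */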
11586 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11587 {
11588 	struct hclge_dev *hdev = ae_dev->priv;
11589 	struct pci_dev *pdev = ae_dev->pdev;
11590 	int ret;
11591 
11592 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11593 
11594 	hclge_stats_clear(hdev);
11595 	/* NOTE: a pf reset does not need to clear or restore the pf and vf
11596 	 * table entries, so the tables in memory should not be cleaned here.
11597 	 */
11598 	if (hdev->reset_type == HNAE3_IMP_RESET ||
11599 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
11600 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11601 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11602 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11603 		hclge_reset_umv_space(hdev);
11604 	}
11605 
11606 	ret = hclge_cmd_init(hdev);
11607 	if (ret) {
11608 		dev_err(&pdev->dev, "Cmd queue init failed\n");
11609 		return ret;
11610 	}
11611 
11612 	ret = hclge_map_tqp(hdev);
11613 	if (ret) {
11614 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11615 		return ret;
11616 	}
11617 
11618 	ret = hclge_mac_init(hdev);
11619 	if (ret) {
11620 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11621 		return ret;
11622 	}
11623 
11624 	ret = hclge_tp_port_init(hdev);
11625 	if (ret) {
11626 		dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11627 			ret);
11628 		return ret;
11629 	}
11630 
11631 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11632 	if (ret) {
11633 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11634 		return ret;
11635 	}
11636 
11637 	ret = hclge_config_gro(hdev, true);
11638 	if (ret)
11639 		return ret;
11640 
11641 	ret = hclge_init_vlan_config(hdev);
11642 	if (ret) {
11643 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11644 		return ret;
11645 	}
11646 
11647 	ret = hclge_tm_init_hw(hdev, true);
11648 	if (ret) {
11649 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11650 		return ret;
11651 	}
11652 
11653 	ret = hclge_rss_init_hw(hdev);
11654 	if (ret) {
11655 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11656 		return ret;
11657 	}
11658 
11659 	ret = init_mgr_tbl(hdev);
11660 	if (ret) {
11661 		dev_err(&pdev->dev,
11662 			"failed to reinit manager table, ret = %d\n", ret);
11663 		return ret;
11664 	}
11665 
11666 	ret = hclge_init_fd_config(hdev);
11667 	if (ret) {
11668 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11669 		return ret;
11670 	}
11671 
11672 	/* Log and clear the hw errors that have already occurred */
11673 	hclge_handle_all_hns_hw_errors(ae_dev);
11674 
11675 	/* Re-enable the hw error interrupts because
11676 	 * the interrupts get disabled on global reset.
11677 	 */
11678 	ret = hclge_config_nic_hw_error(hdev, true);
11679 	if (ret) {
11680 		dev_err(&pdev->dev,
11681 			"fail(%d) to re-enable NIC hw error interrupts\n",
11682 			ret);
11683 		return ret;
11684 	}
11685 
11686 	if (hdev->roce_client) {
11687 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
11688 		if (ret) {
11689 			dev_err(&pdev->dev,
11690 				"fail(%d) to re-enable roce ras interrupts\n",
11691 				ret);
11692 			return ret;
11693 		}
11694 	}
11695 
11696 	hclge_reset_vport_state(hdev);
11697 	ret = hclge_reset_vport_spoofchk(hdev);
11698 	if (ret)
11699 		return ret;
11700 
11701 	ret = hclge_resume_vf_rate(hdev);
11702 	if (ret)
11703 		return ret;
11704 
11705 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11706 		 HCLGE_DRIVER_NAME);
11707 
11708 	return 0;
11709 }
11710 
11711 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11712 {
11713 	struct hclge_dev *hdev = ae_dev->priv;
11714 	struct hclge_mac *mac = &hdev->hw.mac;
11715 
11716 	hclge_reset_vf_rate(hdev);
11717 	hclge_clear_vf_vlan(hdev);
11718 	hclge_misc_affinity_teardown(hdev);
11719 	hclge_state_uninit(hdev);
11720 	hclge_uninit_mac_table(hdev);
11721 	hclge_del_all_fd_entries(hdev);
11722 
11723 	if (mac->phydev)
11724 		mdiobus_unregister(mac->mdio_bus);
11725 
11726 	/* Disable MISC vector(vector0) */
11727 	hclge_enable_vector(&hdev->misc_vector, false);
11728 	synchronize_irq(hdev->misc_vector.vector_irq);
11729 
11730 	/* Disable all hw interrupts */
11731 	hclge_config_mac_tnl_int(hdev, false);
11732 	hclge_config_nic_hw_error(hdev, false);
11733 	hclge_config_rocee_ras_interrupt(hdev, false);
11734 
11735 	hclge_cmd_uninit(hdev);
11736 	hclge_misc_irq_uninit(hdev);
11737 	hclge_pci_uninit(hdev);
11738 	mutex_destroy(&hdev->vport_lock);
11739 	hclge_uninit_vport_vlan_table(hdev);
11740 	ae_dev->priv = NULL;
11741 }
11742 
11743 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11744 {
11745 	struct hclge_vport *vport = hclge_get_vport(handle);
11746 	struct hclge_dev *hdev = vport->back;
11747 
11748 	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11749 }
11750 
11751 static void hclge_get_channels(struct hnae3_handle *handle,
11752 			       struct ethtool_channels *ch)
11753 {
11754 	ch->max_combined = hclge_get_max_channels(handle);
11755 	ch->other_count = 1;
11756 	ch->max_other = 1;
11757 	ch->combined_count = handle->kinfo.rss_size;
11758 }
11759 
11760 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11761 					u16 *alloc_tqps, u16 *max_rss_size)
11762 {
11763 	struct hclge_vport *vport = hclge_get_vport(handle);
11764 	struct hclge_dev *hdev = vport->back;
11765 
11766 	*alloc_tqps = vport->alloc_tqps;
11767 	*max_rss_size = hdev->pf_rss_size_max;
11768 }
11769 
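/* ethtool -L handler. The requested combined channel count becomes
 * kinfo->req_rss_size, the TM code recomputes the per-vport mapping, the RSS
 * TC mode is rewritten with tc_size rounded up to a power of two, and, unless
 * the user has pinned an RSS indirection table (rxfh_configured), the table
 * is rebuilt as rss_indir[i] = i % rss_size; e.g. with rss_size = 8 the table
 * simply cycles through queues 0..7 (illustrative value).
 */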
11770 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11771 			      bool rxfh_configured)
11772 {
11773 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
11774 	struct hclge_vport *vport = hclge_get_vport(handle);
11775 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11776 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11777 	struct hclge_dev *hdev = vport->back;
11778 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11779 	u16 cur_rss_size = kinfo->rss_size;
11780 	u16 cur_tqps = kinfo->num_tqps;
11781 	u16 tc_valid[HCLGE_MAX_TC_NUM];
11782 	u16 roundup_size;
11783 	u32 *rss_indir;
11784 	unsigned int i;
11785 	int ret;
11786 
11787 	kinfo->req_rss_size = new_tqps_num;
11788 
11789 	ret = hclge_tm_vport_map_update(hdev);
11790 	if (ret) {
11791 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
11792 		return ret;
11793 	}
11794 
11795 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
11796 	roundup_size = ilog2(roundup_size);
11797 	/* Set the RSS TC mode according to the new RSS size */
11798 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
11799 		tc_valid[i] = 0;
11800 
11801 		if (!(hdev->hw_tc_map & BIT(i)))
11802 			continue;
11803 
11804 		tc_valid[i] = 1;
11805 		tc_size[i] = roundup_size;
11806 		tc_offset[i] = kinfo->rss_size * i;
11807 	}
11808 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
11809 	if (ret)
11810 		return ret;
11811 
11812 	/* RSS indirection table has been configured by the user */
11813 	if (rxfh_configured)
11814 		goto out;
11815 
11816 	/* Reinitialize the RSS indirection table according to the new RSS size */
11817 	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
11818 			    GFP_KERNEL);
11819 	if (!rss_indir)
11820 		return -ENOMEM;
11821 
11822 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
11823 		rss_indir[i] = i % kinfo->rss_size;
11824 
11825 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
11826 	if (ret)
11827 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
11828 			ret);
11829 
11830 	kfree(rss_indir);
11831 
11832 out:
11833 	if (!ret)
11834 		dev_info(&hdev->pdev->dev,
11835 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
11836 			 cur_rss_size, kinfo->rss_size,
11837 			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
11838 
11839 	return ret;
11840 }
11841 
11842 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
11843 			      u32 *regs_num_64_bit)
11844 {
11845 	struct hclge_desc desc;
11846 	u32 total_num;
11847 	int ret;
11848 
11849 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
11850 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11851 	if (ret) {
11852 		dev_err(&hdev->pdev->dev,
11853 			"Query register number cmd failed, ret = %d.\n", ret);
11854 		return ret;
11855 	}
11856 
11857 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
11858 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
11859 
11860 	total_num = *regs_num_32_bit + *regs_num_64_bit;
11861 	if (!total_num)
11862 		return -EINVAL;
11863 
11864 	return 0;
11865 }
11866 
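/* Read the 32-bit register dump over the command queue. Each descriptor
 * carries HCLGE_32_BIT_REG_RTN_DATANUM 32-bit words, except that the first
 * one loses HCLGE_32_BIT_DESC_NODATA_LEN words to the command header, hence
 * cmd_num = DIV_ROUND_UP(regs_num + nodata_num, DATANUM). As a worked
 * example (not taken from hardware), regs_num = 100 gives
 * DIV_ROUND_UP(102, 8) = 13 descriptors. The 64-bit variant below works the
 * same way with 4 words per descriptor and 1 reserved word.
 */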
11867 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11868 				 void *data)
11869 {
11870 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
11871 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
11872 
11873 	struct hclge_desc *desc;
11874 	u32 *reg_val = data;
11875 	__le32 *desc_data;
11876 	int nodata_num;
11877 	int cmd_num;
11878 	int i, k, n;
11879 	int ret;
11880 
11881 	if (regs_num == 0)
11882 		return 0;
11883 
11884 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
11885 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
11886 			       HCLGE_32_BIT_REG_RTN_DATANUM);
11887 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11888 	if (!desc)
11889 		return -ENOMEM;
11890 
11891 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
11892 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11893 	if (ret) {
11894 		dev_err(&hdev->pdev->dev,
11895 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
11896 		kfree(desc);
11897 		return ret;
11898 	}
11899 
11900 	for (i = 0; i < cmd_num; i++) {
11901 		if (i == 0) {
11902 			desc_data = (__le32 *)(&desc[i].data[0]);
11903 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
11904 		} else {
11905 			desc_data = (__le32 *)(&desc[i]);
11906 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
11907 		}
11908 		for (k = 0; k < n; k++) {
11909 			*reg_val++ = le32_to_cpu(*desc_data++);
11910 
11911 			regs_num--;
11912 			if (!regs_num)
11913 				break;
11914 		}
11915 	}
11916 
11917 	kfree(desc);
11918 	return 0;
11919 }
11920 
11921 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11922 				 void *data)
11923 {
11924 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
11925 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
11926 
11927 	struct hclge_desc *desc;
11928 	u64 *reg_val = data;
11929 	__le64 *desc_data;
11930 	int nodata_len;
11931 	int cmd_num;
11932 	int i, k, n;
11933 	int ret;
11934 
11935 	if (regs_num == 0)
11936 		return 0;
11937 
11938 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
11939 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
11940 			       HCLGE_64_BIT_REG_RTN_DATANUM);
11941 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11942 	if (!desc)
11943 		return -ENOMEM;
11944 
11945 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
11946 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11947 	if (ret) {
11948 		dev_err(&hdev->pdev->dev,
11949 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
11950 		kfree(desc);
11951 		return ret;
11952 	}
11953 
11954 	for (i = 0; i < cmd_num; i++) {
11955 		if (i == 0) {
11956 			desc_data = (__le64 *)(&desc[i].data[0]);
11957 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
11958 		} else {
11959 			desc_data = (__le64 *)(&desc[i]);
11960 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
11961 		}
11962 		for (k = 0; k < n; k++) {
11963 			*reg_val++ = le64_to_cpu(*desc_data++);
11964 
11965 			regs_num--;
11966 			if (!regs_num)
11967 				break;
11968 		}
11969 	}
11970 
11971 	kfree(desc);
11972 	return 0;
11973 }
11974 
11975 #define MAX_SEPARATE_NUM	4
11976 #define SEPARATOR_VALUE		0xFDFCFBFA
11977 #define REG_NUM_PER_LINE	4
11978 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
11979 #define REG_SEPARATOR_LINE	1
11980 #define REG_NUM_REMAIN_MASK	3
11981 #define BD_LIST_MAX_NUM		30
11982 
11983 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
11984 {
11985 	int i;
11986 
11987 	/* initialize all command BDs except the last one */
11988 	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
11989 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
11990 					   true);
11991 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11992 	}
11993 
11994 	/* initialize the last command BD */
11995 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
11996 
11997 	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
11998 }
11999 
12000 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12001 				    int *bd_num_list,
12002 				    u32 type_num)
12003 {
12004 	u32 entries_per_desc, desc_index, index, offset, i;
12005 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12006 	int ret;
12007 
12008 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
12009 	if (ret) {
12010 		dev_err(&hdev->pdev->dev,
12011 			"Get dfx bd num fail, status is %d.\n", ret);
12012 		return ret;
12013 	}
12014 
12015 	entries_per_desc = ARRAY_SIZE(desc[0].data);
12016 	for (i = 0; i < type_num; i++) {
12017 		offset = hclge_dfx_bd_offset_list[i];
12018 		index = offset % entries_per_desc;
12019 		desc_index = offset / entries_per_desc;
12020 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12021 	}
12022 
12023 	return ret;
12024 }
12025 
12026 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12027 				  struct hclge_desc *desc_src, int bd_num,
12028 				  enum hclge_opcode_type cmd)
12029 {
12030 	struct hclge_desc *desc = desc_src;
12031 	int i, ret;
12032 
12033 	hclge_cmd_setup_basic_desc(desc, cmd, true);
12034 	for (i = 0; i < bd_num - 1; i++) {
12035 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12036 		desc++;
12037 		hclge_cmd_setup_basic_desc(desc, cmd, true);
12038 	}
12039 
12040 	desc = desc_src;
12041 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12042 	if (ret)
12043 		dev_err(&hdev->pdev->dev,
12044 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12045 			cmd, ret);
12046 
12047 	return ret;
12048 }
12049 
12050 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12051 				    void *data)
12052 {
12053 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12054 	struct hclge_desc *desc = desc_src;
12055 	u32 *reg = data;
12056 
12057 	entries_per_desc = ARRAY_SIZE(desc->data);
12058 	reg_num = entries_per_desc * bd_num;
12059 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12060 	for (i = 0; i < reg_num; i++) {
12061 		index = i % entries_per_desc;
12062 		desc_index = i / entries_per_desc;
12063 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
12064 	}
12065 	for (i = 0; i < separator_num; i++)
12066 		*reg++ = SEPARATOR_VALUE;
12067 
12068 	return reg_num + separator_num;
12069 }
12070 
12071 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12072 {
12073 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12074 	int data_len_per_desc, bd_num, i;
12075 	int bd_num_list[BD_LIST_MAX_NUM];
12076 	u32 data_len;
12077 	int ret;
12078 
12079 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12080 	if (ret) {
12081 		dev_err(&hdev->pdev->dev,
12082 			"Get dfx reg bd num fail, status is %d.\n", ret);
12083 		return ret;
12084 	}
12085 
12086 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
12087 	*len = 0;
12088 	for (i = 0; i < dfx_reg_type_num; i++) {
12089 		bd_num = bd_num_list[i];
12090 		data_len = data_len_per_desc * bd_num;
12091 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12092 	}
12093 
12094 	return ret;
12095 }
12096 
12097 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12098 {
12099 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12100 	int bd_num, bd_num_max, buf_len, i;
12101 	int bd_num_list[BD_LIST_MAX_NUM];
12102 	struct hclge_desc *desc_src;
12103 	u32 *reg = data;
12104 	int ret;
12105 
12106 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12107 	if (ret) {
12108 		dev_err(&hdev->pdev->dev,
12109 			"Get dfx reg bd num fail, status is %d.\n", ret);
12110 		return ret;
12111 	}
12112 
12113 	bd_num_max = bd_num_list[0];
12114 	for (i = 1; i < dfx_reg_type_num; i++)
12115 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12116 
12117 	buf_len = sizeof(*desc_src) * bd_num_max;
12118 	desc_src = kzalloc(buf_len, GFP_KERNEL);
12119 	if (!desc_src)
12120 		return -ENOMEM;
12121 
12122 	for (i = 0; i < dfx_reg_type_num; i++) {
12123 		bd_num = bd_num_list[i];
12124 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12125 					     hclge_dfx_reg_opcode_list[i]);
12126 		if (ret) {
12127 			dev_err(&hdev->pdev->dev,
12128 				"Get dfx reg fail, status is %d.\n", ret);
12129 			break;
12130 		}
12131 
12132 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12133 	}
12134 
12135 	kfree(desc_src);
12136 	return ret;
12137 }
12138 
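/* Copy the directly readable PF register blocks into the ethtool regs dump:
 * command-queue registers, common registers, per-ring registers (one block
 * per TQP, stride HCLGE_RING_REG_OFFSET) and per-vector interrupt registers
 * (one block for each of the hdev->num_msi_used - 1 TQP vectors, stride
 * HCLGE_RING_INT_REG_OFFSET). Each block is padded with SEPARATOR_VALUE up to
 * a multiple of REG_NUM_PER_LINE words so the dump stays line aligned; the
 * return value is the number of u32 words written.
 */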
12139 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12140 			      struct hnae3_knic_private_info *kinfo)
12141 {
12142 #define HCLGE_RING_REG_OFFSET		0x200
12143 #define HCLGE_RING_INT_REG_OFFSET	0x4
12144 
12145 	int i, j, reg_num, separator_num;
12146 	int data_num_sum;
12147 	u32 *reg = data;
12148 
12149 	/* fetch per-PF register values from the PF PCIe register space */
12150 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12151 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12152 	for (i = 0; i < reg_num; i++)
12153 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12154 	for (i = 0; i < separator_num; i++)
12155 		*reg++ = SEPARATOR_VALUE;
12156 	data_num_sum = reg_num + separator_num;
12157 
12158 	reg_num = ARRAY_SIZE(common_reg_addr_list);
12159 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12160 	for (i = 0; i < reg_num; i++)
12161 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12162 	for (i = 0; i < separator_num; i++)
12163 		*reg++ = SEPARATOR_VALUE;
12164 	data_num_sum += reg_num + separator_num;
12165 
12166 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
12167 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12168 	for (j = 0; j < kinfo->num_tqps; j++) {
12169 		for (i = 0; i < reg_num; i++)
12170 			*reg++ = hclge_read_dev(&hdev->hw,
12171 						ring_reg_addr_list[i] +
12172 						HCLGE_RING_REG_OFFSET * j);
12173 		for (i = 0; i < separator_num; i++)
12174 			*reg++ = SEPARATOR_VALUE;
12175 	}
12176 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12177 
12178 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12179 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12180 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
12181 		for (i = 0; i < reg_num; i++)
12182 			*reg++ = hclge_read_dev(&hdev->hw,
12183 						tqp_intr_reg_addr_list[i] +
12184 						HCLGE_RING_INT_REG_OFFSET * j);
12185 		for (i = 0; i < separator_num; i++)
12186 			*reg++ = SEPARATOR_VALUE;
12187 	}
12188 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12189 
12190 	return data_num_sum;
12191 }
12192 
12193 static int hclge_get_regs_len(struct hnae3_handle *handle)
12194 {
12195 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12196 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12197 	struct hclge_vport *vport = hclge_get_vport(handle);
12198 	struct hclge_dev *hdev = vport->back;
12199 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12200 	int regs_lines_32_bit, regs_lines_64_bit;
12201 	int ret;
12202 
12203 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12204 	if (ret) {
12205 		dev_err(&hdev->pdev->dev,
12206 			"Get register number failed, ret = %d.\n", ret);
12207 		return ret;
12208 	}
12209 
12210 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12211 	if (ret) {
12212 		dev_err(&hdev->pdev->dev,
12213 			"Get dfx reg len failed, ret = %d.\n", ret);
12214 		return ret;
12215 	}
12216 
12217 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12218 		REG_SEPARATOR_LINE;
12219 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12220 		REG_SEPARATOR_LINE;
12221 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12222 		REG_SEPARATOR_LINE;
12223 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12224 		REG_SEPARATOR_LINE;
12225 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12226 		REG_SEPARATOR_LINE;
12227 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12228 		REG_SEPARATOR_LINE;
12229 
12230 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12231 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12232 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12233 }
12234 
12235 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12236 			   void *data)
12237 {
12238 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12239 	struct hclge_vport *vport = hclge_get_vport(handle);
12240 	struct hclge_dev *hdev = vport->back;
12241 	u32 regs_num_32_bit, regs_num_64_bit;
12242 	int i, reg_num, separator_num, ret;
12243 	u32 *reg = data;
12244 
12245 	*version = hdev->fw_version;
12246 
12247 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12248 	if (ret) {
12249 		dev_err(&hdev->pdev->dev,
12250 			"Get register number failed, ret = %d.\n", ret);
12251 		return;
12252 	}
12253 
12254 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12255 
12256 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12257 	if (ret) {
12258 		dev_err(&hdev->pdev->dev,
12259 			"Get 32 bit register failed, ret = %d.\n", ret);
12260 		return;
12261 	}
12262 	reg_num = regs_num_32_bit;
12263 	reg += reg_num;
12264 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12265 	for (i = 0; i < separator_num; i++)
12266 		*reg++ = SEPARATOR_VALUE;
12267 
12268 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12269 	if (ret) {
12270 		dev_err(&hdev->pdev->dev,
12271 			"Get 64 bit register failed, ret = %d.\n", ret);
12272 		return;
12273 	}
12274 	reg_num = regs_num_64_bit * 2;
12275 	reg += reg_num;
12276 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12277 	for (i = 0; i < separator_num; i++)
12278 		*reg++ = SEPARATOR_VALUE;
12279 
12280 	ret = hclge_get_dfx_reg(hdev, reg);
12281 	if (ret)
12282 		dev_err(&hdev->pdev->dev,
12283 			"Get dfx register failed, ret = %d.\n", ret);
12284 }
12285 
12286 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12287 {
12288 	struct hclge_set_led_state_cmd *req;
12289 	struct hclge_desc desc;
12290 	int ret;
12291 
12292 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12293 
12294 	req = (struct hclge_set_led_state_cmd *)desc.data;
12295 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12296 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12297 
12298 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12299 	if (ret)
12300 		dev_err(&hdev->pdev->dev,
12301 			"Send set led state cmd error, ret =%d\n", ret);
12302 
12303 	return ret;
12304 }
12305 
12306 enum hclge_led_status {
12307 	HCLGE_LED_OFF,
12308 	HCLGE_LED_ON,
12309 	HCLGE_LED_NO_CHANGE = 0xFF,
12310 };
12311 
12312 static int hclge_set_led_id(struct hnae3_handle *handle,
12313 			    enum ethtool_phys_id_state status)
12314 {
12315 	struct hclge_vport *vport = hclge_get_vport(handle);
12316 	struct hclge_dev *hdev = vport->back;
12317 
12318 	switch (status) {
12319 	case ETHTOOL_ID_ACTIVE:
12320 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
12321 	case ETHTOOL_ID_INACTIVE:
12322 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12323 	default:
12324 		return -EINVAL;
12325 	}
12326 }
12327 
12328 static void hclge_get_link_mode(struct hnae3_handle *handle,
12329 				unsigned long *supported,
12330 				unsigned long *advertising)
12331 {
12332 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12333 	struct hclge_vport *vport = hclge_get_vport(handle);
12334 	struct hclge_dev *hdev = vport->back;
12335 	unsigned int idx = 0;
12336 
12337 	for (; idx < size; idx++) {
12338 		supported[idx] = hdev->hw.mac.supported[idx];
12339 		advertising[idx] = hdev->hw.mac.advertising[idx];
12340 	}
12341 }
12342 
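/* Enable or disable hardware GRO for this device. */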
12343 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12344 {
12345 	struct hclge_vport *vport = hclge_get_vport(handle);
12346 	struct hclge_dev *hdev = vport->back;
12347 
12348 	return hclge_config_gro(hdev, enable);
12349 }
12350 
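/* Re-program promiscuous mode for the PF (vport 0) when the overflow
 * promiscuous flags have changed, and bring the VLAN filter state in
 * line with the resulting flags.
 */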
12351 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12352 {
12353 	struct hclge_vport *vport = &hdev->vport[0];
12354 	struct hnae3_handle *handle = &vport->nic;
12355 	u8 tmp_flags;
12356 	int ret;
12357 
12358 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12359 		set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
12360 		vport->last_promisc_flags = vport->overflow_promisc_flags;
12361 	}
12362 
12363 	if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
12364 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12365 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12366 					     tmp_flags & HNAE3_MPE);
12367 		if (!ret) {
12368 			clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
12369 			hclge_enable_vlan_filter(handle,
12370 						 tmp_flags & HNAE3_VLAN_FLTR);
12371 		}
12372 	}
12373 }
12374 
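/* Ask the firmware (HCLGE_OPC_GET_SFP_EXIST) whether an SFP module is
 * currently plugged in.
 */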
12375 static bool hclge_module_existed(struct hclge_dev *hdev)
12376 {
12377 	struct hclge_desc desc;
12378 	u32 existed;
12379 	int ret;
12380 
12381 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12382 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12383 	if (ret) {
12384 		dev_err(&hdev->pdev->dev,
12385 			"failed to get SFP exist state, ret = %d\n", ret);
12386 		return false;
12387 	}
12388 
12389 	existed = le32_to_cpu(desc.data[0]);
12390 
12391 	return existed != 0;
12392 }
12393 
12394 /* need 6 bds (total 140 bytes) in one read;
12395  * return the number of bytes actually read, 0 means read failed.
12396  */
12397 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12398 				     u32 len, u8 *data)
12399 {
12400 	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12401 	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12402 	u16 read_len;
12403 	u16 copy_len;
12404 	int ret;
12405 	int i;
12406 
12407 	/* setup all 6 bds to read module eeprom info. */
12408 	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12409 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12410 					   true);
12411 
12412 		/* bd0~bd4 need next flag */
12413 		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12414 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12415 	}
12416 
12417 	/* setup bd0; this bd contains the offset and read length. */
12418 	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12419 	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12420 	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12421 	sfp_info_bd0->read_len = cpu_to_le16(read_len);
12422 
12423 	ret = hclge_cmd_send(&hdev->hw, desc, i);
12424 	if (ret) {
12425 		dev_err(&hdev->pdev->dev,
12426 			"failed to get SFP eeprom info, ret = %d\n", ret);
12427 		return 0;
12428 	}
12429 
12430 	/* copy sfp info from bd0 to out buffer. */
12431 	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12432 	memcpy(data, sfp_info_bd0->data, copy_len);
12433 	read_len = copy_len;
12434 
12435 	/* copy sfp info from bd1~bd5 to out buffer if needed. */
12436 	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12437 		if (read_len >= len)
12438 			return read_len;
12439 
12440 		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12441 		memcpy(data + read_len, desc[i].data, copy_len);
12442 		read_len += copy_len;
12443 	}
12444 
12445 	return read_len;
12446 }
12447 
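/* ethtool module EEPROM read handler: only fiber ports with a module
 * present are supported; data is read in chunks of at most
 * HCLGE_SFP_INFO_MAX_LEN bytes until the requested length is reached.
 */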
12448 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12449 				   u32 len, u8 *data)
12450 {
12451 	struct hclge_vport *vport = hclge_get_vport(handle);
12452 	struct hclge_dev *hdev = vport->back;
12453 	u32 read_len = 0;
12454 	u16 data_len;
12455 
12456 	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12457 		return -EOPNOTSUPP;
12458 
12459 	if (!hclge_module_existed(hdev))
12460 		return -ENXIO;
12461 
12462 	while (read_len < len) {
12463 		data_len = hclge_get_sfp_eeprom_info(hdev,
12464 						     offset + read_len,
12465 						     len - read_len,
12466 						     data + read_len);
12467 		if (!data_len)
12468 			return -EIO;
12469 
12470 		read_len += data_len;
12471 	}
12472 
12473 	return 0;
12474 }
12475 
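/* Hardware abstraction ops implemented by the PF driver; registered
 * with the hnae3 framework through ae_algo below.
 */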
12476 static const struct hnae3_ae_ops hclge_ops = {
12477 	.init_ae_dev = hclge_init_ae_dev,
12478 	.uninit_ae_dev = hclge_uninit_ae_dev,
12479 	.flr_prepare = hclge_flr_prepare,
12480 	.flr_done = hclge_flr_done,
12481 	.init_client_instance = hclge_init_client_instance,
12482 	.uninit_client_instance = hclge_uninit_client_instance,
12483 	.map_ring_to_vector = hclge_map_ring_to_vector,
12484 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12485 	.get_vector = hclge_get_vector,
12486 	.put_vector = hclge_put_vector,
12487 	.set_promisc_mode = hclge_set_promisc_mode,
12488 	.request_update_promisc_mode = hclge_request_update_promisc_mode,
12489 	.set_loopback = hclge_set_loopback,
12490 	.start = hclge_ae_start,
12491 	.stop = hclge_ae_stop,
12492 	.client_start = hclge_client_start,
12493 	.client_stop = hclge_client_stop,
12494 	.get_status = hclge_get_status,
12495 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
12496 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12497 	.get_media_type = hclge_get_media_type,
12498 	.check_port_speed = hclge_check_port_speed,
12499 	.get_fec = hclge_get_fec,
12500 	.set_fec = hclge_set_fec,
12501 	.get_rss_key_size = hclge_get_rss_key_size,
12502 	.get_rss = hclge_get_rss,
12503 	.set_rss = hclge_set_rss,
12504 	.set_rss_tuple = hclge_set_rss_tuple,
12505 	.get_rss_tuple = hclge_get_rss_tuple,
12506 	.get_tc_size = hclge_get_tc_size,
12507 	.get_mac_addr = hclge_get_mac_addr,
12508 	.set_mac_addr = hclge_set_mac_addr,
12509 	.do_ioctl = hclge_do_ioctl,
12510 	.add_uc_addr = hclge_add_uc_addr,
12511 	.rm_uc_addr = hclge_rm_uc_addr,
12512 	.add_mc_addr = hclge_add_mc_addr,
12513 	.rm_mc_addr = hclge_rm_mc_addr,
12514 	.set_autoneg = hclge_set_autoneg,
12515 	.get_autoneg = hclge_get_autoneg,
12516 	.restart_autoneg = hclge_restart_autoneg,
12517 	.halt_autoneg = hclge_halt_autoneg,
12518 	.get_pauseparam = hclge_get_pauseparam,
12519 	.set_pauseparam = hclge_set_pauseparam,
12520 	.set_mtu = hclge_set_mtu,
12521 	.reset_queue = hclge_reset_tqp,
12522 	.get_stats = hclge_get_stats,
12523 	.get_mac_stats = hclge_get_mac_stat,
12524 	.update_stats = hclge_update_stats,
12525 	.get_strings = hclge_get_strings,
12526 	.get_sset_count = hclge_get_sset_count,
12527 	.get_fw_version = hclge_get_fw_version,
12528 	.get_mdix_mode = hclge_get_mdix_mode,
12529 	.enable_vlan_filter = hclge_enable_vlan_filter,
12530 	.set_vlan_filter = hclge_set_vlan_filter,
12531 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12532 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12533 	.reset_event = hclge_reset_event,
12534 	.get_reset_level = hclge_get_reset_level,
12535 	.set_default_reset_request = hclge_set_def_reset_request,
12536 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12537 	.set_channels = hclge_set_channels,
12538 	.get_channels = hclge_get_channels,
12539 	.get_regs_len = hclge_get_regs_len,
12540 	.get_regs = hclge_get_regs,
12541 	.set_led_id = hclge_set_led_id,
12542 	.get_link_mode = hclge_get_link_mode,
12543 	.add_fd_entry = hclge_add_fd_entry,
12544 	.del_fd_entry = hclge_del_fd_entry,
12545 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12546 	.get_fd_rule_info = hclge_get_fd_rule_info,
12547 	.get_fd_all_rules = hclge_get_all_rules,
12548 	.enable_fd = hclge_enable_fd,
12549 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
12550 	.dbg_run_cmd = hclge_dbg_run_cmd,
12551 	.dbg_read_cmd = hclge_dbg_read_cmd,
12552 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
12553 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
12554 	.ae_dev_resetting = hclge_ae_dev_resetting,
12555 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12556 	.set_gro_en = hclge_gro_en,
12557 	.get_global_queue_id = hclge_covert_handle_qid_global,
12558 	.set_timer_task = hclge_set_timer_task,
12559 	.mac_connect_phy = hclge_mac_connect_phy,
12560 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
12561 	.get_vf_config = hclge_get_vf_config,
12562 	.set_vf_link_state = hclge_set_vf_link_state,
12563 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
12564 	.set_vf_trust = hclge_set_vf_trust,
12565 	.set_vf_rate = hclge_set_vf_rate,
12566 	.set_vf_mac = hclge_set_vf_mac,
12567 	.get_module_eeprom = hclge_get_module_eeprom,
12568 	.get_cmdq_stat = hclge_get_cmdq_stat,
12569 	.add_cls_flower = hclge_add_cls_flower,
12570 	.del_cls_flower = hclge_del_cls_flower,
12571 	.cls_flower_active = hclge_is_cls_flower_active,
12572 	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
12573 	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
12574 };
12575 
12576 static struct hnae3_ae_algo ae_algo = {
12577 	.ops = &hclge_ops,
12578 	.pdev_id_table = ae_algo_pci_tbl,
12579 };
12580 
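/* Module entry point: create the dedicated hclge workqueue and
 * register the algorithm with the hnae3 framework; hclge_exit() undoes
 * both steps in reverse order.
 */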
12581 static int hclge_init(void)
12582 {
12583 	pr_info("%s is initializing\n", HCLGE_NAME);
12584 
12585 	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
12586 	if (!hclge_wq) {
12587 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
12588 		return -ENOMEM;
12589 	}
12590 
12591 	hnae3_register_ae_algo(&ae_algo);
12592 
12593 	return 0;
12594 }
12595 
12596 static void hclge_exit(void)
12597 {
12598 	hnae3_unregister_ae_algo(&ae_algo);
12599 	destroy_workqueue(hclge_wq);
12600 }
12601 module_init(hclge_init);
12602 module_exit(hclge_exit);
12603 
12604 MODULE_LICENSE("GPL");
12605 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12606 MODULE_DESCRIPTION("HCLGE Driver");
12607 MODULE_VERSION(HCLGE_MOD_VERSION);
12608