xref: /linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c (revision a5d9265e017f081f0dc133c0e2f45103d027b874)
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <net/rtnetlink.h>
16 #include "hclge_cmd.h"
17 #include "hclge_dcb.h"
18 #include "hclge_main.h"
19 #include "hclge_mbx.h"
20 #include "hclge_mdio.h"
21 #include "hclge_tm.h"
22 #include "hclge_err.h"
23 #include "hnae3.h"
24 
25 #define HCLGE_NAME			"hclge"
26 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
27 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
28 
29 #define HCLGE_BUF_SIZE_UNIT	256
30 
31 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
32 static int hclge_init_vlan_config(struct hclge_dev *hdev);
33 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
34 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
35 			       u16 *allocated_size, bool is_alloc);
36 
37 static struct hnae3_ae_algo ae_algo;
38 
39 static const struct pci_device_id ae_algo_pci_tbl[] = {
40 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
41 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
42 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
43 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
44 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
45 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
46 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
47 	/* required last entry */
48 	{0, }
49 };
50 
51 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
52 
53 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
54 					 HCLGE_CMDQ_TX_ADDR_H_REG,
55 					 HCLGE_CMDQ_TX_DEPTH_REG,
56 					 HCLGE_CMDQ_TX_TAIL_REG,
57 					 HCLGE_CMDQ_TX_HEAD_REG,
58 					 HCLGE_CMDQ_RX_ADDR_L_REG,
59 					 HCLGE_CMDQ_RX_ADDR_H_REG,
60 					 HCLGE_CMDQ_RX_DEPTH_REG,
61 					 HCLGE_CMDQ_RX_TAIL_REG,
62 					 HCLGE_CMDQ_RX_HEAD_REG,
63 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
64 					 HCLGE_CMDQ_INTR_STS_REG,
65 					 HCLGE_CMDQ_INTR_EN_REG,
66 					 HCLGE_CMDQ_INTR_GEN_REG};
67 
68 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
69 					   HCLGE_VECTOR0_OTER_EN_REG,
70 					   HCLGE_MISC_RESET_STS_REG,
71 					   HCLGE_MISC_VECTOR_INT_STS,
72 					   HCLGE_GLOBAL_RESET_REG,
73 					   HCLGE_FUN_RST_ING,
74 					   HCLGE_GRO_EN_REG};
75 
76 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
77 					 HCLGE_RING_RX_ADDR_H_REG,
78 					 HCLGE_RING_RX_BD_NUM_REG,
79 					 HCLGE_RING_RX_BD_LENGTH_REG,
80 					 HCLGE_RING_RX_MERGE_EN_REG,
81 					 HCLGE_RING_RX_TAIL_REG,
82 					 HCLGE_RING_RX_HEAD_REG,
83 					 HCLGE_RING_RX_FBD_NUM_REG,
84 					 HCLGE_RING_RX_OFFSET_REG,
85 					 HCLGE_RING_RX_FBD_OFFSET_REG,
86 					 HCLGE_RING_RX_STASH_REG,
87 					 HCLGE_RING_RX_BD_ERR_REG,
88 					 HCLGE_RING_TX_ADDR_L_REG,
89 					 HCLGE_RING_TX_ADDR_H_REG,
90 					 HCLGE_RING_TX_BD_NUM_REG,
91 					 HCLGE_RING_TX_PRIORITY_REG,
92 					 HCLGE_RING_TX_TC_REG,
93 					 HCLGE_RING_TX_MERGE_EN_REG,
94 					 HCLGE_RING_TX_TAIL_REG,
95 					 HCLGE_RING_TX_HEAD_REG,
96 					 HCLGE_RING_TX_FBD_NUM_REG,
97 					 HCLGE_RING_TX_OFFSET_REG,
98 					 HCLGE_RING_TX_EBD_NUM_REG,
99 					 HCLGE_RING_TX_EBD_OFFSET_REG,
100 					 HCLGE_RING_TX_BD_ERR_REG,
101 					 HCLGE_RING_EN_REG};
102 
103 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
104 					     HCLGE_TQP_INTR_GL0_REG,
105 					     HCLGE_TQP_INTR_GL1_REG,
106 					     HCLGE_TQP_INTR_GL2_REG,
107 					     HCLGE_TQP_INTR_RL_REG};
108 
109 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
110 	"App    Loopback test",
111 	"Serdes serial Loopback test",
112 	"Serdes parallel Loopback test",
113 	"Phy    Loopback test"
114 };
115 
116 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
117 	{"mac_tx_mac_pause_num",
118 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
119 	{"mac_rx_mac_pause_num",
120 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
121 	{"mac_tx_control_pkt_num",
122 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
123 	{"mac_rx_control_pkt_num",
124 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
125 	{"mac_tx_pfc_pkt_num",
126 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
127 	{"mac_tx_pfc_pri0_pkt_num",
128 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
129 	{"mac_tx_pfc_pri1_pkt_num",
130 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
131 	{"mac_tx_pfc_pri2_pkt_num",
132 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
133 	{"mac_tx_pfc_pri3_pkt_num",
134 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
135 	{"mac_tx_pfc_pri4_pkt_num",
136 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
137 	{"mac_tx_pfc_pri5_pkt_num",
138 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
139 	{"mac_tx_pfc_pri6_pkt_num",
140 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
141 	{"mac_tx_pfc_pri7_pkt_num",
142 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
143 	{"mac_rx_pfc_pkt_num",
144 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
145 	{"mac_rx_pfc_pri0_pkt_num",
146 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
147 	{"mac_rx_pfc_pri1_pkt_num",
148 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
149 	{"mac_rx_pfc_pri2_pkt_num",
150 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
151 	{"mac_rx_pfc_pri3_pkt_num",
152 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
153 	{"mac_rx_pfc_pri4_pkt_num",
154 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
155 	{"mac_rx_pfc_pri5_pkt_num",
156 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
157 	{"mac_rx_pfc_pri6_pkt_num",
158 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
159 	{"mac_rx_pfc_pri7_pkt_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
161 	{"mac_tx_total_pkt_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
163 	{"mac_tx_total_oct_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
165 	{"mac_tx_good_pkt_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
167 	{"mac_tx_bad_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
169 	{"mac_tx_good_oct_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
171 	{"mac_tx_bad_oct_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
173 	{"mac_tx_uni_pkt_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
175 	{"mac_tx_multi_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
177 	{"mac_tx_broad_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
179 	{"mac_tx_undersize_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
181 	{"mac_tx_oversize_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
183 	{"mac_tx_64_oct_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
185 	{"mac_tx_65_127_oct_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
187 	{"mac_tx_128_255_oct_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
189 	{"mac_tx_256_511_oct_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
191 	{"mac_tx_512_1023_oct_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
193 	{"mac_tx_1024_1518_oct_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
195 	{"mac_tx_1519_2047_oct_pkt_num",
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
197 	{"mac_tx_2048_4095_oct_pkt_num",
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
199 	{"mac_tx_4096_8191_oct_pkt_num",
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
201 	{"mac_tx_8192_9216_oct_pkt_num",
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
203 	{"mac_tx_9217_12287_oct_pkt_num",
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
205 	{"mac_tx_12288_16383_oct_pkt_num",
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
207 	{"mac_tx_1519_max_good_pkt_num",
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
209 	{"mac_tx_1519_max_bad_pkt_num",
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
211 	{"mac_rx_total_pkt_num",
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
213 	{"mac_rx_total_oct_num",
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
215 	{"mac_rx_good_pkt_num",
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
217 	{"mac_rx_bad_pkt_num",
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
219 	{"mac_rx_good_oct_num",
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
221 	{"mac_rx_bad_oct_num",
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
223 	{"mac_rx_uni_pkt_num",
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
225 	{"mac_rx_multi_pkt_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
227 	{"mac_rx_broad_pkt_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
229 	{"mac_rx_undersize_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
231 	{"mac_rx_oversize_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
233 	{"mac_rx_64_oct_pkt_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
235 	{"mac_rx_65_127_oct_pkt_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
237 	{"mac_rx_128_255_oct_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
239 	{"mac_rx_256_511_oct_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
241 	{"mac_rx_512_1023_oct_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
243 	{"mac_rx_1024_1518_oct_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
245 	{"mac_rx_1519_2047_oct_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
247 	{"mac_rx_2048_4095_oct_pkt_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
249 	{"mac_rx_4096_8191_oct_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
251 	{"mac_rx_8192_9216_oct_pkt_num",
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
253 	{"mac_rx_9217_12287_oct_pkt_num",
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
255 	{"mac_rx_12288_16383_oct_pkt_num",
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
257 	{"mac_rx_1519_max_good_pkt_num",
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
259 	{"mac_rx_1519_max_bad_pkt_num",
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
261 
262 	{"mac_tx_fragment_pkt_num",
263 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
264 	{"mac_tx_undermin_pkt_num",
265 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
266 	{"mac_tx_jabber_pkt_num",
267 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
268 	{"mac_tx_err_all_pkt_num",
269 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
270 	{"mac_tx_from_app_good_pkt_num",
271 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
272 	{"mac_tx_from_app_bad_pkt_num",
273 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
274 	{"mac_rx_fragment_pkt_num",
275 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
276 	{"mac_rx_undermin_pkt_num",
277 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
278 	{"mac_rx_jabber_pkt_num",
279 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
280 	{"mac_rx_fcs_err_pkt_num",
281 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
282 	{"mac_rx_send_app_good_pkt_num",
283 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
284 	{"mac_rx_send_app_bad_pkt_num",
285 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
286 };
287 
288 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
289 	{
290 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
291 		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
292 		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
293 		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
294 		.i_port_bitmap = 0x1,
295 	},
296 };
297 
298 static const u8 hclge_hash_key[] = {
299 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
300 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
301 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
302 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
303 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
304 };
305 
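/* Fallback statistics path, used when the firmware does not support querying
 * the MAC register count: issue the fixed HCLGE_MAC_CMD_NUM-descriptor
 * HCLGE_OPC_STATS_MAC command and accumulate the returned 64-bit counters
 * into hdev->hw_stats.mac_stats.
 */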
306 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
307 {
308 #define HCLGE_MAC_CMD_NUM 21
309 
310 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
311 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
312 	__le64 *desc_data;
313 	int i, k, n;
314 	int ret;
315 
316 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
317 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
318 	if (ret) {
319 		dev_err(&hdev->pdev->dev,
320 			"Get MAC pkt stats fail, status = %d.\n", ret);
321 
322 		return ret;
323 	}
324 
325 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
326 		/* for special opcode 0032, only the first desc has the head */
327 		if (unlikely(i == 0)) {
328 			desc_data = (__le64 *)(&desc[i].data[0]);
329 			n = HCLGE_RD_FIRST_STATS_NUM;
330 		} else {
331 			desc_data = (__le64 *)(&desc[i]);
332 			n = HCLGE_RD_OTHER_STATS_NUM;
333 		}
334 
335 		for (k = 0; k < n; k++) {
336 			*data += le64_to_cpu(*desc_data);
337 			data++;
338 			desc_data++;
339 		}
340 	}
341 
342 	return 0;
343 }
344 
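/* Newer statistics path: read the complete MAC counter set with
 * HCLGE_OPC_STATS_MAC_ALL, using the descriptor count previously obtained
 * from hclge_mac_query_reg_num().
 */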
345 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
346 {
347 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
348 	struct hclge_desc *desc;
349 	__le64 *desc_data;
350 	u16 i, k, n;
351 	int ret;
352 
353 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
354 	if (!desc)
355 		return -ENOMEM;
356 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
357 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
358 	if (ret) {
359 		kfree(desc);
360 		return ret;
361 	}
362 
363 	for (i = 0; i < desc_num; i++) {
364 		/* for special opcode 0034, only the first desc has the head */
365 		if (i == 0) {
366 			desc_data = (__le64 *)(&desc[i].data[0]);
367 			n = HCLGE_RD_FIRST_STATS_NUM;
368 		} else {
369 			desc_data = (__le64 *)(&desc[i]);
370 			n = HCLGE_RD_OTHER_STATS_NUM;
371 		}
372 
373 		for (k = 0; k < n; k++) {
374 			*data += le64_to_cpu(*desc_data);
375 			data++;
376 			desc_data++;
377 		}
378 	}
379 
380 	kfree(desc);
381 
382 	return 0;
383 }
384 
385 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
386 {
387 	struct hclge_desc desc;
388 	__le32 *desc_data;
389 	u32 reg_num;
390 	int ret;
391 
392 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
393 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
394 	if (ret)
395 		return ret;
396 
397 	desc_data = (__le32 *)(&desc.data[0]);
398 	reg_num = le32_to_cpu(*desc_data);
399 
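	/* The first descriptor covers three counters and every further
	 * descriptor covers four, i.e. 1 + DIV_ROUND_UP(reg_num - 3, 4).
	 */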
400 	*desc_num = 1 + ((reg_num - 3) >> 2) +
401 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
402 
403 	return 0;
404 }
405 
406 static int hclge_mac_update_stats(struct hclge_dev *hdev)
407 {
408 	u32 desc_num;
409 	int ret;
410 
411 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
412 
413 	/* The firmware supports the new statistics acquisition method */
414 	if (!ret)
415 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
416 	else if (ret == -EOPNOTSUPP)
417 		ret = hclge_mac_update_stats_defective(hdev);
418 	else
419 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
420 
421 	return ret;
422 }
423 
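/* Read the per-queue RX and TX packet counters from firmware (one command
 * per queue) and accumulate them into each TQP's software statistics.
 */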
424 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
425 {
426 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
427 	struct hclge_vport *vport = hclge_get_vport(handle);
428 	struct hclge_dev *hdev = vport->back;
429 	struct hnae3_queue *queue;
430 	struct hclge_desc desc[1];
431 	struct hclge_tqp *tqp;
432 	int ret, i;
433 
434 	for (i = 0; i < kinfo->num_tqps; i++) {
435 		queue = handle->kinfo.tqp[i];
436 		tqp = container_of(queue, struct hclge_tqp, q);
437 		/* command : HCLGE_OPC_QUERY_RX_STATUS */
438 		hclge_cmd_setup_basic_desc(&desc[0],
439 					   HCLGE_OPC_QUERY_RX_STATUS,
440 					   true);
441 
442 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
443 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
444 		if (ret) {
445 			dev_err(&hdev->pdev->dev,
446 				"Query tqp stat fail, status = %d, queue = %d\n",
447 				ret, i);
448 			return ret;
449 		}
450 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
451 			le32_to_cpu(desc[0].data[1]);
452 	}
453 
454 	for (i = 0; i < kinfo->num_tqps; i++) {
455 		queue = handle->kinfo.tqp[i];
456 		tqp = container_of(queue, struct hclge_tqp, q);
457 		/* command : HCLGE_OPC_QUERY_TX_STATUS */
458 		hclge_cmd_setup_basic_desc(&desc[0],
459 					   HCLGE_OPC_QUERY_TX_STATUS,
460 					   true);
461 
462 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
463 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
464 		if (ret) {
465 			dev_err(&hdev->pdev->dev,
466 				"Query tqp stat fail, status = %d, queue = %d\n",
467 				ret, i);
468 			return ret;
469 		}
470 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
471 			le32_to_cpu(desc[0].data[1]);
472 	}
473 
474 	return 0;
475 }
476 
477 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
478 {
479 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
480 	struct hclge_tqp *tqp;
481 	u64 *buff = data;
482 	int i;
483 
484 	for (i = 0; i < kinfo->num_tqps; i++) {
485 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
486 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
487 	}
488 
489 	for (i = 0; i < kinfo->num_tqps; i++) {
490 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
491 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
492 	}
493 
494 	return buff;
495 }
496 
497 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
498 {
499 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
500 
501 	return kinfo->num_tqps * 2; /* one TX and one RX counter per TQP */
502 }
503 
504 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
505 {
506 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
507 	u8 *buff = data;
508 	int i = 0;
509 
510 	for (i = 0; i < kinfo->num_tqps; i++) {
511 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
512 			struct hclge_tqp, q);
513 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
514 			 tqp->index);
515 		buff = buff + ETH_GSTRING_LEN;
516 	}
517 
518 	for (i = 0; i < kinfo->num_tqps; i++) {
519 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
520 			struct hclge_tqp, q);
521 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
522 			 tqp->index);
523 		buff = buff + ETH_GSTRING_LEN;
524 	}
525 
526 	return buff;
527 }
528 
529 static u64 *hclge_comm_get_stats(void *comm_stats,
530 				 const struct hclge_comm_stats_str strs[],
531 				 int size, u64 *data)
532 {
533 	u64 *buf = data;
534 	u32 i;
535 
536 	for (i = 0; i < size; i++)
537 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
538 
539 	return buf + size;
540 }
541 
542 static u8 *hclge_comm_get_strings(u32 stringset,
543 				  const struct hclge_comm_stats_str strs[],
544 				  int size, u8 *data)
545 {
546 	char *buff = (char *)data;
547 	u32 i;
548 
549 	if (stringset != ETH_SS_STATS)
550 		return buff;
551 
552 	for (i = 0; i < size; i++) {
553 		snprintf(buff, ETH_GSTRING_LEN, "%s",
554 			 strs[i].desc);
555 		buff = buff + ETH_GSTRING_LEN;
556 	}
557 
558 	return (u8 *)buff;
559 }
560 
561 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
562 {
563 	struct hnae3_handle *handle;
564 	int status;
565 
566 	handle = &hdev->vport[0].nic;
567 	if (handle->client) {
568 		status = hclge_tqps_update_stats(handle);
569 		if (status) {
570 			dev_err(&hdev->pdev->dev,
571 				"Update TQPS stats fail, status = %d.\n",
572 				status);
573 		}
574 	}
575 
576 	status = hclge_mac_update_stats(hdev);
577 	if (status)
578 		dev_err(&hdev->pdev->dev,
579 			"Update MAC stats fail, status = %d.\n", status);
580 }
581 
582 static void hclge_update_stats(struct hnae3_handle *handle,
583 			       struct net_device_stats *net_stats)
584 {
585 	struct hclge_vport *vport = hclge_get_vport(handle);
586 	struct hclge_dev *hdev = vport->back;
587 	int status;
588 
589 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
590 		return;
591 
592 	status = hclge_mac_update_stats(hdev);
593 	if (status)
594 		dev_err(&hdev->pdev->dev,
595 			"Update MAC stats fail, status = %d.\n",
596 			status);
597 
598 	status = hclge_tqps_update_stats(handle);
599 	if (status)
600 		dev_err(&hdev->pdev->dev,
601 			"Update TQPS stats fail, status = %d.\n",
602 			status);
603 
604 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
605 }
606 
607 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
608 {
609 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
610 		HNAE3_SUPPORT_PHY_LOOPBACK |\
611 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
612 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
613 
614 	struct hclge_vport *vport = hclge_get_vport(handle);
615 	struct hclge_dev *hdev = vport->back;
616 	int count = 0;
617 
618 	/* Loopback test support rules:
619 	 * mac: only GE mode is supported
620 	 * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
621 	 * phy: only supported when a phy device exists on the board
622 	 */
623 	if (stringset == ETH_SS_TEST) {
624 		/* clear loopback bit flags at first */
625 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
626 		if (hdev->pdev->revision >= 0x21 ||
627 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
628 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
629 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
630 			count += 1;
631 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
632 		}
633 
634 		count += 2;
635 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
636 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
637 	} else if (stringset == ETH_SS_STATS) {
638 		count = ARRAY_SIZE(g_mac_stats_string) +
639 			hclge_tqps_get_sset_count(handle, stringset);
640 	}
641 
642 	return count;
643 }
644 
645 static void hclge_get_strings(struct hnae3_handle *handle,
646 			      u32 stringset,
647 			      u8 *data)
648 {
649 	u8 *p = data;
650 	int size;
651 
652 	if (stringset == ETH_SS_STATS) {
653 		size = ARRAY_SIZE(g_mac_stats_string);
654 		p = hclge_comm_get_strings(stringset,
655 					   g_mac_stats_string,
656 					   size,
657 					   p);
658 		p = hclge_tqps_get_strings(handle, p);
659 	} else if (stringset == ETH_SS_TEST) {
660 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
661 			memcpy(p,
662 			       hns3_nic_test_strs[HNAE3_LOOP_APP],
663 			       ETH_GSTRING_LEN);
664 			p += ETH_GSTRING_LEN;
665 		}
666 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
667 			memcpy(p,
668 			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
669 			       ETH_GSTRING_LEN);
670 			p += ETH_GSTRING_LEN;
671 		}
672 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
673 			memcpy(p,
674 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
675 			       ETH_GSTRING_LEN);
676 			p += ETH_GSTRING_LEN;
677 		}
678 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
679 			memcpy(p,
680 			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
681 			       ETH_GSTRING_LEN);
682 			p += ETH_GSTRING_LEN;
683 		}
684 	}
685 }
686 
687 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
688 {
689 	struct hclge_vport *vport = hclge_get_vport(handle);
690 	struct hclge_dev *hdev = vport->back;
691 	u64 *p;
692 
693 	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
694 				 g_mac_stats_string,
695 				 ARRAY_SIZE(g_mac_stats_string),
696 				 data);
697 	p = hclge_tqps_get_stats(handle, p);
698 }
699 
700 static int hclge_parse_func_status(struct hclge_dev *hdev,
701 				   struct hclge_func_status_cmd *status)
702 {
703 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
704 		return -EINVAL;
705 
706 	/* Set the pf to main pf */
707 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
708 		hdev->flag |= HCLGE_FLAG_MAIN;
709 	else
710 		hdev->flag &= ~HCLGE_FLAG_MAIN;
711 
712 	return 0;
713 }
714 
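/* Poll the function status up to five times, sleeping 1-2 ms between tries,
 * until the firmware reports a PF state, then record whether this PF is the
 * main PF.
 */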
715 static int hclge_query_function_status(struct hclge_dev *hdev)
716 {
717 	struct hclge_func_status_cmd *req;
718 	struct hclge_desc desc;
719 	int timeout = 0;
720 	int ret;
721 
722 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
723 	req = (struct hclge_func_status_cmd *)desc.data;
724 
725 	do {
726 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
727 		if (ret) {
728 			dev_err(&hdev->pdev->dev,
729 				"query function status failed %d.\n",
730 				ret);
731 
732 			return ret;
733 		}
734 
735 		/* Check pf reset is done */
736 		if (req->pf_state)
737 			break;
738 		usleep_range(1000, 2000);
739 	} while (timeout++ < 5);
740 
741 	ret = hclge_parse_func_status(hdev, req);
742 
743 	return ret;
744 }
745 
746 static int hclge_query_pf_resource(struct hclge_dev *hdev)
747 {
748 	struct hclge_pf_res_cmd *req;
749 	struct hclge_desc desc;
750 	int ret;
751 
752 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
753 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
754 	if (ret) {
755 		dev_err(&hdev->pdev->dev,
756 			"query pf resource failed %d.\n", ret);
757 		return ret;
758 	}
759 
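	/* The TQP count and buffer sizes are returned in the descriptor;
	 * buffer sizes are in units of (1 << HCLGE_BUF_UNIT_S) bytes and fall
	 * back to driver defaults when reported as zero.
	 */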
760 	req = (struct hclge_pf_res_cmd *)desc.data;
761 	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
762 	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
763 
764 	if (req->tx_buf_size)
765 		hdev->tx_buf_size =
766 			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
767 	else
768 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
769 
770 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
771 
772 	if (req->dv_buf_size)
773 		hdev->dv_buf_size =
774 			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
775 	else
776 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
777 
778 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
779 
780 	if (hnae3_dev_roce_supported(hdev)) {
781 		hdev->roce_base_msix_offset =
782 		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
783 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
784 		hdev->num_roce_msi =
785 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
786 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
787 
788 		/* PF should have NIC vectors and Roce vectors,
789 		 * NIC vectors are queued before Roce vectors.
790 		 */
791 		hdev->num_msi = hdev->num_roce_msi +
792 				hdev->roce_base_msix_offset;
793 	} else {
794 		hdev->num_msi =
795 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
796 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
797 	}
798 
799 	return 0;
800 }
801 
802 static int hclge_parse_speed(int speed_cmd, int *speed)
803 {
804 	switch (speed_cmd) {
805 	case 6:
806 		*speed = HCLGE_MAC_SPEED_10M;
807 		break;
808 	case 7:
809 		*speed = HCLGE_MAC_SPEED_100M;
810 		break;
811 	case 0:
812 		*speed = HCLGE_MAC_SPEED_1G;
813 		break;
814 	case 1:
815 		*speed = HCLGE_MAC_SPEED_10G;
816 		break;
817 	case 2:
818 		*speed = HCLGE_MAC_SPEED_25G;
819 		break;
820 	case 3:
821 		*speed = HCLGE_MAC_SPEED_40G;
822 		break;
823 	case 4:
824 		*speed = HCLGE_MAC_SPEED_50G;
825 		break;
826 	case 5:
827 		*speed = HCLGE_MAC_SPEED_100G;
828 		break;
829 	default:
830 		return -EINVAL;
831 	}
832 
833 	return 0;
834 }
835 
836 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
837 					u8 speed_ability)
838 {
839 	unsigned long *supported = hdev->hw.mac.supported;
840 
841 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
842 		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
843 			supported);
844 
845 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
846 		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
847 			supported);
848 
849 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
850 		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
851 			supported);
852 
853 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
854 		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
855 			supported);
856 
857 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
858 		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
859 			supported);
860 
861 	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
862 	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
863 }
864 
865 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
866 {
867 	u8 media_type = hdev->hw.mac.media_type;
868 
869 	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
870 		return;
871 
872 	hclge_parse_fiber_link_mode(hdev, speed_ability);
873 }
874 
875 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
876 {
877 	struct hclge_cfg_param_cmd *req;
878 	u64 mac_addr_tmp_high;
879 	u64 mac_addr_tmp;
880 	int i;
881 
882 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
883 
884 	/* get the configuration */
885 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
886 					      HCLGE_CFG_VMDQ_M,
887 					      HCLGE_CFG_VMDQ_S);
888 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
889 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
890 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
891 					    HCLGE_CFG_TQP_DESC_N_M,
892 					    HCLGE_CFG_TQP_DESC_N_S);
893 
894 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
895 					HCLGE_CFG_PHY_ADDR_M,
896 					HCLGE_CFG_PHY_ADDR_S);
897 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
898 					  HCLGE_CFG_MEDIA_TP_M,
899 					  HCLGE_CFG_MEDIA_TP_S);
900 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
901 					  HCLGE_CFG_RX_BUF_LEN_M,
902 					  HCLGE_CFG_RX_BUF_LEN_S);
903 	/* get mac_address */
904 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
905 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
906 					    HCLGE_CFG_MAC_ADDR_H_M,
907 					    HCLGE_CFG_MAC_ADDR_H_S);
908 
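	/* The low 32 bits of the MAC address come from param[2] and the upper
	 * bits from param[3]; combine them into one value before splitting it
	 * into bytes below.
	 */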
909 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
910 
911 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
912 					     HCLGE_CFG_DEFAULT_SPEED_M,
913 					     HCLGE_CFG_DEFAULT_SPEED_S);
914 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
915 					    HCLGE_CFG_RSS_SIZE_M,
916 					    HCLGE_CFG_RSS_SIZE_S);
917 
918 	for (i = 0; i < ETH_ALEN; i++)
919 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
920 
921 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
922 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
923 
924 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
925 					     HCLGE_CFG_SPEED_ABILITY_M,
926 					     HCLGE_CFG_SPEED_ABILITY_S);
927 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
928 					 HCLGE_CFG_UMV_TBL_SPACE_M,
929 					 HCLGE_CFG_UMV_TBL_SPACE_S);
930 	if (!cfg->umv_space)
931 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
932 }
933 
934 /* hclge_get_cfg: query the static configuration parameters from flash
935  * @hdev: pointer to struct hclge_dev
936  * @hcfg: the config structure to be filled in
937  */
938 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
939 {
940 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
941 	struct hclge_cfg_param_cmd *req;
942 	int i, ret;
943 
944 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
945 		u32 offset = 0;
946 
947 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
948 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
949 					   true);
950 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
951 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
952 		/* Length must be in units of 4 bytes when sent to hardware */
953 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
954 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
955 		req->offset = cpu_to_le32(offset);
956 	}
957 
958 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
959 	if (ret) {
960 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
961 		return ret;
962 	}
963 
964 	hclge_parse_cfg(hcfg, desc);
965 
966 	return 0;
967 }
968 
969 static int hclge_get_cap(struct hclge_dev *hdev)
970 {
971 	int ret;
972 
973 	ret = hclge_query_function_status(hdev);
974 	if (ret) {
975 		dev_err(&hdev->pdev->dev,
976 			"query function status error %d.\n", ret);
977 		return ret;
978 	}
979 
980 	/* get pf resource */
981 	ret = hclge_query_pf_resource(hdev);
982 	if (ret)
983 		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
984 
985 	return ret;
986 }
987 
988 static int hclge_configure(struct hclge_dev *hdev)
989 {
990 	struct hclge_cfg cfg;
991 	int ret, i;
992 
993 	ret = hclge_get_cfg(hdev, &cfg);
994 	if (ret) {
995 		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
996 		return ret;
997 	}
998 
999 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1000 	hdev->base_tqp_pid = 0;
1001 	hdev->rss_size_max = cfg.rss_size_max;
1002 	hdev->rx_buf_len = cfg.rx_buf_len;
1003 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1004 	hdev->hw.mac.media_type = cfg.media_type;
1005 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1006 	hdev->num_desc = cfg.tqp_desc_num;
1007 	hdev->tm_info.num_pg = 1;
1008 	hdev->tc_max = cfg.tc_num;
1009 	hdev->tm_info.hw_pfc_map = 0;
1010 	hdev->wanted_umv_size = cfg.umv_space;
1011 
1012 	if (hnae3_dev_fd_supported(hdev))
1013 		hdev->fd_en = true;
1014 
1015 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1016 	if (ret) {
1017 		dev_err(&hdev->pdev->dev, "failed to parse speed, ret = %d.\n", ret);
1018 		return ret;
1019 	}
1020 
1021 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1022 
1023 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1024 	    (hdev->tc_max < 1)) {
1025 		dev_warn(&hdev->pdev->dev, "invalid TC num %d, set to 1.\n",
1026 			 hdev->tc_max);
1027 		hdev->tc_max = 1;
1028 	}
1029 
1030 	/* Dev does not support DCB */
1031 	if (!hnae3_dev_dcb_supported(hdev)) {
1032 		hdev->tc_max = 1;
1033 		hdev->pfc_max = 0;
1034 	} else {
1035 		hdev->pfc_max = hdev->tc_max;
1036 	}
1037 
1038 	hdev->tm_info.num_tc = 1;
1039 
1040 	/* Non-contiguous TCs are currently not supported */
1041 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1042 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1043 
1044 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1045 
1046 	return ret;
1047 }
1048 
1049 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1050 			    int tso_mss_max)
1051 {
1052 	struct hclge_cfg_tso_status_cmd *req;
1053 	struct hclge_desc desc;
1054 	u16 tso_mss;
1055 
1056 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1057 
1058 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1059 
1060 	tso_mss = 0;
1061 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1062 			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1063 	req->tso_mss_min = cpu_to_le16(tso_mss);
1064 
1065 	tso_mss = 0;
1066 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1067 			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1068 	req->tso_mss_max = cpu_to_le16(tso_mss);
1069 
1070 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1071 }
1072 
1073 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1074 {
1075 	struct hclge_cfg_gro_status_cmd *req;
1076 	struct hclge_desc desc;
1077 	int ret;
1078 
1079 	if (!hnae3_dev_gro_supported(hdev))
1080 		return 0;
1081 
1082 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1083 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1084 
1085 	req->gro_en = cpu_to_le16(en ? 1 : 0);
1086 
1087 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1088 	if (ret)
1089 		dev_err(&hdev->pdev->dev,
1090 			"GRO hardware config cmd failed, ret = %d\n", ret);
1091 
1092 	return ret;
1093 }
1094 
1095 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1096 {
1097 	struct hclge_tqp *tqp;
1098 	int i;
1099 
1100 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1101 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1102 	if (!hdev->htqp)
1103 		return -ENOMEM;
1104 
1105 	tqp = hdev->htqp;
1106 
1107 	for (i = 0; i < hdev->num_tqps; i++) {
1108 		tqp->dev = &hdev->pdev->dev;
1109 		tqp->index = i;
1110 
1111 		tqp->q.ae_algo = &ae_algo;
1112 		tqp->q.buf_size = hdev->rx_buf_len;
1113 		tqp->q.desc_num = hdev->num_desc;
1114 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1115 			i * HCLGE_TQP_REG_SIZE;
1116 
1117 		tqp++;
1118 	}
1119 
1120 	return 0;
1121 }
1122 
1123 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1124 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1125 {
1126 	struct hclge_tqp_map_cmd *req;
1127 	struct hclge_desc desc;
1128 	int ret;
1129 
1130 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1131 
1132 	req = (struct hclge_tqp_map_cmd *)desc.data;
1133 	req->tqp_id = cpu_to_le16(tqp_pid);
1134 	req->tqp_vf = func_id;
1135 	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1136 			1 << HCLGE_TQP_MAP_EN_B;
1137 	req->tqp_vid = cpu_to_le16(tqp_vid);
1138 
1139 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1140 	if (ret)
1141 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1142 
1143 	return ret;
1144 }
1145 
1146 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1147 {
1148 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1149 	struct hclge_dev *hdev = vport->back;
1150 	int i, alloced;
1151 
1152 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1153 	     alloced < num_tqps; i++) {
1154 		if (!hdev->htqp[i].alloced) {
1155 			hdev->htqp[i].q.handle = &vport->nic;
1156 			hdev->htqp[i].q.tqp_index = alloced;
1157 			hdev->htqp[i].q.desc_num = kinfo->num_desc;
1158 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1159 			hdev->htqp[i].alloced = true;
1160 			alloced++;
1161 		}
1162 	}
1163 	vport->alloc_tqps = alloced;
1164 	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1165 				vport->alloc_tqps / hdev->tm_info.num_tc);
1166 
1167 	return 0;
1168 }
1169 
1170 static int hclge_knic_setup(struct hclge_vport *vport,
1171 			    u16 num_tqps, u16 num_desc)
1172 {
1173 	struct hnae3_handle *nic = &vport->nic;
1174 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1175 	struct hclge_dev *hdev = vport->back;
1176 	int ret;
1177 
1178 	kinfo->num_desc = num_desc;
1179 	kinfo->rx_buf_len = hdev->rx_buf_len;
1180 
1181 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1182 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1183 	if (!kinfo->tqp)
1184 		return -ENOMEM;
1185 
1186 	ret = hclge_assign_tqp(vport, num_tqps);
1187 	if (ret)
1188 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1189 
1190 	return ret;
1191 }
1192 
1193 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1194 				  struct hclge_vport *vport)
1195 {
1196 	struct hnae3_handle *nic = &vport->nic;
1197 	struct hnae3_knic_private_info *kinfo;
1198 	u16 i;
1199 
1200 	kinfo = &nic->kinfo;
1201 	for (i = 0; i < vport->alloc_tqps; i++) {
1202 		struct hclge_tqp *q =
1203 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1204 		bool is_pf;
1205 		int ret;
1206 
1207 		is_pf = !(vport->vport_id);
1208 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1209 					     i, is_pf);
1210 		if (ret)
1211 			return ret;
1212 	}
1213 
1214 	return 0;
1215 }
1216 
1217 static int hclge_map_tqp(struct hclge_dev *hdev)
1218 {
1219 	struct hclge_vport *vport = hdev->vport;
1220 	u16 i, num_vport;
1221 
1222 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1223 	for (i = 0; i < num_vport; i++)	{
1224 		int ret;
1225 
1226 		ret = hclge_map_tqp_to_vport(hdev, vport);
1227 		if (ret)
1228 			return ret;
1229 
1230 		vport++;
1231 	}
1232 
1233 	return 0;
1234 }
1235 
1236 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1237 {
1238 	/* this would be initialized later */
1239 }
1240 
1241 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1242 {
1243 	struct hnae3_handle *nic = &vport->nic;
1244 	struct hclge_dev *hdev = vport->back;
1245 	int ret;
1246 
1247 	nic->pdev = hdev->pdev;
1248 	nic->ae_algo = &ae_algo;
1249 	nic->numa_node_mask = hdev->numa_node_mask;
1250 
1251 	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1252 		ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
1253 		if (ret) {
1254 			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1255 				ret);
1256 			return ret;
1257 		}
1258 	} else {
1259 		hclge_unic_setup(vport, num_tqps);
1260 	}
1261 
1262 	return 0;
1263 }
1264 
1265 static int hclge_alloc_vport(struct hclge_dev *hdev)
1266 {
1267 	struct pci_dev *pdev = hdev->pdev;
1268 	struct hclge_vport *vport;
1269 	u32 tqp_main_vport;
1270 	u32 tqp_per_vport;
1271 	int num_vport, i;
1272 	int ret;
1273 
1274 	/* We need to alloc a vport for main NIC of PF */
1275 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1276 
1277 	if (hdev->num_tqps < num_vport) {
1278 		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1279 			hdev->num_tqps, num_vport);
1280 		return -EINVAL;
1281 	}
1282 
1283 	/* Alloc the same number of TQPs for every vport (the main vport also gets the remainder) */
1284 	tqp_per_vport = hdev->num_tqps / num_vport;
1285 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1286 
1287 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1288 			     GFP_KERNEL);
1289 	if (!vport)
1290 		return -ENOMEM;
1291 
1292 	hdev->vport = vport;
1293 	hdev->num_alloc_vport = num_vport;
1294 
1295 	if (IS_ENABLED(CONFIG_PCI_IOV))
1296 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1297 
1298 	for (i = 0; i < num_vport; i++) {
1299 		vport->back = hdev;
1300 		vport->vport_id = i;
1301 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1302 
1303 		if (i == 0)
1304 			ret = hclge_vport_setup(vport, tqp_main_vport);
1305 		else
1306 			ret = hclge_vport_setup(vport, tqp_per_vport);
1307 		if (ret) {
1308 			dev_err(&pdev->dev,
1309 				"vport setup failed for vport %d, %d\n",
1310 				i, ret);
1311 			return ret;
1312 		}
1313 
1314 		vport++;
1315 	}
1316 
1317 	return 0;
1318 }
1319 
1320 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1321 				    struct hclge_pkt_buf_alloc *buf_alloc)
1322 {
1323 /* TX buffer size is in units of 128 bytes */
1324 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1325 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1326 	struct hclge_tx_buff_alloc_cmd *req;
1327 	struct hclge_desc desc;
1328 	int ret;
1329 	u8 i;
1330 
1331 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1332 
1333 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1334 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1335 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1336 
1337 		req->tx_pkt_buff[i] =
1338 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1339 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1340 	}
1341 
1342 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1343 	if (ret)
1344 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1345 			ret);
1346 
1347 	return ret;
1348 }
1349 
1350 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1351 				 struct hclge_pkt_buf_alloc *buf_alloc)
1352 {
1353 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1354 
1355 	if (ret)
1356 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1357 
1358 	return ret;
1359 }
1360 
1361 static int hclge_get_tc_num(struct hclge_dev *hdev)
1362 {
1363 	int i, cnt = 0;
1364 
1365 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1366 		if (hdev->hw_tc_map & BIT(i))
1367 			cnt++;
1368 	return cnt;
1369 }
1370 
1371 static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
1372 {
1373 	int i, cnt = 0;
1374 
1375 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1376 		if (hdev->hw_tc_map & BIT(i) &&
1377 		    hdev->tm_info.hw_pfc_map & BIT(i))
1378 			cnt++;
1379 	return cnt;
1380 }
1381 
1382 /* Get the number of pfc enabled TCs, which have private buffer */
1383 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1384 				  struct hclge_pkt_buf_alloc *buf_alloc)
1385 {
1386 	struct hclge_priv_buf *priv;
1387 	int i, cnt = 0;
1388 
1389 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1390 		priv = &buf_alloc->priv_buf[i];
1391 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1392 		    priv->enable)
1393 			cnt++;
1394 	}
1395 
1396 	return cnt;
1397 }
1398 
1399 /* Get the number of pfc disabled TCs, which have private buffer */
1400 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1401 				     struct hclge_pkt_buf_alloc *buf_alloc)
1402 {
1403 	struct hclge_priv_buf *priv;
1404 	int i, cnt = 0;
1405 
1406 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1407 		priv = &buf_alloc->priv_buf[i];
1408 		if (hdev->hw_tc_map & BIT(i) &&
1409 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1410 		    priv->enable)
1411 			cnt++;
1412 	}
1413 
1414 	return cnt;
1415 }
1416 
1417 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1418 {
1419 	struct hclge_priv_buf *priv;
1420 	u32 rx_priv = 0;
1421 	int i;
1422 
1423 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1424 		priv = &buf_alloc->priv_buf[i];
1425 		if (priv->enable)
1426 			rx_priv += priv->buf_size;
1427 	}
1428 	return rx_priv;
1429 }
1430 
1431 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1432 {
1433 	u32 i, total_tx_size = 0;
1434 
1435 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1436 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1437 
1438 	return total_tx_size;
1439 }
1440 
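/* Check whether rx_all (the packet buffer left after TX allocation) can hold
 * every enabled TC's private buffer plus the required shared buffer; if so,
 * fill in the shared buffer size, its waterlines and the per-TC thresholds.
 */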
1441 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1442 				struct hclge_pkt_buf_alloc *buf_alloc,
1443 				u32 rx_all)
1444 {
1445 	u32 shared_buf_min, shared_buf_tc, shared_std;
1446 	int tc_num, pfc_enable_num;
1447 	u32 shared_buf, aligned_mps;
1448 	u32 rx_priv;
1449 	int i;
1450 
1451 	tc_num = hclge_get_tc_num(hdev);
1452 	pfc_enable_num = hclge_get_pfc_enable_num(hdev);
1453 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1454 
1455 	if (hnae3_dev_dcb_supported(hdev))
1456 		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
1457 	else
1458 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1459 					+ hdev->dv_buf_size;
1460 
1461 	shared_buf_tc = pfc_enable_num * aligned_mps +
1462 			(tc_num - pfc_enable_num) * aligned_mps / 2 +
1463 			aligned_mps;
1464 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1465 			     HCLGE_BUF_SIZE_UNIT);
1466 
1467 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1468 	if (rx_all < rx_priv + shared_std)
1469 		return false;
1470 
1471 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1472 	buf_alloc->s_buf.buf_size = shared_buf;
1473 	if (hnae3_dev_dcb_supported(hdev)) {
1474 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1475 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1476 			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1477 	} else {
1478 		buf_alloc->s_buf.self.high = aligned_mps +
1479 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1480 		buf_alloc->s_buf.self.low =
1481 			roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1482 	}
1483 
1484 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1485 		if ((hdev->hw_tc_map & BIT(i)) &&
1486 		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
1487 			buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
1488 			buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
1489 		} else {
1490 			buf_alloc->s_buf.tc_thrd[i].low = 0;
1491 			buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
1492 		}
1493 	}
1494 
1495 	return true;
1496 }
1497 
1498 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1499 				struct hclge_pkt_buf_alloc *buf_alloc)
1500 {
1501 	u32 i, total_size;
1502 
1503 	total_size = hdev->pkt_buf_size;
1504 
1505 	/* alloc tx buffer for all enabled tc */
1506 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1507 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1508 
1509 		if (hdev->hw_tc_map & BIT(i)) {
1510 			if (total_size < hdev->tx_buf_size)
1511 				return -ENOMEM;
1512 
1513 			priv->tx_buf_size = hdev->tx_buf_size;
1514 		} else {
1515 			priv->tx_buf_size = 0;
1516 		}
1517 
1518 		total_size -= priv->tx_buf_size;
1519 	}
1520 
1521 	return 0;
1522 }
1523 
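/* Assign a private RX buffer and waterlines to every TC enabled in hw_tc_map,
 * using the larger watermarks when @max is true, then verify the resulting
 * layout fits via hclge_is_rx_buf_ok().
 */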
1524 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1525 				  struct hclge_pkt_buf_alloc *buf_alloc)
1526 {
1527 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1528 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1529 	int i;
1530 
1531 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1532 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1533 
1534 		priv->enable = 0;
1535 		priv->wl.low = 0;
1536 		priv->wl.high = 0;
1537 		priv->buf_size = 0;
1538 
1539 		if (!(hdev->hw_tc_map & BIT(i)))
1540 			continue;
1541 
1542 		priv->enable = 1;
1543 
1544 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1545 			priv->wl.low = max ? aligned_mps : 256;
1546 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
1547 						HCLGE_BUF_SIZE_UNIT);
1548 		} else {
1549 			priv->wl.low = 0;
1550 			priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
1551 		}
1552 
1553 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1554 	}
1555 
1556 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1557 }
1558 
1559 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1560 					  struct hclge_pkt_buf_alloc *buf_alloc)
1561 {
1562 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1563 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1564 	int i;
1565 
1566 	/* let the last to be cleared first */
1567 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1568 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1569 
1570 		if (hdev->hw_tc_map & BIT(i) &&
1571 		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1572 			/* Clear the no pfc TC private buffer */
1573 			priv->wl.low = 0;
1574 			priv->wl.high = 0;
1575 			priv->buf_size = 0;
1576 			priv->enable = 0;
1577 			no_pfc_priv_num--;
1578 		}
1579 
1580 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1581 		    no_pfc_priv_num == 0)
1582 			break;
1583 	}
1584 
1585 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1586 }
1587 
1588 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1589 					struct hclge_pkt_buf_alloc *buf_alloc)
1590 {
1591 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1592 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1593 	int i;
1594 
1595 	/* let the last to be cleared first */
1596 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1597 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1598 
1599 		if (hdev->hw_tc_map & BIT(i) &&
1600 		    hdev->tm_info.hw_pfc_map & BIT(i)) {
1601 			/* Reduce the number of pfc TC with private buffer */
1602 			priv->wl.low = 0;
1603 			priv->enable = 0;
1604 			priv->wl.high = 0;
1605 			priv->buf_size = 0;
1606 			pfc_priv_num--;
1607 		}
1608 
1609 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1610 		    pfc_priv_num == 0)
1611 			break;
1612 	}
1613 
1614 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1615 }
1616 
1617 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1618  * @hdev: pointer to struct hclge_dev
1619  * @buf_alloc: pointer to buffer calculation data
1620  * @return: 0: calculation successful, negative: fail
1621  */
1622 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1623 				struct hclge_pkt_buf_alloc *buf_alloc)
1624 {
1625 	/* When DCB is not supported, rx private buffer is not allocated. */
1626 	if (!hnae3_dev_dcb_supported(hdev)) {
1627 		u32 rx_all = hdev->pkt_buf_size;
1628 
1629 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1630 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1631 			return -ENOMEM;
1632 
1633 		return 0;
1634 	}
1635 
1636 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1637 		return 0;
1638 
1639 	/* try to decrease the buffer size */
1640 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1641 		return 0;
1642 
1643 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1644 		return 0;
1645 
1646 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1647 		return 0;
1648 
1649 	return -ENOMEM;
1650 }
1651 
1652 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1653 				   struct hclge_pkt_buf_alloc *buf_alloc)
1654 {
1655 	struct hclge_rx_priv_buff_cmd *req;
1656 	struct hclge_desc desc;
1657 	int ret;
1658 	int i;
1659 
1660 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1661 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1662 
1663 	/* Alloc private buffer TCs */
1664 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1665 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1666 
1667 		req->buf_num[i] =
1668 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1669 		req->buf_num[i] |=
1670 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1671 	}
1672 
1673 	req->shared_buf =
1674 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1675 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
1676 
1677 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1678 	if (ret)
1679 		dev_err(&hdev->pdev->dev,
1680 			"rx private buffer alloc cmd failed %d\n", ret);
1681 
1682 	return ret;
1683 }
1684 
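/* Program the per-TC RX private buffer waterlines; each descriptor carries
 * HCLGE_TC_NUM_ONE_DESC TCs, so two descriptors are sent together.
 */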
1685 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1686 				   struct hclge_pkt_buf_alloc *buf_alloc)
1687 {
1688 	struct hclge_rx_priv_wl_buf *req;
1689 	struct hclge_priv_buf *priv;
1690 	struct hclge_desc desc[2];
1691 	int i, j;
1692 	int ret;
1693 
1694 	for (i = 0; i < 2; i++) {
1695 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1696 					   false);
1697 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1698 
1699 		/* The first descriptor sets the NEXT bit to 1 */
1700 		if (i == 0)
1701 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1702 		else
1703 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1704 
1705 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1706 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1707 
1708 			priv = &buf_alloc->priv_buf[idx];
1709 			req->tc_wl[j].high =
1710 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1711 			req->tc_wl[j].high |=
1712 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1713 			req->tc_wl[j].low =
1714 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1715 			req->tc_wl[j].low |=
1716 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1717 		}
1718 	}
1719 
1720 	/* Send 2 descriptors at one time */
1721 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1722 	if (ret)
1723 		dev_err(&hdev->pdev->dev,
1724 			"rx private waterline config cmd failed %d\n",
1725 			ret);
1726 	return ret;
1727 }
1728 
1729 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1730 				    struct hclge_pkt_buf_alloc *buf_alloc)
1731 {
1732 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1733 	struct hclge_rx_com_thrd *req;
1734 	struct hclge_desc desc[2];
1735 	struct hclge_tc_thrd *tc;
1736 	int i, j;
1737 	int ret;
1738 
1739 	for (i = 0; i < 2; i++) {
1740 		hclge_cmd_setup_basic_desc(&desc[i],
1741 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1742 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
1743 
1744 		/* The first descriptor set the NEXT bit to 1 */
1745 		/* The first descriptor sets the NEXT bit to 1 */
1746 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1747 		else
1748 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1749 
1750 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1751 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1752 
1753 			req->com_thrd[j].high =
1754 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1755 			req->com_thrd[j].high |=
1756 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1757 			req->com_thrd[j].low =
1758 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1759 			req->com_thrd[j].low |=
1760 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1761 		}
1762 	}
1763 
1764 	/* Send 2 descriptors at one time */
1765 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1766 	if (ret)
1767 		dev_err(&hdev->pdev->dev,
1768 			"common threshold config cmd failed %d\n", ret);
1769 	return ret;
1770 }
1771 
1772 static int hclge_common_wl_config(struct hclge_dev *hdev,
1773 				  struct hclge_pkt_buf_alloc *buf_alloc)
1774 {
1775 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1776 	struct hclge_rx_com_wl *req;
1777 	struct hclge_desc desc;
1778 	int ret;
1779 
1780 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1781 
1782 	req = (struct hclge_rx_com_wl *)desc.data;
1783 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1784 	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1785 
1786 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1787 	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1788 
1789 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1790 	if (ret)
1791 		dev_err(&hdev->pdev->dev,
1792 			"common waterline config cmd failed %d\n", ret);
1793 
1794 	return ret;
1795 }
1796 
1797 int hclge_buffer_alloc(struct hclge_dev *hdev)
1798 {
1799 	struct hclge_pkt_buf_alloc *pkt_buf;
1800 	int ret;
1801 
1802 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1803 	if (!pkt_buf)
1804 		return -ENOMEM;
1805 
1806 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
1807 	if (ret) {
1808 		dev_err(&hdev->pdev->dev,
1809 			"could not calc tx buffer size for all TCs %d\n", ret);
1810 		goto out;
1811 	}
1812 
1813 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
1814 	if (ret) {
1815 		dev_err(&hdev->pdev->dev,
1816 			"could not alloc tx buffers %d\n", ret);
1817 		goto out;
1818 	}
1819 
1820 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
1821 	if (ret) {
1822 		dev_err(&hdev->pdev->dev,
1823 			"could not calc rx priv buffer size for all TCs %d\n",
1824 			ret);
1825 		goto out;
1826 	}
1827 
1828 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
1829 	if (ret) {
1830 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1831 			ret);
1832 		goto out;
1833 	}
1834 
1835 	if (hnae3_dev_dcb_supported(hdev)) {
1836 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
1837 		if (ret) {
1838 			dev_err(&hdev->pdev->dev,
1839 				"could not configure rx private waterline %d\n",
1840 				ret);
1841 			goto out;
1842 		}
1843 
1844 		ret = hclge_common_thrd_config(hdev, pkt_buf);
1845 		if (ret) {
1846 			dev_err(&hdev->pdev->dev,
1847 				"could not configure common threshold %d\n",
1848 				ret);
1849 			goto out;
1850 		}
1851 	}
1852 
1853 	ret = hclge_common_wl_config(hdev, pkt_buf);
1854 	if (ret)
1855 		dev_err(&hdev->pdev->dev,
1856 			"could not configure common waterline %d\n", ret);
1857 
1858 out:
1859 	kfree(pkt_buf);
1860 	return ret;
1861 }
1862 
1863 static int hclge_init_roce_base_info(struct hclge_vport *vport)
1864 {
1865 	struct hnae3_handle *roce = &vport->roce;
1866 	struct hnae3_handle *nic = &vport->nic;
1867 
1868 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
1869 
1870 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1871 	    vport->back->num_msi_left == 0)
1872 		return -EINVAL;
1873 
1874 	roce->rinfo.base_vector = vport->back->roce_base_vector;
1875 
1876 	roce->rinfo.netdev = nic->kinfo.netdev;
1877 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
1878 
1879 	roce->pdev = nic->pdev;
1880 	roce->ae_algo = nic->ae_algo;
1881 	roce->numa_node_mask = nic->numa_node_mask;
1882 
1883 	return 0;
1884 }
1885 
1886 static int hclge_init_msi(struct hclge_dev *hdev)
1887 {
1888 	struct pci_dev *pdev = hdev->pdev;
1889 	int vectors;
1890 	int i;
1891 
1892 	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1893 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
1894 	if (vectors < 0) {
1895 		dev_err(&pdev->dev,
1896 			"failed(%d) to allocate MSI/MSI-X vectors\n",
1897 			vectors);
1898 		return vectors;
1899 	}
1900 	if (vectors < hdev->num_msi)
1901 		dev_warn(&hdev->pdev->dev,
1902 			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1903 			 hdev->num_msi, vectors);
1904 
1905 	hdev->num_msi = vectors;
1906 	hdev->num_msi_left = vectors;
1907 	hdev->base_msi_vector = pdev->irq;
1908 	hdev->roce_base_vector = hdev->base_msi_vector +
1909 				hdev->roce_base_msix_offset;
1910 
1911 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1912 					   sizeof(u16), GFP_KERNEL);
1913 	if (!hdev->vector_status) {
1914 		pci_free_irq_vectors(pdev);
1915 		return -ENOMEM;
1916 	}
1917 
1918 	for (i = 0; i < hdev->num_msi; i++)
1919 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1920 
1921 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1922 					sizeof(int), GFP_KERNEL);
1923 	if (!hdev->vector_irq) {
1924 		pci_free_irq_vectors(pdev);
1925 		return -ENOMEM;
1926 	}
1927 
1928 	return 0;
1929 }
1930 
1931 static u8 hclge_check_speed_dup(u8 duplex, int speed)
1932 {
1933 
1934 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
1935 		duplex = HCLGE_MAC_FULL;
1936 
1937 	return duplex;
1938 }
1939 
1940 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
1941 				      u8 duplex)
1942 {
1943 	struct hclge_config_mac_speed_dup_cmd *req;
1944 	struct hclge_desc desc;
1945 	int ret;
1946 
1947 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
1948 
1949 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
1950 
1951 	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
1952 
1953 	switch (speed) {
1954 	case HCLGE_MAC_SPEED_10M:
1955 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1956 				HCLGE_CFG_SPEED_S, 6);
1957 		break;
1958 	case HCLGE_MAC_SPEED_100M:
1959 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1960 				HCLGE_CFG_SPEED_S, 7);
1961 		break;
1962 	case HCLGE_MAC_SPEED_1G:
1963 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1964 				HCLGE_CFG_SPEED_S, 0);
1965 		break;
1966 	case HCLGE_MAC_SPEED_10G:
1967 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1968 				HCLGE_CFG_SPEED_S, 1);
1969 		break;
1970 	case HCLGE_MAC_SPEED_25G:
1971 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1972 				HCLGE_CFG_SPEED_S, 2);
1973 		break;
1974 	case HCLGE_MAC_SPEED_40G:
1975 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1976 				HCLGE_CFG_SPEED_S, 3);
1977 		break;
1978 	case HCLGE_MAC_SPEED_50G:
1979 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1980 				HCLGE_CFG_SPEED_S, 4);
1981 		break;
1982 	case HCLGE_MAC_SPEED_100G:
1983 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1984 				HCLGE_CFG_SPEED_S, 5);
1985 		break;
1986 	default:
1987 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
1988 		return -EINVAL;
1989 	}
1990 
1991 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1992 		      1);
1993 
1994 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1995 	if (ret) {
1996 		dev_err(&hdev->pdev->dev,
1997 			"mac speed/duplex config cmd failed %d.\n", ret);
1998 		return ret;
1999 	}
2000 
2001 	return 0;
2002 }
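
/* Illustrative only: the switch above encodes the speed into the
 * HCLGE_CFG_SPEED field using a fixed firmware encoding, which reads as a
 * table (values taken from the cases above):
 *
 *	speed:  1G  10G  25G  40G  50G  100G  10M  100M
 *	code:    0    1    2    3    4     5    6     7
 *
 * A hypothetical lookup-table variant (not part of this driver) could be:
 *
 *	static const struct { int speed; u8 code; } speed_map[] = {
 *		{ HCLGE_MAC_SPEED_1G, 0 },  { HCLGE_MAC_SPEED_10G, 1 },
 *		{ HCLGE_MAC_SPEED_25G, 2 }, { HCLGE_MAC_SPEED_40G, 3 },
 *		{ HCLGE_MAC_SPEED_50G, 4 }, { HCLGE_MAC_SPEED_100G, 5 },
 *		{ HCLGE_MAC_SPEED_10M, 6 }, { HCLGE_MAC_SPEED_100M, 7 },
 *	};
 */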
2003 
2004 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2005 {
2006 	int ret;
2007 
2008 	duplex = hclge_check_speed_dup(duplex, speed);
2009 	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2010 		return 0;
2011 
2012 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2013 	if (ret)
2014 		return ret;
2015 
2016 	hdev->hw.mac.speed = speed;
2017 	hdev->hw.mac.duplex = duplex;
2018 
2019 	return 0;
2020 }
2021 
2022 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2023 				     u8 duplex)
2024 {
2025 	struct hclge_vport *vport = hclge_get_vport(handle);
2026 	struct hclge_dev *hdev = vport->back;
2027 
2028 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2029 }
2030 
2031 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2032 {
2033 	struct hclge_config_auto_neg_cmd *req;
2034 	struct hclge_desc desc;
2035 	u32 flag = 0;
2036 	int ret;
2037 
2038 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2039 
2040 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2041 	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2042 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2043 
2044 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2045 	if (ret)
2046 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2047 			ret);
2048 
2049 	return ret;
2050 }
2051 
2052 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2053 {
2054 	struct hclge_vport *vport = hclge_get_vport(handle);
2055 	struct hclge_dev *hdev = vport->back;
2056 
2057 	return hclge_set_autoneg_en(hdev, enable);
2058 }
2059 
2060 static int hclge_get_autoneg(struct hnae3_handle *handle)
2061 {
2062 	struct hclge_vport *vport = hclge_get_vport(handle);
2063 	struct hclge_dev *hdev = vport->back;
2064 	struct phy_device *phydev = hdev->hw.mac.phydev;
2065 
2066 	if (phydev)
2067 		return phydev->autoneg;
2068 
2069 	return hdev->hw.mac.autoneg;
2070 }
2071 
2072 static int hclge_mac_init(struct hclge_dev *hdev)
2073 {
2074 	struct hclge_mac *mac = &hdev->hw.mac;
2075 	int ret;
2076 
2077 	hdev->support_sfp_query = true;
2078 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2079 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2080 					 hdev->hw.mac.duplex);
2081 	if (ret) {
2082 		dev_err(&hdev->pdev->dev,
2083 			"Config mac speed dup fail ret=%d\n", ret);
2084 		return ret;
2085 	}
2086 
2087 	mac->link = 0;
2088 
2089 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2090 	if (ret) {
2091 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2092 		return ret;
2093 	}
2094 
2095 	ret = hclge_buffer_alloc(hdev);
2096 	if (ret)
2097 		dev_err(&hdev->pdev->dev,
2098 			"allocate buffer fail, ret=%d\n", ret);
2099 
2100 	return ret;
2101 }
2102 
2103 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2104 {
2105 	if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2106 		schedule_work(&hdev->mbx_service_task);
2107 }
2108 
2109 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2110 {
2111 	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2112 		schedule_work(&hdev->rst_service_task);
2113 }
2114 
2115 static void hclge_task_schedule(struct hclge_dev *hdev)
2116 {
2117 	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2118 	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2119 	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2120 		(void)schedule_work(&hdev->service_task);
2121 }
2122 
2123 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2124 {
2125 	struct hclge_link_status_cmd *req;
2126 	struct hclge_desc desc;
2127 	int link_status;
2128 	int ret;
2129 
2130 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2131 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2132 	if (ret) {
2133 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2134 			ret);
2135 		return ret;
2136 	}
2137 
2138 	req = (struct hclge_link_status_cmd *)desc.data;
2139 	link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2140 
2141 	return !!link_status;
2142 }
2143 
2144 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2145 {
2146 	int mac_state;
2147 	int link_stat;
2148 
2149 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2150 		return 0;
2151 
2152 	mac_state = hclge_get_mac_link_status(hdev);
2153 
2154 	if (hdev->hw.mac.phydev) {
2155 		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2156 			link_stat = mac_state &
2157 				hdev->hw.mac.phydev->link;
2158 		else
2159 			link_stat = 0;
2160 
2161 	} else {
2162 		link_stat = mac_state;
2163 	}
2164 
2165 	return !!link_stat;
2166 }
2167 
2168 static void hclge_update_link_status(struct hclge_dev *hdev)
2169 {
2170 	struct hnae3_client *rclient = hdev->roce_client;
2171 	struct hnae3_client *client = hdev->nic_client;
2172 	struct hnae3_handle *rhandle;
2173 	struct hnae3_handle *handle;
2174 	int state;
2175 	int i;
2176 
2177 	if (!client)
2178 		return;
2179 	state = hclge_get_mac_phy_link(hdev);
2180 	if (state != hdev->hw.mac.link) {
2181 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2182 			handle = &hdev->vport[i].nic;
2183 			client->ops->link_status_change(handle, state);
2184 			rhandle = &hdev->vport[i].roce;
2185 			if (rclient && rclient->ops->link_status_change)
2186 				rclient->ops->link_status_change(rhandle,
2187 								 state);
2188 		}
2189 		hdev->hw.mac.link = state;
2190 	}
2191 }
2192 
2193 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2194 {
2195 	struct hclge_sfp_speed_cmd *resp = NULL;
2196 	struct hclge_desc desc;
2197 	int ret;
2198 
2199 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
2200 	resp = (struct hclge_sfp_speed_cmd *)desc.data;
2201 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2202 	if (ret == -EOPNOTSUPP) {
2203 		dev_warn(&hdev->pdev->dev,
2204 			 "IMP does not support getting SFP speed %d\n", ret);
2205 		return ret;
2206 	} else if (ret) {
2207 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2208 		return ret;
2209 	}
2210 
2211 	*speed = resp->sfp_speed;
2212 
2213 	return 0;
2214 }
2215 
2216 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2217 {
2218 	struct hclge_mac mac = hdev->hw.mac;
2219 	int speed;
2220 	int ret;
2221 
2222 	/* get the speed from the SFP cmd when the phy
2223 	 * doesn't exist.
2224 	 */
2225 	if (mac.phydev)
2226 		return 0;
2227 
2228 	/* if IMP does not support get SFP/qSFP speed, return directly */
2229 	/* if IMP does not support getting SFP/qSFP speed, return directly */
2230 		return 0;
2231 
2232 	ret = hclge_get_sfp_speed(hdev, &speed);
2233 	if (ret == -EOPNOTSUPP) {
2234 		hdev->support_sfp_query = false;
2235 		return ret;
2236 	} else if (ret) {
2237 		return ret;
2238 	}
2239 
2240 	if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2241 		return 0; /* do nothing if no SFP */
2242 
2243 	/* must config full duplex for SFP */
2244 	return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2245 }
2246 
2247 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2248 {
2249 	struct hclge_vport *vport = hclge_get_vport(handle);
2250 	struct hclge_dev *hdev = vport->back;
2251 
2252 	return hclge_update_speed_duplex(hdev);
2253 }
2254 
2255 static int hclge_get_status(struct hnae3_handle *handle)
2256 {
2257 	struct hclge_vport *vport = hclge_get_vport(handle);
2258 	struct hclge_dev *hdev = vport->back;
2259 
2260 	hclge_update_link_status(hdev);
2261 
2262 	return hdev->hw.mac.link;
2263 }
2264 
2265 static void hclge_service_timer(struct timer_list *t)
2266 {
2267 	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2268 
2269 	mod_timer(&hdev->service_timer, jiffies + HZ);
2270 	hdev->hw_stats.stats_timer++;
2271 	hclge_task_schedule(hdev);
2272 }
2273 
2274 static void hclge_service_complete(struct hclge_dev *hdev)
2275 {
2276 	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2277 
2278 	/* Flush memory before next watchdog */
2279 	smp_mb__before_atomic();
2280 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2281 }
2282 
2283 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2284 {
2285 	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2286 
2287 	/* fetch the events from their corresponding regs */
2288 	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2289 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2290 	msix_src_reg = hclge_read_dev(&hdev->hw,
2291 				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2292 
2293 	/* Assumption: if by any chance reset and mailbox events are reported
2294 	 * together, we will only process the reset event in this pass and
2295 	 * defer the processing of the mailbox events. Since we would not have
2296 	 * cleared the RX CMDQ event this time, we will receive another
2297 	 * interrupt from H/W just for the mailbox.
2298 	 */
2299 
2300 	/* check for vector0 reset event sources */
2301 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2302 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2303 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2304 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2305 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2306 		return HCLGE_VECTOR0_EVENT_RST;
2307 	}
2308 
2309 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2310 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2311 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2312 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2313 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2314 		return HCLGE_VECTOR0_EVENT_RST;
2315 	}
2316 
2317 	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2318 		dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2319 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2320 		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2321 		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2322 		return HCLGE_VECTOR0_EVENT_RST;
2323 	}
2324 
2325 	/* check for vector0 msix event source */
2326 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
2327 		return HCLGE_VECTOR0_EVENT_ERR;
2328 
2329 	/* check for vector0 mailbox(=CMDQ RX) event source */
2330 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2331 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2332 		*clearval = cmdq_src_reg;
2333 		return HCLGE_VECTOR0_EVENT_MBX;
2334 	}
2335 
2336 	return HCLGE_VECTOR0_EVENT_OTHER;
2337 }
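
/* Illustrative only: the checks above define a fixed priority for the shared
 * vector 0 sources, evaluated top to bottom:
 *
 *	IMP reset > global reset > core reset > MSI-X error > mailbox
 *
 * e.g. if rst_src_reg reports a global reset and cmdq_src_reg reports a
 * pending mailbox in the same interrupt, only HCLGE_VECTOR0_EVENT_RST is
 * returned; the uncleared RX CMDQ bit re-raises the interrupt later so the
 * mailbox event is not lost.
 */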
2338 
2339 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2340 				    u32 regclr)
2341 {
2342 	switch (event_type) {
2343 	case HCLGE_VECTOR0_EVENT_RST:
2344 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2345 		break;
2346 	case HCLGE_VECTOR0_EVENT_MBX:
2347 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2348 		break;
2349 	default:
2350 		break;
2351 	}
2352 }
2353 
2354 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2355 {
2356 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2357 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2358 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2359 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2360 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2361 }
2362 
2363 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2364 {
2365 	writel(enable ? 1 : 0, vector->addr);
2366 }
2367 
2368 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2369 {
2370 	struct hclge_dev *hdev = data;
2371 	u32 event_cause;
2372 	u32 clearval;
2373 
2374 	hclge_enable_vector(&hdev->misc_vector, false);
2375 	event_cause = hclge_check_event_cause(hdev, &clearval);
2376 
2377 	/* vector 0 interrupt is shared with reset and mailbox source events. */
2378 	switch (event_cause) {
2379 	case HCLGE_VECTOR0_EVENT_ERR:
2380 		/* We do not know what type of reset is required now. This can
2381 		 * only be decided after we fetch the type of errors which
2382 		 * caused this event. Therefore, we will do the following for now:
2383 		 * 1. Assert an HNAE3_UNKNOWN_RESET type of reset. This means
2384 		 *    the actual type of reset to be used is deferred.
2385 		 * 2. Schedule the reset service task.
2386 		 * 3. When the service task receives the HNAE3_UNKNOWN_RESET
2387 		 *    type, it will fetch the correct type of reset. This is
2388 		 *    done by first decoding the types of errors.
2389 		 */
2390 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2391 		/* fall through */
2392 	case HCLGE_VECTOR0_EVENT_RST:
2393 		hclge_reset_task_schedule(hdev);
2394 		break;
2395 	case HCLGE_VECTOR0_EVENT_MBX:
2396 		/* If we are here then either,
2397 		 * 1. we are not handling any mbx task and are not scheduled
2398 		 *    to handle one,
2399 		 *                        OR
2400 		 * 2. we are handling an mbx task but nothing more is
2401 		 *    scheduled.
2402 		 * In both cases, we should schedule the mbx task as there are
2403 		 * more mbx messages reported by this interrupt.
2404 		 */
2405 		hclge_mbx_task_schedule(hdev);
2406 		break;
2407 	default:
2408 		dev_warn(&hdev->pdev->dev,
2409 			 "received unknown or unhandled event of vector0\n");
2410 		break;
2411 	}
2412 
2413 	/* clear the source of the interrupt if it is not caused by a reset */
2414 	if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2415 		hclge_clear_event_cause(hdev, event_cause, clearval);
2416 		hclge_enable_vector(&hdev->misc_vector, true);
2417 	}
2418 
2419 	return IRQ_HANDLED;
2420 }
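
/* Illustrative only: the vector is re-enabled here solely for the mailbox
 * path; for reset and error events it stays masked until the cause has been
 * cleared elsewhere, e.g.:
 *
 *	HCLGE_VECTOR0_EVENT_MBX:
 *		hclge_clear_event_cause(); hclge_enable_vector(..., true);
 *	HCLGE_VECTOR0_EVENT_RST:
 *		re-enable deferred to hclge_clear_reset_cause()
 *	HCLGE_VECTOR0_EVENT_ERR:
 *		re-enable deferred to the UNKNOWN-reset handling in
 *		hclge_get_reset_level()
 */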
2421 
2422 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2423 {
2424 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2425 		dev_warn(&hdev->pdev->dev,
2426 			 "vector(vector_id %d) has been freed.\n", vector_id);
2427 		return;
2428 	}
2429 
2430 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2431 	hdev->num_msi_left += 1;
2432 	hdev->num_msi_used -= 1;
2433 }
2434 
2435 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2436 {
2437 	struct hclge_misc_vector *vector = &hdev->misc_vector;
2438 
2439 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2440 
2441 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2442 	hdev->vector_status[0] = 0;
2443 
2444 	hdev->num_msi_left -= 1;
2445 	hdev->num_msi_used += 1;
2446 }
2447 
2448 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2449 {
2450 	int ret;
2451 
2452 	hclge_get_misc_vector(hdev);
2453 
2454 	/* this IRQ is explicitly freed in hclge_misc_irq_uninit() */
2455 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2456 			  0, "hclge_misc", hdev);
2457 	if (ret) {
2458 		hclge_free_vector(hdev, 0);
2459 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2460 			hdev->misc_vector.vector_irq);
2461 	}
2462 
2463 	return ret;
2464 }
2465 
2466 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2467 {
2468 	free_irq(hdev->misc_vector.vector_irq, hdev);
2469 	hclge_free_vector(hdev, 0);
2470 }
2471 
2472 int hclge_notify_client(struct hclge_dev *hdev,
2473 			enum hnae3_reset_notify_type type)
2474 {
2475 	struct hnae3_client *client = hdev->nic_client;
2476 	u16 i;
2477 
2478 	if (!client->ops->reset_notify)
2479 		return -EOPNOTSUPP;
2480 
2481 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2482 		struct hnae3_handle *handle = &hdev->vport[i].nic;
2483 		int ret;
2484 
2485 		ret = client->ops->reset_notify(handle, type);
2486 		if (ret) {
2487 			dev_err(&hdev->pdev->dev,
2488 				"notify nic client failed %d(%d)\n", type, ret);
2489 			return ret;
2490 		}
2491 	}
2492 
2493 	return 0;
2494 }
2495 
2496 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2497 				    enum hnae3_reset_notify_type type)
2498 {
2499 	struct hnae3_client *client = hdev->roce_client;
2500 	int ret = 0;
2501 	u16 i;
2502 
2503 	if (!client)
2504 		return 0;
2505 
2506 	if (!client->ops->reset_notify)
2507 		return -EOPNOTSUPP;
2508 
2509 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2510 		struct hnae3_handle *handle = &hdev->vport[i].roce;
2511 
2512 		ret = client->ops->reset_notify(handle, type);
2513 		if (ret) {
2514 			dev_err(&hdev->pdev->dev,
2515 				"notify roce client failed %d(%d)",
2516 				type, ret);
2517 			return ret;
2518 		}
2519 	}
2520 
2521 	return ret;
2522 }
2523 
2524 static int hclge_reset_wait(struct hclge_dev *hdev)
2525 {
2526 #define HCLGE_RESET_WATI_MS	100
2527 #define HCLGE_RESET_WAIT_CNT	200
2528 	u32 val, reg, reg_bit;
2529 	u32 cnt = 0;
2530 
2531 	switch (hdev->reset_type) {
2532 	case HNAE3_IMP_RESET:
2533 		reg = HCLGE_GLOBAL_RESET_REG;
2534 		reg_bit = HCLGE_IMP_RESET_BIT;
2535 		break;
2536 	case HNAE3_GLOBAL_RESET:
2537 		reg = HCLGE_GLOBAL_RESET_REG;
2538 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
2539 		break;
2540 	case HNAE3_CORE_RESET:
2541 		reg = HCLGE_GLOBAL_RESET_REG;
2542 		reg_bit = HCLGE_CORE_RESET_BIT;
2543 		break;
2544 	case HNAE3_FUNC_RESET:
2545 		reg = HCLGE_FUN_RST_ING;
2546 		reg_bit = HCLGE_FUN_RST_ING_B;
2547 		break;
2548 	case HNAE3_FLR_RESET:
2549 		break;
2550 	default:
2551 		dev_err(&hdev->pdev->dev,
2552 			"Wait for unsupported reset type: %d\n",
2553 			hdev->reset_type);
2554 		return -EINVAL;
2555 	}
2556 
2557 	if (hdev->reset_type == HNAE3_FLR_RESET) {
2558 		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2559 		       cnt++ < HCLGE_RESET_WAIT_CNT)
2560 			msleep(HCLGE_RESET_WATI_MS);
2561 
2562 		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2563 			dev_err(&hdev->pdev->dev,
2564 				"flr wait timeout: %d\n", cnt);
2565 			return -EBUSY;
2566 		}
2567 
2568 		return 0;
2569 	}
2570 
2571 	val = hclge_read_dev(&hdev->hw, reg);
2572 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2573 		msleep(HCLGE_RESET_WATI_MS);
2574 		val = hclge_read_dev(&hdev->hw, reg);
2575 		cnt++;
2576 	}
2577 
2578 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
2579 		dev_warn(&hdev->pdev->dev,
2580 			 "Wait for reset timeout: %d\n", hdev->reset_type);
2581 		return -EBUSY;
2582 	}
2583 
2584 	return 0;
2585 }
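
/* Illustrative arithmetic: with HCLGE_RESET_WATI_MS = 100 and
 * HCLGE_RESET_WAIT_CNT = 200, the polling loops above give the hardware (or
 * the VF FLR handshake) roughly 200 * 100 ms = 20 seconds to report
 * completion before -EBUSY is returned.
 */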
2586 
2587 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2588 {
2589 	struct hclge_vf_rst_cmd *req;
2590 	struct hclge_desc desc;
2591 
2592 	req = (struct hclge_vf_rst_cmd *)desc.data;
2593 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2594 	req->dest_vfid = func_id;
2595 
2596 	if (reset)
2597 		req->vf_rst = 0x1;
2598 
2599 	return hclge_cmd_send(&hdev->hw, &desc, 1);
2600 }
2601 
2602 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2603 {
2604 	int i;
2605 
2606 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2607 		struct hclge_vport *vport = &hdev->vport[i];
2608 		int ret;
2609 
2610 		/* Send cmd to set/clear VF's FUNC_RST_ING */
2611 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2612 		if (ret) {
2613 			dev_err(&hdev->pdev->dev,
2614 				"set vf(%d) rst failed %d!\n",
2615 				vport->vport_id, ret);
2616 			return ret;
2617 		}
2618 
2619 		if (!reset)
2620 			continue;
2621 
2622 		/* Inform VF to process the reset.
2623 		 * hclge_inform_reset_assert_to_vf may fail if VF
2624 		 * driver is not loaded.
2625 		 */
2626 		ret = hclge_inform_reset_assert_to_vf(vport);
2627 		if (ret)
2628 			dev_warn(&hdev->pdev->dev,
2629 				 "inform reset to vf(%d) failed %d!\n",
2630 				 vport->vport_id, ret);
2631 	}
2632 
2633 	return 0;
2634 }
2635 
2636 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2637 {
2638 	struct hclge_desc desc;
2639 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2640 	int ret;
2641 
2642 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2643 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2644 	req->fun_reset_vfid = func_id;
2645 
2646 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2647 	if (ret)
2648 		dev_err(&hdev->pdev->dev,
2649 			"send function reset cmd fail, status = %d\n", ret);
2650 
2651 	return ret;
2652 }
2653 
2654 static void hclge_do_reset(struct hclge_dev *hdev)
2655 {
2656 	struct pci_dev *pdev = hdev->pdev;
2657 	u32 val;
2658 
2659 	switch (hdev->reset_type) {
2660 	case HNAE3_GLOBAL_RESET:
2661 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2662 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2663 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2664 		dev_info(&pdev->dev, "Global Reset requested\n");
2665 		break;
2666 	case HNAE3_CORE_RESET:
2667 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2668 		hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2669 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2670 		dev_info(&pdev->dev, "Core Reset requested\n");
2671 		break;
2672 	case HNAE3_FUNC_RESET:
2673 		dev_info(&pdev->dev, "PF Reset requested\n");
2674 		/* schedule again to check later */
2675 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2676 		hclge_reset_task_schedule(hdev);
2677 		break;
2678 	case HNAE3_FLR_RESET:
2679 		dev_info(&pdev->dev, "FLR requested\n");
2680 		/* schedule again to check later */
2681 		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2682 		hclge_reset_task_schedule(hdev);
2683 		break;
2684 	default:
2685 		dev_warn(&pdev->dev,
2686 			 "Unsupported reset type: %d\n", hdev->reset_type);
2687 		break;
2688 	}
2689 }
2690 
2691 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2692 						   unsigned long *addr)
2693 {
2694 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2695 
2696 	/* first, resolve any unknown reset type to the known type(s) */
2697 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
2698 		/* we will intentionally ignore any errors from this function
2699 		 * as we will end up in *some* reset request in any case
2700 		 */
2701 		hclge_handle_hw_msix_error(hdev, addr);
2702 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
2703 		/* We deferred the clearing of the error event which caused the
2704 		 * interrupt since it was not possible to do that in interrupt
2705 		 * context (and this is the reason we introduced the new
2706 		 * UNKNOWN reset type). Now that the errors have been handled
2707 		 * and cleared in hardware, we can safely enable interrupts.
2708 		 * This is an exception to the norm.
2709 		 */
2710 		hclge_enable_vector(&hdev->misc_vector, true);
2711 	}
2712 
2713 	/* return the highest priority reset level amongst all */
2714 	if (test_bit(HNAE3_IMP_RESET, addr)) {
2715 		rst_level = HNAE3_IMP_RESET;
2716 		clear_bit(HNAE3_IMP_RESET, addr);
2717 		clear_bit(HNAE3_GLOBAL_RESET, addr);
2718 		clear_bit(HNAE3_CORE_RESET, addr);
2719 		clear_bit(HNAE3_FUNC_RESET, addr);
2720 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
2721 		rst_level = HNAE3_GLOBAL_RESET;
2722 		clear_bit(HNAE3_GLOBAL_RESET, addr);
2723 		clear_bit(HNAE3_CORE_RESET, addr);
2724 		clear_bit(HNAE3_FUNC_RESET, addr);
2725 	} else if (test_bit(HNAE3_CORE_RESET, addr)) {
2726 		rst_level = HNAE3_CORE_RESET;
2727 		clear_bit(HNAE3_CORE_RESET, addr);
2728 		clear_bit(HNAE3_FUNC_RESET, addr);
2729 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
2730 		rst_level = HNAE3_FUNC_RESET;
2731 		clear_bit(HNAE3_FUNC_RESET, addr);
2732 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
2733 		rst_level = HNAE3_FLR_RESET;
2734 		clear_bit(HNAE3_FLR_RESET, addr);
2735 	}
2736 
2737 	return rst_level;
2738 }
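
/* Illustrative only: reset levels are resolved highest-first and the lower
 * pending ones are dropped, e.g. with both HNAE3_GLOBAL_RESET and
 * HNAE3_FUNC_RESET set in the bitmap:
 *
 *	rst_level = hclge_get_reset_level(hdev, &hdev->reset_pending);
 *	// returns HNAE3_GLOBAL_RESET; both bits are now cleared, so no
 *	// separate function reset is performed afterwards.
 */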
2739 
2740 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2741 {
2742 	u32 clearval = 0;
2743 
2744 	switch (hdev->reset_type) {
2745 	case HNAE3_IMP_RESET:
2746 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2747 		break;
2748 	case HNAE3_GLOBAL_RESET:
2749 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2750 		break;
2751 	case HNAE3_CORE_RESET:
2752 		clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2753 		break;
2754 	default:
2755 		break;
2756 	}
2757 
2758 	if (!clearval)
2759 		return;
2760 
2761 	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2762 	hclge_enable_vector(&hdev->misc_vector, true);
2763 }
2764 
2765 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
2766 {
2767 	int ret = 0;
2768 
2769 	switch (hdev->reset_type) {
2770 	case HNAE3_FUNC_RESET:
2771 		/* fall through */
2772 	case HNAE3_FLR_RESET:
2773 		ret = hclge_set_all_vf_rst(hdev, true);
2774 		break;
2775 	default:
2776 		break;
2777 	}
2778 
2779 	return ret;
2780 }
2781 
2782 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
2783 {
2784 	u32 reg_val;
2785 	int ret = 0;
2786 
2787 	switch (hdev->reset_type) {
2788 	case HNAE3_FUNC_RESET:
2789 		/* There is no mechanism for the PF to know if the VF has
2790 		 * stopped IO; for now, just wait 100 ms for the VF to stop IO.
2791 		 */
2792 		msleep(100);
2793 		ret = hclge_func_reset_cmd(hdev, 0);
2794 		if (ret) {
2795 			dev_err(&hdev->pdev->dev,
2796 				"asserting function reset fail %d!\n", ret);
2797 			return ret;
2798 		}
2799 
2800 		/* After performing a PF reset, it is not necessary to do the
2801 		 * mailbox handling or send any command to firmware, because
2802 		 * any mailbox handling or command to firmware is only valid
2803 		 * after hclge_cmd_init is called.
2804 		 */
2805 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2806 		break;
2807 	case HNAE3_FLR_RESET:
2808 		/* There is no mechanism for the PF to know if the VF has
2809 		 * stopped IO; for now, just wait 100 ms for the VF to stop IO.
2810 		 */
2811 		msleep(100);
2812 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2813 		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
2814 		break;
2815 	case HNAE3_IMP_RESET:
2816 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
2817 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
2818 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
2819 		break;
2820 	default:
2821 		break;
2822 	}
2823 
2824 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
2825 
2826 	return ret;
2827 }
2828 
2829 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
2830 {
2831 #define MAX_RESET_FAIL_CNT 5
2832 #define RESET_UPGRADE_DELAY_SEC 10
2833 
2834 	if (hdev->reset_pending) {
2835 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
2836 			 hdev->reset_pending);
2837 		return true;
2838 	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
2839 		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
2840 		    BIT(HCLGE_IMP_RESET_BIT))) {
2841 		dev_info(&hdev->pdev->dev,
2842 			 "reset failed because IMP Reset is pending\n");
2843 		hclge_clear_reset_cause(hdev);
2844 		return false;
2845 	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
2846 		hdev->reset_fail_cnt++;
2847 		if (is_timeout) {
2848 			set_bit(hdev->reset_type, &hdev->reset_pending);
2849 			dev_info(&hdev->pdev->dev,
2850 				 "re-schedule to wait for hw reset done\n");
2851 			return true;
2852 		}
2853 
2854 		dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
2855 		hclge_clear_reset_cause(hdev);
2856 		mod_timer(&hdev->reset_timer,
2857 			  jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
2858 
2859 		return false;
2860 	}
2861 
2862 	hclge_clear_reset_cause(hdev);
2863 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
2864 	return false;
2865 }
2866 
2867 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
2868 {
2869 	int ret = 0;
2870 
2871 	switch (hdev->reset_type) {
2872 	case HNAE3_FUNC_RESET:
2873 		/* fall through */
2874 	case HNAE3_FLR_RESET:
2875 		ret = hclge_set_all_vf_rst(hdev, false);
2876 		break;
2877 	default:
2878 		break;
2879 	}
2880 
2881 	return ret;
2882 }
2883 
2884 static void hclge_reset(struct hclge_dev *hdev)
2885 {
2886 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2887 	bool is_timeout = false;
2888 	int ret;
2889 
2890 	/* Initialize ae_dev reset status as well, in case enet layer wants to
2891 	 * know if device is undergoing reset
2892 	 */
2893 	ae_dev->reset_type = hdev->reset_type;
2894 	hdev->reset_count++;
2895 	/* perform reset of the stack & ae device for a client */
2896 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
2897 	if (ret)
2898 		goto err_reset;
2899 
2900 	ret = hclge_reset_prepare_down(hdev);
2901 	if (ret)
2902 		goto err_reset;
2903 
2904 	rtnl_lock();
2905 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2906 	if (ret)
2907 		goto err_reset_lock;
2908 
2909 	rtnl_unlock();
2910 
2911 	ret = hclge_reset_prepare_wait(hdev);
2912 	if (ret)
2913 		goto err_reset;
2914 
2915 	if (hclge_reset_wait(hdev)) {
2916 		is_timeout = true;
2917 		goto err_reset;
2918 	}
2919 
2920 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
2921 	if (ret)
2922 		goto err_reset;
2923 
2924 	rtnl_lock();
2925 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2926 	if (ret)
2927 		goto err_reset_lock;
2928 
2929 	ret = hclge_reset_ae_dev(hdev->ae_dev);
2930 	if (ret)
2931 		goto err_reset_lock;
2932 
2933 	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
2934 	if (ret)
2935 		goto err_reset_lock;
2936 
2937 	ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
2938 	if (ret)
2939 		goto err_reset_lock;
2940 
2941 	hclge_clear_reset_cause(hdev);
2942 
2943 	ret = hclge_reset_prepare_up(hdev);
2944 	if (ret)
2945 		goto err_reset_lock;
2946 
2947 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2948 	if (ret)
2949 		goto err_reset_lock;
2950 
2951 	rtnl_unlock();
2952 
2953 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
2954 	if (ret)
2955 		goto err_reset;
2956 
2957 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
2958 	if (ret)
2959 		goto err_reset;
2960 
2961 	hdev->last_reset_time = jiffies;
2962 	hdev->reset_fail_cnt = 0;
2963 	ae_dev->reset_type = HNAE3_NONE_RESET;
2964 
2965 	return;
2966 
2967 err_reset_lock:
2968 	rtnl_unlock();
2969 err_reset:
2970 	if (hclge_reset_err_handle(hdev, is_timeout))
2971 		hclge_reset_task_schedule(hdev);
2972 }
2973 
2974 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
2975 {
2976 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
2977 	struct hclge_dev *hdev = ae_dev->priv;
2978 
2979 	/* We might end up getting called broadly because of the 2 cases below:
2980 	 * 1. A recoverable error was conveyed through APEI and the only way
2981 	 *    to bring back normalcy is to reset.
2982 	 * 2. A new reset request from the stack due to a timeout.
2983 	 *
2984 	 * For the first case, the error event might not have an ae handle
2985 	 * available. Check if this is a new reset request and we are not here
2986 	 * just because the last reset attempt did not succeed and the
2987 	 * watchdog hit us again. We will know this if the last reset request
2988 	 * did not occur very recently (watchdog timer = 5*HZ, so check after
2989 	 * a sufficiently large time, say 4*5*HZ). In case of a new request we
2990 	 * reset the "reset level" to PF reset. And if it is a repeat of the
2991 	 * most recent reset request then we want to throttle it; therefore,
2992 	 * we will not allow it again before 3*HZ has passed.
2993 	 */
2994 	if (!handle)
2995 		handle = &hdev->vport[0].nic;
2996 
2997 	if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
2998 		return;
2999 	else if (hdev->default_reset_request)
3000 		hdev->reset_level =
3001 			hclge_get_reset_level(hdev,
3002 					      &hdev->default_reset_request);
3003 	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3004 		hdev->reset_level = HNAE3_FUNC_RESET;
3005 
3006 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3007 		 hdev->reset_level);
3008 
3009 	/* request reset & schedule reset task */
3010 	set_bit(hdev->reset_level, &hdev->reset_request);
3011 	hclge_reset_task_schedule(hdev);
3012 
3013 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3014 		hdev->reset_level++;
3015 }
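
/* Illustrative timing (values from the code above): a repeat request within
 * 3 * HZ of the last reset is silently dropped, while a request arriving more
 * than 4 * 5 * HZ after it (with no default request recorded) is treated as
 * new and starts again at HNAE3_FUNC_RESET; each accepted event then bumps
 * reset_level by one, up to HNAE3_GLOBAL_RESET.
 */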
3016 
3017 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3018 					enum hnae3_reset_type rst_type)
3019 {
3020 	struct hclge_dev *hdev = ae_dev->priv;
3021 
3022 	set_bit(rst_type, &hdev->default_reset_request);
3023 }
3024 
3025 static void hclge_reset_timer(struct timer_list *t)
3026 {
3027 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3028 
3029 	dev_info(&hdev->pdev->dev,
3030 		 "triggering global reset in reset timer\n");
3031 	set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3032 	hclge_reset_event(hdev->pdev, NULL);
3033 }
3034 
3035 static void hclge_reset_subtask(struct hclge_dev *hdev)
3036 {
3037 	/* Check if there is any ongoing reset in the hardware. This status can
3038 	 * be checked from reset_pending. If there is, we need to wait for the
3039 	 * hardware to complete the reset.
3040 	 *    a. If we are able to figure out in reasonable time that the
3041 	 *       hardware has fully reset, we can proceed with the driver and
3042 	 *       client reset.
3043 	 *    b. Else, we can come back later to check this status, so
3044 	 *       reschedule now.
3045 	 */
3046 	hdev->last_reset_time = jiffies;
3047 	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3048 	if (hdev->reset_type != HNAE3_NONE_RESET)
3049 		hclge_reset(hdev);
3050 
3051 	/* check if we got any *new* reset requests to be honored */
3052 	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3053 	if (hdev->reset_type != HNAE3_NONE_RESET)
3054 		hclge_do_reset(hdev);
3055 
3056 	hdev->reset_type = HNAE3_NONE_RESET;
3057 }
3058 
3059 static void hclge_reset_service_task(struct work_struct *work)
3060 {
3061 	struct hclge_dev *hdev =
3062 		container_of(work, struct hclge_dev, rst_service_task);
3063 
3064 	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3065 		return;
3066 
3067 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3068 
3069 	hclge_reset_subtask(hdev);
3070 
3071 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3072 }
3073 
3074 static void hclge_mailbox_service_task(struct work_struct *work)
3075 {
3076 	struct hclge_dev *hdev =
3077 		container_of(work, struct hclge_dev, mbx_service_task);
3078 
3079 	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3080 		return;
3081 
3082 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3083 
3084 	hclge_mbx_handler(hdev);
3085 
3086 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3087 }
3088 
3089 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3090 {
3091 	int i;
3092 
3093 	/* start from vport 1; the PF (vport 0) is always alive */
3094 	for (i = 1; i < hdev->num_alloc_vport; i++) {
3095 		struct hclge_vport *vport = &hdev->vport[i];
3096 
3097 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3098 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3099 
3100 		/* If the VF is not alive, reset its MPS to the default value */
3101 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3102 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3103 	}
3104 }
3105 
3106 static void hclge_service_task(struct work_struct *work)
3107 {
3108 	struct hclge_dev *hdev =
3109 		container_of(work, struct hclge_dev, service_task);
3110 
3111 	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3112 		hclge_update_stats_for_all(hdev);
3113 		hdev->hw_stats.stats_timer = 0;
3114 	}
3115 
3116 	hclge_update_speed_duplex(hdev);
3117 	hclge_update_link_status(hdev);
3118 	hclge_update_vport_alive(hdev);
3119 	hclge_service_complete(hdev);
3120 }
3121 
3122 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3123 {
3124 	/* VF handle has no client */
3125 	if (!handle->client)
3126 		return container_of(handle, struct hclge_vport, nic);
3127 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
3128 		return container_of(handle, struct hclge_vport, roce);
3129 	else
3130 		return container_of(handle, struct hclge_vport, nic);
3131 }
3132 
3133 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3134 			    struct hnae3_vector_info *vector_info)
3135 {
3136 	struct hclge_vport *vport = hclge_get_vport(handle);
3137 	struct hnae3_vector_info *vector = vector_info;
3138 	struct hclge_dev *hdev = vport->back;
3139 	int alloc = 0;
3140 	int i, j;
3141 
3142 	vector_num = min(hdev->num_msi_left, vector_num);
3143 
3144 	for (j = 0; j < vector_num; j++) {
3145 		for (i = 1; i < hdev->num_msi; i++) {
3146 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3147 				vector->vector = pci_irq_vector(hdev->pdev, i);
3148 				vector->io_addr = hdev->hw.io_base +
3149 					HCLGE_VECTOR_REG_BASE +
3150 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
3151 					vport->vport_id *
3152 					HCLGE_VECTOR_VF_OFFSET;
3153 				hdev->vector_status[i] = vport->vport_id;
3154 				hdev->vector_irq[i] = vector->vector;
3155 
3156 				vector++;
3157 				alloc++;
3158 
3159 				break;
3160 			}
3161 		}
3162 	}
3163 	hdev->num_msi_left -= alloc;
3164 	hdev->num_msi_used += alloc;
3165 
3166 	return alloc;
3167 }
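
/* Illustrative only (the register constants are not defined in this file):
 * for the i-th MSI-X entry handed to a vport, the doorbell address is
 * computed as
 *
 *	io_addr = hw.io_base + HCLGE_VECTOR_REG_BASE
 *		  + (i - 1) * HCLGE_VECTOR_REG_OFFSET
 *		  + vport_id * HCLGE_VECTOR_VF_OFFSET;
 *
 * Vector 0 is never handed out here because it is reserved for the misc
 * interrupt set up in hclge_get_misc_vector().
 */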
3168 
3169 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3170 {
3171 	int i;
3172 
3173 	for (i = 0; i < hdev->num_msi; i++)
3174 		if (vector == hdev->vector_irq[i])
3175 			return i;
3176 
3177 	return -EINVAL;
3178 }
3179 
3180 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3181 {
3182 	struct hclge_vport *vport = hclge_get_vport(handle);
3183 	struct hclge_dev *hdev = vport->back;
3184 	int vector_id;
3185 
3186 	vector_id = hclge_get_vector_index(hdev, vector);
3187 	if (vector_id < 0) {
3188 		dev_err(&hdev->pdev->dev,
3189 			"Get vector index fail. vector_id = %d\n", vector_id);
3190 		return vector_id;
3191 	}
3192 
3193 	hclge_free_vector(hdev, vector_id);
3194 
3195 	return 0;
3196 }
3197 
3198 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3199 {
3200 	return HCLGE_RSS_KEY_SIZE;
3201 }
3202 
3203 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3204 {
3205 	return HCLGE_RSS_IND_TBL_SIZE;
3206 }
3207 
3208 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3209 				  const u8 hfunc, const u8 *key)
3210 {
3211 	struct hclge_rss_config_cmd *req;
3212 	struct hclge_desc desc;
3213 	int key_offset;
3214 	int key_size;
3215 	int ret;
3216 
3217 	req = (struct hclge_rss_config_cmd *)desc.data;
3218 
3219 	for (key_offset = 0; key_offset < 3; key_offset++) {
3220 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3221 					   false);
3222 
3223 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3224 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3225 
3226 		if (key_offset == 2)
3227 			key_size =
3228 			HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3229 		else
3230 			key_size = HCLGE_RSS_HASH_KEY_NUM;
3231 
3232 		memcpy(req->hash_key,
3233 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3234 
3235 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3236 		if (ret) {
3237 			dev_err(&hdev->pdev->dev,
3238 				"Configure RSS config fail, status = %d\n",
3239 				ret);
3240 			return ret;
3241 		}
3242 	}
3243 	return 0;
3244 }
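
/* Illustrative arithmetic, assuming HCLGE_RSS_KEY_SIZE = 40 and
 * HCLGE_RSS_HASH_KEY_NUM = 16 (values not visible in this file): the hash
 * key is split over the three descriptors as 16 + 16 + (40 - 32) = 8 bytes,
 * with the chunk index carried in hash_config via
 * HCLGE_RSS_HASH_KEY_OFFSET_B.
 */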
3245 
3246 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3247 {
3248 	struct hclge_rss_indirection_table_cmd *req;
3249 	struct hclge_desc desc;
3250 	int i, j;
3251 	int ret;
3252 
3253 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3254 
3255 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3256 		hclge_cmd_setup_basic_desc
3257 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3258 
3259 		req->start_table_index =
3260 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3261 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3262 
3263 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3264 			req->rss_result[j] =
3265 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3266 
3267 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3268 		if (ret) {
3269 			dev_err(&hdev->pdev->dev,
3270 				"Configure rss indir table fail, status = %d\n",
3271 				ret);
3272 			return ret;
3273 		}
3274 	}
3275 	return 0;
3276 }
3277 
3278 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3279 				 u16 *tc_size, u16 *tc_offset)
3280 {
3281 	struct hclge_rss_tc_mode_cmd *req;
3282 	struct hclge_desc desc;
3283 	int ret;
3284 	int i;
3285 
3286 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3287 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3288 
3289 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3290 		u16 mode = 0;
3291 
3292 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3293 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3294 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3295 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3296 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3297 
3298 		req->rss_tc_mode[i] = cpu_to_le16(mode);
3299 	}
3300 
3301 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3302 	if (ret)
3303 		dev_err(&hdev->pdev->dev,
3304 			"Configure rss tc mode fail, status = %d\n", ret);
3305 
3306 	return ret;
3307 }
3308 
3309 static void hclge_get_rss_type(struct hclge_vport *vport)
3310 {
3311 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
3312 	    vport->rss_tuple_sets.ipv4_udp_en ||
3313 	    vport->rss_tuple_sets.ipv4_sctp_en ||
3314 	    vport->rss_tuple_sets.ipv6_tcp_en ||
3315 	    vport->rss_tuple_sets.ipv6_udp_en ||
3316 	    vport->rss_tuple_sets.ipv6_sctp_en)
3317 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3318 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3319 		 vport->rss_tuple_sets.ipv6_fragment_en)
3320 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3321 	else
3322 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3323 }
3324 
3325 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3326 {
3327 	struct hclge_rss_input_tuple_cmd *req;
3328 	struct hclge_desc desc;
3329 	int ret;
3330 
3331 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3332 
3333 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3334 
3335 	/* Get the tuple cfg from pf */
3336 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3337 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3338 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3339 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3340 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3341 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3342 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3343 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3344 	hclge_get_rss_type(&hdev->vport[0]);
3345 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3346 	if (ret)
3347 		dev_err(&hdev->pdev->dev,
3348 			"Configure rss input fail, status = %d\n", ret);
3349 	return ret;
3350 }
3351 
3352 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3353 			 u8 *key, u8 *hfunc)
3354 {
3355 	struct hclge_vport *vport = hclge_get_vport(handle);
3356 	int i;
3357 
3358 	/* Get hash algorithm */
3359 	if (hfunc) {
3360 		switch (vport->rss_algo) {
3361 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3362 			*hfunc = ETH_RSS_HASH_TOP;
3363 			break;
3364 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
3365 			*hfunc = ETH_RSS_HASH_XOR;
3366 			break;
3367 		default:
3368 			*hfunc = ETH_RSS_HASH_UNKNOWN;
3369 			break;
3370 		}
3371 	}
3372 
3373 	/* Get the RSS Key required by the user */
3374 	if (key)
3375 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3376 
3377 	/* Get indirect table */
3378 	if (indir)
3379 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3380 			indir[i] =  vport->rss_indirection_tbl[i];
3381 
3382 	return 0;
3383 }
3384 
3385 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3386 			 const  u8 *key, const  u8 hfunc)
3387 {
3388 	struct hclge_vport *vport = hclge_get_vport(handle);
3389 	struct hclge_dev *hdev = vport->back;
3390 	u8 hash_algo;
3391 	int ret, i;
3392 
3393 	/* Set the RSS Hash Key if specified by the user */
3394 	if (key) {
3395 		switch (hfunc) {
3396 		case ETH_RSS_HASH_TOP:
3397 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3398 			break;
3399 		case ETH_RSS_HASH_XOR:
3400 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3401 			break;
3402 		case ETH_RSS_HASH_NO_CHANGE:
3403 			hash_algo = vport->rss_algo;
3404 			break;
3405 		default:
3406 			return -EINVAL;
3407 		}
3408 
3409 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3410 		if (ret)
3411 			return ret;
3412 
3413 		/* Update the shadow RSS key with the user specified key */
3414 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3415 		vport->rss_algo = hash_algo;
3416 	}
3417 
3418 	/* Update the shadow RSS table with user specified qids */
3419 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3420 		vport->rss_indirection_tbl[i] = indir[i];
3421 
3422 	/* Update the hardware */
3423 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3424 }
3425 
3426 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3427 {
3428 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3429 
3430 	if (nfc->data & RXH_L4_B_2_3)
3431 		hash_sets |= HCLGE_D_PORT_BIT;
3432 	else
3433 		hash_sets &= ~HCLGE_D_PORT_BIT;
3434 
3435 	if (nfc->data & RXH_IP_SRC)
3436 		hash_sets |= HCLGE_S_IP_BIT;
3437 	else
3438 		hash_sets &= ~HCLGE_S_IP_BIT;
3439 
3440 	if (nfc->data & RXH_IP_DST)
3441 		hash_sets |= HCLGE_D_IP_BIT;
3442 	else
3443 		hash_sets &= ~HCLGE_D_IP_BIT;
3444 
3445 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3446 		hash_sets |= HCLGE_V_TAG_BIT;
3447 
3448 	return hash_sets;
3449 }
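
/* Illustrative example: for an ethtool request on TCP_V4_FLOW with
 * nfc->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3,
 * the function returns
 *
 *	HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT
 *
 * i.e. hashing on the full source/destination IP and L4 port 4-tuple;
 * HCLGE_V_TAG_BIT is added only for SCTP flows.
 */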
3450 
3451 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3452 			       struct ethtool_rxnfc *nfc)
3453 {
3454 	struct hclge_vport *vport = hclge_get_vport(handle);
3455 	struct hclge_dev *hdev = vport->back;
3456 	struct hclge_rss_input_tuple_cmd *req;
3457 	struct hclge_desc desc;
3458 	u8 tuple_sets;
3459 	int ret;
3460 
3461 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3462 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
3463 		return -EINVAL;
3464 
3465 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3466 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3467 
3468 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3469 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3470 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3471 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3472 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3473 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3474 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3475 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3476 
3477 	tuple_sets = hclge_get_rss_hash_bits(nfc);
3478 	switch (nfc->flow_type) {
3479 	case TCP_V4_FLOW:
3480 		req->ipv4_tcp_en = tuple_sets;
3481 		break;
3482 	case TCP_V6_FLOW:
3483 		req->ipv6_tcp_en = tuple_sets;
3484 		break;
3485 	case UDP_V4_FLOW:
3486 		req->ipv4_udp_en = tuple_sets;
3487 		break;
3488 	case UDP_V6_FLOW:
3489 		req->ipv6_udp_en = tuple_sets;
3490 		break;
3491 	case SCTP_V4_FLOW:
3492 		req->ipv4_sctp_en = tuple_sets;
3493 		break;
3494 	case SCTP_V6_FLOW:
3495 		if ((nfc->data & RXH_L4_B_0_1) ||
3496 		    (nfc->data & RXH_L4_B_2_3))
3497 			return -EINVAL;
3498 
3499 		req->ipv6_sctp_en = tuple_sets;
3500 		break;
3501 	case IPV4_FLOW:
3502 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3503 		break;
3504 	case IPV6_FLOW:
3505 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3506 		break;
3507 	default:
3508 		return -EINVAL;
3509 	}
3510 
3511 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3512 	if (ret) {
3513 		dev_err(&hdev->pdev->dev,
3514 			"Set rss tuple fail, status = %d\n", ret);
3515 		return ret;
3516 	}
3517 
3518 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3519 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3520 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3521 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3522 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3523 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3524 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3525 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3526 	hclge_get_rss_type(vport);
3527 	return 0;
3528 }
3529 
3530 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3531 			       struct ethtool_rxnfc *nfc)
3532 {
3533 	struct hclge_vport *vport = hclge_get_vport(handle);
3534 	u8 tuple_sets;
3535 
3536 	nfc->data = 0;
3537 
3538 	switch (nfc->flow_type) {
3539 	case TCP_V4_FLOW:
3540 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3541 		break;
3542 	case UDP_V4_FLOW:
3543 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3544 		break;
3545 	case TCP_V6_FLOW:
3546 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3547 		break;
3548 	case UDP_V6_FLOW:
3549 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3550 		break;
3551 	case SCTP_V4_FLOW:
3552 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3553 		break;
3554 	case SCTP_V6_FLOW:
3555 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3556 		break;
3557 	case IPV4_FLOW:
3558 	case IPV6_FLOW:
3559 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3560 		break;
3561 	default:
3562 		return -EINVAL;
3563 	}
3564 
3565 	if (!tuple_sets)
3566 		return 0;
3567 
3568 	if (tuple_sets & HCLGE_D_PORT_BIT)
3569 		nfc->data |= RXH_L4_B_2_3;
3570 	if (tuple_sets & HCLGE_S_PORT_BIT)
3571 		nfc->data |= RXH_L4_B_0_1;
3572 	if (tuple_sets & HCLGE_D_IP_BIT)
3573 		nfc->data |= RXH_IP_DST;
3574 	if (tuple_sets & HCLGE_S_IP_BIT)
3575 		nfc->data |= RXH_IP_SRC;
3576 
3577 	return 0;
3578 }
3579 
3580 static int hclge_get_tc_size(struct hnae3_handle *handle)
3581 {
3582 	struct hclge_vport *vport = hclge_get_vport(handle);
3583 	struct hclge_dev *hdev = vport->back;
3584 
3585 	return hdev->rss_size_max;
3586 }
3587 
3588 int hclge_rss_init_hw(struct hclge_dev *hdev)
3589 {
3590 	struct hclge_vport *vport = hdev->vport;
3591 	u8 *rss_indir = vport[0].rss_indirection_tbl;
3592 	u16 rss_size = vport[0].alloc_rss_size;
3593 	u8 *key = vport[0].rss_hash_key;
3594 	u8 hfunc = vport[0].rss_algo;
3595 	u16 tc_offset[HCLGE_MAX_TC_NUM];
3596 	u16 tc_valid[HCLGE_MAX_TC_NUM];
3597 	u16 tc_size[HCLGE_MAX_TC_NUM];
3598 	u16 roundup_size;
3599 	int i, ret;
3600 
3601 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
3602 	if (ret)
3603 		return ret;
3604 
3605 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3606 	if (ret)
3607 		return ret;
3608 
3609 	ret = hclge_set_rss_input_tuple(hdev);
3610 	if (ret)
3611 		return ret;
3612 
3613 	/* Each TC has the same queue size, and the tc_size set to hardware is
3614 	 * the log2 of the roundup power of two of rss_size; the actual queue
3615 	 * size is limited by the indirection table.
3616 	 */
3617 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3618 		dev_err(&hdev->pdev->dev,
3619 			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
3620 			rss_size);
3621 		return -EINVAL;
3622 	}
3623 
3624 	roundup_size = roundup_pow_of_two(rss_size);
3625 	roundup_size = ilog2(roundup_size);
3626 
3627 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3628 		tc_valid[i] = 0;
3629 
3630 		if (!(hdev->hw_tc_map & BIT(i)))
3631 			continue;
3632 
3633 		tc_valid[i] = 1;
3634 		tc_size[i] = roundup_size;
3635 		tc_offset[i] = rss_size * i;
3636 	}
3637 
3638 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3639 }
3640 
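/* Build the default RSS indirection table for every vport: entry i maps to
 * queue (i % alloc_rss_size), spreading flows evenly over the allocated
 * RSS queues.
 */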
3641 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3642 {
3643 	struct hclge_vport *vport = hdev->vport;
3644 	int i, j;
3645 
3646 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3647 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3648 			vport[j].rss_indirection_tbl[i] =
3649 				i % vport[j].alloc_rss_size;
3650 	}
3651 }
3652 
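/* Set the default RSS configuration for every vport: the input tuple sets,
 * the hash algorithm (HCLGE_RSS_HASH_ALGO_SIMPLE on revision 0x21 and
 * later, Toeplitz otherwise), the default hash key, and the default
 * indirection table.
 */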
3653 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3654 {
3655 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3656 	struct hclge_vport *vport = hdev->vport;
3657 
3658 	if (hdev->pdev->revision >= 0x21)
3659 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3660 
3661 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3662 		vport[i].rss_tuple_sets.ipv4_tcp_en =
3663 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3664 		vport[i].rss_tuple_sets.ipv4_udp_en =
3665 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3666 		vport[i].rss_tuple_sets.ipv4_sctp_en =
3667 			HCLGE_RSS_INPUT_TUPLE_SCTP;
3668 		vport[i].rss_tuple_sets.ipv4_fragment_en =
3669 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3670 		vport[i].rss_tuple_sets.ipv6_tcp_en =
3671 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3672 		vport[i].rss_tuple_sets.ipv6_udp_en =
3673 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3674 		vport[i].rss_tuple_sets.ipv6_sctp_en =
3675 			HCLGE_RSS_INPUT_TUPLE_SCTP;
3676 		vport[i].rss_tuple_sets.ipv6_fragment_en =
3677 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3678 
3679 		vport[i].rss_algo = rss_algo;
3680 
3681 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
3682 		       HCLGE_RSS_KEY_SIZE);
3683 	}
3684 
3685 	hclge_rss_indir_init_cfg(hdev);
3686 }
3687 
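/* Map (en == true) or unmap (en == false) the TQP rings in ring_chain to
 * the given vector. Ring entries are packed into the command descriptor in
 * batches of HCLGE_VECTOR_ELEMENTS_PER_CMD; any partially filled descriptor
 * is sent after the loop.
 */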
3688 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3689 				int vector_id, bool en,
3690 				struct hnae3_ring_chain_node *ring_chain)
3691 {
3692 	struct hclge_dev *hdev = vport->back;
3693 	struct hnae3_ring_chain_node *node;
3694 	struct hclge_desc desc;
3695 	struct hclge_ctrl_vector_chain_cmd *req
3696 		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3697 	enum hclge_cmd_status status;
3698 	enum hclge_opcode_type op;
3699 	u16 tqp_type_and_id;
3700 	int i;
3701 
3702 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3703 	hclge_cmd_setup_basic_desc(&desc, op, false);
3704 	req->int_vector_id = vector_id;
3705 
3706 	i = 0;
3707 	for (node = ring_chain; node; node = node->next) {
3708 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3709 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
3710 				HCLGE_INT_TYPE_S,
3711 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3712 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3713 				HCLGE_TQP_ID_S, node->tqp_index);
3714 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3715 				HCLGE_INT_GL_IDX_S,
3716 				hnae3_get_field(node->int_gl_idx,
3717 						HNAE3_RING_GL_IDX_M,
3718 						HNAE3_RING_GL_IDX_S));
3719 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3720 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3721 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3722 			req->vfid = vport->vport_id;
3723 
3724 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
3725 			if (status) {
3726 				dev_err(&hdev->pdev->dev,
3727 					"Map TQP fail, status is %d.\n",
3728 					status);
3729 				return -EIO;
3730 			}
3731 			i = 0;
3732 
3733 			hclge_cmd_setup_basic_desc(&desc,
3734 						   op,
3735 						   false);
3736 			req->int_vector_id = vector_id;
3737 		}
3738 	}
3739 
3740 	if (i > 0) {
3741 		req->int_cause_num = i;
3742 		req->vfid = vport->vport_id;
3743 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
3744 		if (status) {
3745 			dev_err(&hdev->pdev->dev,
3746 				"Map TQP fail, status is %d.\n", status);
3747 			return -EIO;
3748 		}
3749 	}
3750 
3751 	return 0;
3752 }
3753 
3754 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3755 				    int vector,
3756 				    struct hnae3_ring_chain_node *ring_chain)
3757 {
3758 	struct hclge_vport *vport = hclge_get_vport(handle);
3759 	struct hclge_dev *hdev = vport->back;
3760 	int vector_id;
3761 
3762 	vector_id = hclge_get_vector_index(hdev, vector);
3763 	if (vector_id < 0) {
3764 		dev_err(&hdev->pdev->dev,
3765 			"Get vector index fail. vector_id =%d\n", vector_id);
3766 		return vector_id;
3767 	}
3768 
3769 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3770 }
3771 
3772 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3773 				       int vector,
3774 				       struct hnae3_ring_chain_node *ring_chain)
3775 {
3776 	struct hclge_vport *vport = hclge_get_vport(handle);
3777 	struct hclge_dev *hdev = vport->back;
3778 	int vector_id, ret;
3779 
3780 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3781 		return 0;
3782 
3783 	vector_id = hclge_get_vector_index(hdev, vector);
3784 	if (vector_id < 0) {
3785 		dev_err(&handle->pdev->dev,
3786 			"Get vector index fail. ret =%d\n", vector_id);
3787 		return vector_id;
3788 	}
3789 
3790 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3791 	if (ret)
3792 		dev_err(&handle->pdev->dev,
3793 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3794 			vector_id,
3795 			ret);
3796 
3797 	return ret;
3798 }
3799 
3800 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3801 			       struct hclge_promisc_param *param)
3802 {
3803 	struct hclge_promisc_cfg_cmd *req;
3804 	struct hclge_desc desc;
3805 	int ret;
3806 
3807 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3808 
3809 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
3810 	req->vf_id = param->vf_id;
3811 
3812 	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
3813 	 * pdev revision 0x20; newer revisions support them. Setting these
3814 	 * two fields does not cause an error when the driver sends the
3815 	 * command to firmware on revision 0x20.
3816 	 */
3817 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
3818 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
3819 
3820 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3821 	if (ret)
3822 		dev_err(&hdev->pdev->dev,
3823 			"Set promisc mode fail, status is %d.\n", ret);
3824 
3825 	return ret;
3826 }
3827 
3828 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3829 			      bool en_mc, bool en_bc, int vport_id)
3830 {
3831 	if (!param)
3832 		return;
3833 
3834 	memset(param, 0, sizeof(struct hclge_promisc_param));
3835 	if (en_uc)
3836 		param->enable = HCLGE_PROMISC_EN_UC;
3837 	if (en_mc)
3838 		param->enable |= HCLGE_PROMISC_EN_MC;
3839 	if (en_bc)
3840 		param->enable |= HCLGE_PROMISC_EN_BC;
3841 	param->vf_id = vport_id;
3842 }
3843 
3844 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3845 				  bool en_mc_pmc)
3846 {
3847 	struct hclge_vport *vport = hclge_get_vport(handle);
3848 	struct hclge_dev *hdev = vport->back;
3849 	struct hclge_promisc_param param;
3850 	bool en_bc_pmc = true;
3851 
3852 	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
3853 	 * is always bypassed. So broadcast promisc should be disabled until
3854 	 * the user enables promisc mode.
3855 	 */
3856 	if (handle->pdev->revision == 0x20)
3857 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
3858 
3859 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
3860 				 vport->vport_id);
3861 	return hclge_cmd_set_promisc_mode(hdev, &param);
3862 }
3863 
3864 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
3865 {
3866 	struct hclge_get_fd_mode_cmd *req;
3867 	struct hclge_desc desc;
3868 	int ret;
3869 
3870 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
3871 
3872 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
3873 
3874 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3875 	if (ret) {
3876 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
3877 		return ret;
3878 	}
3879 
3880 	*fd_mode = req->mode;
3881 
3882 	return ret;
3883 }
3884 
3885 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
3886 				   u32 *stage1_entry_num,
3887 				   u32 *stage2_entry_num,
3888 				   u16 *stage1_counter_num,
3889 				   u16 *stage2_counter_num)
3890 {
3891 	struct hclge_get_fd_allocation_cmd *req;
3892 	struct hclge_desc desc;
3893 	int ret;
3894 
3895 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
3896 
3897 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
3898 
3899 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3900 	if (ret) {
3901 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
3902 			ret);
3903 		return ret;
3904 	}
3905 
3906 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
3907 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
3908 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
3909 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
3910 
3911 	return ret;
3912 }
3913 
3914 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
3915 {
3916 	struct hclge_set_fd_key_config_cmd *req;
3917 	struct hclge_fd_key_cfg *stage;
3918 	struct hclge_desc desc;
3919 	int ret;
3920 
3921 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
3922 
3923 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
3924 	stage = &hdev->fd_cfg.key_cfg[stage_num];
3925 	req->stage = stage_num;
3926 	req->key_select = stage->key_sel;
3927 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
3928 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
3929 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
3930 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
3931 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
3932 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
3933 
3934 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3935 	if (ret)
3936 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
3937 
3938 	return ret;
3939 }
3940 
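/* Query the flow director mode and the entry/counter allocation from
 * firmware, then program the stage 1 key configuration: which tuples and
 * meta data fields are active and which words of the IPv6 addresses are
 * used in the key.
 */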
3941 static int hclge_init_fd_config(struct hclge_dev *hdev)
3942 {
3943 #define LOW_2_WORDS		0x03
3944 	struct hclge_fd_key_cfg *key_cfg;
3945 	int ret;
3946 
3947 	if (!hnae3_dev_fd_supported(hdev))
3948 		return 0;
3949 
3950 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
3951 	if (ret)
3952 		return ret;
3953 
3954 	switch (hdev->fd_cfg.fd_mode) {
3955 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
3956 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
3957 		break;
3958 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
3959 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
3960 		break;
3961 	default:
3962 		dev_err(&hdev->pdev->dev,
3963 			"Unsupported flow director mode %d\n",
3964 			hdev->fd_cfg.fd_mode);
3965 		return -EOPNOTSUPP;
3966 	}
3967 
3968 	hdev->fd_cfg.proto_support =
3969 		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
3970 		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
3971 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
3972 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
3973 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
3974 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
3975 	key_cfg->outer_sipv6_word_en = 0;
3976 	key_cfg->outer_dipv6_word_en = 0;
3977 
3978 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
3979 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
3980 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
3981 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
3982 
3983 	/* If the max 400-bit key is used, we can support tuples for ether type */
3984 	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
3985 		hdev->fd_cfg.proto_support |= ETHER_FLOW;
3986 		key_cfg->tuple_active |=
3987 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
3988 	}
3989 
3990 	/* roce_type is used to filter roce frames
3991 	 * dst_vport is used to specify the rule
3992 	 */
3993 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
3994 
3995 	ret = hclge_get_fd_allocation(hdev,
3996 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
3997 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
3998 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
3999 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4000 	if (ret)
4001 		return ret;
4002 
4003 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4004 }
4005 
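/* Write one TCAM entry at the given location; the key is split across
 * three chained command descriptors. sel_x selects the x or y half of the
 * key, and the entry valid bit is only set on the x write when adding.
 */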
4006 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4007 				int loc, u8 *key, bool is_add)
4008 {
4009 	struct hclge_fd_tcam_config_1_cmd *req1;
4010 	struct hclge_fd_tcam_config_2_cmd *req2;
4011 	struct hclge_fd_tcam_config_3_cmd *req3;
4012 	struct hclge_desc desc[3];
4013 	int ret;
4014 
4015 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4016 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4017 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4018 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4019 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4020 
4021 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4022 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4023 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4024 
4025 	req1->stage = stage;
4026 	req1->xy_sel = sel_x ? 1 : 0;
4027 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4028 	req1->index = cpu_to_le32(loc);
4029 	req1->entry_vld = sel_x ? is_add : 0;
4030 
4031 	if (key) {
4032 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4033 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4034 		       sizeof(req2->tcam_data));
4035 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4036 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4037 	}
4038 
4039 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
4040 	if (ret)
4041 		dev_err(&hdev->pdev->dev,
4042 			"config tcam key fail, ret=%d\n",
4043 			ret);
4044 
4045 	return ret;
4046 }
4047 
4048 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4049 			      struct hclge_fd_ad_data *action)
4050 {
4051 	struct hclge_fd_ad_config_cmd *req;
4052 	struct hclge_desc desc;
4053 	u64 ad_data = 0;
4054 	int ret;
4055 
4056 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4057 
4058 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
4059 	req->index = cpu_to_le32(loc);
4060 	req->stage = stage;
4061 
4062 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4063 		      action->write_rule_id_to_bd);
4064 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4065 			action->rule_id);
4066 	ad_data <<= 32;
4067 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4068 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4069 		      action->forward_to_direct_queue);
4070 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4071 			action->queue_id);
4072 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4073 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4074 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4075 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4076 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4077 			action->counter_id);
4078 
4079 	req->ad_data = cpu_to_le64(ad_data);
4080 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4081 	if (ret)
4082 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4083 
4084 	return ret;
4085 }
4086 
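/* Convert one tuple of the rule into its TCAM x/y representation using
 * calc_x()/calc_y(). Returns true if the tuple occupies space in the key,
 * so the caller advances the key cursors (even when the rule leaves the
 * tuple unused); returns false for tuples that are not active.
 */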
4087 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4088 				   struct hclge_fd_rule *rule)
4089 {
4090 	u16 tmp_x_s, tmp_y_s;
4091 	u32 tmp_x_l, tmp_y_l;
4092 	int i;
4093 
4094 	if (rule->unused_tuple & tuple_bit)
4095 		return true;
4096 
4097 	switch (tuple_bit) {
4098 	case 0:
4099 		return false;
4100 	case BIT(INNER_DST_MAC):
4101 		for (i = 0; i < 6; i++) {
4102 			calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4103 			       rule->tuples_mask.dst_mac[i]);
4104 			calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4105 			       rule->tuples_mask.dst_mac[i]);
4106 		}
4107 
4108 		return true;
4109 	case BIT(INNER_SRC_MAC):
4110 		for (i = 0; i < 6; i++) {
4111 			calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4112 			       rule->tuples_mask.src_mac[i]);
4113 			calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4114 			       rule->tuples_mask.src_mac[i]);
4115 		}
4116 
4117 		return true;
4118 	case BIT(INNER_VLAN_TAG_FST):
4119 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4120 		       rule->tuples_mask.vlan_tag1);
4121 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4122 		       rule->tuples_mask.vlan_tag1);
4123 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4124 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4125 
4126 		return true;
4127 	case BIT(INNER_ETH_TYPE):
4128 		calc_x(tmp_x_s, rule->tuples.ether_proto,
4129 		       rule->tuples_mask.ether_proto);
4130 		calc_y(tmp_y_s, rule->tuples.ether_proto,
4131 		       rule->tuples_mask.ether_proto);
4132 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4133 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4134 
4135 		return true;
4136 	case BIT(INNER_IP_TOS):
4137 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4138 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4139 
4140 		return true;
4141 	case BIT(INNER_IP_PROTO):
4142 		calc_x(*key_x, rule->tuples.ip_proto,
4143 		       rule->tuples_mask.ip_proto);
4144 		calc_y(*key_y, rule->tuples.ip_proto,
4145 		       rule->tuples_mask.ip_proto);
4146 
4147 		return true;
4148 	case BIT(INNER_SRC_IP):
4149 		calc_x(tmp_x_l, rule->tuples.src_ip[3],
4150 		       rule->tuples_mask.src_ip[3]);
4151 		calc_y(tmp_y_l, rule->tuples.src_ip[3],
4152 		       rule->tuples_mask.src_ip[3]);
4153 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4154 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4155 
4156 		return true;
4157 	case BIT(INNER_DST_IP):
4158 		calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4159 		       rule->tuples_mask.dst_ip[3]);
4160 		calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4161 		       rule->tuples_mask.dst_ip[3]);
4162 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4163 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4164 
4165 		return true;
4166 	case BIT(INNER_SRC_PORT):
4167 		calc_x(tmp_x_s, rule->tuples.src_port,
4168 		       rule->tuples_mask.src_port);
4169 		calc_y(tmp_y_s, rule->tuples.src_port,
4170 		       rule->tuples_mask.src_port);
4171 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4172 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4173 
4174 		return true;
4175 	case BIT(INNER_DST_PORT):
4176 		calc_x(tmp_x_s, rule->tuples.dst_port,
4177 		       rule->tuples_mask.dst_port);
4178 		calc_y(tmp_y_s, rule->tuples.dst_port,
4179 		       rule->tuples_mask.dst_port);
4180 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4181 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4182 
4183 		return true;
4184 	default:
4185 		return false;
4186 	}
4187 }
4188 
4189 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4190 				 u8 vf_id, u8 network_port_id)
4191 {
4192 	u32 port_number = 0;
4193 
4194 	if (port_type == HOST_PORT) {
4195 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4196 				pf_id);
4197 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4198 				vf_id);
4199 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4200 	} else {
4201 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4202 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
4203 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4204 	}
4205 
4206 	return port_number;
4207 }
4208 
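/* Pack the active meta data fields (packet type and destination vport)
 * into a 32-bit word, convert it with calc_x()/calc_y(), and shift the
 * result so that it occupies the most significant bits of the meta data
 * region of the key.
 */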
4209 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4210 				       __le32 *key_x, __le32 *key_y,
4211 				       struct hclge_fd_rule *rule)
4212 {
4213 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4214 	u8 cur_pos = 0, tuple_size, shift_bits;
4215 	int i;
4216 
4217 	for (i = 0; i < MAX_META_DATA; i++) {
4218 		tuple_size = meta_data_key_info[i].key_length;
4219 		tuple_bit = key_cfg->meta_data_active & BIT(i);
4220 
4221 		switch (tuple_bit) {
4222 		case BIT(ROCE_TYPE):
4223 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4224 			cur_pos += tuple_size;
4225 			break;
4226 		case BIT(DST_VPORT):
4227 			port_number = hclge_get_port_number(HOST_PORT, 0,
4228 							    rule->vf_id, 0);
4229 			hnae3_set_field(meta_data,
4230 					GENMASK(cur_pos + tuple_size - 1, cur_pos),
4231 					cur_pos, port_number);
4232 			cur_pos += tuple_size;
4233 			break;
4234 		default:
4235 			break;
4236 		}
4237 	}
4238 
4239 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4240 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4241 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
4242 
4243 	*key_x = cpu_to_le32(tmp_x << shift_bits);
4244 	*key_y = cpu_to_le32(tmp_y << shift_bits);
4245 }
4246 
4247 /* A complete key consists of a meta data key and a tuple key.
4248  * The meta data key is stored in the MSB region, the tuple key in the
4249  * LSB region, and unused bits are filled with 0.
4250  */
4251 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4252 			    struct hclge_fd_rule *rule)
4253 {
4254 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4255 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4256 	u8 *cur_key_x, *cur_key_y;
4257 	int i, ret, tuple_size;
4258 	u8 meta_data_region;
4259 
4260 	memset(key_x, 0, sizeof(key_x));
4261 	memset(key_y, 0, sizeof(key_y));
4262 	cur_key_x = key_x;
4263 	cur_key_y = key_y;
4264 
4265 	for (i = 0; i < MAX_TUPLE; i++) {
4266 		bool tuple_valid;
4267 		u32 check_tuple;
4268 
4269 		tuple_size = tuple_key_info[i].key_length / 8;
4270 		check_tuple = key_cfg->tuple_active & BIT(i);
4271 
4272 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4273 						     cur_key_y, rule);
4274 		if (tuple_valid) {
4275 			cur_key_x += tuple_size;
4276 			cur_key_y += tuple_size;
4277 		}
4278 	}
4279 
4280 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4281 			MAX_META_DATA_LENGTH / 8;
4282 
4283 	hclge_fd_convert_meta_data(key_cfg,
4284 				   (__le32 *)(key_x + meta_data_region),
4285 				   (__le32 *)(key_y + meta_data_region),
4286 				   rule);
4287 
4288 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4289 				   true);
4290 	if (ret) {
4291 		dev_err(&hdev->pdev->dev,
4292 			"fd key_y config fail, loc=%d, ret=%d\n",
4293 			rule->location, ret);
4294 		return ret;
4295 	}
4296 
4297 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4298 				   true);
4299 	if (ret)
4300 		dev_err(&hdev->pdev->dev,
4301 			"fd key_x config fail, loc=%d, ret=%d\n",
4302 			rule->location, ret);
4303 	return ret;
4304 }
4305 
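/* Translate the rule's action into an AD table entry: either drop the
 * packet or forward it to the requested queue, with the rule id written
 * back to the RX buffer descriptor so the hit rule can be identified.
 */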
4306 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4307 			       struct hclge_fd_rule *rule)
4308 {
4309 	struct hclge_fd_ad_data ad_data;
4310 
4311 	ad_data.ad_id = rule->location;
4312 
4313 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4314 		ad_data.drop_packet = true;
4315 		ad_data.forward_to_direct_queue = false;
4316 		ad_data.queue_id = 0;
4317 	} else {
4318 		ad_data.drop_packet = false;
4319 		ad_data.forward_to_direct_queue = true;
4320 		ad_data.queue_id = rule->queue_id;
4321 	}
4322 
4323 	ad_data.use_counter = false;
4324 	ad_data.counter_id = 0;
4325 
4326 	ad_data.use_next_stage = false;
4327 	ad_data.next_input_key = 0;
4328 
4329 	ad_data.write_rule_id_to_bd = true;
4330 	ad_data.rule_id = rule->location;
4331 
4332 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4333 }
4334 
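/* Validate an ethtool flow spec against the flow director capabilities and
 * record in *unused which tuples the rule does not care about (zero or
 * missing fields), so that they can be masked out of the key later.
 */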
4335 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4336 			       struct ethtool_rx_flow_spec *fs, u32 *unused)
4337 {
4338 	struct ethtool_tcpip4_spec *tcp_ip4_spec;
4339 	struct ethtool_usrip4_spec *usr_ip4_spec;
4340 	struct ethtool_tcpip6_spec *tcp_ip6_spec;
4341 	struct ethtool_usrip6_spec *usr_ip6_spec;
4342 	struct ethhdr *ether_spec;
4343 
4344 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4345 		return -EINVAL;
4346 
4347 	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4348 		return -EOPNOTSUPP;
4349 
4350 	if ((fs->flow_type & FLOW_EXT) &&
4351 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4352 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4353 		return -EOPNOTSUPP;
4354 	}
4355 
4356 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4357 	case SCTP_V4_FLOW:
4358 	case TCP_V4_FLOW:
4359 	case UDP_V4_FLOW:
4360 		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4361 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4362 
4363 		if (!tcp_ip4_spec->ip4src)
4364 			*unused |= BIT(INNER_SRC_IP);
4365 
4366 		if (!tcp_ip4_spec->ip4dst)
4367 			*unused |= BIT(INNER_DST_IP);
4368 
4369 		if (!tcp_ip4_spec->psrc)
4370 			*unused |= BIT(INNER_SRC_PORT);
4371 
4372 		if (!tcp_ip4_spec->pdst)
4373 			*unused |= BIT(INNER_DST_PORT);
4374 
4375 		if (!tcp_ip4_spec->tos)
4376 			*unused |= BIT(INNER_IP_TOS);
4377 
4378 		break;
4379 	case IP_USER_FLOW:
4380 		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4381 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4382 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4383 
4384 		if (!usr_ip4_spec->ip4src)
4385 			*unused |= BIT(INNER_SRC_IP);
4386 
4387 		if (!usr_ip4_spec->ip4dst)
4388 			*unused |= BIT(INNER_DST_IP);
4389 
4390 		if (!usr_ip4_spec->tos)
4391 			*unused |= BIT(INNER_IP_TOS);
4392 
4393 		if (!usr_ip4_spec->proto)
4394 			*unused |= BIT(INNER_IP_PROTO);
4395 
4396 		if (usr_ip4_spec->l4_4_bytes)
4397 			return -EOPNOTSUPP;
4398 
4399 		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4400 			return -EOPNOTSUPP;
4401 
4402 		break;
4403 	case SCTP_V6_FLOW:
4404 	case TCP_V6_FLOW:
4405 	case UDP_V6_FLOW:
4406 		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4407 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4408 			BIT(INNER_IP_TOS);
4409 
4410 		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4411 		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4412 			*unused |= BIT(INNER_SRC_IP);
4413 
4414 		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4415 		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4416 			*unused |= BIT(INNER_DST_IP);
4417 
4418 		if (!tcp_ip6_spec->psrc)
4419 			*unused |= BIT(INNER_SRC_PORT);
4420 
4421 		if (!tcp_ip6_spec->pdst)
4422 			*unused |= BIT(INNER_DST_PORT);
4423 
4424 		if (tcp_ip6_spec->tclass)
4425 			return -EOPNOTSUPP;
4426 
4427 		break;
4428 	case IPV6_USER_FLOW:
4429 		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4430 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4431 			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4432 			BIT(INNER_DST_PORT);
4433 
4434 		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4435 		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4436 			*unused |= BIT(INNER_SRC_IP);
4437 
4438 		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4439 		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4440 			*unused |= BIT(INNER_DST_IP);
4441 
4442 		if (!usr_ip6_spec->l4_proto)
4443 			*unused |= BIT(INNER_IP_PROTO);
4444 
4445 		if (usr_ip6_spec->tclass)
4446 			return -EOPNOTSUPP;
4447 
4448 		if (usr_ip6_spec->l4_4_bytes)
4449 			return -EOPNOTSUPP;
4450 
4451 		break;
4452 	case ETHER_FLOW:
4453 		ether_spec = &fs->h_u.ether_spec;
4454 		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4455 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4456 			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4457 
4458 		if (is_zero_ether_addr(ether_spec->h_source))
4459 			*unused |= BIT(INNER_SRC_MAC);
4460 
4461 		if (is_zero_ether_addr(ether_spec->h_dest))
4462 			*unused |= BIT(INNER_DST_MAC);
4463 
4464 		if (!ether_spec->h_proto)
4465 			*unused |= BIT(INNER_ETH_TYPE);
4466 
4467 		break;
4468 	default:
4469 		return -EOPNOTSUPP;
4470 	}
4471 
4472 	if ((fs->flow_type & FLOW_EXT)) {
4473 		if (fs->h_ext.vlan_etype)
4474 			return -EOPNOTSUPP;
4475 		if (!fs->h_ext.vlan_tci)
4476 			*unused |= BIT(INNER_VLAN_TAG_FST);
4477 
4478 		if (fs->m_ext.vlan_tci) {
4479 			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4480 				return -EINVAL;
4481 		}
4482 	} else {
4483 		*unused |= BIT(INNER_VLAN_TAG_FST);
4484 	}
4485 
4486 	if (fs->flow_type & FLOW_MAC_EXT) {
4487 		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4488 			return -EOPNOTSUPP;
4489 
4490 		if (is_zero_ether_addr(fs->h_ext.h_dest))
4491 			*unused |= BIT(INNER_DST_MAC);
4492 		else
4493 			*unused &= ~(BIT(INNER_DST_MAC));
4494 	}
4495 
4496 	return 0;
4497 }
4498 
4499 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4500 {
4501 	struct hclge_fd_rule *rule = NULL;
4502 	struct hlist_node *node2;
4503 
4504 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4505 		if (rule->location >= location)
4506 			break;
4507 	}
4508 
4509 	return rule && rule->location == location;
4510 }
4511 
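/* The rule list is kept sorted by location. An existing rule at the same
 * location is freed first; on add, the new rule is linked in behind its
 * predecessor, while deleting a non-existent rule is reported as an error.
 */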
4512 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4513 				     struct hclge_fd_rule *new_rule,
4514 				     u16 location,
4515 				     bool is_add)
4516 {
4517 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
4518 	struct hlist_node *node2;
4519 
4520 	if (is_add && !new_rule)
4521 		return -EINVAL;
4522 
4523 	hlist_for_each_entry_safe(rule, node2,
4524 				  &hdev->fd_rule_list, rule_node) {
4525 		if (rule->location >= location)
4526 			break;
4527 		parent = rule;
4528 	}
4529 
4530 	if (rule && rule->location == location) {
4531 		hlist_del(&rule->rule_node);
4532 		kfree(rule);
4533 		hdev->hclge_fd_rule_num--;
4534 
4535 		if (!is_add)
4536 			return 0;
4537 
4538 	} else if (!is_add) {
4539 		dev_err(&hdev->pdev->dev,
4540 			"delete fail, rule %d does not exist\n",
4541 			location);
4542 		return -EINVAL;
4543 	}
4544 
4545 	INIT_HLIST_NODE(&new_rule->rule_node);
4546 
4547 	if (parent)
4548 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4549 	else
4550 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4551 
4552 	hdev->hclge_fd_rule_num++;
4553 
4554 	return 0;
4555 }
4556 
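/* Copy the tuples and their masks from the ethtool flow spec into the
 * rule, converting multi-byte fields from big endian to host order and
 * deriving the L4 protocol from the flow type where it is implicit.
 */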
4557 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4558 			      struct ethtool_rx_flow_spec *fs,
4559 			      struct hclge_fd_rule *rule)
4560 {
4561 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4562 
4563 	switch (flow_type) {
4564 	case SCTP_V4_FLOW:
4565 	case TCP_V4_FLOW:
4566 	case UDP_V4_FLOW:
4567 		rule->tuples.src_ip[3] =
4568 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4569 		rule->tuples_mask.src_ip[3] =
4570 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4571 
4572 		rule->tuples.dst_ip[3] =
4573 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4574 		rule->tuples_mask.dst_ip[3] =
4575 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4576 
4577 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4578 		rule->tuples_mask.src_port =
4579 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4580 
4581 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4582 		rule->tuples_mask.dst_port =
4583 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4584 
4585 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4586 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4587 
4588 		rule->tuples.ether_proto = ETH_P_IP;
4589 		rule->tuples_mask.ether_proto = 0xFFFF;
4590 
4591 		break;
4592 	case IP_USER_FLOW:
4593 		rule->tuples.src_ip[3] =
4594 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4595 		rule->tuples_mask.src_ip[3] =
4596 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4597 
4598 		rule->tuples.dst_ip[3] =
4599 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4600 		rule->tuples_mask.dst_ip[3] =
4601 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4602 
4603 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4604 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4605 
4606 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4607 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4608 
4609 		rule->tuples.ether_proto = ETH_P_IP;
4610 		rule->tuples_mask.ether_proto = 0xFFFF;
4611 
4612 		break;
4613 	case SCTP_V6_FLOW:
4614 	case TCP_V6_FLOW:
4615 	case UDP_V6_FLOW:
4616 		be32_to_cpu_array(rule->tuples.src_ip,
4617 				  fs->h_u.tcp_ip6_spec.ip6src, 4);
4618 		be32_to_cpu_array(rule->tuples_mask.src_ip,
4619 				  fs->m_u.tcp_ip6_spec.ip6src, 4);
4620 
4621 		be32_to_cpu_array(rule->tuples.dst_ip,
4622 				  fs->h_u.tcp_ip6_spec.ip6dst, 4);
4623 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
4624 				  fs->m_u.tcp_ip6_spec.ip6dst, 4);
4625 
4626 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4627 		rule->tuples_mask.src_port =
4628 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4629 
4630 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4631 		rule->tuples_mask.dst_port =
4632 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4633 
4634 		rule->tuples.ether_proto = ETH_P_IPV6;
4635 		rule->tuples_mask.ether_proto = 0xFFFF;
4636 
4637 		break;
4638 	case IPV6_USER_FLOW:
4639 		be32_to_cpu_array(rule->tuples.src_ip,
4640 				  fs->h_u.usr_ip6_spec.ip6src, 4);
4641 		be32_to_cpu_array(rule->tuples_mask.src_ip,
4642 				  fs->m_u.usr_ip6_spec.ip6src, 4);
4643 
4644 		be32_to_cpu_array(rule->tuples.dst_ip,
4645 				  fs->h_u.usr_ip6_spec.ip6dst, 4);
4646 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
4647 				  fs->m_u.usr_ip6_spec.ip6dst, 4);
4648 
4649 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4650 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4651 
4652 		rule->tuples.ether_proto = ETH_P_IPV6;
4653 		rule->tuples_mask.ether_proto = 0xFFFF;
4654 
4655 		break;
4656 	case ETHER_FLOW:
4657 		ether_addr_copy(rule->tuples.src_mac,
4658 				fs->h_u.ether_spec.h_source);
4659 		ether_addr_copy(rule->tuples_mask.src_mac,
4660 				fs->m_u.ether_spec.h_source);
4661 
4662 		ether_addr_copy(rule->tuples.dst_mac,
4663 				fs->h_u.ether_spec.h_dest);
4664 		ether_addr_copy(rule->tuples_mask.dst_mac,
4665 				fs->m_u.ether_spec.h_dest);
4666 
4667 		rule->tuples.ether_proto =
4668 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
4669 		rule->tuples_mask.ether_proto =
4670 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
4671 
4672 		break;
4673 	default:
4674 		return -EOPNOTSUPP;
4675 	}
4676 
4677 	switch (flow_type) {
4678 	case SCTP_V4_FLOW:
4679 	case SCTP_V6_FLOW:
4680 		rule->tuples.ip_proto = IPPROTO_SCTP;
4681 		rule->tuples_mask.ip_proto = 0xFF;
4682 		break;
4683 	case TCP_V4_FLOW:
4684 	case TCP_V6_FLOW:
4685 		rule->tuples.ip_proto = IPPROTO_TCP;
4686 		rule->tuples_mask.ip_proto = 0xFF;
4687 		break;
4688 	case UDP_V4_FLOW:
4689 	case UDP_V6_FLOW:
4690 		rule->tuples.ip_proto = IPPROTO_UDP;
4691 		rule->tuples_mask.ip_proto = 0xFF;
4692 		break;
4693 	default:
4694 		break;
4695 	}
4696 
4697 	if ((fs->flow_type & FLOW_EXT)) {
4698 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4699 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4700 	}
4701 
4702 	if (fs->flow_type & FLOW_MAC_EXT) {
4703 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4704 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4705 	}
4706 
4707 	return 0;
4708 }
4709 
4710 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4711 			      struct ethtool_rxnfc *cmd)
4712 {
4713 	struct hclge_vport *vport = hclge_get_vport(handle);
4714 	struct hclge_dev *hdev = vport->back;
4715 	u16 dst_vport_id = 0, q_index = 0;
4716 	struct ethtool_rx_flow_spec *fs;
4717 	struct hclge_fd_rule *rule;
4718 	u32 unused = 0;
4719 	u8 action;
4720 	int ret;
4721 
4722 	if (!hnae3_dev_fd_supported(hdev))
4723 		return -EOPNOTSUPP;
4724 
4725 	if (!hdev->fd_en) {
4726 		dev_warn(&hdev->pdev->dev,
4727 			 "Please enable flow director first\n");
4728 		return -EOPNOTSUPP;
4729 	}
4730 
4731 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4732 
4733 	ret = hclge_fd_check_spec(hdev, fs, &unused);
4734 	if (ret) {
4735 		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4736 		return ret;
4737 	}
4738 
4739 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4740 		action = HCLGE_FD_ACTION_DROP_PACKET;
4741 	} else {
4742 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4743 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4744 		u16 tqps;
4745 
4746 		if (vf > hdev->num_req_vfs) {
4747 			dev_err(&hdev->pdev->dev,
4748 				"Error: vf id (%d) > max vf num (%d)\n",
4749 				vf, hdev->num_req_vfs);
4750 			return -EINVAL;
4751 		}
4752 
4753 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4754 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4755 
4756 		if (ring >= tqps) {
4757 			dev_err(&hdev->pdev->dev,
4758 				"Error: queue id (%d) > max tqp num (%d)\n",
4759 				ring, tqps - 1);
4760 			return -EINVAL;
4761 		}
4762 
4763 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4764 		q_index = ring;
4765 	}
4766 
4767 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4768 	if (!rule)
4769 		return -ENOMEM;
4770 
4771 	ret = hclge_fd_get_tuple(hdev, fs, rule);
4772 	if (ret)
4773 		goto free_rule;
4774 
4775 	rule->flow_type = fs->flow_type;
4776 
4777 	rule->location = fs->location;
4778 	rule->unused_tuple = unused;
4779 	rule->vf_id = dst_vport_id;
4780 	rule->queue_id = q_index;
4781 	rule->action = action;
4782 
4783 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4784 	if (ret)
4785 		goto free_rule;
4786 
4787 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4788 	if (ret)
4789 		goto free_rule;
4790 
4791 	ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4792 	if (ret)
4793 		goto free_rule;
4794 
4795 	return ret;
4796 
4797 free_rule:
4798 	kfree(rule);
4799 	return ret;
4800 }
4801 
4802 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4803 			      struct ethtool_rxnfc *cmd)
4804 {
4805 	struct hclge_vport *vport = hclge_get_vport(handle);
4806 	struct hclge_dev *hdev = vport->back;
4807 	struct ethtool_rx_flow_spec *fs;
4808 	int ret;
4809 
4810 	if (!hnae3_dev_fd_supported(hdev))
4811 		return -EOPNOTSUPP;
4812 
4813 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4814 
4815 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4816 		return -EINVAL;
4817 
4818 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
4819 		dev_err(&hdev->pdev->dev,
4820 			"Delete fail, rule %d does not exist\n",
4821 			fs->location);
4822 		return -ENOENT;
4823 	}
4824 
4825 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4826 				   fs->location, NULL, false);
4827 	if (ret)
4828 		return ret;
4829 
4830 	return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4831 					 false);
4832 }
4833 
4834 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4835 				     bool clear_list)
4836 {
4837 	struct hclge_vport *vport = hclge_get_vport(handle);
4838 	struct hclge_dev *hdev = vport->back;
4839 	struct hclge_fd_rule *rule;
4840 	struct hlist_node *node;
4841 
4842 	if (!hnae3_dev_fd_supported(hdev))
4843 		return;
4844 
4845 	if (clear_list) {
4846 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4847 					  rule_node) {
4848 			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4849 					     rule->location, NULL, false);
4850 			hlist_del(&rule->rule_node);
4851 			kfree(rule);
4852 			hdev->hclge_fd_rule_num--;
4853 		}
4854 	} else {
4855 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4856 					  rule_node)
4857 			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4858 					     rule->location, NULL, false);
4859 	}
4860 }
4861 
4862 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4863 {
4864 	struct hclge_vport *vport = hclge_get_vport(handle);
4865 	struct hclge_dev *hdev = vport->back;
4866 	struct hclge_fd_rule *rule;
4867 	struct hlist_node *node;
4868 	int ret;
4869 
4870 	/* Return 0 here, because the reset error handling will check this
4871 	 * return value. If an error is returned here, the reset process
4872 	 * will fail.
4873 	 */
4874 	if (!hnae3_dev_fd_supported(hdev))
4875 		return 0;
4876 
4877 	/* if fd is disabled, it should not be restored during reset */
4878 	if (!hdev->fd_en)
4879 		return 0;
4880 
4881 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4882 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4883 		if (!ret)
4884 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4885 
4886 		if (ret) {
4887 			dev_warn(&hdev->pdev->dev,
4888 				 "Restore rule %d failed, remove it\n",
4889 				 rule->location);
4890 			hlist_del(&rule->rule_node);
4891 			kfree(rule);
4892 			hdev->hclge_fd_rule_num--;
4893 		}
4894 	}
4895 	return 0;
4896 }
4897 
4898 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4899 				 struct ethtool_rxnfc *cmd)
4900 {
4901 	struct hclge_vport *vport = hclge_get_vport(handle);
4902 	struct hclge_dev *hdev = vport->back;
4903 
4904 	if (!hnae3_dev_fd_supported(hdev))
4905 		return -EOPNOTSUPP;
4906 
4907 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
4908 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4909 
4910 	return 0;
4911 }
4912 
4913 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
4914 				  struct ethtool_rxnfc *cmd)
4915 {
4916 	struct hclge_vport *vport = hclge_get_vport(handle);
4917 	struct hclge_fd_rule *rule = NULL;
4918 	struct hclge_dev *hdev = vport->back;
4919 	struct ethtool_rx_flow_spec *fs;
4920 	struct hlist_node *node2;
4921 
4922 	if (!hnae3_dev_fd_supported(hdev))
4923 		return -EOPNOTSUPP;
4924 
4925 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4926 
4927 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4928 		if (rule->location >= fs->location)
4929 			break;
4930 	}
4931 
4932 	if (!rule || fs->location != rule->location)
4933 		return -ENOENT;
4934 
4935 	fs->flow_type = rule->flow_type;
4936 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4937 	case SCTP_V4_FLOW:
4938 	case TCP_V4_FLOW:
4939 	case UDP_V4_FLOW:
4940 		fs->h_u.tcp_ip4_spec.ip4src =
4941 				cpu_to_be32(rule->tuples.src_ip[3]);
4942 		fs->m_u.tcp_ip4_spec.ip4src =
4943 				rule->unused_tuple & BIT(INNER_SRC_IP) ?
4944 				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4945 
4946 		fs->h_u.tcp_ip4_spec.ip4dst =
4947 				cpu_to_be32(rule->tuples.dst_ip[3]);
4948 		fs->m_u.tcp_ip4_spec.ip4dst =
4949 				rule->unused_tuple & BIT(INNER_DST_IP) ?
4950 				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4951 
4952 		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
4953 		fs->m_u.tcp_ip4_spec.psrc =
4954 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
4955 				0 : cpu_to_be16(rule->tuples_mask.src_port);
4956 
4957 		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
4958 		fs->m_u.tcp_ip4_spec.pdst =
4959 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
4960 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
4961 
4962 		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
4963 		fs->m_u.tcp_ip4_spec.tos =
4964 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
4965 				0 : rule->tuples_mask.ip_tos;
4966 
4967 		break;
4968 	case IP_USER_FLOW:
4969 		fs->h_u.usr_ip4_spec.ip4src =
4970 				cpu_to_be32(rule->tuples.src_ip[3]);
4971 		fs->m_u.usr_ip4_spec.ip4src =
4972 				rule->unused_tuple & BIT(INNER_SRC_IP) ?
4973 				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4974 
4975 		fs->h_u.usr_ip4_spec.ip4dst =
4976 				cpu_to_be32(rule->tuples.dst_ip[3]);
4977 		fs->m_u.usr_ip4_spec.ip4dst =
4978 				rule->unused_tuple & BIT(INNER_DST_IP) ?
4979 				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4980 
4981 		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
4982 		fs->m_u.usr_ip4_spec.tos =
4983 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
4984 				0 : rule->tuples_mask.ip_tos;
4985 
4986 		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
4987 		fs->m_u.usr_ip4_spec.proto =
4988 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
4989 				0 : rule->tuples_mask.ip_proto;
4990 
4991 		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
4992 
4993 		break;
4994 	case SCTP_V6_FLOW:
4995 	case TCP_V6_FLOW:
4996 	case UDP_V6_FLOW:
4997 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
4998 				  rule->tuples.src_ip, 4);
4999 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5000 			memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5001 		else
5002 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5003 					  rule->tuples_mask.src_ip, 4);
5004 
5005 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5006 				  rule->tuples.dst_ip, 4);
5007 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5008 			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5009 		else
5010 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5011 					  rule->tuples_mask.dst_ip, 4);
5012 
5013 		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5014 		fs->m_u.tcp_ip6_spec.psrc =
5015 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5016 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5017 
5018 		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5019 		fs->m_u.tcp_ip6_spec.pdst =
5020 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5021 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5022 
5023 		break;
5024 	case IPV6_USER_FLOW:
5025 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5026 				  rule->tuples.src_ip, 4);
5027 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5028 			memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5029 		else
5030 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5031 					  rule->tuples_mask.src_ip, 4);
5032 
5033 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5034 				  rule->tuples.dst_ip, 4);
5035 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5036 			memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5037 		else
5038 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5039 					  rule->tuples_mask.dst_ip, 4);
5040 
5041 		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5042 		fs->m_u.usr_ip6_spec.l4_proto =
5043 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5044 				0 : rule->tuples_mask.ip_proto;
5045 
5046 		break;
5047 	case ETHER_FLOW:
5048 		ether_addr_copy(fs->h_u.ether_spec.h_source,
5049 				rule->tuples.src_mac);
5050 		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5051 			eth_zero_addr(fs->m_u.ether_spec.h_source);
5052 		else
5053 			ether_addr_copy(fs->m_u.ether_spec.h_source,
5054 					rule->tuples_mask.src_mac);
5055 
5056 		ether_addr_copy(fs->h_u.ether_spec.h_dest,
5057 				rule->tuples.dst_mac);
5058 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
5059 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
5060 		else
5061 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
5062 					rule->tuples_mask.dst_mac);
5063 
5064 		fs->h_u.ether_spec.h_proto =
5065 				cpu_to_be16(rule->tuples.ether_proto);
5066 		fs->m_u.ether_spec.h_proto =
5067 				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5068 				0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5069 
5070 		break;
5071 	default:
5072 		return -EOPNOTSUPP;
5073 	}
5074 
5075 	if (fs->flow_type & FLOW_EXT) {
5076 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5077 		fs->m_ext.vlan_tci =
5078 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5079 				cpu_to_be16(VLAN_VID_MASK) :
5080 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
5081 	}
5082 
5083 	if (fs->flow_type & FLOW_MAC_EXT) {
5084 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5085 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
5086 			eth_zero_addr(fs->m_ext.h_dest);
5087 		else
5088 			ether_addr_copy(fs->m_ext.h_dest,
5089 					rule->tuples_mask.dst_mac);
5090 	}
5091 
5092 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5093 		fs->ring_cookie = RX_CLS_FLOW_DISC;
5094 	} else {
5095 		u64 vf_id;
5096 
5097 		fs->ring_cookie = rule->queue_id;
5098 		vf_id = rule->vf_id;
5099 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5100 		fs->ring_cookie |= vf_id;
5101 	}
5102 
5103 	return 0;
5104 }
5105 
5106 static int hclge_get_all_rules(struct hnae3_handle *handle,
5107 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
5108 {
5109 	struct hclge_vport *vport = hclge_get_vport(handle);
5110 	struct hclge_dev *hdev = vport->back;
5111 	struct hclge_fd_rule *rule;
5112 	struct hlist_node *node2;
5113 	int cnt = 0;
5114 
5115 	if (!hnae3_dev_fd_supported(hdev))
5116 		return -EOPNOTSUPP;
5117 
5118 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5119 
5120 	hlist_for_each_entry_safe(rule, node2,
5121 				  &hdev->fd_rule_list, rule_node) {
5122 		if (cnt == cmd->rule_cnt)
5123 			return -EMSGSIZE;
5124 
5125 		rule_locs[cnt] = rule->location;
5126 		cnt++;
5127 	}
5128 
5129 	cmd->rule_cnt = cnt;
5130 
5131 	return 0;
5132 }
5133 
5134 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5135 {
5136 	struct hclge_vport *vport = hclge_get_vport(handle);
5137 	struct hclge_dev *hdev = vport->back;
5138 
5139 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5140 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5141 }
5142 
5143 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5144 {
5145 	struct hclge_vport *vport = hclge_get_vport(handle);
5146 	struct hclge_dev *hdev = vport->back;
5147 
5148 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5149 }
5150 
5151 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5152 {
5153 	struct hclge_vport *vport = hclge_get_vport(handle);
5154 	struct hclge_dev *hdev = vport->back;
5155 
5156 	return hdev->reset_count;
5157 }
5158 
5159 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5160 {
5161 	struct hclge_vport *vport = hclge_get_vport(handle);
5162 	struct hclge_dev *hdev = vport->back;
5163 
5164 	hdev->fd_en = enable;
5165 	if (!enable)
5166 		hclge_del_all_fd_entries(handle, false);
5167 	else
5168 		hclge_restore_fd_entries(handle);
5169 }
5170 
5171 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5172 {
5173 	struct hclge_desc desc;
5174 	struct hclge_config_mac_mode_cmd *req =
5175 		(struct hclge_config_mac_mode_cmd *)desc.data;
5176 	u32 loop_en = 0;
5177 	int ret;
5178 
5179 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5180 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5181 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5182 	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5183 	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5184 	hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5185 	hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5186 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5187 	hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5188 	hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5189 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5190 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5191 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5192 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5193 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5194 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5195 
5196 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5197 	if (ret)
5198 		dev_err(&hdev->pdev->dev,
5199 			"mac enable fail, ret =%d.\n", ret);
5200 }
5201 
5202 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5203 {
5204 	struct hclge_config_mac_mode_cmd *req;
5205 	struct hclge_desc desc;
5206 	u32 loop_en;
5207 	int ret;
5208 
5209 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5210 	/* 1 Read out the MAC mode config first */
5211 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5212 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5213 	if (ret) {
5214 		dev_err(&hdev->pdev->dev,
5215 			"mac loopback get fail, ret =%d.\n", ret);
5216 		return ret;
5217 	}
5218 
5219 	/* 2 Then setup the loopback flag */
5220 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5221 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5222 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5223 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5224 
5225 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5226 
5227 	/* 3 Config mac work mode with loopback flag
5228 	 * and its original configuration parameters
5229 	 */
5230 	hclge_cmd_reuse_desc(&desc, false);
5231 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5232 	if (ret)
5233 		dev_err(&hdev->pdev->dev,
5234 			"mac loopback set fail, ret =%d.\n", ret);
5235 	return ret;
5236 }
5237 
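/* Enable or disable serdes serial/parallel inner loopback: issue the
 * loopback command, poll until the firmware reports completion, then
 * reconfigure the MAC and wait for the link to reach the expected state.
 */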
5238 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5239 				     enum hnae3_loop loop_mode)
5240 {
5241 #define HCLGE_SERDES_RETRY_MS	10
5242 #define HCLGE_SERDES_RETRY_NUM	100
5243 
5244 #define HCLGE_MAC_LINK_STATUS_MS   20
5245 #define HCLGE_MAC_LINK_STATUS_NUM  10
5246 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5247 #define HCLGE_MAC_LINK_STATUS_UP   1
5248 
5249 	struct hclge_serdes_lb_cmd *req;
5250 	struct hclge_desc desc;
5251 	int mac_link_ret = 0;
5252 	int ret, i = 0;
5253 	u8 loop_mode_b;
5254 
5255 	req = (struct hclge_serdes_lb_cmd *)desc.data;
5256 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5257 
5258 	switch (loop_mode) {
5259 	case HNAE3_LOOP_SERIAL_SERDES:
5260 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5261 		break;
5262 	case HNAE3_LOOP_PARALLEL_SERDES:
5263 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5264 		break;
5265 	default:
5266 		dev_err(&hdev->pdev->dev,
5267 			"unsupported serdes loopback mode %d\n", loop_mode);
5268 		return -ENOTSUPP;
5269 	}
5270 
5271 	if (en) {
5272 		req->enable = loop_mode_b;
5273 		req->mask = loop_mode_b;
5274 		mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5275 	} else {
5276 		req->mask = loop_mode_b;
5277 		mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5278 	}
5279 
5280 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5281 	if (ret) {
5282 		dev_err(&hdev->pdev->dev,
5283 			"serdes loopback set fail, ret = %d\n", ret);
5284 		return ret;
5285 	}
5286 
5287 	do {
5288 		msleep(HCLGE_SERDES_RETRY_MS);
5289 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5290 					   true);
5291 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5292 		if (ret) {
5293 			dev_err(&hdev->pdev->dev,
5294 				"serdes loopback get, ret = %d\n", ret);
5295 			return ret;
5296 		}
5297 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
5298 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5299 
5300 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5301 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5302 		return -EBUSY;
5303 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5304 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5305 		return -EIO;
5306 	}
5307 
5308 	hclge_cfg_mac_mode(hdev, en);
5309 
5310 	i = 0;
5311 	do {
5312 		/* serdes internal loopback, independent of the network cable. */
5313 		msleep(HCLGE_MAC_LINK_STATUS_MS);
5314 		ret = hclge_get_mac_link_status(hdev);
5315 		if (ret == mac_link_ret)
5316 			return 0;
5317 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5318 
5319 	dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5320 
5321 	return -EBUSY;
5322 }
5323 
5324 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5325 			    int stream_id, bool enable)
5326 {
5327 	struct hclge_desc desc;
5328 	struct hclge_cfg_com_tqp_queue_cmd *req =
5329 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5330 	int ret;
5331 
5332 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5333 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5334 	req->stream_id = cpu_to_le16(stream_id);
5335 	req->enable |= enable << HCLGE_TQP_ENABLE_B;
5336 
5337 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5338 	if (ret)
5339 		dev_err(&hdev->pdev->dev,
5340 			"Tqp enable fail, status =%d.\n", ret);
5341 	return ret;
5342 }
5343 
5344 static int hclge_set_loopback(struct hnae3_handle *handle,
5345 			      enum hnae3_loop loop_mode, bool en)
5346 {
5347 	struct hclge_vport *vport = hclge_get_vport(handle);
5348 	struct hnae3_knic_private_info *kinfo;
5349 	struct hclge_dev *hdev = vport->back;
5350 	int i, ret;
5351 
5352 	switch (loop_mode) {
5353 	case HNAE3_LOOP_APP:
5354 		ret = hclge_set_app_loopback(hdev, en);
5355 		break;
5356 	case HNAE3_LOOP_SERIAL_SERDES:
5357 	case HNAE3_LOOP_PARALLEL_SERDES:
5358 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5359 		break;
5360 	default:
5361 		ret = -ENOTSUPP;
5362 		dev_err(&hdev->pdev->dev,
5363 			"loop_mode %d is not supported\n", loop_mode);
5364 		break;
5365 	}
5366 
5367 	if (ret)
5368 		return ret;
5369 
5370 	kinfo = &vport->nic.kinfo;
5371 	for (i = 0; i < kinfo->num_tqps; i++) {
5372 		ret = hclge_tqp_enable(hdev, i, 0, en);
5373 		if (ret)
5374 			return ret;
5375 	}
5376 
5377 	return 0;
5378 }
5379 
5380 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5381 {
5382 	struct hclge_vport *vport = hclge_get_vport(handle);
5383 	struct hnae3_knic_private_info *kinfo;
5384 	struct hnae3_queue *queue;
5385 	struct hclge_tqp *tqp;
5386 	int i;
5387 
5388 	kinfo = &vport->nic.kinfo;
5389 	for (i = 0; i < kinfo->num_tqps; i++) {
5390 		queue = handle->kinfo.tqp[i];
5391 		tqp = container_of(queue, struct hclge_tqp, q);
5392 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5393 	}
5394 }
5395 
5396 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
5397 {
5398 	struct hclge_vport *vport = hclge_get_vport(handle);
5399 	struct hclge_dev *hdev = vport->back;
5400 
5401 	if (enable) {
5402 		mod_timer(&hdev->service_timer, jiffies + HZ);
5403 	} else {
5404 		del_timer_sync(&hdev->service_timer);
5405 		cancel_work_sync(&hdev->service_task);
5406 		clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5407 	}
5408 }
5409 
5410 static int hclge_ae_start(struct hnae3_handle *handle)
5411 {
5412 	struct hclge_vport *vport = hclge_get_vport(handle);
5413 	struct hclge_dev *hdev = vport->back;
5414 
5415 	/* mac enable */
5416 	hclge_cfg_mac_mode(hdev, true);
5417 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5418 	hdev->hw.mac.link = 0;
5419 
5420 	/* reset tqp stats */
5421 	hclge_reset_tqp_stats(handle);
5422 
5423 	hclge_mac_start_phy(hdev);
5424 
5425 	return 0;
5426 }
5427 
5428 static void hclge_ae_stop(struct hnae3_handle *handle)
5429 {
5430 	struct hclge_vport *vport = hclge_get_vport(handle);
5431 	struct hclge_dev *hdev = vport->back;
5432 	int i;
5433 
5434 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
5435 
5436 	/* If it is not a PF reset, the firmware will disable the MAC,
5437 	 * so we only need to stop the PHY here.
5438 	 */
5439 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5440 	    hdev->reset_type != HNAE3_FUNC_RESET) {
5441 		hclge_mac_stop_phy(hdev);
5442 		return;
5443 	}
5444 
5445 	for (i = 0; i < handle->kinfo.num_tqps; i++)
5446 		hclge_reset_tqp(handle, i);
5447 
5448 	/* Mac disable */
5449 	hclge_cfg_mac_mode(hdev, false);
5450 
5451 	hclge_mac_stop_phy(hdev);
5452 
5453 	/* reset tqp stats */
5454 	hclge_reset_tqp_stats(handle);
5455 	hclge_update_link_status(hdev);
5456 }
5457 
5458 int hclge_vport_start(struct hclge_vport *vport)
5459 {
5460 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5461 	vport->last_active_jiffies = jiffies;
5462 	return 0;
5463 }
5464 
5465 void hclge_vport_stop(struct hclge_vport *vport)
5466 {
5467 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5468 }
5469 
5470 static int hclge_client_start(struct hnae3_handle *handle)
5471 {
5472 	struct hclge_vport *vport = hclge_get_vport(handle);
5473 
5474 	return hclge_vport_start(vport);
5475 }
5476 
5477 static void hclge_client_stop(struct hnae3_handle *handle)
5478 {
5479 	struct hclge_vport *vport = hclge_get_vport(handle);
5480 
5481 	hclge_vport_stop(vport);
5482 }
5483 
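/* Translate a mac_vlan command completion into an errno: a non-zero
 * cmdq_resp means the command itself failed (-EIO); otherwise the
 * per-opcode resp_code is decoded, e.g. table overflow on add maps to
 * -ENOSPC and a lookup/remove miss maps to -ENOENT.
 */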
5484 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5485 					 u16 cmdq_resp, u8  resp_code,
5486 					 enum hclge_mac_vlan_tbl_opcode op)
5487 {
5488 	struct hclge_dev *hdev = vport->back;
5489 	int return_status = -EIO;
5490 
5491 	if (cmdq_resp) {
5492 		dev_err(&hdev->pdev->dev,
5493 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5494 			cmdq_resp);
5495 		return -EIO;
5496 	}
5497 
5498 	if (op == HCLGE_MAC_VLAN_ADD) {
5499 		if ((!resp_code) || (resp_code == 1)) {
5500 			return_status = 0;
5501 		} else if (resp_code == 2) {
5502 			return_status = -ENOSPC;
5503 			dev_err(&hdev->pdev->dev,
5504 				"add mac addr failed for uc_overflow.\n");
5505 		} else if (resp_code == 3) {
5506 			return_status = -ENOSPC;
5507 			dev_err(&hdev->pdev->dev,
5508 				"add mac addr failed for mc_overflow.\n");
5509 		} else {
5510 			dev_err(&hdev->pdev->dev,
5511 				"add mac addr failed for undefined, code=%d.\n",
5512 				resp_code);
5513 		}
5514 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
5515 		if (!resp_code) {
5516 			return_status = 0;
5517 		} else if (resp_code == 1) {
5518 			return_status = -ENOENT;
5519 			dev_dbg(&hdev->pdev->dev,
5520 				"remove mac addr failed for miss.\n");
5521 		} else {
5522 			dev_err(&hdev->pdev->dev,
5523 				"remove mac addr failed for undefined, code=%d.\n",
5524 				resp_code);
5525 		}
5526 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
5527 		if (!resp_code) {
5528 			return_status = 0;
5529 		} else if (resp_code == 1) {
5530 			return_status = -ENOENT;
5531 			dev_dbg(&hdev->pdev->dev,
5532 				"lookup mac addr failed for miss.\n");
5533 		} else {
5534 			dev_err(&hdev->pdev->dev,
5535 				"lookup mac addr failed for undefined, code=%d.\n",
5536 				resp_code);
5537 		}
5538 	} else {
5539 		return_status = -EINVAL;
5540 		dev_err(&hdev->pdev->dev,
5541 			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5542 			op);
5543 	}
5544 
5545 	return return_status;
5546 }
5547 
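/* Set or clear the bit for @vfid in the VF bitmap carried by the
 * command descriptors: VF IDs 0-191 live in the six data words of
 * desc[1], VF IDs 192-255 in desc[2].
 */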
5548 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5549 {
5550 	int word_num;
5551 	int bit_num;
5552 
5553 	if (vfid > 255 || vfid < 0)
5554 		return -EIO;
5555 
5556 	if (vfid >= 0 && vfid <= 191) {
5557 		word_num = vfid / 32;
5558 		bit_num  = vfid % 32;
5559 		if (clr)
5560 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5561 		else
5562 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5563 	} else {
5564 		word_num = (vfid - 192) / 32;
5565 		bit_num  = vfid % 32;
5566 		if (clr)
5567 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5568 		else
5569 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5570 	}
5571 
5572 	return 0;
5573 }
5574 
5575 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5576 {
5577 #define HCLGE_DESC_NUMBER 3
5578 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5579 	int i, j;
5580 
5581 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5582 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5583 			if (desc[i].data[j])
5584 				return false;
5585 
5586 	return true;
5587 }
5588 
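/* Pack @addr into a mac_vlan table entry: bytes 0-3 go into the
 * 32-bit mac_addr_hi32 field (byte 0 in the least significant byte)
 * and bytes 4-5 into mac_addr_lo16. Multicast entries additionally
 * set the entry type and mc_mac_en bits.
 */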
5589 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5590 				   const u8 *addr, bool is_mc)
5591 {
5592 	const unsigned char *mac_addr = addr;
5593 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
5594 		       (mac_addr[0]) | (mac_addr[1] << 8);
5595 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
5596 
5597 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5598 	if (is_mc) {
5599 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5600 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5601 	}
5602 
5603 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5604 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5605 }
5606 
5607 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5608 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
5609 {
5610 	struct hclge_dev *hdev = vport->back;
5611 	struct hclge_desc desc;
5612 	u8 resp_code;
5613 	u16 retval;
5614 	int ret;
5615 
5616 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5617 
5618 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5619 
5620 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5621 	if (ret) {
5622 		dev_err(&hdev->pdev->dev,
5623 			"del mac addr failed for cmd_send, ret =%d.\n",
5624 			ret);
5625 		return ret;
5626 	}
5627 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5628 	retval = le16_to_cpu(desc.retval);
5629 
5630 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5631 					     HCLGE_MAC_VLAN_REMOVE);
5632 }
5633 
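/* Look up @req in the mac_vlan table. Multicast lookups use three
 * chained descriptors so the VF bitmap is returned along with the
 * entry; unicast lookups need only one. The result is decoded by
 * hclge_get_mac_vlan_cmd_status().
 */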
5634 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5635 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
5636 				     struct hclge_desc *desc,
5637 				     bool is_mc)
5638 {
5639 	struct hclge_dev *hdev = vport->back;
5640 	u8 resp_code;
5641 	u16 retval;
5642 	int ret;
5643 
5644 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5645 	if (is_mc) {
5646 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5647 		memcpy(desc[0].data,
5648 		       req,
5649 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5650 		hclge_cmd_setup_basic_desc(&desc[1],
5651 					   HCLGE_OPC_MAC_VLAN_ADD,
5652 					   true);
5653 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5654 		hclge_cmd_setup_basic_desc(&desc[2],
5655 					   HCLGE_OPC_MAC_VLAN_ADD,
5656 					   true);
5657 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
5658 	} else {
5659 		memcpy(desc[0].data,
5660 		       req,
5661 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5662 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
5663 	}
5664 	if (ret) {
5665 		dev_err(&hdev->pdev->dev,
5666 			"lookup mac addr failed for cmd_send, ret =%d.\n",
5667 			ret);
5668 		return ret;
5669 	}
5670 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5671 	retval = le16_to_cpu(desc[0].retval);
5672 
5673 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5674 					     HCLGE_MAC_VLAN_LKUP);
5675 }
5676 
5677 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5678 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
5679 				  struct hclge_desc *mc_desc)
5680 {
5681 	struct hclge_dev *hdev = vport->back;
5682 	int cfg_status;
5683 	u8 resp_code;
5684 	u16 retval;
5685 	int ret;
5686 
5687 	if (!mc_desc) {
5688 		struct hclge_desc desc;
5689 
5690 		hclge_cmd_setup_basic_desc(&desc,
5691 					   HCLGE_OPC_MAC_VLAN_ADD,
5692 					   false);
5693 		memcpy(desc.data, req,
5694 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5695 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5696 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5697 		retval = le16_to_cpu(desc.retval);
5698 
5699 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5700 							   resp_code,
5701 							   HCLGE_MAC_VLAN_ADD);
5702 	} else {
5703 		hclge_cmd_reuse_desc(&mc_desc[0], false);
5704 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5705 		hclge_cmd_reuse_desc(&mc_desc[1], false);
5706 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5707 		hclge_cmd_reuse_desc(&mc_desc[2], false);
5708 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5709 		memcpy(mc_desc[0].data, req,
5710 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5711 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5712 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5713 		retval = le16_to_cpu(mc_desc[0].retval);
5714 
5715 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5716 							   resp_code,
5717 							   HCLGE_MAC_VLAN_ADD);
5718 	}
5719 
5720 	if (ret) {
5721 		dev_err(&hdev->pdev->dev,
5722 			"add mac addr failed for cmd_send, ret =%d.\n",
5723 			ret);
5724 		return ret;
5725 	}
5726 
5727 	return cfg_status;
5728 }
5729 
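/* Ask the firmware for hdev->wanted_umv_size unicast MAC (UMV) table
 * entries. The allocation is divided into (num_req_vfs + 2) equal
 * private quotas (priv_umv_size); one quota plus the division
 * remainder forms the shared pool tracked in share_umv_size.
 */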
5730 static int hclge_init_umv_space(struct hclge_dev *hdev)
5731 {
5732 	u16 allocated_size = 0;
5733 	int ret;
5734 
5735 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5736 				  true);
5737 	if (ret)
5738 		return ret;
5739 
5740 	if (allocated_size < hdev->wanted_umv_size)
5741 		dev_warn(&hdev->pdev->dev,
5742 			 "Alloc umv space failed, want %d, get %d\n",
5743 			 hdev->wanted_umv_size, allocated_size);
5744 
5745 	mutex_init(&hdev->umv_mutex);
5746 	hdev->max_umv_size = allocated_size;
5747 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5748 	hdev->share_umv_size = hdev->priv_umv_size +
5749 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
5750 
5751 	return 0;
5752 }
5753 
5754 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5755 {
5756 	int ret;
5757 
5758 	if (hdev->max_umv_size > 0) {
5759 		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5760 					  false);
5761 		if (ret)
5762 			return ret;
5763 		hdev->max_umv_size = 0;
5764 	}
5765 	mutex_destroy(&hdev->umv_mutex);
5766 
5767 	return 0;
5768 }
5769 
5770 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5771 			       u16 *allocated_size, bool is_alloc)
5772 {
5773 	struct hclge_umv_spc_alc_cmd *req;
5774 	struct hclge_desc desc;
5775 	int ret;
5776 
5777 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5778 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5779 	hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5780 	req->space_size = cpu_to_le32(space_size);
5781 
5782 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5783 	if (ret) {
5784 		dev_err(&hdev->pdev->dev,
5785 			"%s umv space failed for cmd_send, ret =%d\n",
5786 			is_alloc ? "allocate" : "free", ret);
5787 		return ret;
5788 	}
5789 
5790 	if (is_alloc && allocated_size)
5791 		*allocated_size = le32_to_cpu(desc.data[1]);
5792 
5793 	return 0;
5794 }
5795 
5796 static void hclge_reset_umv_space(struct hclge_dev *hdev)
5797 {
5798 	struct hclge_vport *vport;
5799 	int i;
5800 
5801 	for (i = 0; i < hdev->num_alloc_vport; i++) {
5802 		vport = &hdev->vport[i];
5803 		vport->used_umv_num = 0;
5804 	}
5805 
5806 	mutex_lock(&hdev->umv_mutex);
5807 	hdev->share_umv_size = hdev->priv_umv_size +
5808 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
5809 	mutex_unlock(&hdev->umv_mutex);
5810 }
5811 
5812 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5813 {
5814 	struct hclge_dev *hdev = vport->back;
5815 	bool is_full;
5816 
5817 	mutex_lock(&hdev->umv_mutex);
5818 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5819 		   hdev->share_umv_size == 0);
5820 	mutex_unlock(&hdev->umv_mutex);
5821 
5822 	return is_full;
5823 }
5824 
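/* Account for one unicast MAC entry being added (is_free == false) or
 * removed (is_free == true): a vport consumes its private quota first
 * and then the shared pool, and freeing returns the entry to
 * whichever pool it was charged against.
 */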
5825 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5826 {
5827 	struct hclge_dev *hdev = vport->back;
5828 
5829 	mutex_lock(&hdev->umv_mutex);
5830 	if (is_free) {
5831 		if (vport->used_umv_num > hdev->priv_umv_size)
5832 			hdev->share_umv_size++;
5833 
5834 		if (vport->used_umv_num > 0)
5835 			vport->used_umv_num--;
5836 	} else {
5837 		if (vport->used_umv_num >= hdev->priv_umv_size &&
5838 		    hdev->share_umv_size > 0)
5839 			hdev->share_umv_size--;
5840 		vport->used_umv_num++;
5841 	}
5842 	mutex_unlock(&hdev->umv_mutex);
5843 }
5844 
5845 static int hclge_add_uc_addr(struct hnae3_handle *handle,
5846 			     const unsigned char *addr)
5847 {
5848 	struct hclge_vport *vport = hclge_get_vport(handle);
5849 
5850 	return hclge_add_uc_addr_common(vport, addr);
5851 }
5852 
5853 int hclge_add_uc_addr_common(struct hclge_vport *vport,
5854 			     const unsigned char *addr)
5855 {
5856 	struct hclge_dev *hdev = vport->back;
5857 	struct hclge_mac_vlan_tbl_entry_cmd req;
5858 	struct hclge_desc desc;
5859 	u16 egress_port = 0;
5860 	int ret;
5861 
5862 	/* mac addr check */
5863 	if (is_zero_ether_addr(addr) ||
5864 	    is_broadcast_ether_addr(addr) ||
5865 	    is_multicast_ether_addr(addr)) {
5866 		dev_err(&hdev->pdev->dev,
5867 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5868 			 addr,
5869 			 is_zero_ether_addr(addr),
5870 			 is_broadcast_ether_addr(addr),
5871 			 is_multicast_ether_addr(addr));
5872 		return -EINVAL;
5873 	}
5874 
5875 	memset(&req, 0, sizeof(req));
5876 
5877 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5878 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
5879 
5880 	req.egress_port = cpu_to_le16(egress_port);
5881 
5882 	hclge_prepare_mac_addr(&req, addr, false);
5883 
5884 	/* Look up the mac address in the mac_vlan table, and add it if
5885 	 * the entry does not exist. Duplicate unicast entries are not
5886 	 * allowed in the mac_vlan table.
5887 	 */
5888 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
5889 	if (ret == -ENOENT) {
5890 		if (!hclge_is_umv_space_full(vport)) {
5891 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5892 			if (!ret)
5893 				hclge_update_umv_space(vport, false);
5894 			return ret;
5895 		}
5896 
5897 		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5898 			hdev->priv_umv_size);
5899 
5900 		return -ENOSPC;
5901 	}
5902 
5903 	/* check if we just hit the duplicate */
5904 	if (!ret)
5905 		ret = -EINVAL;
5906 
5907 	dev_err(&hdev->pdev->dev,
5908 		"PF failed to add unicast entry(%pM) in the MAC table\n",
5909 		addr);
5910 
5911 	return ret;
5912 }
5913 
5914 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
5915 			    const unsigned char *addr)
5916 {
5917 	struct hclge_vport *vport = hclge_get_vport(handle);
5918 
5919 	return hclge_rm_uc_addr_common(vport, addr);
5920 }
5921 
5922 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
5923 			    const unsigned char *addr)
5924 {
5925 	struct hclge_dev *hdev = vport->back;
5926 	struct hclge_mac_vlan_tbl_entry_cmd req;
5927 	int ret;
5928 
5929 	/* mac addr check */
5930 	if (is_zero_ether_addr(addr) ||
5931 	    is_broadcast_ether_addr(addr) ||
5932 	    is_multicast_ether_addr(addr)) {
5933 		dev_dbg(&hdev->pdev->dev,
5934 			"Remove mac err! invalid mac:%pM.\n",
5935 			 addr);
5936 		return -EINVAL;
5937 	}
5938 
5939 	memset(&req, 0, sizeof(req));
5940 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5941 	hclge_prepare_mac_addr(&req, addr, false);
5942 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
5943 	if (!ret)
5944 		hclge_update_umv_space(vport, true);
5945 
5946 	return ret;
5947 }
5948 
5949 static int hclge_add_mc_addr(struct hnae3_handle *handle,
5950 			     const unsigned char *addr)
5951 {
5952 	struct hclge_vport *vport = hclge_get_vport(handle);
5953 
5954 	return hclge_add_mc_addr_common(vport, addr);
5955 }
5956 
5957 int hclge_add_mc_addr_common(struct hclge_vport *vport,
5958 			     const unsigned char *addr)
5959 {
5960 	struct hclge_dev *hdev = vport->back;
5961 	struct hclge_mac_vlan_tbl_entry_cmd req;
5962 	struct hclge_desc desc[3];
5963 	int status;
5964 
5965 	/* mac addr check */
5966 	if (!is_multicast_ether_addr(addr)) {
5967 		dev_err(&hdev->pdev->dev,
5968 			"Add mc mac err! invalid mac:%pM.\n",
5969 			 addr);
5970 		return -EINVAL;
5971 	}
5972 	memset(&req, 0, sizeof(req));
5973 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5974 	hclge_prepare_mac_addr(&req, addr, true);
5975 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5976 	if (!status) {
5977 		/* This mac addr exists, update the VFID for it */
5978 		hclge_update_desc_vfid(desc, vport->vport_id, false);
5979 		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5980 	} else {
5981 		/* This mac addr does not exist, add a new entry for it */
5982 		memset(desc[0].data, 0, sizeof(desc[0].data));
5983 		memset(desc[1].data, 0, sizeof(desc[0].data));
5984 		memset(desc[2].data, 0, sizeof(desc[0].data));
5985 		hclge_update_desc_vfid(desc, vport->vport_id, false);
5986 		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5987 	}
5988 
5989 	if (status == -ENOSPC)
5990 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
5991 
5992 	return status;
5993 }
5994 
5995 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
5996 			    const unsigned char *addr)
5997 {
5998 	struct hclge_vport *vport = hclge_get_vport(handle);
5999 
6000 	return hclge_rm_mc_addr_common(vport, addr);
6001 }
6002 
6003 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6004 			    const unsigned char *addr)
6005 {
6006 	struct hclge_dev *hdev = vport->back;
6007 	struct hclge_mac_vlan_tbl_entry_cmd req;
6008 	enum hclge_cmd_status status;
6009 	struct hclge_desc desc[3];
6010 
6011 	/* mac addr check */
6012 	if (!is_multicast_ether_addr(addr)) {
6013 		dev_dbg(&hdev->pdev->dev,
6014 			"Remove mc mac err! invalid mac:%pM.\n",
6015 			 addr);
6016 		return -EINVAL;
6017 	}
6018 
6019 	memset(&req, 0, sizeof(req));
6020 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6021 	hclge_prepare_mac_addr(&req, addr, true);
6022 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6023 	if (!status) {
6024 		/* This mac addr exists, remove this handle's VFID for it */
6025 		hclge_update_desc_vfid(desc, vport->vport_id, true);
6026 
6027 		if (hclge_is_all_function_id_zero(desc))
6028 			/* All the vfids are zero, so delete this entry */
6029 			status = hclge_remove_mac_vlan_tbl(vport, &req);
6030 		else
6031 			/* Not all the vfids are zero, update the vfid */
6032 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6033 
6034 	} else {
6035 		/* This mac address may be in the mta table, but it cannot be
6036 		 * deleted here because an mta entry represents an address
6037 		 * range rather than a specific address. The delete action for
6038 		 * all entries takes effect in update_mta_status, called by
6039 		 * hns3_nic_set_rx_mode.
6040 		 */
6041 		status = 0;
6042 	}
6043 
6044 	return status;
6045 }
6046 
6047 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6048 					      u16 cmdq_resp, u8 resp_code)
6049 {
6050 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
6051 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
6052 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
6053 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
6054 
6055 	int return_status;
6056 
6057 	if (cmdq_resp) {
6058 		dev_err(&hdev->pdev->dev,
6059 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6060 			cmdq_resp);
6061 		return -EIO;
6062 	}
6063 
6064 	switch (resp_code) {
6065 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
6066 	case HCLGE_ETHERTYPE_ALREADY_ADD:
6067 		return_status = 0;
6068 		break;
6069 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6070 		dev_err(&hdev->pdev->dev,
6071 			"add mac ethertype failed for manager table overflow.\n");
6072 		return_status = -EIO;
6073 		break;
6074 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
6075 		dev_err(&hdev->pdev->dev,
6076 			"add mac ethertype failed for key conflict.\n");
6077 		return_status = -EIO;
6078 		break;
6079 	default:
6080 		dev_err(&hdev->pdev->dev,
6081 			"add mac ethertype failed for undefined, code=%d.\n",
6082 			resp_code);
6083 		return_status = -EIO;
6084 	}
6085 
6086 	return return_status;
6087 }
6088 
6089 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6090 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
6091 {
6092 	struct hclge_desc desc;
6093 	u8 resp_code;
6094 	u16 retval;
6095 	int ret;
6096 
6097 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6098 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6099 
6100 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6101 	if (ret) {
6102 		dev_err(&hdev->pdev->dev,
6103 			"add mac ethertype failed for cmd_send, ret =%d.\n",
6104 			ret);
6105 		return ret;
6106 	}
6107 
6108 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6109 	retval = le16_to_cpu(desc.retval);
6110 
6111 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6112 }
6113 
6114 static int init_mgr_tbl(struct hclge_dev *hdev)
6115 {
6116 	int ret;
6117 	int i;
6118 
6119 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6120 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6121 		if (ret) {
6122 			dev_err(&hdev->pdev->dev,
6123 				"add mac ethertype failed, ret =%d.\n",
6124 				ret);
6125 			return ret;
6126 		}
6127 	}
6128 
6129 	return 0;
6130 }
6131 
6132 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6133 {
6134 	struct hclge_vport *vport = hclge_get_vport(handle);
6135 	struct hclge_dev *hdev = vport->back;
6136 
6137 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
6138 }
6139 
6140 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6141 			      bool is_first)
6142 {
6143 	const unsigned char *new_addr = (const unsigned char *)p;
6144 	struct hclge_vport *vport = hclge_get_vport(handle);
6145 	struct hclge_dev *hdev = vport->back;
6146 	int ret;
6147 
6148 	/* mac addr check */
6149 	if (is_zero_ether_addr(new_addr) ||
6150 	    is_broadcast_ether_addr(new_addr) ||
6151 	    is_multicast_ether_addr(new_addr)) {
6152 		dev_err(&hdev->pdev->dev,
6153 			"Change uc mac err! invalid mac:%pM.\n",
6154 			 new_addr);
6155 		return -EINVAL;
6156 	}
6157 
6158 	if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6159 		dev_warn(&hdev->pdev->dev,
6160 			 "remove old uc mac address fail.\n");
6161 
6162 	ret = hclge_add_uc_addr(handle, new_addr);
6163 	if (ret) {
6164 		dev_err(&hdev->pdev->dev,
6165 			"add uc mac address fail, ret =%d.\n",
6166 			ret);
6167 
6168 		if (!is_first &&
6169 		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6170 			dev_err(&hdev->pdev->dev,
6171 				"restore uc mac address fail.\n");
6172 
6173 		return -EIO;
6174 	}
6175 
6176 	ret = hclge_pause_addr_cfg(hdev, new_addr);
6177 	if (ret) {
6178 		dev_err(&hdev->pdev->dev,
6179 			"configure mac pause address fail, ret =%d.\n",
6180 			ret);
6181 		return -EIO;
6182 	}
6183 
6184 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6185 
6186 	return 0;
6187 }
6188 
6189 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6190 			  int cmd)
6191 {
6192 	struct hclge_vport *vport = hclge_get_vport(handle);
6193 	struct hclge_dev *hdev = vport->back;
6194 
6195 	if (!hdev->hw.mac.phydev)
6196 		return -EOPNOTSUPP;
6197 
6198 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6199 }
6200 
6201 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6202 				      u8 fe_type, bool filter_en)
6203 {
6204 	struct hclge_vlan_filter_ctrl_cmd *req;
6205 	struct hclge_desc desc;
6206 	int ret;
6207 
6208 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6209 
6210 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6211 	req->vlan_type = vlan_type;
6212 	req->vlan_fe = filter_en ? fe_type : 0;
6213 
6214 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6215 	if (ret)
6216 		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6217 			ret);
6218 
6219 	return ret;
6220 }
6221 
6222 #define HCLGE_FILTER_TYPE_VF		0
6223 #define HCLGE_FILTER_TYPE_PORT		1
6224 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
6225 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
6226 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
6227 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
6228 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
6229 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
6230 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
6231 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
6232 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
6233 
6234 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6235 {
6236 	struct hclge_vport *vport = hclge_get_vport(handle);
6237 	struct hclge_dev *hdev = vport->back;
6238 
6239 	if (hdev->pdev->revision >= 0x21) {
6240 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6241 					   HCLGE_FILTER_FE_EGRESS, enable);
6242 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6243 					   HCLGE_FILTER_FE_INGRESS, enable);
6244 	} else {
6245 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6246 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable);
6247 	}
6248 	if (enable)
6249 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
6250 	else
6251 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
6252 }
6253 
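/* Program the per-VF VLAN filter. The affected function is selected
 * by a 32-byte VF bitmap (one bit per vfid) that spans the two
 * descriptors, 16 bytes each; the firmware response code in req0 is
 * decoded below for the add and kill cases.
 */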
6254 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6255 				    bool is_kill, u16 vlan, u8 qos,
6256 				    __be16 proto)
6257 {
6258 #define HCLGE_MAX_VF_BYTES  16
6259 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
6260 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
6261 	struct hclge_desc desc[2];
6262 	u8 vf_byte_val;
6263 	u8 vf_byte_off;
6264 	int ret;
6265 
6266 	hclge_cmd_setup_basic_desc(&desc[0],
6267 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6268 	hclge_cmd_setup_basic_desc(&desc[1],
6269 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6270 
6271 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6272 
6273 	vf_byte_off = vfid / 8;
6274 	vf_byte_val = 1 << (vfid % 8);
6275 
6276 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6277 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6278 
6279 	req0->vlan_id  = cpu_to_le16(vlan);
6280 	req0->vlan_cfg = is_kill;
6281 
6282 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6283 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6284 	else
6285 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6286 
6287 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
6288 	if (ret) {
6289 		dev_err(&hdev->pdev->dev,
6290 			"Send vf vlan command fail, ret =%d.\n",
6291 			ret);
6292 		return ret;
6293 	}
6294 
6295 	if (!is_kill) {
6296 #define HCLGE_VF_VLAN_NO_ENTRY	2
6297 		if (!req0->resp_code || req0->resp_code == 1)
6298 			return 0;
6299 
6300 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6301 			dev_warn(&hdev->pdev->dev,
6302 				 "vf vlan table is full, vf vlan filter is disabled\n");
6303 			return 0;
6304 		}
6305 
6306 		dev_err(&hdev->pdev->dev,
6307 			"Add vf vlan filter fail, ret =%d.\n",
6308 			req0->resp_code);
6309 	} else {
6310 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
6311 		if (!req0->resp_code)
6312 			return 0;
6313 
6314 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6315 			dev_warn(&hdev->pdev->dev,
6316 				 "vlan %d filter is not in vf vlan table\n",
6317 				 vlan);
6318 			return 0;
6319 		}
6320 
6321 		dev_err(&hdev->pdev->dev,
6322 			"Kill vf vlan filter fail, ret =%d.\n",
6323 			req0->resp_code);
6324 	}
6325 
6326 	return -EIO;
6327 }
6328 
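/* Program the port-level VLAN filter. The VLAN table is addressed in
 * blocks of 160 VLAN IDs: vlan_offset selects the block and a single
 * bit in the 20-byte vlan_offset_bitmap selects the VLAN within it.
 */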
6329 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6330 				      u16 vlan_id, bool is_kill)
6331 {
6332 	struct hclge_vlan_filter_pf_cfg_cmd *req;
6333 	struct hclge_desc desc;
6334 	u8 vlan_offset_byte_val;
6335 	u8 vlan_offset_byte;
6336 	u8 vlan_offset_160;
6337 	int ret;
6338 
6339 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6340 
6341 	vlan_offset_160 = vlan_id / 160;
6342 	vlan_offset_byte = (vlan_id % 160) / 8;
6343 	vlan_offset_byte_val = 1 << (vlan_id % 8);
6344 
6345 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6346 	req->vlan_offset = vlan_offset_160;
6347 	req->vlan_cfg = is_kill;
6348 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6349 
6350 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6351 	if (ret)
6352 		dev_err(&hdev->pdev->dev,
6353 			"port vlan command, send fail, ret =%d.\n", ret);
6354 	return ret;
6355 }
6356 
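/* Update the VF VLAN filter for @vport_id and mirror the change in
 * hdev->vlan_table. The port-level filter is only reprogrammed when
 * the first vport joins the VLAN or the last vport leaves it.
 */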
6357 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6358 				    u16 vport_id, u16 vlan_id, u8 qos,
6359 				    bool is_kill)
6360 {
6361 	u16 vport_idx, vport_num = 0;
6362 	int ret;
6363 
6364 	if (is_kill && !vlan_id)
6365 		return 0;
6366 
6367 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6368 				       0, proto);
6369 	if (ret) {
6370 		dev_err(&hdev->pdev->dev,
6371 			"Set %d vport vlan filter config fail, ret =%d.\n",
6372 			vport_id, ret);
6373 		return ret;
6374 	}
6375 
6376 	/* vlan 0 may be added twice when 8021q module is enabled */
6377 	if (!is_kill && !vlan_id &&
6378 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
6379 		return 0;
6380 
6381 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6382 		dev_err(&hdev->pdev->dev,
6383 			"Add port vlan failed, vport %d is already in vlan %d\n",
6384 			vport_id, vlan_id);
6385 		return -EINVAL;
6386 	}
6387 
6388 	if (is_kill &&
6389 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6390 		dev_err(&hdev->pdev->dev,
6391 			"Delete port vlan failed, vport %d is not in vlan %d\n",
6392 			vport_id, vlan_id);
6393 		return -EINVAL;
6394 	}
6395 
6396 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6397 		vport_num++;
6398 
6399 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6400 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6401 						 is_kill);
6402 
6403 	return ret;
6404 }
6405 
6406 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
6407 			  u16 vlan_id, bool is_kill)
6408 {
6409 	struct hclge_vport *vport = hclge_get_vport(handle);
6410 	struct hclge_dev *hdev = vport->back;
6411 
6412 	return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
6413 					0, is_kill);
6414 }
6415 
6416 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
6417 				    u16 vlan, u8 qos, __be16 proto)
6418 {
6419 	struct hclge_vport *vport = hclge_get_vport(handle);
6420 	struct hclge_dev *hdev = vport->back;
6421 
6422 	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
6423 		return -EINVAL;
6424 	if (proto != htons(ETH_P_8021Q))
6425 		return -EPROTONOSUPPORT;
6426 
6427 	return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
6428 }
6429 
6430 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6431 {
6432 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6433 	struct hclge_vport_vtag_tx_cfg_cmd *req;
6434 	struct hclge_dev *hdev = vport->back;
6435 	struct hclge_desc desc;
6436 	int status;
6437 
6438 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6439 
6440 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6441 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6442 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6443 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6444 		      vcfg->accept_tag1 ? 1 : 0);
6445 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6446 		      vcfg->accept_untag1 ? 1 : 0);
6447 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6448 		      vcfg->accept_tag2 ? 1 : 0);
6449 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6450 		      vcfg->accept_untag2 ? 1 : 0);
6451 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6452 		      vcfg->insert_tag1_en ? 1 : 0);
6453 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6454 		      vcfg->insert_tag2_en ? 1 : 0);
6455 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6456 
6457 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6458 	req->vf_bitmap[req->vf_offset] =
6459 		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6460 
6461 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6462 	if (status)
6463 		dev_err(&hdev->pdev->dev,
6464 			"Send port txvlan cfg command fail, ret =%d\n",
6465 			status);
6466 
6467 	return status;
6468 }
6469 
6470 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6471 {
6472 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6473 	struct hclge_vport_vtag_rx_cfg_cmd *req;
6474 	struct hclge_dev *hdev = vport->back;
6475 	struct hclge_desc desc;
6476 	int status;
6477 
6478 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6479 
6480 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6481 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6482 		      vcfg->strip_tag1_en ? 1 : 0);
6483 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6484 		      vcfg->strip_tag2_en ? 1 : 0);
6485 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6486 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
6487 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6488 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
6489 
6490 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6491 	req->vf_bitmap[req->vf_offset] =
6492 		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6493 
6494 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6495 	if (status)
6496 		dev_err(&hdev->pdev->dev,
6497 			"Send port rxvlan cfg command fail, ret =%d\n",
6498 			status);
6499 
6500 	return status;
6501 }
6502 
6503 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6504 {
6505 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6506 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6507 	struct hclge_desc desc;
6508 	int status;
6509 
6510 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6511 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6512 	rx_req->ot_fst_vlan_type =
6513 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6514 	rx_req->ot_sec_vlan_type =
6515 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6516 	rx_req->in_fst_vlan_type =
6517 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6518 	rx_req->in_sec_vlan_type =
6519 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6520 
6521 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6522 	if (status) {
6523 		dev_err(&hdev->pdev->dev,
6524 			"Send rxvlan protocol type command fail, ret =%d\n",
6525 			status);
6526 		return status;
6527 	}
6528 
6529 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6530 
6531 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
6532 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6533 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6534 
6535 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6536 	if (status)
6537 		dev_err(&hdev->pdev->dev,
6538 			"Send txvlan protocol type command fail, ret =%d\n",
6539 			status);
6540 
6541 	return status;
6542 }
6543 
6544 static int hclge_init_vlan_config(struct hclge_dev *hdev)
6545 {
6546 #define HCLGE_DEF_VLAN_TYPE		0x8100
6547 
6548 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6549 	struct hclge_vport *vport;
6550 	int ret;
6551 	int i;
6552 
6553 	if (hdev->pdev->revision >= 0x21) {
6554 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6555 						 HCLGE_FILTER_FE_EGRESS, true);
6556 		if (ret)
6557 			return ret;
6558 
6559 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6560 						 HCLGE_FILTER_FE_INGRESS, true);
6561 		if (ret)
6562 			return ret;
6563 	} else {
6564 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6565 						 HCLGE_FILTER_FE_EGRESS_V1_B,
6566 						 true);
6567 		if (ret)
6568 			return ret;
6569 	}
6570 
6571 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
6572 
6573 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6574 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6575 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6576 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6577 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
6578 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
6579 
6580 	ret = hclge_set_vlan_protocol_type(hdev);
6581 	if (ret)
6582 		return ret;
6583 
6584 	for (i = 0; i < hdev->num_alloc_vport; i++) {
6585 		vport = &hdev->vport[i];
6586 		vport->txvlan_cfg.accept_tag1 = true;
6587 		vport->txvlan_cfg.accept_untag1 = true;
6588 
6589 		/* accept_tag2 and accept_untag2 are not supported on
6590 		 * pdev revision 0x20; newer revisions support them. Setting
6591 		 * these two fields does not cause an error when the driver
6592 		 * sends the command to the firmware on revision 0x20.
6593 		 * These two fields cannot be configured by the user.
6594 		 */
6595 		vport->txvlan_cfg.accept_tag2 = true;
6596 		vport->txvlan_cfg.accept_untag2 = true;
6597 
6598 		vport->txvlan_cfg.insert_tag1_en = false;
6599 		vport->txvlan_cfg.insert_tag2_en = false;
6600 		vport->txvlan_cfg.default_tag1 = 0;
6601 		vport->txvlan_cfg.default_tag2 = 0;
6602 
6603 		ret = hclge_set_vlan_tx_offload_cfg(vport);
6604 		if (ret)
6605 			return ret;
6606 
6607 		vport->rxvlan_cfg.strip_tag1_en = false;
6608 		vport->rxvlan_cfg.strip_tag2_en = true;
6609 		vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6610 		vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6611 
6612 		ret = hclge_set_vlan_rx_offload_cfg(vport);
6613 		if (ret)
6614 			return ret;
6615 	}
6616 
6617 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
6618 }
6619 
6620 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
6621 {
6622 	struct hclge_vport *vport = hclge_get_vport(handle);
6623 
6624 	vport->rxvlan_cfg.strip_tag1_en = false;
6625 	vport->rxvlan_cfg.strip_tag2_en = enable;
6626 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6627 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6628 
6629 	return hclge_set_vlan_rx_offload_cfg(vport);
6630 }
6631 
6632 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
6633 {
6634 	struct hclge_config_max_frm_size_cmd *req;
6635 	struct hclge_desc desc;
6636 
6637 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
6638 
6639 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
6640 	req->max_frm_size = cpu_to_le16(new_mps);
6641 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
6642 
6643 	return hclge_cmd_send(&hdev->hw, &desc, 1);
6644 }
6645 
6646 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
6647 {
6648 	struct hclge_vport *vport = hclge_get_vport(handle);
6649 
6650 	return hclge_set_vport_mtu(vport, new_mtu);
6651 }
6652 
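/* The maximum frame size is the MTU plus the Ethernet header, FCS and
 * two VLAN tags. For a VF (vport_id != 0) only vport->mps is updated,
 * and it must not exceed the PF's mps; for the PF the MAC is
 * reprogrammed and the packet buffers reallocated under vport_lock.
 */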
6653 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
6654 {
6655 	struct hclge_dev *hdev = vport->back;
6656 	int i, max_frm_size, ret = 0;
6657 
6658 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
6659 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
6660 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
6661 		return -EINVAL;
6662 
6663 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
6664 	mutex_lock(&hdev->vport_lock);
6665 	/* VF's mps must fit within hdev->mps */
6666 	if (vport->vport_id && max_frm_size > hdev->mps) {
6667 		mutex_unlock(&hdev->vport_lock);
6668 		return -EINVAL;
6669 	} else if (vport->vport_id) {
6670 		vport->mps = max_frm_size;
6671 		mutex_unlock(&hdev->vport_lock);
6672 		return 0;
6673 	}
6674 
6675 	/* PF's mps must be greater than every VF's mps */
6676 	for (i = 1; i < hdev->num_alloc_vport; i++)
6677 		if (max_frm_size < hdev->vport[i].mps) {
6678 			mutex_unlock(&hdev->vport_lock);
6679 			return -EINVAL;
6680 		}
6681 
6682 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
6683 
6684 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
6685 	if (ret) {
6686 		dev_err(&hdev->pdev->dev,
6687 			"Change mtu fail, ret =%d\n", ret);
6688 		goto out;
6689 	}
6690 
6691 	hdev->mps = max_frm_size;
6692 	vport->mps = max_frm_size;
6693 
6694 	ret = hclge_buffer_alloc(hdev);
6695 	if (ret)
6696 		dev_err(&hdev->pdev->dev,
6697 			"Allocate buffer fail, ret =%d\n", ret);
6698 
6699 out:
6700 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
6701 	mutex_unlock(&hdev->vport_lock);
6702 	return ret;
6703 }
6704 
6705 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
6706 				    bool enable)
6707 {
6708 	struct hclge_reset_tqp_queue_cmd *req;
6709 	struct hclge_desc desc;
6710 	int ret;
6711 
6712 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
6713 
6714 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6715 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6716 	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
6717 
6718 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6719 	if (ret) {
6720 		dev_err(&hdev->pdev->dev,
6721 			"Send tqp reset cmd error, status =%d\n", ret);
6722 		return ret;
6723 	}
6724 
6725 	return 0;
6726 }
6727 
6728 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
6729 {
6730 	struct hclge_reset_tqp_queue_cmd *req;
6731 	struct hclge_desc desc;
6732 	int ret;
6733 
6734 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
6735 
6736 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6737 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6738 
6739 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6740 	if (ret) {
6741 		dev_err(&hdev->pdev->dev,
6742 			"Get reset status error, status =%d\n", ret);
6743 		return ret;
6744 	}
6745 
6746 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
6747 }
6748 
6749 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
6750 {
6751 	struct hnae3_queue *queue;
6752 	struct hclge_tqp *tqp;
6753 
6754 	queue = handle->kinfo.tqp[queue_id];
6755 	tqp = container_of(queue, struct hclge_tqp, q);
6756 
6757 	return tqp->index;
6758 }
6759 
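/* Reset a single TQP: disable the queue, assert the TQP reset, poll
 * the ready-to-reset status (up to HCLGE_TQP_RESET_TRY_TIMES tries,
 * 20 ms apart) and finally deassert the reset.
 */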
6760 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
6761 {
6762 	struct hclge_vport *vport = hclge_get_vport(handle);
6763 	struct hclge_dev *hdev = vport->back;
6764 	int reset_try_times = 0;
6765 	int reset_status;
6766 	u16 queue_gid;
6767 	int ret = 0;
6768 
6769 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
6770 
6771 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
6772 	if (ret) {
6773 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
6774 		return ret;
6775 	}
6776 
6777 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
6778 	if (ret) {
6779 		dev_err(&hdev->pdev->dev,
6780 			"Send reset tqp cmd fail, ret = %d\n", ret);
6781 		return ret;
6782 	}
6783 
6784 	reset_try_times = 0;
6785 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
6786 		/* Wait for tqp hw reset */
6787 		msleep(20);
6788 		reset_status = hclge_get_reset_status(hdev, queue_gid);
6789 		if (reset_status)
6790 			break;
6791 	}
6792 
6793 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
6794 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
6795 		return ret;
6796 	}
6797 
6798 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
6799 	if (ret)
6800 		dev_err(&hdev->pdev->dev,
6801 			"Deassert the soft reset fail, ret = %d\n", ret);
6802 
6803 	return ret;
6804 }
6805 
6806 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
6807 {
6808 	struct hclge_dev *hdev = vport->back;
6809 	int reset_try_times = 0;
6810 	int reset_status;
6811 	u16 queue_gid;
6812 	int ret;
6813 
6814 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
6815 
6816 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
6817 	if (ret) {
6818 		dev_warn(&hdev->pdev->dev,
6819 			 "Send reset tqp cmd fail, ret = %d\n", ret);
6820 		return;
6821 	}
6822 
6823 	reset_try_times = 0;
6824 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
6825 		/* Wait for tqp hw reset */
6826 		msleep(20);
6827 		reset_status = hclge_get_reset_status(hdev, queue_gid);
6828 		if (reset_status)
6829 			break;
6830 	}
6831 
6832 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
6833 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
6834 		return;
6835 	}
6836 
6837 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
6838 	if (ret)
6839 		dev_warn(&hdev->pdev->dev,
6840 			 "Deassert the soft reset fail, ret = %d\n", ret);
6841 }
6842 
6843 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
6844 {
6845 	struct hclge_vport *vport = hclge_get_vport(handle);
6846 	struct hclge_dev *hdev = vport->back;
6847 
6848 	return hdev->fw_version;
6849 }
6850 
6851 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
6852 {
6853 	struct phy_device *phydev = hdev->hw.mac.phydev;
6854 
6855 	if (!phydev)
6856 		return;
6857 
6858 	phy_set_asym_pause(phydev, rx_en, tx_en);
6859 }
6860 
6861 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
6862 {
6863 	int ret;
6864 
6865 	if (rx_en && tx_en)
6866 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
6867 	else if (rx_en && !tx_en)
6868 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
6869 	else if (!rx_en && tx_en)
6870 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
6871 	else
6872 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
6873 
6874 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
6875 		return 0;
6876 
6877 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
6878 	if (ret) {
6879 		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
6880 			ret);
6881 		return ret;
6882 	}
6883 
6884 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
6885 
6886 	return 0;
6887 }
6888 
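/* Re-resolve flow control after autoneg completes on the attached
 * PHY: combine the local and link partner pause advertisements via
 * mii_resolve_flowctrl_fdx(), force pause off for half duplex, and
 * apply the result with hclge_cfg_pauseparam().
 */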
6889 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
6890 {
6891 	struct phy_device *phydev = hdev->hw.mac.phydev;
6892 	u16 remote_advertising = 0;
6893 	u16 local_advertising = 0;
6894 	u32 rx_pause, tx_pause;
6895 	u8 flowctl;
6896 
6897 	if (!phydev->link || !phydev->autoneg)
6898 		return 0;
6899 
6900 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
6901 
6902 	if (phydev->pause)
6903 		remote_advertising = LPA_PAUSE_CAP;
6904 
6905 	if (phydev->asym_pause)
6906 		remote_advertising |= LPA_PAUSE_ASYM;
6907 
6908 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
6909 					   remote_advertising);
6910 	tx_pause = flowctl & FLOW_CTRL_TX;
6911 	rx_pause = flowctl & FLOW_CTRL_RX;
6912 
6913 	if (phydev->duplex == HCLGE_MAC_HALF) {
6914 		tx_pause = 0;
6915 		rx_pause = 0;
6916 	}
6917 
6918 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
6919 }
6920 
6921 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
6922 				 u32 *rx_en, u32 *tx_en)
6923 {
6924 	struct hclge_vport *vport = hclge_get_vport(handle);
6925 	struct hclge_dev *hdev = vport->back;
6926 
6927 	*auto_neg = hclge_get_autoneg(handle);
6928 
6929 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
6930 		*rx_en = 0;
6931 		*tx_en = 0;
6932 		return;
6933 	}
6934 
6935 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
6936 		*rx_en = 1;
6937 		*tx_en = 0;
6938 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
6939 		*tx_en = 1;
6940 		*rx_en = 0;
6941 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
6942 		*rx_en = 1;
6943 		*tx_en = 1;
6944 	} else {
6945 		*rx_en = 0;
6946 		*tx_en = 0;
6947 	}
6948 }
6949 
6950 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
6951 				u32 rx_en, u32 tx_en)
6952 {
6953 	struct hclge_vport *vport = hclge_get_vport(handle);
6954 	struct hclge_dev *hdev = vport->back;
6955 	struct phy_device *phydev = hdev->hw.mac.phydev;
6956 	u32 fc_autoneg;
6957 
6958 	fc_autoneg = hclge_get_autoneg(handle);
6959 	if (auto_neg != fc_autoneg) {
6960 		dev_info(&hdev->pdev->dev,
6961 			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
6962 		return -EOPNOTSUPP;
6963 	}
6964 
6965 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
6966 		dev_info(&hdev->pdev->dev,
6967 			 "Priority flow control enabled. Cannot set link flow control.\n");
6968 		return -EOPNOTSUPP;
6969 	}
6970 
6971 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
6972 
6973 	if (!fc_autoneg)
6974 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
6975 
6976 	/* Only support flow control negotiation for netdev with
6977 	 * phy attached for now.
6978 	 */
6979 	if (!phydev)
6980 		return -EOPNOTSUPP;
6981 
6982 	return phy_start_aneg(phydev);
6983 }
6984 
6985 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
6986 					  u8 *auto_neg, u32 *speed, u8 *duplex)
6987 {
6988 	struct hclge_vport *vport = hclge_get_vport(handle);
6989 	struct hclge_dev *hdev = vport->back;
6990 
6991 	if (speed)
6992 		*speed = hdev->hw.mac.speed;
6993 	if (duplex)
6994 		*duplex = hdev->hw.mac.duplex;
6995 	if (auto_neg)
6996 		*auto_neg = hdev->hw.mac.autoneg;
6997 }
6998 
6999 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
7000 {
7001 	struct hclge_vport *vport = hclge_get_vport(handle);
7002 	struct hclge_dev *hdev = vport->back;
7003 
7004 	if (media_type)
7005 		*media_type = hdev->hw.mac.media_type;
7006 }
7007 
7008 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
7009 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
7010 {
7011 	struct hclge_vport *vport = hclge_get_vport(handle);
7012 	struct hclge_dev *hdev = vport->back;
7013 	struct phy_device *phydev = hdev->hw.mac.phydev;
7014 	int mdix_ctrl, mdix, retval, is_resolved;
7015 
7016 	if (!phydev) {
7017 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7018 		*tp_mdix = ETH_TP_MDI_INVALID;
7019 		return;
7020 	}
7021 
7022 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
7023 
7024 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
7025 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
7026 				    HCLGE_PHY_MDIX_CTRL_S);
7027 
7028 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
7029 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
7030 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
7031 
7032 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
7033 
7034 	switch (mdix_ctrl) {
7035 	case 0x0:
7036 		*tp_mdix_ctrl = ETH_TP_MDI;
7037 		break;
7038 	case 0x1:
7039 		*tp_mdix_ctrl = ETH_TP_MDI_X;
7040 		break;
7041 	case 0x3:
7042 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
7043 		break;
7044 	default:
7045 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7046 		break;
7047 	}
7048 
7049 	if (!is_resolved)
7050 		*tp_mdix = ETH_TP_MDI_INVALID;
7051 	else if (mdix)
7052 		*tp_mdix = ETH_TP_MDI_X;
7053 	else
7054 		*tp_mdix = ETH_TP_MDI;
7055 }
7056 
7057 static int hclge_init_client_instance(struct hnae3_client *client,
7058 				      struct hnae3_ae_dev *ae_dev)
7059 {
7060 	struct hclge_dev *hdev = ae_dev->priv;
7061 	struct hclge_vport *vport;
7062 	int i, ret;
7063 
7064 	for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
7065 		vport = &hdev->vport[i];
7066 
7067 		switch (client->type) {
7068 		case HNAE3_CLIENT_KNIC:
7069 
7070 			hdev->nic_client = client;
7071 			vport->nic.client = client;
7072 			ret = client->ops->init_instance(&vport->nic);
7073 			if (ret)
7074 				goto clear_nic;
7075 
7076 			hnae3_set_client_init_flag(client, ae_dev, 1);
7077 
7078 			if (hdev->roce_client &&
7079 			    hnae3_dev_roce_supported(hdev)) {
7080 				struct hnae3_client *rc = hdev->roce_client;
7081 
7082 				ret = hclge_init_roce_base_info(vport);
7083 				if (ret)
7084 					goto clear_roce;
7085 
7086 				ret = rc->ops->init_instance(&vport->roce);
7087 				if (ret)
7088 					goto clear_roce;
7089 
7090 				hnae3_set_client_init_flag(hdev->roce_client,
7091 							   ae_dev, 1);
7092 			}
7093 
7094 			break;
7095 		case HNAE3_CLIENT_UNIC:
7096 			hdev->nic_client = client;
7097 			vport->nic.client = client;
7098 
7099 			ret = client->ops->init_instance(&vport->nic);
7100 			if (ret)
7101 				goto clear_nic;
7102 
7103 			hnae3_set_client_init_flag(client, ae_dev, 1);
7104 
7105 			break;
7106 		case HNAE3_CLIENT_ROCE:
7107 			if (hnae3_dev_roce_supported(hdev)) {
7108 				hdev->roce_client = client;
7109 				vport->roce.client = client;
7110 			}
7111 
7112 			if (hdev->roce_client && hdev->nic_client) {
7113 				ret = hclge_init_roce_base_info(vport);
7114 				if (ret)
7115 					goto clear_roce;
7116 
7117 				ret = client->ops->init_instance(&vport->roce);
7118 				if (ret)
7119 					goto clear_roce;
7120 
7121 				hnae3_set_client_init_flag(client, ae_dev, 1);
7122 			}
7123 
7124 			break;
7125 		default:
7126 			return -EINVAL;
7127 		}
7128 	}
7129 
7130 	return 0;
7131 
7132 clear_nic:
7133 	hdev->nic_client = NULL;
7134 	vport->nic.client = NULL;
7135 	return ret;
7136 clear_roce:
7137 	hdev->roce_client = NULL;
7138 	vport->roce.client = NULL;
7139 	return ret;
7140 }
7141 
7142 static void hclge_uninit_client_instance(struct hnae3_client *client,
7143 					 struct hnae3_ae_dev *ae_dev)
7144 {
7145 	struct hclge_dev *hdev = ae_dev->priv;
7146 	struct hclge_vport *vport;
7147 	int i;
7148 
7149 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7150 		vport = &hdev->vport[i];
7151 		if (hdev->roce_client) {
7152 			hdev->roce_client->ops->uninit_instance(&vport->roce,
7153 								0);
7154 			hdev->roce_client = NULL;
7155 			vport->roce.client = NULL;
7156 		}
7157 		if (client->type == HNAE3_CLIENT_ROCE)
7158 			return;
7159 		if (hdev->nic_client && client->ops->uninit_instance) {
7160 			client->ops->uninit_instance(&vport->nic, 0);
7161 			hdev->nic_client = NULL;
7162 			vport->nic.client = NULL;
7163 		}
7164 	}
7165 }
7166 
7167 static int hclge_pci_init(struct hclge_dev *hdev)
7168 {
7169 	struct pci_dev *pdev = hdev->pdev;
7170 	struct hclge_hw *hw;
7171 	int ret;
7172 
7173 	ret = pci_enable_device(pdev);
7174 	if (ret) {
7175 		dev_err(&pdev->dev, "failed to enable PCI device\n");
7176 		return ret;
7177 	}
7178 
7179 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7180 	if (ret) {
7181 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7182 		if (ret) {
7183 			dev_err(&pdev->dev,
7184 				"can't set consistent PCI DMA\n");
7185 			goto err_disable_device;
7186 		}
7187 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
7188 	}
7189 
7190 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
7191 	if (ret) {
7192 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
7193 		goto err_disable_device;
7194 	}
7195 
7196 	pci_set_master(pdev);
7197 	hw = &hdev->hw;
7198 	hw->io_base = pcim_iomap(pdev, 2, 0);
7199 	if (!hw->io_base) {
7200 		dev_err(&pdev->dev, "Can't map configuration register space\n");
7201 		ret = -ENOMEM;
7202 		goto err_clr_master;
7203 	}
7204 
7205 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
7206 
7207 	return 0;
7208 err_clr_master:
7209 	pci_clear_master(pdev);
7210 	pci_release_regions(pdev);
7211 err_disable_device:
7212 	pci_disable_device(pdev);
7213 
7214 	return ret;
7215 }
7216 
7217 static void hclge_pci_uninit(struct hclge_dev *hdev)
7218 {
7219 	struct pci_dev *pdev = hdev->pdev;
7220 
7221 	pcim_iounmap(pdev, hdev->hw.io_base);
7222 	pci_free_irq_vectors(pdev);
7223 	pci_clear_master(pdev);
7224 	pci_release_mem_regions(pdev);
7225 	pci_disable_device(pdev);
7226 }
7227 
7228 static void hclge_state_init(struct hclge_dev *hdev)
7229 {
7230 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
7231 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7232 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
7233 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7234 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
7235 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
7236 }
7237 
7238 static void hclge_state_uninit(struct hclge_dev *hdev)
7239 {
7240 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7241 
7242 	if (hdev->service_timer.function)
7243 		del_timer_sync(&hdev->service_timer);
7244 	if (hdev->reset_timer.function)
7245 		del_timer_sync(&hdev->reset_timer);
7246 	if (hdev->service_task.func)
7247 		cancel_work_sync(&hdev->service_task);
7248 	if (hdev->rst_service_task.func)
7249 		cancel_work_sync(&hdev->rst_service_task);
7250 	if (hdev->mbx_service_task.func)
7251 		cancel_work_sync(&hdev->mbx_service_task);
7252 }
7253 
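/* Called by the hnae3 framework before a PCIe function level reset: request
 * a function reset and poll, HCLGE_FLR_WAIT_MS at a time, until the reset
 * task reports the function as down or HCLGE_FLR_WAIT_CNT tries elapse.
 */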
7254 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
7255 {
7256 #define HCLGE_FLR_WAIT_MS	100
7257 #define HCLGE_FLR_WAIT_CNT	50
7258 	struct hclge_dev *hdev = ae_dev->priv;
7259 	int cnt = 0;
7260 
7261 	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
7262 	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7263 	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
7264 	hclge_reset_event(hdev->pdev, NULL);
7265 
7266 	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
7267 	       cnt++ < HCLGE_FLR_WAIT_CNT)
7268 		msleep(HCLGE_FLR_WAIT_MS);
7269 
7270 	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
7271 		dev_err(&hdev->pdev->dev,
7272 			"flr wait down timeout: %d\n", cnt);
7273 }
7274 
7275 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
7276 {
7277 	struct hclge_dev *hdev = ae_dev->priv;
7278 
7279 	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7280 }
7281 
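/* Main PF initialization path: bring up PCI and the command queue, query
 * hardware capabilities, then set up interrupts, TQPs, vports, the MAC,
 * VLAN, TM, RSS and flow director configuration plus the service, reset
 * and mailbox tasks. Any failure unwinds through the error labels below.
 */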
7282 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
7283 {
7284 	struct pci_dev *pdev = ae_dev->pdev;
7285 	struct hclge_dev *hdev;
7286 	int ret;
7287 
7288 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
7289 	if (!hdev) {
7290 		ret = -ENOMEM;
7291 		goto out;
7292 	}
7293 
7294 	hdev->pdev = pdev;
7295 	hdev->ae_dev = ae_dev;
7296 	hdev->reset_type = HNAE3_NONE_RESET;
7297 	hdev->reset_level = HNAE3_FUNC_RESET;
7298 	ae_dev->priv = hdev;
7299 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7300 
7301 	mutex_init(&hdev->vport_lock);
7302 
7303 	ret = hclge_pci_init(hdev);
7304 	if (ret) {
7305 		dev_err(&pdev->dev, "PCI init failed\n");
7306 		goto out;
7307 	}
7308 
7309 	/* Initialize the firmware command queue */
7310 	ret = hclge_cmd_queue_init(hdev);
7311 	if (ret) {
7312 		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
7313 		goto err_pci_uninit;
7314 	}
7315 
7316 	/* Initialize the firmware command module */
7317 	ret = hclge_cmd_init(hdev);
7318 	if (ret)
7319 		goto err_cmd_uninit;
7320 
7321 	ret = hclge_get_cap(hdev);
7322 	if (ret) {
7323 		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
7324 			ret);
7325 		goto err_cmd_uninit;
7326 	}
7327 
7328 	ret = hclge_configure(hdev);
7329 	if (ret) {
7330 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
7331 		goto err_cmd_uninit;
7332 	}
7333 
7334 	ret = hclge_init_msi(hdev);
7335 	if (ret) {
7336 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
7337 		goto err_cmd_uninit;
7338 	}
7339 
7340 	ret = hclge_misc_irq_init(hdev);
7341 	if (ret) {
7342 		dev_err(&pdev->dev,
7343 			"Misc IRQ(vector0) init error, ret = %d.\n",
7344 			ret);
7345 		goto err_msi_uninit;
7346 	}
7347 
7348 	ret = hclge_alloc_tqps(hdev);
7349 	if (ret) {
7350 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
7351 		goto err_msi_irq_uninit;
7352 	}
7353 
7354 	ret = hclge_alloc_vport(hdev);
7355 	if (ret) {
7356 		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
7357 		goto err_msi_irq_uninit;
7358 	}
7359 
7360 	ret = hclge_map_tqp(hdev);
7361 	if (ret) {
7362 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7363 		goto err_msi_irq_uninit;
7364 	}
7365 
7366 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
7367 		ret = hclge_mac_mdio_config(hdev);
7368 		if (ret) {
7369 			dev_err(&hdev->pdev->dev,
7370 				"mdio config fail ret=%d\n", ret);
7371 			goto err_msi_irq_uninit;
7372 		}
7373 	}
7374 
7375 	ret = hclge_init_umv_space(hdev);
7376 	if (ret) {
7377 		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
7378 		goto err_mdiobus_unreg;
7379 	}
7380 
7381 	ret = hclge_mac_init(hdev);
7382 	if (ret) {
7383 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7384 		goto err_mdiobus_unreg;
7385 	}
7386 
7387 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7388 	if (ret) {
7389 		dev_err(&pdev->dev, "Enable tso fail, ret = %d\n", ret);
7390 		goto err_mdiobus_unreg;
7391 	}
7392 
7393 	ret = hclge_config_gro(hdev, true);
7394 	if (ret)
7395 		goto err_mdiobus_unreg;
7396 
7397 	ret = hclge_init_vlan_config(hdev);
7398 	if (ret) {
7399 		dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
7400 		goto err_mdiobus_unreg;
7401 	}
7402 
7403 	ret = hclge_tm_schd_init(hdev);
7404 	if (ret) {
7405 		dev_err(&pdev->dev, "tm schd init fail, ret = %d\n", ret);
7406 		goto err_mdiobus_unreg;
7407 	}
7408 
7409 	hclge_rss_init_cfg(hdev);
7410 	ret = hclge_rss_init_hw(hdev);
7411 	if (ret) {
7412 		dev_err(&pdev->dev, "Rss init fail, ret = %d\n", ret);
7413 		goto err_mdiobus_unreg;
7414 	}
7415 
7416 	ret = init_mgr_tbl(hdev);
7417 	if (ret) {
7418 		dev_err(&pdev->dev, "manager table init fail, ret = %d\n", ret);
7419 		goto err_mdiobus_unreg;
7420 	}
7421 
7422 	ret = hclge_init_fd_config(hdev);
7423 	if (ret) {
7424 		dev_err(&pdev->dev,
7425 			"fd table init fail, ret=%d\n", ret);
7426 		goto err_mdiobus_unreg;
7427 	}
7428 
7429 	ret = hclge_hw_error_set_state(hdev, true);
7430 	if (ret) {
7431 		dev_err(&pdev->dev,
7432 			"fail(%d) to enable hw error interrupts\n", ret);
7433 		goto err_mdiobus_unreg;
7434 	}
7435 
7436 	hclge_dcb_ops_set(hdev);
7437 
7438 	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
7439 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
7440 	INIT_WORK(&hdev->service_task, hclge_service_task);
7441 	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
7442 	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
7443 
7444 	hclge_clear_all_event_cause(hdev);
7445 
7446 	/* Enable MISC vector(vector0) */
7447 	hclge_enable_vector(&hdev->misc_vector, true);
7448 
7449 	hclge_state_init(hdev);
7450 	hdev->last_reset_time = jiffies;
7451 
7452 	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
7453 	return 0;
7454 
7455 err_mdiobus_unreg:
7456 	if (hdev->hw.mac.phydev)
7457 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
7458 err_msi_irq_uninit:
7459 	hclge_misc_irq_uninit(hdev);
7460 err_msi_uninit:
7461 	pci_free_irq_vectors(pdev);
7462 err_cmd_uninit:
7463 	hclge_destroy_cmd_queue(&hdev->hw);
7464 err_pci_uninit:
7465 	pcim_iounmap(pdev, hdev->hw.io_base);
7466 	pci_clear_master(pdev);
7467 	pci_release_regions(pdev);
7468 	pci_disable_device(pdev);
7469 out:
7470 	return ret;
7471 }
7472 
7473 static void hclge_stats_clear(struct hclge_dev *hdev)
7474 {
7475 	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
7476 }
7477 
7478 static void hclge_reset_vport_state(struct hclge_dev *hdev)
7479 {
7480 	struct hclge_vport *vport = hdev->vport;
7481 	int i;
7482 
7483 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7484 		hclge_vport_start(vport);
7485 		vport++;
7486 	}
7487 }
7488 
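/* Re-initialization path used after a reset. PCI and interrupt resources
 * are kept, while statistics are cleared and the command queue, MAC, VLAN,
 * TM, RSS and flow director configuration are rebuilt; the hardware error
 * interrupts are re-enabled at the end.
 */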
7489 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
7490 {
7491 	struct hclge_dev *hdev = ae_dev->priv;
7492 	struct pci_dev *pdev = ae_dev->pdev;
7493 	int ret;
7494 
7495 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7496 
7497 	hclge_stats_clear(hdev);
7498 	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
7499 
7500 	ret = hclge_cmd_init(hdev);
7501 	if (ret) {
7502 		dev_err(&pdev->dev, "Cmd queue init failed\n");
7503 		return ret;
7504 	}
7505 
7506 	ret = hclge_map_tqp(hdev);
7507 	if (ret) {
7508 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7509 		return ret;
7510 	}
7511 
7512 	hclge_reset_umv_space(hdev);
7513 
7514 	ret = hclge_mac_init(hdev);
7515 	if (ret) {
7516 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7517 		return ret;
7518 	}
7519 
7520 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7521 	if (ret) {
7522 		dev_err(&pdev->dev, "Enable tso fail, ret = %d\n", ret);
7523 		return ret;
7524 	}
7525 
7526 	ret = hclge_config_gro(hdev, true);
7527 	if (ret)
7528 		return ret;
7529 
7530 	ret = hclge_init_vlan_config(hdev);
7531 	if (ret) {
7532 		dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
7533 		return ret;
7534 	}
7535 
7536 	ret = hclge_tm_init_hw(hdev, true);
7537 	if (ret) {
7538 		dev_err(&pdev->dev, "tm init hw fail, ret = %d\n", ret);
7539 		return ret;
7540 	}
7541 
7542 	ret = hclge_rss_init_hw(hdev);
7543 	if (ret) {
7544 		dev_err(&pdev->dev, "Rss init fail, ret = %d\n", ret);
7545 		return ret;
7546 	}
7547 
7548 	ret = hclge_init_fd_config(hdev);
7549 	if (ret) {
7550 		dev_err(&pdev->dev,
7551 			"fd table init fail, ret=%d\n", ret);
7552 		return ret;
7553 	}
7554 
7555 	/* Re-enable the hw error interrupts because
7556 	 * the interrupts get disabled on core/global reset.
7557 	 */
7558 	ret = hclge_hw_error_set_state(hdev, true);
7559 	if (ret) {
7560 		dev_err(&pdev->dev,
7561 			"fail(%d) to re-enable HNS hw error interrupts\n", ret);
7562 		return ret;
7563 	}
7564 
7565 	hclge_reset_vport_state(hdev);
7566 
7567 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
7568 		 HCLGE_DRIVER_NAME);
7569 
7570 	return 0;
7571 }
7572 
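/* Undo hclge_init_ae_dev(): stop the timers and work items, unregister the
 * MDIO bus if a PHY was attached, release the UMV space, mask the misc
 * vector, disable hardware error interrupts and free the command queue,
 * IRQ vectors and PCI resources.
 */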
7573 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
7574 {
7575 	struct hclge_dev *hdev = ae_dev->priv;
7576 	struct hclge_mac *mac = &hdev->hw.mac;
7577 
7578 	hclge_state_uninit(hdev);
7579 
7580 	if (mac->phydev)
7581 		mdiobus_unregister(mac->mdio_bus);
7582 
7583 	hclge_uninit_umv_space(hdev);
7584 
7585 	/* Disable MISC vector(vector0) */
7586 	hclge_enable_vector(&hdev->misc_vector, false);
7587 	synchronize_irq(hdev->misc_vector.vector_irq);
7588 
7589 	hclge_hw_error_set_state(hdev, false);
7590 	hclge_destroy_cmd_queue(&hdev->hw);
7591 	hclge_misc_irq_uninit(hdev);
7592 	hclge_pci_uninit(hdev);
7593 	mutex_destroy(&hdev->vport_lock);
7594 	ae_dev->priv = NULL;
7595 }
7596 
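/* Upper bound reported to ethtool for combined channels: limited both by
 * the hardware RSS size and by the vport's TQP budget per TC.
 */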
7597 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
7598 {
7599 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
7600 	struct hclge_vport *vport = hclge_get_vport(handle);
7601 	struct hclge_dev *hdev = vport->back;
7602 
7603 	return min_t(u32, hdev->rss_size_max,
7604 		     vport->alloc_tqps / kinfo->num_tc);
7605 }
7606 
7607 static void hclge_get_channels(struct hnae3_handle *handle,
7608 			       struct ethtool_channels *ch)
7609 {
7610 	ch->max_combined = hclge_get_max_channels(handle);
7611 	ch->other_count = 1;
7612 	ch->max_other = 1;
7613 	ch->combined_count = handle->kinfo.rss_size;
7614 }
7615 
7616 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
7617 					u16 *alloc_tqps, u16 *max_rss_size)
7618 {
7619 	struct hclge_vport *vport = hclge_get_vport(handle);
7620 	struct hclge_dev *hdev = vport->back;
7621 
7622 	*alloc_tqps = vport->alloc_tqps;
7623 	*max_rss_size = hdev->rss_size_max;
7624 }
7625 
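/* Handle "ethtool -L" on the PF: record the requested RSS size, remap the
 * vport's TQPs, reprogram the RSS TC mode and, unless the user configured a
 * custom indirection table, rebuild the table for the new queue count.
 */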
7626 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
7627 			      bool rxfh_configured)
7628 {
7629 	struct hclge_vport *vport = hclge_get_vport(handle);
7630 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
7631 	struct hclge_dev *hdev = vport->back;
7632 	int cur_rss_size = kinfo->rss_size;
7633 	int cur_tqps = kinfo->num_tqps;
7634 	u16 tc_offset[HCLGE_MAX_TC_NUM];
7635 	u16 tc_valid[HCLGE_MAX_TC_NUM];
7636 	u16 tc_size[HCLGE_MAX_TC_NUM];
7637 	u16 roundup_size;
7638 	u32 *rss_indir;
7639 	int ret, i;
7640 
7641 	kinfo->req_rss_size = new_tqps_num;
7642 
7643 	ret = hclge_tm_vport_map_update(hdev);
7644 	if (ret) {
7645 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret = %d\n", ret);
7646 		return ret;
7647 	}
7648 
7649 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
7650 	roundup_size = ilog2(roundup_size);
7651 	/* Set the RSS TC mode according to the new RSS size */
7652 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
7653 		tc_valid[i] = 0;
7654 
7655 		if (!(hdev->hw_tc_map & BIT(i)))
7656 			continue;
7657 
7658 		tc_valid[i] = 1;
7659 		tc_size[i] = roundup_size;
7660 		tc_offset[i] = kinfo->rss_size * i;
7661 	}
7662 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
7663 	if (ret)
7664 		return ret;
7665 
7666 	/* RSS indirection table has been configured by user */
7667 	if (rxfh_configured)
7668 		goto out;
7669 
7670 	/* Reinitialize the RSS indirection table for the new RSS size */
7671 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
7672 	if (!rss_indir)
7673 		return -ENOMEM;
7674 
7675 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
7676 		rss_indir[i] = i % kinfo->rss_size;
7677 
7678 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
7679 	if (ret)
7680 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
7681 			ret);
7682 
7683 	kfree(rss_indir);
7684 
7685 out:
7686 	if (!ret)
7687 		dev_info(&hdev->pdev->dev,
7688 			 "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
7689 			 cur_rss_size, kinfo->rss_size,
7690 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
7691 
7692 	return ret;
7693 }
7694 
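/* Query the firmware for the number of 32-bit and 64-bit registers it can
 * dump on behalf of this function.
 */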
7695 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
7696 			      u32 *regs_num_64_bit)
7697 {
7698 	struct hclge_desc desc;
7699 	u32 total_num;
7700 	int ret;
7701 
7702 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
7703 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7704 	if (ret) {
7705 		dev_err(&hdev->pdev->dev,
7706 			"Query register number cmd failed, ret = %d.\n", ret);
7707 		return ret;
7708 	}
7709 
7710 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
7711 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
7712 
7713 	total_num = *regs_num_32_bit + *regs_num_64_bit;
7714 	if (!total_num)
7715 		return -EINVAL;
7716 
7717 	return 0;
7718 }
7719 
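/* Read the 32-bit register dump from firmware. The first descriptor holds
 * HCLGE_32_BIT_REG_RTN_DATANUM - 2 values because its command header is not
 * register data, while continuation descriptors are packed in full; hence
 * the "+ 2" when computing cmd_num.
 */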
7720 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
7721 				 void *data)
7722 {
7723 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
7724 
7725 	struct hclge_desc *desc;
7726 	u32 *reg_val = data;
7727 	__le32 *desc_data;
7728 	int cmd_num;
7729 	int i, k, n;
7730 	int ret;
7731 
7732 	if (regs_num == 0)
7733 		return 0;
7734 
7735 	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
7736 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
7737 	if (!desc)
7738 		return -ENOMEM;
7739 
7740 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
7741 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
7742 	if (ret) {
7743 		dev_err(&hdev->pdev->dev,
7744 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
7745 		kfree(desc);
7746 		return ret;
7747 	}
7748 
7749 	for (i = 0; i < cmd_num; i++) {
7750 		if (i == 0) {
7751 			desc_data = (__le32 *)(&desc[i].data[0]);
7752 			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
7753 		} else {
7754 			desc_data = (__le32 *)(&desc[i]);
7755 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
7756 		}
7757 		for (k = 0; k < n; k++) {
7758 			*reg_val++ = le32_to_cpu(*desc_data++);
7759 
7760 			regs_num--;
7761 			if (!regs_num)
7762 				break;
7763 		}
7764 	}
7765 
7766 	kfree(desc);
7767 	return 0;
7768 }
7769 
7770 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
7771 				 void *data)
7772 {
7773 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
7774 
7775 	struct hclge_desc *desc;
7776 	u64 *reg_val = data;
7777 	__le64 *desc_data;
7778 	int cmd_num;
7779 	int i, k, n;
7780 	int ret;
7781 
7782 	if (regs_num == 0)
7783 		return 0;
7784 
7785 	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
7786 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
7787 	if (!desc)
7788 		return -ENOMEM;
7789 
7790 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
7791 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
7792 	if (ret) {
7793 		dev_err(&hdev->pdev->dev,
7794 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
7795 		kfree(desc);
7796 		return ret;
7797 	}
7798 
7799 	for (i = 0; i < cmd_num; i++) {
7800 		if (i == 0) {
7801 			desc_data = (__le64 *)(&desc[i].data[0]);
7802 			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
7803 		} else {
7804 			desc_data = (__le64 *)(&desc[i]);
7805 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
7806 		}
7807 		for (k = 0; k < n; k++) {
7808 			*reg_val++ = le64_to_cpu(*desc_data++);
7809 
7810 			regs_num--;
7811 			if (!regs_num)
7812 				break;
7813 		}
7814 	}
7815 
7816 	kfree(desc);
7817 	return 0;
7818 }
7819 
7820 #define MAX_SEPARATE_NUM	4
7821 #define SEPARATOR_VALUE		0xFFFFFFFF
7822 #define REG_NUM_PER_LINE	4
7823 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
7824 
7825 static int hclge_get_regs_len(struct hnae3_handle *handle)
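/* Length of the "ethtool -d" dump: the direct-access register blocks
 * (command queue, common, per-ring and per-vector), each rounded up to
 * whole REG_LEN_PER_LINE lines, plus the 32-bit and 64-bit register areas
 * reported by firmware.
 */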
7826 {
7827 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
7828 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
7829 	struct hclge_vport *vport = hclge_get_vport(handle);
7830 	struct hclge_dev *hdev = vport->back;
7831 	u32 regs_num_32_bit, regs_num_64_bit;
7832 	int ret;
7833 
7834 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
7835 	if (ret) {
7836 		dev_err(&hdev->pdev->dev,
7837 			"Get register number failed, ret = %d.\n", ret);
7838 		return -EOPNOTSUPP;
7839 	}
7840 
7841 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
7842 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
7843 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
7844 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
7845 
7846 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
7847 		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
7848 		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
7849 }
7850 
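/* Fill the "ethtool -d" buffer: dump the direct-access register blocks
 * first, padding each block with SEPARATOR_VALUE words, then append the
 * 32-bit and 64-bit register sets read back from firmware.
 */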
7851 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
7852 			   void *data)
7853 {
7854 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
7855 	struct hclge_vport *vport = hclge_get_vport(handle);
7856 	struct hclge_dev *hdev = vport->back;
7857 	u32 regs_num_32_bit, regs_num_64_bit;
7858 	int i, j, reg_um, separator_num;
7859 	u32 *reg = data;
7860 	int ret;
7861 
7862 	*version = hdev->fw_version;
7863 
7864 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
7865 	if (ret) {
7866 		dev_err(&hdev->pdev->dev,
7867 			"Get register number failed, ret = %d.\n", ret);
7868 		return;
7869 	}
7870 
7871 	/* fetch per-PF register values from the PF PCIe register space */
7872 	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
7873 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
7874 	for (i = 0; i < reg_um; i++)
7875 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
7876 	for (i = 0; i < separator_num; i++)
7877 		*reg++ = SEPARATOR_VALUE;
7878 
7879 	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
7880 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
7881 	for (i = 0; i < reg_um; i++)
7882 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
7883 	for (i = 0; i < separator_num; i++)
7884 		*reg++ = SEPARATOR_VALUE;
7885 
7886 	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
7887 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
7888 	for (j = 0; j < kinfo->num_tqps; j++) {
7889 		for (i = 0; i < reg_um; i++)
7890 			*reg++ = hclge_read_dev(&hdev->hw,
7891 						ring_reg_addr_list[i] +
7892 						0x200 * j);
7893 		for (i = 0; i < separator_num; i++)
7894 			*reg++ = SEPARATOR_VALUE;
7895 	}
7896 
7897 	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
7898 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
7899 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
7900 		for (i = 0; i < reg_um; i++)
7901 			*reg++ = hclge_read_dev(&hdev->hw,
7902 						tqp_intr_reg_addr_list[i] +
7903 						4 * j);
7904 		for (i = 0; i < separator_num; i++)
7905 			*reg++ = SEPARATOR_VALUE;
7906 	}
7907 
7908 	/* fetch PF common register values from firmware */
7909 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
7910 	if (ret) {
7911 		dev_err(&hdev->pdev->dev,
7912 			"Get 32 bit register failed, ret = %d.\n", ret);
7913 		return;
7914 	}
7915 
7916 	reg += regs_num_32_bit;
7917 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
7918 	if (ret)
7919 		dev_err(&hdev->pdev->dev,
7920 			"Get 64 bit register failed, ret = %d.\n", ret);
7921 }
7922 
7923 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
7924 {
7925 	struct hclge_set_led_state_cmd *req;
7926 	struct hclge_desc desc;
7927 	int ret;
7928 
7929 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
7930 
7931 	req = (struct hclge_set_led_state_cmd *)desc.data;
7932 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
7933 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
7934 
7935 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7936 	if (ret)
7937 		dev_err(&hdev->pdev->dev,
7938 			"Send set led state cmd error, ret = %d\n", ret);
7939 
7940 	return ret;
7941 }
7942 
7943 enum hclge_led_status {
7944 	HCLGE_LED_OFF,
7945 	HCLGE_LED_ON,
7946 	HCLGE_LED_NO_CHANGE = 0xFF,
7947 };
7948 
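/* Drive the locate LED for "ethtool -p": the LED is switched on while
 * identification is active and off again when it completes.
 */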
7949 static int hclge_set_led_id(struct hnae3_handle *handle,
7950 			    enum ethtool_phys_id_state status)
7951 {
7952 	struct hclge_vport *vport = hclge_get_vport(handle);
7953 	struct hclge_dev *hdev = vport->back;
7954 
7955 	switch (status) {
7956 	case ETHTOOL_ID_ACTIVE:
7957 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
7958 	case ETHTOOL_ID_INACTIVE:
7959 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
7960 	default:
7961 		return -EINVAL;
7962 	}
7963 }
7964 
7965 static void hclge_get_link_mode(struct hnae3_handle *handle,
7966 				unsigned long *supported,
7967 				unsigned long *advertising)
7968 {
7969 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
7970 	struct hclge_vport *vport = hclge_get_vport(handle);
7971 	struct hclge_dev *hdev = vport->back;
7972 	unsigned int idx = 0;
7973 
7974 	for (; idx < size; idx++) {
7975 		supported[idx] = hdev->hw.mac.supported[idx];
7976 		advertising[idx] = hdev->hw.mac.advertising[idx];
7977 	}
7978 }
7979 
7980 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
7981 {
7982 	struct hclge_vport *vport = hclge_get_vport(handle);
7983 	struct hclge_dev *hdev = vport->back;
7984 
7985 	return hclge_config_gro(hdev, enable);
7986 }
7987 
7988 static const struct hnae3_ae_ops hclge_ops = {
7989 	.init_ae_dev = hclge_init_ae_dev,
7990 	.uninit_ae_dev = hclge_uninit_ae_dev,
7991 	.flr_prepare = hclge_flr_prepare,
7992 	.flr_done = hclge_flr_done,
7993 	.init_client_instance = hclge_init_client_instance,
7994 	.uninit_client_instance = hclge_uninit_client_instance,
7995 	.map_ring_to_vector = hclge_map_ring_to_vector,
7996 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
7997 	.get_vector = hclge_get_vector,
7998 	.put_vector = hclge_put_vector,
7999 	.set_promisc_mode = hclge_set_promisc_mode,
8000 	.set_loopback = hclge_set_loopback,
8001 	.start = hclge_ae_start,
8002 	.stop = hclge_ae_stop,
8003 	.client_start = hclge_client_start,
8004 	.client_stop = hclge_client_stop,
8005 	.get_status = hclge_get_status,
8006 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
8007 	.update_speed_duplex_h = hclge_update_speed_duplex_h,
8008 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
8009 	.get_media_type = hclge_get_media_type,
8010 	.get_rss_key_size = hclge_get_rss_key_size,
8011 	.get_rss_indir_size = hclge_get_rss_indir_size,
8012 	.get_rss = hclge_get_rss,
8013 	.set_rss = hclge_set_rss,
8014 	.set_rss_tuple = hclge_set_rss_tuple,
8015 	.get_rss_tuple = hclge_get_rss_tuple,
8016 	.get_tc_size = hclge_get_tc_size,
8017 	.get_mac_addr = hclge_get_mac_addr,
8018 	.set_mac_addr = hclge_set_mac_addr,
8019 	.do_ioctl = hclge_do_ioctl,
8020 	.add_uc_addr = hclge_add_uc_addr,
8021 	.rm_uc_addr = hclge_rm_uc_addr,
8022 	.add_mc_addr = hclge_add_mc_addr,
8023 	.rm_mc_addr = hclge_rm_mc_addr,
8024 	.set_autoneg = hclge_set_autoneg,
8025 	.get_autoneg = hclge_get_autoneg,
8026 	.get_pauseparam = hclge_get_pauseparam,
8027 	.set_pauseparam = hclge_set_pauseparam,
8028 	.set_mtu = hclge_set_mtu,
8029 	.reset_queue = hclge_reset_tqp,
8030 	.get_stats = hclge_get_stats,
8031 	.update_stats = hclge_update_stats,
8032 	.get_strings = hclge_get_strings,
8033 	.get_sset_count = hclge_get_sset_count,
8034 	.get_fw_version = hclge_get_fw_version,
8035 	.get_mdix_mode = hclge_get_mdix_mode,
8036 	.enable_vlan_filter = hclge_enable_vlan_filter,
8037 	.set_vlan_filter = hclge_set_vlan_filter,
8038 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
8039 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
8040 	.reset_event = hclge_reset_event,
8041 	.set_default_reset_request = hclge_set_def_reset_request,
8042 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
8043 	.set_channels = hclge_set_channels,
8044 	.get_channels = hclge_get_channels,
8045 	.get_regs_len = hclge_get_regs_len,
8046 	.get_regs = hclge_get_regs,
8047 	.set_led_id = hclge_set_led_id,
8048 	.get_link_mode = hclge_get_link_mode,
8049 	.add_fd_entry = hclge_add_fd_entry,
8050 	.del_fd_entry = hclge_del_fd_entry,
8051 	.del_all_fd_entries = hclge_del_all_fd_entries,
8052 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
8053 	.get_fd_rule_info = hclge_get_fd_rule_info,
8054 	.get_fd_all_rules = hclge_get_all_rules,
8055 	.restore_fd_rules = hclge_restore_fd_entries,
8056 	.enable_fd = hclge_enable_fd,
8057 	.dbg_run_cmd = hclge_dbg_run_cmd,
8058 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
8059 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
8060 	.ae_dev_resetting = hclge_ae_dev_resetting,
8061 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
8062 	.set_gro_en = hclge_gro_en,
8063 	.get_global_queue_id = hclge_covert_handle_qid_global,
8064 	.set_timer_task = hclge_set_timer_task,
8065 	.mac_connect_phy = hclge_mac_connect_phy,
8066 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
8067 };
8068 
8069 static struct hnae3_ae_algo ae_algo = {
8070 	.ops = &hclge_ops,
8071 	.pdev_id_table = ae_algo_pci_tbl,
8072 };
8073 
8074 static int hclge_init(void)
8075 {
8076 	pr_info("%s is initializing\n", HCLGE_NAME);
8077 
8078 	hnae3_register_ae_algo(&ae_algo);
8079 
8080 	return 0;
8081 }
8082 
8083 static void hclge_exit(void)
8084 {
8085 	hnae3_unregister_ae_algo(&ae_algo);
8086 }
8087 module_init(hclge_init);
8088 module_exit(hclge_exit);
8089 
8090 MODULE_LICENSE("GPL");
8091 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
8092 MODULE_DESCRIPTION("HCLGE Driver");
8093 MODULE_VERSION(HCLGE_MOD_VERSION);
8094