xref: /linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c (revision 27f54b582567bef2bfb9ee6f23aed6137cf9cfcb)
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25 
26 #define HCLGE_NAME			"hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29 
30 #define HCLGE_BUF_SIZE_UNIT	256
31 
32 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
33 static int hclge_init_vlan_config(struct hclge_dev *hdev);
34 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
35 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
36 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
37 			       u16 *allocated_size, bool is_alloc);
38 
39 static struct hnae3_ae_algo ae_algo;
40 
41 static const struct pci_device_id ae_algo_pci_tbl[] = {
42 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
43 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
44 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
45 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
46 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
47 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
48 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
49 	/* required last entry */
50 	{0, }
51 };
52 
53 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
54 
55 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
56 					 HCLGE_CMDQ_TX_ADDR_H_REG,
57 					 HCLGE_CMDQ_TX_DEPTH_REG,
58 					 HCLGE_CMDQ_TX_TAIL_REG,
59 					 HCLGE_CMDQ_TX_HEAD_REG,
60 					 HCLGE_CMDQ_RX_ADDR_L_REG,
61 					 HCLGE_CMDQ_RX_ADDR_H_REG,
62 					 HCLGE_CMDQ_RX_DEPTH_REG,
63 					 HCLGE_CMDQ_RX_TAIL_REG,
64 					 HCLGE_CMDQ_RX_HEAD_REG,
65 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
66 					 HCLGE_CMDQ_INTR_STS_REG,
67 					 HCLGE_CMDQ_INTR_EN_REG,
68 					 HCLGE_CMDQ_INTR_GEN_REG};
69 
70 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
71 					   HCLGE_VECTOR0_OTER_EN_REG,
72 					   HCLGE_MISC_RESET_STS_REG,
73 					   HCLGE_MISC_VECTOR_INT_STS,
74 					   HCLGE_GLOBAL_RESET_REG,
75 					   HCLGE_FUN_RST_ING,
76 					   HCLGE_GRO_EN_REG};
77 
78 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
79 					 HCLGE_RING_RX_ADDR_H_REG,
80 					 HCLGE_RING_RX_BD_NUM_REG,
81 					 HCLGE_RING_RX_BD_LENGTH_REG,
82 					 HCLGE_RING_RX_MERGE_EN_REG,
83 					 HCLGE_RING_RX_TAIL_REG,
84 					 HCLGE_RING_RX_HEAD_REG,
85 					 HCLGE_RING_RX_FBD_NUM_REG,
86 					 HCLGE_RING_RX_OFFSET_REG,
87 					 HCLGE_RING_RX_FBD_OFFSET_REG,
88 					 HCLGE_RING_RX_STASH_REG,
89 					 HCLGE_RING_RX_BD_ERR_REG,
90 					 HCLGE_RING_TX_ADDR_L_REG,
91 					 HCLGE_RING_TX_ADDR_H_REG,
92 					 HCLGE_RING_TX_BD_NUM_REG,
93 					 HCLGE_RING_TX_PRIORITY_REG,
94 					 HCLGE_RING_TX_TC_REG,
95 					 HCLGE_RING_TX_MERGE_EN_REG,
96 					 HCLGE_RING_TX_TAIL_REG,
97 					 HCLGE_RING_TX_HEAD_REG,
98 					 HCLGE_RING_TX_FBD_NUM_REG,
99 					 HCLGE_RING_TX_OFFSET_REG,
100 					 HCLGE_RING_TX_EBD_NUM_REG,
101 					 HCLGE_RING_TX_EBD_OFFSET_REG,
102 					 HCLGE_RING_TX_BD_ERR_REG,
103 					 HCLGE_RING_EN_REG};
104 
105 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
106 					     HCLGE_TQP_INTR_GL0_REG,
107 					     HCLGE_TQP_INTR_GL1_REG,
108 					     HCLGE_TQP_INTR_GL2_REG,
109 					     HCLGE_TQP_INTR_RL_REG};
110 
111 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
112 	"App    Loopback test",
113 	"Serdes serial Loopback test",
114 	"Serdes parallel Loopback test",
115 	"Phy    Loopback test"
116 };
117 
118 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
119 	{"mac_tx_mac_pause_num",
120 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
121 	{"mac_rx_mac_pause_num",
122 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
123 	{"mac_tx_control_pkt_num",
124 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
125 	{"mac_rx_control_pkt_num",
126 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
127 	{"mac_tx_pfc_pkt_num",
128 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
129 	{"mac_tx_pfc_pri0_pkt_num",
130 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
131 	{"mac_tx_pfc_pri1_pkt_num",
132 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
133 	{"mac_tx_pfc_pri2_pkt_num",
134 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
135 	{"mac_tx_pfc_pri3_pkt_num",
136 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
137 	{"mac_tx_pfc_pri4_pkt_num",
138 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
139 	{"mac_tx_pfc_pri5_pkt_num",
140 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
141 	{"mac_tx_pfc_pri6_pkt_num",
142 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
143 	{"mac_tx_pfc_pri7_pkt_num",
144 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
145 	{"mac_rx_pfc_pkt_num",
146 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
147 	{"mac_rx_pfc_pri0_pkt_num",
148 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
149 	{"mac_rx_pfc_pri1_pkt_num",
150 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
151 	{"mac_rx_pfc_pri2_pkt_num",
152 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
153 	{"mac_rx_pfc_pri3_pkt_num",
154 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
155 	{"mac_rx_pfc_pri4_pkt_num",
156 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
157 	{"mac_rx_pfc_pri5_pkt_num",
158 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
159 	{"mac_rx_pfc_pri6_pkt_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
161 	{"mac_rx_pfc_pri7_pkt_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
163 	{"mac_tx_total_pkt_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
165 	{"mac_tx_total_oct_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
167 	{"mac_tx_good_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
169 	{"mac_tx_bad_pkt_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
171 	{"mac_tx_good_oct_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
173 	{"mac_tx_bad_oct_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
175 	{"mac_tx_uni_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
177 	{"mac_tx_multi_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
179 	{"mac_tx_broad_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
181 	{"mac_tx_undersize_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
183 	{"mac_tx_oversize_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
185 	{"mac_tx_64_oct_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
187 	{"mac_tx_65_127_oct_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
189 	{"mac_tx_128_255_oct_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
191 	{"mac_tx_256_511_oct_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
193 	{"mac_tx_512_1023_oct_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
195 	{"mac_tx_1024_1518_oct_pkt_num",
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
197 	{"mac_tx_1519_2047_oct_pkt_num",
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
199 	{"mac_tx_2048_4095_oct_pkt_num",
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
201 	{"mac_tx_4096_8191_oct_pkt_num",
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
203 	{"mac_tx_8192_9216_oct_pkt_num",
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
205 	{"mac_tx_9217_12287_oct_pkt_num",
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
207 	{"mac_tx_12288_16383_oct_pkt_num",
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
209 	{"mac_tx_1519_max_good_pkt_num",
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
211 	{"mac_tx_1519_max_bad_pkt_num",
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
213 	{"mac_rx_total_pkt_num",
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
215 	{"mac_rx_total_oct_num",
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
217 	{"mac_rx_good_pkt_num",
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
219 	{"mac_rx_bad_pkt_num",
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
221 	{"mac_rx_good_oct_num",
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
223 	{"mac_rx_bad_oct_num",
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
225 	{"mac_rx_uni_pkt_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
227 	{"mac_rx_multi_pkt_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
229 	{"mac_rx_broad_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
231 	{"mac_rx_undersize_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
233 	{"mac_rx_oversize_pkt_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
235 	{"mac_rx_64_oct_pkt_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
237 	{"mac_rx_65_127_oct_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
239 	{"mac_rx_128_255_oct_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
241 	{"mac_rx_256_511_oct_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
243 	{"mac_rx_512_1023_oct_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
245 	{"mac_rx_1024_1518_oct_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
247 	{"mac_rx_1519_2047_oct_pkt_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
249 	{"mac_rx_2048_4095_oct_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
251 	{"mac_rx_4096_8191_oct_pkt_num",
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
253 	{"mac_rx_8192_9216_oct_pkt_num",
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
255 	{"mac_rx_9217_12287_oct_pkt_num",
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
257 	{"mac_rx_12288_16383_oct_pkt_num",
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
259 	{"mac_rx_1519_max_good_pkt_num",
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
261 	{"mac_rx_1519_max_bad_pkt_num",
262 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
263 
264 	{"mac_tx_fragment_pkt_num",
265 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
266 	{"mac_tx_undermin_pkt_num",
267 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
268 	{"mac_tx_jabber_pkt_num",
269 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
270 	{"mac_tx_err_all_pkt_num",
271 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
272 	{"mac_tx_from_app_good_pkt_num",
273 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
274 	{"mac_tx_from_app_bad_pkt_num",
275 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
276 	{"mac_rx_fragment_pkt_num",
277 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
278 	{"mac_rx_undermin_pkt_num",
279 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
280 	{"mac_rx_jabber_pkt_num",
281 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
282 	{"mac_rx_fcs_err_pkt_num",
283 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
284 	{"mac_rx_send_app_good_pkt_num",
285 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
286 	{"mac_rx_send_app_bad_pkt_num",
287 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
288 };
289 
290 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
291 	{
292 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
293 		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
294 		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
295 		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
296 		.i_port_bitmap = 0x1,
297 	},
298 };
299 
300 static const u8 hclge_hash_key[] = {
301 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
302 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
303 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
304 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
305 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
306 };
307 
308 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
309 {
310 #define HCLGE_MAC_CMD_NUM 21
311 
312 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
313 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
314 	__le64 *desc_data;
315 	int i, k, n;
316 	int ret;
317 
318 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
319 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
320 	if (ret) {
321 		dev_err(&hdev->pdev->dev,
322 			"Get MAC pkt stats fail, status = %d.\n", ret);
323 
324 		return ret;
325 	}
326 
327 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
328 		/* for special opcode 0032, only the first desc has the head */
329 		if (unlikely(i == 0)) {
330 			desc_data = (__le64 *)(&desc[i].data[0]);
331 			n = HCLGE_RD_FIRST_STATS_NUM;
332 		} else {
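			/* the remaining descriptors are read from their first
			 * byte onwards, i.e. the whole descriptor is treated
			 * as statistics data
			 */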
333 			desc_data = (__le64 *)(&desc[i]);
334 			n = HCLGE_RD_OTHER_STATS_NUM;
335 		}
336 
337 		for (k = 0; k < n; k++) {
338 			*data += le64_to_cpu(*desc_data);
339 			data++;
340 			desc_data++;
341 		}
342 	}
343 
344 	return 0;
345 }
346 
347 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
348 {
349 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
350 	struct hclge_desc *desc;
351 	__le64 *desc_data;
352 	u16 i, k, n;
353 	int ret;
354 
355 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
356 	if (!desc)
357 		return -ENOMEM;
358 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
359 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
360 	if (ret) {
361 		kfree(desc);
362 		return ret;
363 	}
364 
365 	for (i = 0; i < desc_num; i++) {
366 		/* for special opcode 0034, only the first desc has the head */
367 		if (i == 0) {
368 			desc_data = (__le64 *)(&desc[i].data[0]);
369 			n = HCLGE_RD_FIRST_STATS_NUM;
370 		} else {
371 			desc_data = (__le64 *)(&desc[i]);
372 			n = HCLGE_RD_OTHER_STATS_NUM;
373 		}
374 
375 		for (k = 0; k < n; k++) {
376 			*data += le64_to_cpu(*desc_data);
377 			data++;
378 			desc_data++;
379 		}
380 	}
381 
382 	kfree(desc);
383 
384 	return 0;
385 }
386 
387 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
388 {
389 	struct hclge_desc desc;
390 	__le32 *desc_data;
391 	u32 reg_num;
392 	int ret;
393 
394 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
395 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
396 	if (ret)
397 		return ret;
398 
399 	desc_data = (__le32 *)(&desc.data[0]);
400 	reg_num = le32_to_cpu(*desc_data);
401 
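	/* convert the register count into a descriptor count: one descriptor
	 * plus ceil((reg_num - 3) / 4) further descriptors, rounding up so
	 * that every remaining register gets a slot
	 */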
402 	*desc_num = 1 + ((reg_num - 3) >> 2) +
403 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
404 
405 	return 0;
406 }
407 
408 static int hclge_mac_update_stats(struct hclge_dev *hdev)
409 {
410 	u32 desc_num;
411 	int ret;
412 
413 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
414 
415 	/* The firmware supports the new statistics acquisition method */
416 	if (!ret)
417 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
418 	else if (ret == -EOPNOTSUPP)
419 		ret = hclge_mac_update_stats_defective(hdev);
420 	else
421 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
422 
423 	return ret;
424 }
425 
426 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
427 {
428 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
429 	struct hclge_vport *vport = hclge_get_vport(handle);
430 	struct hclge_dev *hdev = vport->back;
431 	struct hnae3_queue *queue;
432 	struct hclge_desc desc[1];
433 	struct hclge_tqp *tqp;
434 	int ret, i;
435 
436 	for (i = 0; i < kinfo->num_tqps; i++) {
437 		queue = handle->kinfo.tqp[i];
438 		tqp = container_of(queue, struct hclge_tqp, q);
439 		/* command : HCLGE_OPC_QUERY_RX_STATUS */
440 		hclge_cmd_setup_basic_desc(&desc[0],
441 					   HCLGE_OPC_QUERY_RX_STATUS,
442 					   true);
443 
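		/* only the low 9 bits of the queue index are passed to the command */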
444 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
445 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
446 		if (ret) {
447 			dev_err(&hdev->pdev->dev,
448 				"Query tqp stat fail, status = %d, queue = %d\n",
449 				ret, i);
450 			return ret;
451 		}
452 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
453 			le32_to_cpu(desc[0].data[1]);
454 	}
455 
456 	for (i = 0; i < kinfo->num_tqps; i++) {
457 		queue = handle->kinfo.tqp[i];
458 		tqp = container_of(queue, struct hclge_tqp, q);
459 		/* command : HCLGE_OPC_QUERY_TX_STATUS */
460 		hclge_cmd_setup_basic_desc(&desc[0],
461 					   HCLGE_OPC_QUERY_TX_STATUS,
462 					   true);
463 
464 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
465 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
466 		if (ret) {
467 			dev_err(&hdev->pdev->dev,
468 				"Query tqp stat fail, status = %d, queue = %d\n",
469 				ret, i);
470 			return ret;
471 		}
472 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
473 			le32_to_cpu(desc[0].data[1]);
474 	}
475 
476 	return 0;
477 }
478 
479 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
480 {
481 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
482 	struct hclge_tqp *tqp;
483 	u64 *buff = data;
484 	int i;
485 
486 	for (i = 0; i < kinfo->num_tqps; i++) {
487 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
488 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
489 	}
490 
491 	for (i = 0; i < kinfo->num_tqps; i++) {
492 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
493 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
494 	}
495 
496 	return buff;
497 }
498 
499 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
500 {
501 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
502 
503 	return kinfo->num_tqps * (2);
504 }
505 
506 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
507 {
508 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
509 	u8 *buff = data;
510 	int i = 0;
511 
512 	for (i = 0; i < kinfo->num_tqps; i++) {
513 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
514 			struct hclge_tqp, q);
515 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
516 			 tqp->index);
517 		buff = buff + ETH_GSTRING_LEN;
518 	}
519 
520 	for (i = 0; i < kinfo->num_tqps; i++) {
521 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
522 			struct hclge_tqp, q);
523 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
524 			 tqp->index);
525 		buff = buff + ETH_GSTRING_LEN;
526 	}
527 
528 	return buff;
529 }
530 
531 static u64 *hclge_comm_get_stats(void *comm_stats,
532 				 const struct hclge_comm_stats_str strs[],
533 				 int size, u64 *data)
534 {
535 	u64 *buf = data;
536 	u32 i;
537 
538 	for (i = 0; i < size; i++)
539 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
540 
541 	return buf + size;
542 }
543 
544 static u8 *hclge_comm_get_strings(u32 stringset,
545 				  const struct hclge_comm_stats_str strs[],
546 				  int size, u8 *data)
547 {
548 	char *buff = (char *)data;
549 	u32 i;
550 
551 	if (stringset != ETH_SS_STATS)
552 		return buff;
553 
554 	for (i = 0; i < size; i++) {
555 		/* use "%s" so the stat name is never treated as a format string */
556 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
557 		buff = buff + ETH_GSTRING_LEN;
558 	}
559 
560 	return (u8 *)buff;
561 }
562 
563 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
564 {
565 	struct hnae3_handle *handle;
566 	int status;
567 
568 	handle = &hdev->vport[0].nic;
569 	if (handle->client) {
570 		status = hclge_tqps_update_stats(handle);
571 		if (status) {
572 			dev_err(&hdev->pdev->dev,
573 				"Update TQPS stats fail, status = %d.\n",
574 				status);
575 		}
576 	}
577 
578 	status = hclge_mac_update_stats(hdev);
579 	if (status)
580 		dev_err(&hdev->pdev->dev,
581 			"Update MAC stats fail, status = %d.\n", status);
582 }
583 
584 static void hclge_update_stats(struct hnae3_handle *handle,
585 			       struct net_device_stats *net_stats)
586 {
587 	struct hclge_vport *vport = hclge_get_vport(handle);
588 	struct hclge_dev *hdev = vport->back;
589 	int status;
590 
591 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
592 		return;
593 
594 	status = hclge_mac_update_stats(hdev);
595 	if (status)
596 		dev_err(&hdev->pdev->dev,
597 			"Update MAC stats fail, status = %d.\n",
598 			status);
599 
600 	status = hclge_tqps_update_stats(handle);
601 	if (status)
602 		dev_err(&hdev->pdev->dev,
603 			"Update TQPS stats fail, status = %d.\n",
604 			status);
605 
606 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
607 }
608 
609 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
610 {
611 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
612 		HNAE3_SUPPORT_PHY_LOOPBACK |\
613 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
614 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
615 
616 	struct hclge_vport *vport = hclge_get_vport(handle);
617 	struct hclge_dev *hdev = vport->back;
618 	int count = 0;
619 
620 	/* Loopback test support rules:
621 	 * mac: only GE mode is supported
622 	 * serdes: all mac modes are supported, including GE/XGE/LGE/CGE
623 	 * phy: only supported when a phy device exists on the board
624 	 */
625 	if (stringset == ETH_SS_TEST) {
626 		/* clear loopback bit flags at first */
627 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
628 		if (hdev->pdev->revision >= 0x21 ||
629 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
630 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
631 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
632 			count += 1;
633 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
634 		}
635 
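		/* serdes serial and serdes parallel loopback are always reported */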
636 		count += 2;
637 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
638 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
639 	} else if (stringset == ETH_SS_STATS) {
640 		count = ARRAY_SIZE(g_mac_stats_string) +
641 			hclge_tqps_get_sset_count(handle, stringset);
642 	}
643 
644 	return count;
645 }
646 
647 static void hclge_get_strings(struct hnae3_handle *handle,
648 			      u32 stringset,
649 			      u8 *data)
650 {
651 	u8 *p = (char *)data;
652 	int size;
653 
654 	if (stringset == ETH_SS_STATS) {
655 		size = ARRAY_SIZE(g_mac_stats_string);
656 		p = hclge_comm_get_strings(stringset,
657 					   g_mac_stats_string,
658 					   size,
659 					   p);
660 		p = hclge_tqps_get_strings(handle, p);
661 	} else if (stringset == ETH_SS_TEST) {
662 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
663 			memcpy(p,
664 			       hns3_nic_test_strs[HNAE3_LOOP_APP],
665 			       ETH_GSTRING_LEN);
666 			p += ETH_GSTRING_LEN;
667 		}
668 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
669 			memcpy(p,
670 			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
671 			       ETH_GSTRING_LEN);
672 			p += ETH_GSTRING_LEN;
673 		}
674 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
675 			memcpy(p,
676 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
677 			       ETH_GSTRING_LEN);
678 			p += ETH_GSTRING_LEN;
679 		}
680 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
681 			memcpy(p,
682 			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
683 			       ETH_GSTRING_LEN);
684 			p += ETH_GSTRING_LEN;
685 		}
686 	}
687 }
688 
689 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
690 {
691 	struct hclge_vport *vport = hclge_get_vport(handle);
692 	struct hclge_dev *hdev = vport->back;
693 	u64 *p;
694 
695 	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
696 				 g_mac_stats_string,
697 				 ARRAY_SIZE(g_mac_stats_string),
698 				 data);
699 	p = hclge_tqps_get_stats(handle, p);
700 }
701 
702 static int hclge_parse_func_status(struct hclge_dev *hdev,
703 				   struct hclge_func_status_cmd *status)
704 {
705 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
706 		return -EINVAL;
707 
708 	/* Record whether this PF is the main PF */
709 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
710 		hdev->flag |= HCLGE_FLAG_MAIN;
711 	else
712 		hdev->flag &= ~HCLGE_FLAG_MAIN;
713 
714 	return 0;
715 }
716 
717 static int hclge_query_function_status(struct hclge_dev *hdev)
718 {
719 	struct hclge_func_status_cmd *req;
720 	struct hclge_desc desc;
721 	int timeout = 0;
722 	int ret;
723 
724 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
725 	req = (struct hclge_func_status_cmd *)desc.data;
726 
727 	do {
728 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
729 		if (ret) {
730 			dev_err(&hdev->pdev->dev,
731 				"query function status failed %d.\n",
732 				ret);
733 
734 			return ret;
735 		}
736 
737 		/* Check if the PF reset is done */
738 		if (req->pf_state)
739 			break;
740 		usleep_range(1000, 2000);
741 	} while (timeout++ < 5);
742 
743 	ret = hclge_parse_func_status(hdev, req);
744 
745 	return ret;
746 }
747 
748 static int hclge_query_pf_resource(struct hclge_dev *hdev)
749 {
750 	struct hclge_pf_res_cmd *req;
751 	struct hclge_desc desc;
752 	int ret;
753 
754 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
755 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
756 	if (ret) {
757 		dev_err(&hdev->pdev->dev,
758 			"query pf resource failed %d.\n", ret);
759 		return ret;
760 	}
761 
762 	req = (struct hclge_pf_res_cmd *)desc.data;
763 	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
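	/* the firmware reports buffer sizes in units of (1 << HCLGE_BUF_UNIT_S) bytes */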
764 	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
765 
766 	if (req->tx_buf_size)
767 		hdev->tx_buf_size =
768 			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
769 	else
770 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
771 
772 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
773 
774 	if (req->dv_buf_size)
775 		hdev->dv_buf_size =
776 			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
777 	else
778 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
779 
780 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
781 
782 	if (hnae3_dev_roce_supported(hdev)) {
783 		hdev->roce_base_msix_offset =
784 		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
785 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
786 		hdev->num_roce_msi =
787 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
788 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
789 
790 		/* The PF should have both NIC vectors and RoCE vectors;
791 		 * the NIC vectors are queued before the RoCE vectors.
792 		 */
793 		hdev->num_msi = hdev->num_roce_msi  +
794 				hdev->roce_base_msix_offset;
795 	} else {
796 		hdev->num_msi =
797 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
798 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
799 	}
800 
801 	return 0;
802 }
803 
804 static int hclge_parse_speed(int speed_cmd, int *speed)
805 {
806 	switch (speed_cmd) {
807 	case 6:
808 		*speed = HCLGE_MAC_SPEED_10M;
809 		break;
810 	case 7:
811 		*speed = HCLGE_MAC_SPEED_100M;
812 		break;
813 	case 0:
814 		*speed = HCLGE_MAC_SPEED_1G;
815 		break;
816 	case 1:
817 		*speed = HCLGE_MAC_SPEED_10G;
818 		break;
819 	case 2:
820 		*speed = HCLGE_MAC_SPEED_25G;
821 		break;
822 	case 3:
823 		*speed = HCLGE_MAC_SPEED_40G;
824 		break;
825 	case 4:
826 		*speed = HCLGE_MAC_SPEED_50G;
827 		break;
828 	case 5:
829 		*speed = HCLGE_MAC_SPEED_100G;
830 		break;
831 	default:
832 		return -EINVAL;
833 	}
834 
835 	return 0;
836 }
837 
838 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
839 					u8 speed_ability)
840 {
841 	unsigned long *supported = hdev->hw.mac.supported;
842 
843 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
844 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
845 				 supported);
846 
847 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
848 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
849 				 supported);
850 
851 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
852 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
853 				 supported);
854 
855 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
856 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
857 				 supported);
858 
859 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
860 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
861 				 supported);
862 
863 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
864 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
865 }
866 
867 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
868 					 u8 speed_ability)
869 {
870 	unsigned long *supported = hdev->hw.mac.supported;
871 
872 	/* default to supporting all speeds for a GE port */
873 	if (!speed_ability)
874 		speed_ability = HCLGE_SUPPORT_GE;
875 
876 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
877 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
878 				 supported);
879 
880 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
881 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
882 				 supported);
883 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
884 				 supported);
885 	}
886 
887 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
888 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
889 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
890 	}
891 
892 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
893 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
894 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
895 }
896 
897 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
898 {
899 	u8 media_type = hdev->hw.mac.media_type;
900 
901 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
902 		hclge_parse_fiber_link_mode(hdev, speed_ability);
903 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
904 		hclge_parse_copper_link_mode(hdev, speed_ability);
905 }
906 
907 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
908 {
909 	struct hclge_cfg_param_cmd *req;
910 	u64 mac_addr_tmp_high;
911 	u64 mac_addr_tmp;
912 	int i;
913 
914 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
915 
916 	/* get the configuration */
917 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
918 					      HCLGE_CFG_VMDQ_M,
919 					      HCLGE_CFG_VMDQ_S);
920 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
921 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
922 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
923 					    HCLGE_CFG_TQP_DESC_N_M,
924 					    HCLGE_CFG_TQP_DESC_N_S);
925 
926 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
927 					HCLGE_CFG_PHY_ADDR_M,
928 					HCLGE_CFG_PHY_ADDR_S);
929 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
930 					  HCLGE_CFG_MEDIA_TP_M,
931 					  HCLGE_CFG_MEDIA_TP_S);
932 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
933 					  HCLGE_CFG_RX_BUF_LEN_M,
934 					  HCLGE_CFG_RX_BUF_LEN_S);
935 	/* get mac_address */
936 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
937 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
938 					    HCLGE_CFG_MAC_ADDR_H_M,
939 					    HCLGE_CFG_MAC_ADDR_H_S);
940 
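	/* param[2] holds the low 32 bits of the MAC address; merge the
	 * upper bits taken from param[3] above them
	 */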
941 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
942 
943 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
944 					     HCLGE_CFG_DEFAULT_SPEED_M,
945 					     HCLGE_CFG_DEFAULT_SPEED_S);
946 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
947 					    HCLGE_CFG_RSS_SIZE_M,
948 					    HCLGE_CFG_RSS_SIZE_S);
949 
950 	for (i = 0; i < ETH_ALEN; i++)
951 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
952 
953 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
954 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
955 
956 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
957 					     HCLGE_CFG_SPEED_ABILITY_M,
958 					     HCLGE_CFG_SPEED_ABILITY_S);
959 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
960 					 HCLGE_CFG_UMV_TBL_SPACE_M,
961 					 HCLGE_CFG_UMV_TBL_SPACE_S);
962 	if (!cfg->umv_space)
963 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
964 }
965 
966 /* hclge_get_cfg: query the static parameters from flash
967  * @hdev: pointer to struct hclge_dev
968  * @hcfg: the config structure to be filled
969  */
970 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
971 {
972 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
973 	struct hclge_cfg_param_cmd *req;
974 	int i, ret;
975 
976 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
977 		u32 offset = 0;
978 
979 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
980 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
981 					   true);
982 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
983 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
984 		/* The length must be in units of 4 bytes when sent to hardware */
985 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
986 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
987 		req->offset = cpu_to_le32(offset);
988 	}
989 
990 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
991 	if (ret) {
992 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
993 		return ret;
994 	}
995 
996 	hclge_parse_cfg(hcfg, desc);
997 
998 	return 0;
999 }
1000 
1001 static int hclge_get_cap(struct hclge_dev *hdev)
1002 {
1003 	int ret;
1004 
1005 	ret = hclge_query_function_status(hdev);
1006 	if (ret) {
1007 		dev_err(&hdev->pdev->dev,
1008 			"query function status error %d.\n", ret);
1009 		return ret;
1010 	}
1011 
1012 	/* get pf resource */
1013 	ret = hclge_query_pf_resource(hdev);
1014 	if (ret)
1015 		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1016 
1017 	return ret;
1018 }
1019 
1020 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1021 {
1022 #define HCLGE_MIN_TX_DESC	64
1023 #define HCLGE_MIN_RX_DESC	64
1024 
1025 	if (!is_kdump_kernel())
1026 		return;
1027 
1028 	dev_info(&hdev->pdev->dev,
1029 		 "Running kdump kernel. Using minimal resources\n");
1030 
1031 	/* the minimal number of queue pairs equals the number of vports */
1032 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1033 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1034 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1035 }
1036 
1037 static int hclge_configure(struct hclge_dev *hdev)
1038 {
1039 	struct hclge_cfg cfg;
1040 	int ret, i;
1041 
1042 	ret = hclge_get_cfg(hdev, &cfg);
1043 	if (ret) {
1044 		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1045 		return ret;
1046 	}
1047 
1048 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1049 	hdev->base_tqp_pid = 0;
1050 	hdev->rss_size_max = cfg.rss_size_max;
1051 	hdev->rx_buf_len = cfg.rx_buf_len;
1052 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1053 	hdev->hw.mac.media_type = cfg.media_type;
1054 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1055 	hdev->num_tx_desc = cfg.tqp_desc_num;
1056 	hdev->num_rx_desc = cfg.tqp_desc_num;
1057 	hdev->tm_info.num_pg = 1;
1058 	hdev->tc_max = cfg.tc_num;
1059 	hdev->tm_info.hw_pfc_map = 0;
1060 	hdev->wanted_umv_size = cfg.umv_space;
1061 
1062 	if (hnae3_dev_fd_supported(hdev))
1063 		hdev->fd_en = true;
1064 
1065 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1066 	if (ret) {
1067 		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1068 		return ret;
1069 	}
1070 
1071 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1072 
1073 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1074 	    (hdev->tc_max < 1)) {
1075 		dev_warn(&hdev->pdev->dev, "invalid TC num = %d, using 1 TC\n",
1076 			 hdev->tc_max);
1077 		hdev->tc_max = 1;
1078 	}
1079 
1080 	/* Dev does not support DCB */
1081 	if (!hnae3_dev_dcb_supported(hdev)) {
1082 		hdev->tc_max = 1;
1083 		hdev->pfc_max = 0;
1084 	} else {
1085 		hdev->pfc_max = hdev->tc_max;
1086 	}
1087 
1088 	hdev->tm_info.num_tc = 1;
1089 
1090 	/* Currently, non-contiguous TCs are not supported */
1091 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1092 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1093 
1094 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1095 
1096 	hclge_init_kdump_kernel_config(hdev);
1097 
1098 	return ret;
1099 }
1100 
1101 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1102 			    int tso_mss_max)
1103 {
1104 	struct hclge_cfg_tso_status_cmd *req;
1105 	struct hclge_desc desc;
1106 	u16 tso_mss;
1107 
1108 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1109 
1110 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1111 
1112 	tso_mss = 0;
1113 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1114 			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1115 	req->tso_mss_min = cpu_to_le16(tso_mss);
1116 
1117 	tso_mss = 0;
1118 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1119 			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1120 	req->tso_mss_max = cpu_to_le16(tso_mss);
1121 
1122 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1123 }
1124 
1125 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1126 {
1127 	struct hclge_cfg_gro_status_cmd *req;
1128 	struct hclge_desc desc;
1129 	int ret;
1130 
1131 	if (!hnae3_dev_gro_supported(hdev))
1132 		return 0;
1133 
1134 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1135 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1136 
1137 	req->gro_en = cpu_to_le16(en ? 1 : 0);
1138 
1139 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1140 	if (ret)
1141 		dev_err(&hdev->pdev->dev,
1142 			"GRO hardware config cmd failed, ret = %d\n", ret);
1143 
1144 	return ret;
1145 }
1146 
1147 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1148 {
1149 	struct hclge_tqp *tqp;
1150 	int i;
1151 
1152 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1153 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1154 	if (!hdev->htqp)
1155 		return -ENOMEM;
1156 
1157 	tqp = hdev->htqp;
1158 
1159 	for (i = 0; i < hdev->num_tqps; i++) {
1160 		tqp->dev = &hdev->pdev->dev;
1161 		tqp->index = i;
1162 
1163 		tqp->q.ae_algo = &ae_algo;
1164 		tqp->q.buf_size = hdev->rx_buf_len;
1165 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1166 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1167 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1168 			i * HCLGE_TQP_REG_SIZE;
1169 
1170 		tqp++;
1171 	}
1172 
1173 	return 0;
1174 }
1175 
1176 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1177 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1178 {
1179 	struct hclge_tqp_map_cmd *req;
1180 	struct hclge_desc desc;
1181 	int ret;
1182 
1183 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1184 
1185 	req = (struct hclge_tqp_map_cmd *)desc.data;
1186 	req->tqp_id = cpu_to_le16(tqp_pid);
1187 	req->tqp_vf = func_id;
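	/* the map-type bit marks a VF mapping; the enable bit activates it */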
1188 	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1189 			1 << HCLGE_TQP_MAP_EN_B;
1190 	req->tqp_vid = cpu_to_le16(tqp_vid);
1191 
1192 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1193 	if (ret)
1194 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1195 
1196 	return ret;
1197 }
1198 
1199 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1200 {
1201 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1202 	struct hclge_dev *hdev = vport->back;
1203 	int i, alloced;
1204 
1205 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1206 	     alloced < num_tqps; i++) {
1207 		if (!hdev->htqp[i].alloced) {
1208 			hdev->htqp[i].q.handle = &vport->nic;
1209 			hdev->htqp[i].q.tqp_index = alloced;
1210 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1211 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1212 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1213 			hdev->htqp[i].alloced = true;
1214 			alloced++;
1215 		}
1216 	}
1217 	vport->alloc_tqps = alloced;
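	/* the RSS size is bounded by the hardware maximum and by the
	 * number of queues available per TC
	 */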
1218 	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1219 				vport->alloc_tqps / hdev->tm_info.num_tc);
1220 
1221 	return 0;
1222 }
1223 
1224 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1225 			    u16 num_tx_desc, u16 num_rx_desc)
1226 
1227 {
1228 	struct hnae3_handle *nic = &vport->nic;
1229 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1230 	struct hclge_dev *hdev = vport->back;
1231 	int ret;
1232 
1233 	kinfo->num_tx_desc = num_tx_desc;
1234 	kinfo->num_rx_desc = num_rx_desc;
1235 
1236 	kinfo->rx_buf_len = hdev->rx_buf_len;
1237 
1238 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1239 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1240 	if (!kinfo->tqp)
1241 		return -ENOMEM;
1242 
1243 	ret = hclge_assign_tqp(vport, num_tqps);
1244 	if (ret)
1245 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1246 
1247 	return ret;
1248 }
1249 
1250 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1251 				  struct hclge_vport *vport)
1252 {
1253 	struct hnae3_handle *nic = &vport->nic;
1254 	struct hnae3_knic_private_info *kinfo;
1255 	u16 i;
1256 
1257 	kinfo = &nic->kinfo;
1258 	for (i = 0; i < vport->alloc_tqps; i++) {
1259 		struct hclge_tqp *q =
1260 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1261 		bool is_pf;
1262 		int ret;
1263 
1264 		is_pf = !(vport->vport_id);
1265 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1266 					     i, is_pf);
1267 		if (ret)
1268 			return ret;
1269 	}
1270 
1271 	return 0;
1272 }
1273 
1274 static int hclge_map_tqp(struct hclge_dev *hdev)
1275 {
1276 	struct hclge_vport *vport = hdev->vport;
1277 	u16 i, num_vport;
1278 
1279 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1280 	for (i = 0; i < num_vport; i++)	{
1281 		int ret;
1282 
1283 		ret = hclge_map_tqp_to_vport(hdev, vport);
1284 		if (ret)
1285 			return ret;
1286 
1287 		vport++;
1288 	}
1289 
1290 	return 0;
1291 }
1292 
1293 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1294 {
1295 	/* this would be initialized later */
1296 }
1297 
1298 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1299 {
1300 	struct hnae3_handle *nic = &vport->nic;
1301 	struct hclge_dev *hdev = vport->back;
1302 	int ret;
1303 
1304 	nic->pdev = hdev->pdev;
1305 	nic->ae_algo = &ae_algo;
1306 	nic->numa_node_mask = hdev->numa_node_mask;
1307 
1308 	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1309 		ret = hclge_knic_setup(vport, num_tqps,
1310 				       hdev->num_tx_desc, hdev->num_rx_desc);
1311 
1312 		if (ret) {
1313 			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1314 				ret);
1315 			return ret;
1316 		}
1317 	} else {
1318 		hclge_unic_setup(vport, num_tqps);
1319 	}
1320 
1321 	return 0;
1322 }
1323 
1324 static int hclge_alloc_vport(struct hclge_dev *hdev)
1325 {
1326 	struct pci_dev *pdev = hdev->pdev;
1327 	struct hclge_vport *vport;
1328 	u32 tqp_main_vport;
1329 	u32 tqp_per_vport;
1330 	int num_vport, i;
1331 	int ret;
1332 
1333 	/* We need to alloc a vport for the main NIC of the PF */
1334 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1335 
1336 	if (hdev->num_tqps < num_vport) {
1337 		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1338 			hdev->num_tqps, num_vport);
1339 		return -EINVAL;
1340 	}
1341 
1342 	/* Alloc the same number of TQPs for every vport */
1343 	tqp_per_vport = hdev->num_tqps / num_vport;
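	/* the main vport additionally takes the remainder so that no
	 * TQP is left unassigned
	 */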
1344 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1345 
1346 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1347 			     GFP_KERNEL);
1348 	if (!vport)
1349 		return -ENOMEM;
1350 
1351 	hdev->vport = vport;
1352 	hdev->num_alloc_vport = num_vport;
1353 
1354 	if (IS_ENABLED(CONFIG_PCI_IOV))
1355 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1356 
1357 	for (i = 0; i < num_vport; i++) {
1358 		vport->back = hdev;
1359 		vport->vport_id = i;
1360 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1361 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1362 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1363 		INIT_LIST_HEAD(&vport->vlan_list);
1364 		INIT_LIST_HEAD(&vport->uc_mac_list);
1365 		INIT_LIST_HEAD(&vport->mc_mac_list);
1366 
1367 		if (i == 0)
1368 			ret = hclge_vport_setup(vport, tqp_main_vport);
1369 		else
1370 			ret = hclge_vport_setup(vport, tqp_per_vport);
1371 		if (ret) {
1372 			dev_err(&pdev->dev,
1373 				"vport setup failed for vport %d, %d\n",
1374 				i, ret);
1375 			return ret;
1376 		}
1377 
1378 		vport++;
1379 	}
1380 
1381 	return 0;
1382 }
1383 
1384 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1385 				    struct hclge_pkt_buf_alloc *buf_alloc)
1386 {
1387 /* TX buffer size is in units of 128 bytes */
1388 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1389 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1390 	struct hclge_tx_buff_alloc_cmd *req;
1391 	struct hclge_desc desc;
1392 	int ret;
1393 	u8 i;
1394 
1395 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1396 
1397 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1398 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1399 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1400 
1401 		req->tx_pkt_buff[i] =
1402 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1403 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1404 	}
1405 
1406 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1407 	if (ret)
1408 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1409 			ret);
1410 
1411 	return ret;
1412 }
1413 
1414 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1415 				 struct hclge_pkt_buf_alloc *buf_alloc)
1416 {
1417 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1418 
1419 	if (ret)
1420 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1421 
1422 	return ret;
1423 }
1424 
1425 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1426 {
1427 	int i, cnt = 0;
1428 
1429 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1430 		if (hdev->hw_tc_map & BIT(i))
1431 			cnt++;
1432 	return cnt;
1433 }
1434 
1435 /* Get the number of PFC-enabled TCs that have a private buffer */
1436 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1437 				  struct hclge_pkt_buf_alloc *buf_alloc)
1438 {
1439 	struct hclge_priv_buf *priv;
1440 	int i, cnt = 0;
1441 
1442 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1443 		priv = &buf_alloc->priv_buf[i];
1444 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1445 		    priv->enable)
1446 			cnt++;
1447 	}
1448 
1449 	return cnt;
1450 }
1451 
1452 /* Get the number of PFC-disabled TCs that have a private buffer */
1453 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1454 				     struct hclge_pkt_buf_alloc *buf_alloc)
1455 {
1456 	struct hclge_priv_buf *priv;
1457 	int i, cnt = 0;
1458 
1459 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1460 		priv = &buf_alloc->priv_buf[i];
1461 		if (hdev->hw_tc_map & BIT(i) &&
1462 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1463 		    priv->enable)
1464 			cnt++;
1465 	}
1466 
1467 	return cnt;
1468 }
1469 
1470 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1471 {
1472 	struct hclge_priv_buf *priv;
1473 	u32 rx_priv = 0;
1474 	int i;
1475 
1476 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1477 		priv = &buf_alloc->priv_buf[i];
1478 		if (priv->enable)
1479 			rx_priv += priv->buf_size;
1480 	}
1481 	return rx_priv;
1482 }
1483 
1484 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1485 {
1486 	u32 i, total_tx_size = 0;
1487 
1488 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1489 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1490 
1491 	return total_tx_size;
1492 }
1493 
1494 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1495 				struct hclge_pkt_buf_alloc *buf_alloc,
1496 				u32 rx_all)
1497 {
1498 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1499 	u32 tc_num = hclge_get_tc_num(hdev);
1500 	u32 shared_buf, aligned_mps;
1501 	u32 rx_priv;
1502 	int i;
1503 
1504 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1505 
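	/* the shared buffer must hold at least two max-size frames plus the
	 * DV headroom when DCB is supported, or one frame plus the non-DCB
	 * additional buffer and the DV headroom otherwise; it must also
	 * cover one frame per TC plus one spare frame
	 */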
1506 	if (hnae3_dev_dcb_supported(hdev))
1507 		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
1508 	else
1509 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1510 					+ hdev->dv_buf_size;
1511 
1512 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1513 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1514 			     HCLGE_BUF_SIZE_UNIT);
1515 
1516 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1517 	if (rx_all < rx_priv + shared_std)
1518 		return false;
1519 
1520 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1521 	buf_alloc->s_buf.buf_size = shared_buf;
1522 	if (hnae3_dev_dcb_supported(hdev)) {
1523 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1524 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1525 			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1526 	} else {
1527 		buf_alloc->s_buf.self.high = aligned_mps +
1528 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1529 		buf_alloc->s_buf.self.low = aligned_mps;
1530 	}
1531 
1532 	if (hnae3_dev_dcb_supported(hdev)) {
1533 		if (tc_num)
1534 			hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
1535 		else
1536 			hi_thrd = shared_buf - hdev->dv_buf_size;
1537 
1538 		hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
1539 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1540 		lo_thrd = hi_thrd - aligned_mps / 2;
1541 	} else {
1542 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1543 		lo_thrd = aligned_mps;
1544 	}
1545 
1546 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1547 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1548 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1549 	}
1550 
1551 	return true;
1552 }
1553 
1554 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1555 				struct hclge_pkt_buf_alloc *buf_alloc)
1556 {
1557 	u32 i, total_size;
1558 
1559 	total_size = hdev->pkt_buf_size;
1560 
1561 	/* alloc tx buffer for all enabled TCs */
1562 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1563 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1564 
1565 		if (hdev->hw_tc_map & BIT(i)) {
1566 			if (total_size < hdev->tx_buf_size)
1567 				return -ENOMEM;
1568 
1569 			priv->tx_buf_size = hdev->tx_buf_size;
1570 		} else {
1571 			priv->tx_buf_size = 0;
1572 		}
1573 
1574 		total_size -= priv->tx_buf_size;
1575 	}
1576 
1577 	return 0;
1578 }
1579 
1580 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1581 				  struct hclge_pkt_buf_alloc *buf_alloc)
1582 {
1583 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1584 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1585 	int i;
1586 
1587 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1588 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1589 
1590 		priv->enable = 0;
1591 		priv->wl.low = 0;
1592 		priv->wl.high = 0;
1593 		priv->buf_size = 0;
1594 
1595 		if (!(hdev->hw_tc_map & BIT(i)))
1596 			continue;
1597 
1598 		priv->enable = 1;
1599 
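		/* a PFC-enabled TC needs a non-zero low waterline, while a
		 * non-PFC TC only needs a high waterline; 'max' selects the
		 * larger first-try waterlines, the smaller ones are used
		 * when the first attempt does not fit
		 */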
1600 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1601 			priv->wl.low = max ? aligned_mps : 256;
1602 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
1603 						HCLGE_BUF_SIZE_UNIT);
1604 		} else {
1605 			priv->wl.low = 0;
1606 			priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
1607 		}
1608 
1609 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1610 	}
1611 
1612 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1613 }
1614 
1615 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1616 					  struct hclge_pkt_buf_alloc *buf_alloc)
1617 {
1618 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1619 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1620 	int i;
1621 
1622 	/* clear the TCs starting from the last one */
1623 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1624 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1625 
1626 		if (hdev->hw_tc_map & BIT(i) &&
1627 		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1628 			/* Clear the private buffer of this non-PFC TC */
1629 			priv->wl.low = 0;
1630 			priv->wl.high = 0;
1631 			priv->buf_size = 0;
1632 			priv->enable = 0;
1633 			no_pfc_priv_num--;
1634 		}
1635 
1636 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1637 		    no_pfc_priv_num == 0)
1638 			break;
1639 	}
1640 
1641 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1642 }
1643 
1644 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1645 					struct hclge_pkt_buf_alloc *buf_alloc)
1646 {
1647 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1648 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1649 	int i;
1650 
1651 	/* clear the TCs starting from the last one */
1652 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1653 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1654 
1655 		if (hdev->hw_tc_map & BIT(i) &&
1656 		    hdev->tm_info.hw_pfc_map & BIT(i)) {
1657 			/* Reduce the number of PFC TCs with a private buffer */
1658 			priv->wl.low = 0;
1659 			priv->enable = 0;
1660 			priv->wl.high = 0;
1661 			priv->buf_size = 0;
1662 			pfc_priv_num--;
1663 		}
1664 
1665 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1666 		    pfc_priv_num == 0)
1667 			break;
1668 	}
1669 
1670 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1671 }
1672 
1673 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1674  * @hdev: pointer to struct hclge_dev
1675  * @buf_alloc: pointer to buffer calculation data
1676  * @return: 0 on successful calculation, negative error code on failure
1677  */
1678 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1679 				struct hclge_pkt_buf_alloc *buf_alloc)
1680 {
1681 	/* When DCB is not supported, rx private buffer is not allocated. */
1682 	if (!hnae3_dev_dcb_supported(hdev)) {
1683 		u32 rx_all = hdev->pkt_buf_size;
1684 
1685 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1686 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1687 			return -ENOMEM;
1688 
1689 		return 0;
1690 	}
1691 
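	/* try the allocation strategies in order: full-size private buffers,
	 * reduced private buffers, then drop the private buffers of non-PFC
	 * TCs and finally of PFC TCs until everything fits
	 */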
1692 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1693 		return 0;
1694 
1695 	/* try to decrease the buffer size */
1696 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1697 		return 0;
1698 
1699 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1700 		return 0;
1701 
1702 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1703 		return 0;
1704 
1705 	return -ENOMEM;
1706 }
1707 
1708 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1709 				   struct hclge_pkt_buf_alloc *buf_alloc)
1710 {
1711 	struct hclge_rx_priv_buff_cmd *req;
1712 	struct hclge_desc desc;
1713 	int ret;
1714 	int i;
1715 
1716 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1717 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1718 
1719 	/* Alloc the private buffer for each TC */
1720 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1721 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1722 
1723 		req->buf_num[i] =
1724 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1725 		req->buf_num[i] |=
1726 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1727 	}
1728 
1729 	req->shared_buf =
1730 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1731 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
1732 
1733 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1734 	if (ret)
1735 		dev_err(&hdev->pdev->dev,
1736 			"rx private buffer alloc cmd failed %d\n", ret);
1737 
1738 	return ret;
1739 }
1740 
1741 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1742 				   struct hclge_pkt_buf_alloc *buf_alloc)
1743 {
1744 	struct hclge_rx_priv_wl_buf *req;
1745 	struct hclge_priv_buf *priv;
1746 	struct hclge_desc desc[2];
1747 	int i, j;
1748 	int ret;
1749 
1750 	for (i = 0; i < 2; i++) {
1751 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1752 					   false);
1753 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1754 
1755 		/* The first descriptor sets the NEXT bit to 1 */
1756 		if (i == 0)
1757 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1758 		else
1759 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1760 
1761 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1762 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1763 
1764 			priv = &buf_alloc->priv_buf[idx];
1765 			req->tc_wl[j].high =
1766 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1767 			req->tc_wl[j].high |=
1768 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1769 			req->tc_wl[j].low =
1770 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1771 			req->tc_wl[j].low |=
1772 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1773 		}
1774 	}
1775 
1776 	/* Send 2 descriptors at one time */
1777 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1778 	if (ret)
1779 		dev_err(&hdev->pdev->dev,
1780 			"rx private waterline config cmd failed %d\n",
1781 			ret);
1782 	return ret;
1783 }
1784 
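/* hclge_common_thrd_config: program the per-TC high/low thresholds of the
 * shared rx buffer, again using two chained descriptors.
 */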
1785 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1786 				    struct hclge_pkt_buf_alloc *buf_alloc)
1787 {
1788 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1789 	struct hclge_rx_com_thrd *req;
1790 	struct hclge_desc desc[2];
1791 	struct hclge_tc_thrd *tc;
1792 	int i, j;
1793 	int ret;
1794 
1795 	for (i = 0; i < 2; i++) {
1796 		hclge_cmd_setup_basic_desc(&desc[i],
1797 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1798 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
1799 
1800 		/* The first descriptor sets the NEXT bit to 1 */
1801 		if (i == 0)
1802 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1803 		else
1804 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1805 
1806 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1807 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1808 
1809 			req->com_thrd[j].high =
1810 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1811 			req->com_thrd[j].high |=
1812 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1813 			req->com_thrd[j].low =
1814 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1815 			req->com_thrd[j].low |=
1816 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1817 		}
1818 	}
1819 
1820 	/* Send 2 descriptors at one time */
1821 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1822 	if (ret)
1823 		dev_err(&hdev->pdev->dev,
1824 			"common threshold config cmd failed %d\n", ret);
1825 	return ret;
1826 }
1827 
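/* hclge_common_wl_config: program the high/low waterlines of the shared
 * rx buffer itself.
 */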
1828 static int hclge_common_wl_config(struct hclge_dev *hdev,
1829 				  struct hclge_pkt_buf_alloc *buf_alloc)
1830 {
1831 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1832 	struct hclge_rx_com_wl *req;
1833 	struct hclge_desc desc;
1834 	int ret;
1835 
1836 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1837 
1838 	req = (struct hclge_rx_com_wl *)desc.data;
1839 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1840 	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1841 
1842 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1843 	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1844 
1845 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1846 	if (ret)
1847 		dev_err(&hdev->pdev->dev,
1848 			"common waterline config cmd failed %d\n", ret);
1849 
1850 	return ret;
1851 }
1852 
1853 int hclge_buffer_alloc(struct hclge_dev *hdev)
1854 {
1855 	struct hclge_pkt_buf_alloc *pkt_buf;
1856 	int ret;
1857 
1858 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1859 	if (!pkt_buf)
1860 		return -ENOMEM;
1861 
1862 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
1863 	if (ret) {
1864 		dev_err(&hdev->pdev->dev,
1865 			"could not calc tx buffer size for all TCs %d\n", ret);
1866 		goto out;
1867 	}
1868 
1869 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
1870 	if (ret) {
1871 		dev_err(&hdev->pdev->dev,
1872 			"could not alloc tx buffers %d\n", ret);
1873 		goto out;
1874 	}
1875 
1876 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
1877 	if (ret) {
1878 		dev_err(&hdev->pdev->dev,
1879 			"could not calc rx priv buffer size for all TCs %d\n",
1880 			ret);
1881 		goto out;
1882 	}
1883 
1884 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
1885 	if (ret) {
1886 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1887 			ret);
1888 		goto out;
1889 	}
1890 
1891 	if (hnae3_dev_dcb_supported(hdev)) {
1892 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
1893 		if (ret) {
1894 			dev_err(&hdev->pdev->dev,
1895 				"could not configure rx private waterline %d\n",
1896 				ret);
1897 			goto out;
1898 		}
1899 
1900 		ret = hclge_common_thrd_config(hdev, pkt_buf);
1901 		if (ret) {
1902 			dev_err(&hdev->pdev->dev,
1903 				"could not configure common threshold %d\n",
1904 				ret);
1905 			goto out;
1906 		}
1907 	}
1908 
1909 	ret = hclge_common_wl_config(hdev, pkt_buf);
1910 	if (ret)
1911 		dev_err(&hdev->pdev->dev,
1912 			"could not configure common waterline %d\n", ret);
1913 
1914 out:
1915 	kfree(pkt_buf);
1916 	return ret;
1917 }
1918 
1919 static int hclge_init_roce_base_info(struct hclge_vport *vport)
1920 {
1921 	struct hnae3_handle *roce = &vport->roce;
1922 	struct hnae3_handle *nic = &vport->nic;
1923 
1924 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
1925 
1926 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1927 	    vport->back->num_msi_left == 0)
1928 		return -EINVAL;
1929 
1930 	roce->rinfo.base_vector = vport->back->roce_base_vector;
1931 
1932 	roce->rinfo.netdev = nic->kinfo.netdev;
1933 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
1934 
1935 	roce->pdev = nic->pdev;
1936 	roce->ae_algo = nic->ae_algo;
1937 	roce->numa_node_mask = nic->numa_node_mask;
1938 
1939 	return 0;
1940 }
1941 
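/* hclge_init_msi: allocate MSI/MSI-X vectors for the PF and set up the
 * vector_status/vector_irq bookkeeping arrays. Getting fewer vectors than
 * requested is tolerated with a warning.
 */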
1942 static int hclge_init_msi(struct hclge_dev *hdev)
1943 {
1944 	struct pci_dev *pdev = hdev->pdev;
1945 	int vectors;
1946 	int i;
1947 
1948 	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1949 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
1950 	if (vectors < 0) {
1951 		dev_err(&pdev->dev,
1952 			"failed(%d) to allocate MSI/MSI-X vectors\n",
1953 			vectors);
1954 		return vectors;
1955 	}
1956 	if (vectors < hdev->num_msi)
1957 		dev_warn(&hdev->pdev->dev,
1958 			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1959 			 hdev->num_msi, vectors);
1960 
1961 	hdev->num_msi = vectors;
1962 	hdev->num_msi_left = vectors;
1963 	hdev->base_msi_vector = pdev->irq;
1964 	hdev->roce_base_vector = hdev->base_msi_vector +
1965 				hdev->roce_base_msix_offset;
1966 
1967 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1968 					   sizeof(u16), GFP_KERNEL);
1969 	if (!hdev->vector_status) {
1970 		pci_free_irq_vectors(pdev);
1971 		return -ENOMEM;
1972 	}
1973 
1974 	for (i = 0; i < hdev->num_msi; i++)
1975 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1976 
1977 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1978 					sizeof(int), GFP_KERNEL);
1979 	if (!hdev->vector_irq) {
1980 		pci_free_irq_vectors(pdev);
1981 		return -ENOMEM;
1982 	}
1983 
1984 	return 0;
1985 }
1986 
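/* Half duplex is only meaningful for 10M/100M links; any other speed is
 * forced to full duplex.
 */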
1987 static u8 hclge_check_speed_dup(u8 duplex, int speed)
1988 {
1990 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
1991 		duplex = HCLGE_MAC_FULL;
1992 
1993 	return duplex;
1994 }
1995 
1996 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
1997 				      u8 duplex)
1998 {
1999 	struct hclge_config_mac_speed_dup_cmd *req;
2000 	struct hclge_desc desc;
2001 	int ret;
2002 
2003 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2004 
2005 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2006 
2007 	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2008 
2009 	switch (speed) {
2010 	case HCLGE_MAC_SPEED_10M:
2011 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2012 				HCLGE_CFG_SPEED_S, 6);
2013 		break;
2014 	case HCLGE_MAC_SPEED_100M:
2015 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2016 				HCLGE_CFG_SPEED_S, 7);
2017 		break;
2018 	case HCLGE_MAC_SPEED_1G:
2019 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2020 				HCLGE_CFG_SPEED_S, 0);
2021 		break;
2022 	case HCLGE_MAC_SPEED_10G:
2023 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2024 				HCLGE_CFG_SPEED_S, 1);
2025 		break;
2026 	case HCLGE_MAC_SPEED_25G:
2027 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2028 				HCLGE_CFG_SPEED_S, 2);
2029 		break;
2030 	case HCLGE_MAC_SPEED_40G:
2031 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2032 				HCLGE_CFG_SPEED_S, 3);
2033 		break;
2034 	case HCLGE_MAC_SPEED_50G:
2035 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2036 				HCLGE_CFG_SPEED_S, 4);
2037 		break;
2038 	case HCLGE_MAC_SPEED_100G:
2039 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2040 				HCLGE_CFG_SPEED_S, 5);
2041 		break;
2042 	default:
2043 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2044 		return -EINVAL;
2045 	}
2046 
2047 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2048 		      1);
2049 
2050 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2051 	if (ret) {
2052 		dev_err(&hdev->pdev->dev,
2053 			"mac speed/duplex config cmd failed %d.\n", ret);
2054 		return ret;
2055 	}
2056 
2057 	return 0;
2058 }
2059 
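/* Configure the MAC speed/duplex only when it differs from the cached
 * value, and update the cache on success.
 */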
2060 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2061 {
2062 	int ret;
2063 
2064 	duplex = hclge_check_speed_dup(duplex, speed);
2065 	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2066 		return 0;
2067 
2068 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2069 	if (ret)
2070 		return ret;
2071 
2072 	hdev->hw.mac.speed = speed;
2073 	hdev->hw.mac.duplex = duplex;
2074 
2075 	return 0;
2076 }
2077 
2078 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2079 				     u8 duplex)
2080 {
2081 	struct hclge_vport *vport = hclge_get_vport(handle);
2082 	struct hclge_dev *hdev = vport->back;
2083 
2084 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2085 }
2086 
2087 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2088 {
2089 	struct hclge_config_auto_neg_cmd *req;
2090 	struct hclge_desc desc;
2091 	u32 flag = 0;
2092 	int ret;
2093 
2094 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2095 
2096 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2097 	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2098 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2099 
2100 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2101 	if (ret)
2102 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2103 			ret);
2104 
2105 	return ret;
2106 }
2107 
2108 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2109 {
2110 	struct hclge_vport *vport = hclge_get_vport(handle);
2111 	struct hclge_dev *hdev = vport->back;
2112 
2113 	return hclge_set_autoneg_en(hdev, enable);
2114 }
2115 
2116 static int hclge_get_autoneg(struct hnae3_handle *handle)
2117 {
2118 	struct hclge_vport *vport = hclge_get_vport(handle);
2119 	struct hclge_dev *hdev = vport->back;
2120 	struct phy_device *phydev = hdev->hw.mac.phydev;
2121 
2122 	if (phydev)
2123 		return phydev->autoneg;
2124 
2125 	return hdev->hw.mac.autoneg;
2126 }
2127 
2128 static int hclge_mac_init(struct hclge_dev *hdev)
2129 {
2130 	struct hclge_mac *mac = &hdev->hw.mac;
2131 	int ret;
2132 
2133 	hdev->support_sfp_query = true;
2134 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2135 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2136 					 hdev->hw.mac.duplex);
2137 	if (ret) {
2138 		dev_err(&hdev->pdev->dev,
2139 			"Config mac speed dup fail ret=%d\n", ret);
2140 		return ret;
2141 	}
2142 
2143 	mac->link = 0;
2144 
2145 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2146 	if (ret) {
2147 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2148 		return ret;
2149 	}
2150 
2151 	ret = hclge_buffer_alloc(hdev);
2152 	if (ret)
2153 		dev_err(&hdev->pdev->dev,
2154 			"allocate buffer fail, ret=%d\n", ret);
2155 
2156 	return ret;
2157 }
2158 
2159 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2160 {
2161 	if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2162 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2163 		schedule_work(&hdev->mbx_service_task);
2164 }
2165 
2166 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2167 {
2168 	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2169 		schedule_work(&hdev->rst_service_task);
2170 }
2171 
2172 static void hclge_task_schedule(struct hclge_dev *hdev)
2173 {
2174 	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2175 	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2176 	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2177 		(void)schedule_work(&hdev->service_task);
2178 }
2179 
2180 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2181 {
2182 	struct hclge_link_status_cmd *req;
2183 	struct hclge_desc desc;
2184 	int link_status;
2185 	int ret;
2186 
2187 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2188 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2189 	if (ret) {
2190 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2191 			ret);
2192 		return ret;
2193 	}
2194 
2195 	req = (struct hclge_link_status_cmd *)desc.data;
2196 	link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2197 
2198 	return !!link_status;
2199 }
2200 
2201 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2202 {
2203 	int mac_state;
2204 	int link_stat;
2205 
2206 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2207 		return 0;
2208 
2209 	mac_state = hclge_get_mac_link_status(hdev);
2210 
2211 	if (hdev->hw.mac.phydev) {
2212 		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2213 			link_stat = mac_state &
2214 				hdev->hw.mac.phydev->link;
2215 		else
2216 			link_stat = 0;
2217 
2218 	} else {
2219 		link_stat = mac_state;
2220 	}
2221 
2222 	return !!link_stat;
2223 }
2224 
2225 static void hclge_update_link_status(struct hclge_dev *hdev)
2226 {
2227 	struct hnae3_client *rclient = hdev->roce_client;
2228 	struct hnae3_client *client = hdev->nic_client;
2229 	struct hnae3_handle *rhandle;
2230 	struct hnae3_handle *handle;
2231 	int state;
2232 	int i;
2233 
2234 	if (!client)
2235 		return;
2236 	state = hclge_get_mac_phy_link(hdev);
2237 	if (state != hdev->hw.mac.link) {
2238 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2239 			handle = &hdev->vport[i].nic;
2240 			client->ops->link_status_change(handle, state);
2241 			rhandle = &hdev->vport[i].roce;
2242 			if (rclient && rclient->ops->link_status_change)
2243 				rclient->ops->link_status_change(rhandle,
2244 								 state);
2245 		}
2246 		hdev->hw.mac.link = state;
2247 	}
2248 }
2249 
2250 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2251 {
2252 	struct hclge_sfp_speed_cmd *resp = NULL;
2253 	struct hclge_desc desc;
2254 	int ret;
2255 
2256 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
2257 	resp = (struct hclge_sfp_speed_cmd *)desc.data;
2258 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2259 	if (ret == -EOPNOTSUPP) {
2260 		dev_warn(&hdev->pdev->dev,
2261 			 "IMP does not support getting SFP speed %d\n", ret);
2262 		return ret;
2263 	} else if (ret) {
2264 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2265 		return ret;
2266 	}
2267 
2268 	*speed = resp->sfp_speed;
2269 
2270 	return 0;
2271 }
2272 
2273 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2274 {
2275 	struct hclge_mac mac = hdev->hw.mac;
2276 	int speed;
2277 	int ret;
2278 
2279 	/* get the speed from the SFP cmd when the phy
2280 	 * doesn't exist.
2281 	 */
2282 	if (mac.phydev)
2283 		return 0;
2284 
2285 	/* if IMP does not support getting SFP/qSFP speed, return directly */
2286 	if (!hdev->support_sfp_query)
2287 		return 0;
2288 
2289 	ret = hclge_get_sfp_speed(hdev, &speed);
2290 	if (ret == -EOPNOTSUPP) {
2291 		hdev->support_sfp_query = false;
2292 		return ret;
2293 	} else if (ret) {
2294 		return ret;
2295 	}
2296 
2297 	if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2298 		return 0; /* do nothing if no SFP */
2299 
2300 	/* must config full duplex for SFP */
2301 	return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2302 }
2303 
2304 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2305 {
2306 	struct hclge_vport *vport = hclge_get_vport(handle);
2307 	struct hclge_dev *hdev = vport->back;
2308 
2309 	return hclge_update_speed_duplex(hdev);
2310 }
2311 
2312 static int hclge_get_status(struct hnae3_handle *handle)
2313 {
2314 	struct hclge_vport *vport = hclge_get_vport(handle);
2315 	struct hclge_dev *hdev = vport->back;
2316 
2317 	hclge_update_link_status(hdev);
2318 
2319 	return hdev->hw.mac.link;
2320 }
2321 
2322 static void hclge_service_timer(struct timer_list *t)
2323 {
2324 	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2325 
2326 	mod_timer(&hdev->service_timer, jiffies + HZ);
2327 	hdev->hw_stats.stats_timer++;
2328 	hclge_task_schedule(hdev);
2329 }
2330 
2331 static void hclge_service_complete(struct hclge_dev *hdev)
2332 {
2333 	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2334 
2335 	/* Flush memory before next watchdog */
2336 	smp_mb__before_atomic();
2337 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2338 }
2339 
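/* hclge_check_event_cause: decode the vector0 interrupt source. Returns the
 * event type (reset, msix error, mailbox or other) and, for reset and
 * mailbox events, fills *clearval with the bits to be cleared later.
 */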
2340 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2341 {
2342 	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2343 
2344 	/* fetch the events from their corresponding regs */
2345 	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2346 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2347 	msix_src_reg = hclge_read_dev(&hdev->hw,
2348 				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2349 
2350 	/* Assumption: if reset and mailbox events happen to be reported
2351 	 * together, then we will only process the reset event in this go and
2352 	 * will defer the processing of the mailbox events. Since we will not
2353 	 * have cleared the RX CMDQ event this time, we will receive another
2354 	 * interrupt from the H/W just for the mailbox.
2355 	 */
2356 
2357 	/* check for vector0 reset event sources */
2358 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2359 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2360 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2361 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2362 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2363 		return HCLGE_VECTOR0_EVENT_RST;
2364 	}
2365 
2366 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2367 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2368 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2369 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2370 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2371 		return HCLGE_VECTOR0_EVENT_RST;
2372 	}
2373 
2374 	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2375 		dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2376 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2377 		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2378 		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2379 		return HCLGE_VECTOR0_EVENT_RST;
2380 	}
2381 
2382 	/* check for vector0 msix event source */
2383 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
2384 		return HCLGE_VECTOR0_EVENT_ERR;
2385 
2386 	/* check for vector0 mailbox(=CMDQ RX) event source */
2387 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2388 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2389 		*clearval = cmdq_src_reg;
2390 		return HCLGE_VECTOR0_EVENT_MBX;
2391 	}
2392 
2393 	return HCLGE_VECTOR0_EVENT_OTHER;
2394 }
2395 
2396 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2397 				    u32 regclr)
2398 {
2399 	switch (event_type) {
2400 	case HCLGE_VECTOR0_EVENT_RST:
2401 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2402 		break;
2403 	case HCLGE_VECTOR0_EVENT_MBX:
2404 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2405 		break;
2406 	default:
2407 		break;
2408 	}
2409 }
2410 
2411 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2412 {
2413 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2414 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2415 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2416 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2417 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2418 }
2419 
2420 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2421 {
2422 	writel(enable ? 1 : 0, vector->addr);
2423 }
2424 
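/* Vector0 (misc) interrupt handler. The vector is masked on entry and only
 * re-enabled here for mailbox events; for reset and error events it is
 * re-enabled later by the reset handling path.
 */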
2425 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2426 {
2427 	struct hclge_dev *hdev = data;
2428 	u32 event_cause;
2429 	u32 clearval;
2430 
2431 	hclge_enable_vector(&hdev->misc_vector, false);
2432 	event_cause = hclge_check_event_cause(hdev, &clearval);
2433 
2434 	/* vector 0 interrupt is shared with reset and mailbox source events. */
2435 	switch (event_cause) {
2436 	case HCLGE_VECTOR0_EVENT_ERR:
2437 		/* we do not know what type of reset is required now. This could
2438 		 * only be decided after we fetch the type of errors which
2439 		 * caused this event. Therefore, we will do the below for now:
2440 		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2441 		 *    have deferred the choice of reset type to be used.
2442 		 * 2. Schedule the reset service task.
2443 		 * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
2444 		 *    will fetch the correct type of reset. This is done by
2445 		 *    first decoding the types of errors.
2446 		 */
2447 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2448 		/* fall through */
2449 	case HCLGE_VECTOR0_EVENT_RST:
2450 		hclge_reset_task_schedule(hdev);
2451 		break;
2452 	case HCLGE_VECTOR0_EVENT_MBX:
2453 		/* If we are here then,
2454 		 * 1. Either we are not handling any mbx task and we are not
2455 		 *    scheduled as well
2456 		 *                        OR
2457 		 * 2. We could be handling an mbx task but nothing more is
2458 		 *    scheduled.
2459 		 * In both cases, we should schedule the mbx task as there are
2460 		 * more mbx messages reported by this interrupt.
2461 		 */
2462 		hclge_mbx_task_schedule(hdev);
2463 		break;
2464 	default:
2465 		dev_warn(&hdev->pdev->dev,
2466 			 "received unknown or unhandled event of vector0\n");
2467 		break;
2468 	}
2469 
2470 	/* clear the source of the interrupt if it is not caused by reset */
2471 	if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2472 		hclge_clear_event_cause(hdev, event_cause, clearval);
2473 		hclge_enable_vector(&hdev->misc_vector, true);
2474 	}
2475 
2476 	return IRQ_HANDLED;
2477 }
2478 
2479 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2480 {
2481 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2482 		dev_warn(&hdev->pdev->dev,
2483 			 "vector(vector_id %d) has been freed.\n", vector_id);
2484 		return;
2485 	}
2486 
2487 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2488 	hdev->num_msi_left += 1;
2489 	hdev->num_msi_used -= 1;
2490 }
2491 
2492 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2493 {
2494 	struct hclge_misc_vector *vector = &hdev->misc_vector;
2495 
2496 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2497 
2498 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2499 	hdev->vector_status[0] = 0;
2500 
2501 	hdev->num_msi_left -= 1;
2502 	hdev->num_msi_used += 1;
2503 }
2504 
2505 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2506 {
2507 	int ret;
2508 
2509 	hclge_get_misc_vector(hdev);
2510 
2511 	/* this irq is explicitly freed in hclge_misc_irq_uninit() */
2512 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2513 			  0, "hclge_misc", hdev);
2514 	if (ret) {
2515 		hclge_free_vector(hdev, 0);
2516 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2517 			hdev->misc_vector.vector_irq);
2518 	}
2519 
2520 	return ret;
2521 }
2522 
2523 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2524 {
2525 	free_irq(hdev->misc_vector.vector_irq, hdev);
2526 	hclge_free_vector(hdev, 0);
2527 }
2528 
2529 int hclge_notify_client(struct hclge_dev *hdev,
2530 			enum hnae3_reset_notify_type type)
2531 {
2532 	struct hnae3_client *client = hdev->nic_client;
2533 	u16 i;
2534 
2535 	if (!client->ops->reset_notify)
2536 		return -EOPNOTSUPP;
2537 
2538 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2539 		struct hnae3_handle *handle = &hdev->vport[i].nic;
2540 		int ret;
2541 
2542 		ret = client->ops->reset_notify(handle, type);
2543 		if (ret) {
2544 			dev_err(&hdev->pdev->dev,
2545 				"notify nic client failed %d(%d)\n", type, ret);
2546 			return ret;
2547 		}
2548 	}
2549 
2550 	return 0;
2551 }
2552 
2553 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2554 				    enum hnae3_reset_notify_type type)
2555 {
2556 	struct hnae3_client *client = hdev->roce_client;
2557 	int ret = 0;
2558 	u16 i;
2559 
2560 	if (!client)
2561 		return 0;
2562 
2563 	if (!client->ops->reset_notify)
2564 		return -EOPNOTSUPP;
2565 
2566 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2567 		struct hnae3_handle *handle = &hdev->vport[i].roce;
2568 
2569 		ret = client->ops->reset_notify(handle, type);
2570 		if (ret) {
2571 			dev_err(&hdev->pdev->dev,
2572 				"notify roce client failed %d(%d)\n",
2573 				type, ret);
2574 			return ret;
2575 		}
2576 	}
2577 
2578 	return ret;
2579 }
2580 
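/* hclge_reset_wait: poll until the hardware reports the current reset as
 * done. FLR completion is signalled through the HNAE3_FLR_DONE flag, the
 * other reset types through a status bit in a reset register; give up after
 * HCLGE_RESET_WAIT_CNT polls of HCLGE_RESET_WATI_MS each.
 */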
2581 static int hclge_reset_wait(struct hclge_dev *hdev)
2582 {
2583 #define HCLGE_RESET_WATI_MS	100
2584 #define HCLGE_RESET_WAIT_CNT	200
2585 	u32 val, reg, reg_bit;
2586 	u32 cnt = 0;
2587 
2588 	switch (hdev->reset_type) {
2589 	case HNAE3_IMP_RESET:
2590 		reg = HCLGE_GLOBAL_RESET_REG;
2591 		reg_bit = HCLGE_IMP_RESET_BIT;
2592 		break;
2593 	case HNAE3_GLOBAL_RESET:
2594 		reg = HCLGE_GLOBAL_RESET_REG;
2595 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
2596 		break;
2597 	case HNAE3_CORE_RESET:
2598 		reg = HCLGE_GLOBAL_RESET_REG;
2599 		reg_bit = HCLGE_CORE_RESET_BIT;
2600 		break;
2601 	case HNAE3_FUNC_RESET:
2602 		reg = HCLGE_FUN_RST_ING;
2603 		reg_bit = HCLGE_FUN_RST_ING_B;
2604 		break;
2605 	case HNAE3_FLR_RESET:
2606 		break;
2607 	default:
2608 		dev_err(&hdev->pdev->dev,
2609 			"Wait for unsupported reset type: %d\n",
2610 			hdev->reset_type);
2611 		return -EINVAL;
2612 	}
2613 
2614 	if (hdev->reset_type == HNAE3_FLR_RESET) {
2615 		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2616 		       cnt++ < HCLGE_RESET_WAIT_CNT)
2617 			msleep(HCLGE_RESET_WATI_MS);
2618 
2619 		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2620 			dev_err(&hdev->pdev->dev,
2621 				"flr wait timeout: %d\n", cnt);
2622 			return -EBUSY;
2623 		}
2624 
2625 		return 0;
2626 	}
2627 
2628 	val = hclge_read_dev(&hdev->hw, reg);
2629 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2630 		msleep(HCLGE_RESET_WATI_MS);
2631 		val = hclge_read_dev(&hdev->hw, reg);
2632 		cnt++;
2633 	}
2634 
2635 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
2636 		dev_warn(&hdev->pdev->dev,
2637 			 "Wait for reset timeout: %d\n", hdev->reset_type);
2638 		return -EBUSY;
2639 	}
2640 
2641 	return 0;
2642 }
2643 
2644 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2645 {
2646 	struct hclge_vf_rst_cmd *req;
2647 	struct hclge_desc desc;
2648 
2649 	req = (struct hclge_vf_rst_cmd *)desc.data;
2650 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2651 	req->dest_vfid = func_id;
2652 
2653 	if (reset)
2654 		req->vf_rst = 0x1;
2655 
2656 	return hclge_cmd_send(&hdev->hw, &desc, 1);
2657 }
2658 
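/* hclge_set_all_vf_rst: set or clear the FUNC_RST_ING state of every VF
 * vport and, when asserting the reset, inform each VF that is still alive.
 */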
2659 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2660 {
2661 	int i;
2662 
2663 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2664 		struct hclge_vport *vport = &hdev->vport[i];
2665 		int ret;
2666 
2667 		/* Send cmd to set/clear VF's FUNC_RST_ING */
2668 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2669 		if (ret) {
2670 			dev_err(&hdev->pdev->dev,
2671 				"set vf(%d) rst failed %d!\n",
2672 				vport->vport_id, ret);
2673 			return ret;
2674 		}
2675 
2676 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
2677 			continue;
2678 
2679 		/* Inform VF to process the reset.
2680 		 * hclge_inform_reset_assert_to_vf may fail if VF
2681 		 * driver is not loaded.
2682 		 */
2683 		ret = hclge_inform_reset_assert_to_vf(vport);
2684 		if (ret)
2685 			dev_warn(&hdev->pdev->dev,
2686 				 "inform reset to vf(%d) failed %d!\n",
2687 				 vport->vport_id, ret);
2688 	}
2689 
2690 	return 0;
2691 }
2692 
2693 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2694 {
2695 	struct hclge_desc desc;
2696 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2697 	int ret;
2698 
2699 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2700 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2701 	req->fun_reset_vfid = func_id;
2702 
2703 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2704 	if (ret)
2705 		dev_err(&hdev->pdev->dev,
2706 			"send function reset cmd fail, status =%d\n", ret);
2707 
2708 	return ret;
2709 }
2710 
2711 static void hclge_do_reset(struct hclge_dev *hdev)
2712 {
2713 	struct hnae3_handle *handle = &hdev->vport[0].nic;
2714 	struct pci_dev *pdev = hdev->pdev;
2715 	u32 val;
2716 
2717 	if (hclge_get_hw_reset_stat(handle)) {
2718 		dev_info(&pdev->dev, "Hardware reset not finished\n");
2719 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
2720 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
2721 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
2722 		return;
2723 	}
2724 
2725 	switch (hdev->reset_type) {
2726 	case HNAE3_GLOBAL_RESET:
2727 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2728 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2729 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2730 		dev_info(&pdev->dev, "Global Reset requested\n");
2731 		break;
2732 	case HNAE3_CORE_RESET:
2733 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2734 		hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2735 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2736 		dev_info(&pdev->dev, "Core Reset requested\n");
2737 		break;
2738 	case HNAE3_FUNC_RESET:
2739 		dev_info(&pdev->dev, "PF Reset requested\n");
2740 		/* schedule again to check later */
2741 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2742 		hclge_reset_task_schedule(hdev);
2743 		break;
2744 	case HNAE3_FLR_RESET:
2745 		dev_info(&pdev->dev, "FLR requested\n");
2746 		/* schedule again to check later */
2747 		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2748 		hclge_reset_task_schedule(hdev);
2749 		break;
2750 	default:
2751 		dev_warn(&pdev->dev,
2752 			 "Unsupported reset type: %d\n", hdev->reset_type);
2753 		break;
2754 	}
2755 }
2756 
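/* hclge_get_reset_level: resolve any UNKNOWN reset request into a concrete
 * type, then return the highest priority reset level pending in *addr and
 * clear it together with the lower levels it supersedes. Returns
 * HNAE3_NONE_RESET if a higher priority reset is already in progress.
 */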
2757 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2758 						   unsigned long *addr)
2759 {
2760 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2761 
2762 	/* first, resolve any unknown reset type to the known type(s) */
2763 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
2764 		/* we will intentionally ignore any errors from this function
2765 		 * as we will end up in *some* reset request in any case
2766 		 */
2767 		hclge_handle_hw_msix_error(hdev, addr);
2768 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
2769 		/* We deferred the clearing of the error event which caused the
2770 		 * interrupt since it was not possible to do that in interrupt
2771 		 * context (and this is the reason we introduced the new
2772 		 * UNKNOWN reset type). Now that the errors have been handled
2773 		 * and cleared in hardware, we can safely enable interrupts.
2774 		 * This is an exception to the norm.
2775 		 */
2776 		hclge_enable_vector(&hdev->misc_vector, true);
2777 	}
2778 
2779 	/* return the highest priority reset level amongst all */
2780 	if (test_bit(HNAE3_IMP_RESET, addr)) {
2781 		rst_level = HNAE3_IMP_RESET;
2782 		clear_bit(HNAE3_IMP_RESET, addr);
2783 		clear_bit(HNAE3_GLOBAL_RESET, addr);
2784 		clear_bit(HNAE3_CORE_RESET, addr);
2785 		clear_bit(HNAE3_FUNC_RESET, addr);
2786 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
2787 		rst_level = HNAE3_GLOBAL_RESET;
2788 		clear_bit(HNAE3_GLOBAL_RESET, addr);
2789 		clear_bit(HNAE3_CORE_RESET, addr);
2790 		clear_bit(HNAE3_FUNC_RESET, addr);
2791 	} else if (test_bit(HNAE3_CORE_RESET, addr)) {
2792 		rst_level = HNAE3_CORE_RESET;
2793 		clear_bit(HNAE3_CORE_RESET, addr);
2794 		clear_bit(HNAE3_FUNC_RESET, addr);
2795 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
2796 		rst_level = HNAE3_FUNC_RESET;
2797 		clear_bit(HNAE3_FUNC_RESET, addr);
2798 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
2799 		rst_level = HNAE3_FLR_RESET;
2800 		clear_bit(HNAE3_FLR_RESET, addr);
2801 	}
2802 
2803 	if (hdev->reset_type != HNAE3_NONE_RESET &&
2804 	    rst_level < hdev->reset_type)
2805 		return HNAE3_NONE_RESET;
2806 
2807 	return rst_level;
2808 }
2809 
2810 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2811 {
2812 	u32 clearval = 0;
2813 
2814 	switch (hdev->reset_type) {
2815 	case HNAE3_IMP_RESET:
2816 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2817 		break;
2818 	case HNAE3_GLOBAL_RESET:
2819 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2820 		break;
2821 	case HNAE3_CORE_RESET:
2822 		clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2823 		break;
2824 	default:
2825 		break;
2826 	}
2827 
2828 	if (!clearval)
2829 		return;
2830 
2831 	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2832 	hclge_enable_vector(&hdev->misc_vector, true);
2833 }
2834 
2835 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
2836 {
2837 	int ret = 0;
2838 
2839 	switch (hdev->reset_type) {
2840 	case HNAE3_FUNC_RESET:
2841 		/* fall through */
2842 	case HNAE3_FLR_RESET:
2843 		ret = hclge_set_all_vf_rst(hdev, true);
2844 		break;
2845 	default:
2846 		break;
2847 	}
2848 
2849 	return ret;
2850 }
2851 
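/* Per reset type preparation done just before waiting for the hardware:
 * assert the function reset through the firmware, mark the FLR down state,
 * or set the IMP reset bit in HCLGE_PF_OTHER_INT_REG.
 */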
2852 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
2853 {
2854 	u32 reg_val;
2855 	int ret = 0;
2856 
2857 	switch (hdev->reset_type) {
2858 	case HNAE3_FUNC_RESET:
2859 		/* There is no mechanism for the PF to know if the VF has
2860 		 * stopped IO; for now, just wait 100 ms for the VF to stop IO
2861 		 */
2862 		msleep(100);
2863 		ret = hclge_func_reset_cmd(hdev, 0);
2864 		if (ret) {
2865 			dev_err(&hdev->pdev->dev,
2866 				"asserting function reset fail %d!\n", ret);
2867 			return ret;
2868 		}
2869 
2870 		/* After performing a PF reset, it is not necessary to do the
2871 		 * mailbox handling or send any command to firmware, because
2872 		 * any mailbox handling or command to firmware is only valid
2873 		 * after hclge_cmd_init is called.
2874 		 */
2875 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2876 		break;
2877 	case HNAE3_FLR_RESET:
2878 		/* There is no mechanism for the PF to know if the VF has
2879 		 * stopped IO; for now, just wait 100 ms for the VF to stop IO
2880 		 */
2881 		msleep(100);
2882 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2883 		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
2884 		break;
2885 	case HNAE3_IMP_RESET:
2886 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
2887 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
2888 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
2889 		break;
2890 	default:
2891 		break;
2892 	}
2893 
2894 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
2895 
2896 	return ret;
2897 }
2898 
2899 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
2900 {
2901 #define MAX_RESET_FAIL_CNT 5
2902 #define RESET_UPGRADE_DELAY_SEC 10
2903 
2904 	if (hdev->reset_pending) {
2905 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
2906 			 hdev->reset_pending);
2907 		return true;
2908 	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
2909 		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
2910 		    BIT(HCLGE_IMP_RESET_BIT))) {
2911 		dev_info(&hdev->pdev->dev,
2912 			 "reset failed because IMP Reset is pending\n");
2913 		hclge_clear_reset_cause(hdev);
2914 		return false;
2915 	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
2916 		hdev->reset_fail_cnt++;
2917 		if (is_timeout) {
2918 			set_bit(hdev->reset_type, &hdev->reset_pending);
2919 			dev_info(&hdev->pdev->dev,
2920 				 "re-schedule to wait for hw reset done\n");
2921 			return true;
2922 		}
2923 
2924 		dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
2925 		hclge_clear_reset_cause(hdev);
2926 		mod_timer(&hdev->reset_timer,
2927 			  jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
2928 
2929 		return false;
2930 	}
2931 
2932 	hclge_clear_reset_cause(hdev);
2933 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
2934 	return false;
2935 }
2936 
2937 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
2938 {
2939 	int ret = 0;
2940 
2941 	switch (hdev->reset_type) {
2942 	case HNAE3_FUNC_RESET:
2943 		/* fall through */
2944 	case HNAE3_FLR_RESET:
2945 		ret = hclge_set_all_vf_rst(hdev, false);
2946 		break;
2947 	default:
2948 		break;
2949 	}
2950 
2951 	return ret;
2952 }
2953 
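/* hclge_reset: full PF reset sequence. Clients are notified down, the reset
 * is asserted and waited for, then the ae device and clients are
 * re-initialized and brought back up. Any failure is handed to
 * hclge_reset_err_handle(), which may re-schedule the reset.
 */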
2954 static void hclge_reset(struct hclge_dev *hdev)
2955 {
2956 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2957 	bool is_timeout = false;
2958 	int ret;
2959 
2960 	/* Initialize ae_dev reset status as well, in case enet layer wants to
2961 	 * know if device is undergoing reset
2962 	 */
2963 	ae_dev->reset_type = hdev->reset_type;
2964 	hdev->reset_count++;
2965 	/* perform reset of the stack & ae device for a client */
2966 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
2967 	if (ret)
2968 		goto err_reset;
2969 
2970 	ret = hclge_reset_prepare_down(hdev);
2971 	if (ret)
2972 		goto err_reset;
2973 
2974 	rtnl_lock();
2975 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2976 	if (ret)
2977 		goto err_reset_lock;
2978 
2979 	rtnl_unlock();
2980 
2981 	ret = hclge_reset_prepare_wait(hdev);
2982 	if (ret)
2983 		goto err_reset;
2984 
2985 	if (hclge_reset_wait(hdev)) {
2986 		is_timeout = true;
2987 		goto err_reset;
2988 	}
2989 
2990 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
2991 	if (ret)
2992 		goto err_reset;
2993 
2994 	rtnl_lock();
2995 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2996 	if (ret)
2997 		goto err_reset_lock;
2998 
2999 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3000 	if (ret)
3001 		goto err_reset_lock;
3002 
3003 	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3004 	if (ret)
3005 		goto err_reset_lock;
3006 
3007 	ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3008 	if (ret)
3009 		goto err_reset_lock;
3010 
3011 	hclge_clear_reset_cause(hdev);
3012 
3013 	ret = hclge_reset_prepare_up(hdev);
3014 	if (ret)
3015 		goto err_reset_lock;
3016 
3017 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3018 	if (ret)
3019 		goto err_reset_lock;
3020 
3021 	rtnl_unlock();
3022 
3023 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3024 	if (ret)
3025 		goto err_reset;
3026 
3027 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3028 	if (ret)
3029 		goto err_reset;
3030 
3031 	hdev->last_reset_time = jiffies;
3032 	hdev->reset_fail_cnt = 0;
3033 	ae_dev->reset_type = HNAE3_NONE_RESET;
3034 	del_timer(&hdev->reset_timer);
3035 
3036 	return;
3037 
3038 err_reset_lock:
3039 	rtnl_unlock();
3040 err_reset:
3041 	if (hclge_reset_err_handle(hdev, is_timeout))
3042 		hclge_reset_task_schedule(hdev);
3043 }
3044 
3045 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3046 {
3047 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3048 	struct hclge_dev *hdev = ae_dev->priv;
3049 
3050 	/* We might end up getting called broadly because of the 2 cases below:
3051 	 * 1. A recoverable error was conveyed through APEI and the only way
3052 	 *    to bring normalcy is to reset.
3053 	 * 2. A new reset request from the stack due to timeout
3054 	 *
3055 	 * For the first case, the error event might not have an ae handle
3056 	 * available. Check if this is a new reset request and we are not here
3057 	 * just because the last reset attempt did not succeed and the
3058 	 * watchdog hit us again. We will know this if the last reset request
3059 	 * did not occur very recently (watchdog timer = 5*HZ, check after a
3060 	 * sufficiently large time, say 4*5*HZ). In case of a new request we
3061 	 * reset the "reset level" to PF reset. If it is a repeat of the most
3062 	 * recent reset request then we want to throttle it, so we will not
3063 	 * allow it again before 3*HZ has elapsed.
3064 	 */
3065 	if (!handle)
3066 		handle = &hdev->vport[0].nic;
3067 
3068 	if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3069 		return;
3070 	else if (hdev->default_reset_request)
3071 		hdev->reset_level =
3072 			hclge_get_reset_level(hdev,
3073 					      &hdev->default_reset_request);
3074 	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3075 		hdev->reset_level = HNAE3_FUNC_RESET;
3076 
3077 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3078 		 hdev->reset_level);
3079 
3080 	/* request reset & schedule reset task */
3081 	set_bit(hdev->reset_level, &hdev->reset_request);
3082 	hclge_reset_task_schedule(hdev);
3083 
3084 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3085 		hdev->reset_level++;
3086 }
3087 
3088 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3089 					enum hnae3_reset_type rst_type)
3090 {
3091 	struct hclge_dev *hdev = ae_dev->priv;
3092 
3093 	set_bit(rst_type, &hdev->default_reset_request);
3094 }
3095 
3096 static void hclge_reset_timer(struct timer_list *t)
3097 {
3098 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3099 
3100 	dev_info(&hdev->pdev->dev,
3101 		 "triggering global reset in reset timer\n");
3102 	set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3103 	hclge_reset_event(hdev->pdev, NULL);
3104 }
3105 
3106 static void hclge_reset_subtask(struct hclge_dev *hdev)
3107 {
3108 	/* check if there is any ongoing reset in the hardware. This status can
3109 	 * be checked from reset_pending. If there is, we need to wait for the
3110 	 * hardware to complete the reset.
3111 	 *    a. If we are able to figure out in reasonable time that the
3112 	 *       hardware has fully reset, we can proceed with the driver and
3113 	 *       client reset.
3114 	 *    b. Else, we can come back later to check this status, so
3115 	 *       re-schedule now.
3116 	 */
3117 	hdev->last_reset_time = jiffies;
3118 	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3119 	if (hdev->reset_type != HNAE3_NONE_RESET)
3120 		hclge_reset(hdev);
3121 
3122 	/* check if we got any *new* reset requests to be honored */
3123 	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3124 	if (hdev->reset_type != HNAE3_NONE_RESET)
3125 		hclge_do_reset(hdev);
3126 
3127 	hdev->reset_type = HNAE3_NONE_RESET;
3128 }
3129 
3130 static void hclge_reset_service_task(struct work_struct *work)
3131 {
3132 	struct hclge_dev *hdev =
3133 		container_of(work, struct hclge_dev, rst_service_task);
3134 
3135 	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3136 		return;
3137 
3138 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3139 
3140 	hclge_reset_subtask(hdev);
3141 
3142 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3143 }
3144 
3145 static void hclge_mailbox_service_task(struct work_struct *work)
3146 {
3147 	struct hclge_dev *hdev =
3148 		container_of(work, struct hclge_dev, mbx_service_task);
3149 
3150 	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3151 		return;
3152 
3153 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3154 
3155 	hclge_mbx_handler(hdev);
3156 
3157 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3158 }
3159 
3160 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3161 {
3162 	int i;
3163 
3164 	/* start from vport 1 because the PF (vport 0) is always alive */
3165 	for (i = 1; i < hdev->num_alloc_vport; i++) {
3166 		struct hclge_vport *vport = &hdev->vport[i];
3167 
3168 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3169 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3170 
3171 		/* If vf is not alive, set to default value */
3172 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3173 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3174 	}
3175 }
3176 
3177 static void hclge_service_task(struct work_struct *work)
3178 {
3179 	struct hclge_dev *hdev =
3180 		container_of(work, struct hclge_dev, service_task);
3181 
3182 	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3183 		hclge_update_stats_for_all(hdev);
3184 		hdev->hw_stats.stats_timer = 0;
3185 	}
3186 
3187 	hclge_update_speed_duplex(hdev);
3188 	hclge_update_link_status(hdev);
3189 	hclge_update_vport_alive(hdev);
3190 	hclge_service_complete(hdev);
3191 }
3192 
3193 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3194 {
3195 	/* VF handle has no client */
3196 	if (!handle->client)
3197 		return container_of(handle, struct hclge_vport, nic);
3198 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
3199 		return container_of(handle, struct hclge_vport, roce);
3200 	else
3201 		return container_of(handle, struct hclge_vport, nic);
3202 }
3203 
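/* hclge_get_vector: hand out up to vector_num unused interrupt vectors to
 * the requesting vport, skipping vector 0 which is reserved for the misc
 * interrupt. Returns the number of vectors actually allocated.
 */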
3204 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3205 			    struct hnae3_vector_info *vector_info)
3206 {
3207 	struct hclge_vport *vport = hclge_get_vport(handle);
3208 	struct hnae3_vector_info *vector = vector_info;
3209 	struct hclge_dev *hdev = vport->back;
3210 	int alloc = 0;
3211 	int i, j;
3212 
3213 	vector_num = min(hdev->num_msi_left, vector_num);
3214 
3215 	for (j = 0; j < vector_num; j++) {
3216 		for (i = 1; i < hdev->num_msi; i++) {
3217 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3218 				vector->vector = pci_irq_vector(hdev->pdev, i);
3219 				vector->io_addr = hdev->hw.io_base +
3220 					HCLGE_VECTOR_REG_BASE +
3221 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
3222 					vport->vport_id *
3223 					HCLGE_VECTOR_VF_OFFSET;
3224 				hdev->vector_status[i] = vport->vport_id;
3225 				hdev->vector_irq[i] = vector->vector;
3226 
3227 				vector++;
3228 				alloc++;
3229 
3230 				break;
3231 			}
3232 		}
3233 	}
3234 	hdev->num_msi_left -= alloc;
3235 	hdev->num_msi_used += alloc;
3236 
3237 	return alloc;
3238 }
3239 
3240 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3241 {
3242 	int i;
3243 
3244 	for (i = 0; i < hdev->num_msi; i++)
3245 		if (vector == hdev->vector_irq[i])
3246 			return i;
3247 
3248 	return -EINVAL;
3249 }
3250 
3251 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3252 {
3253 	struct hclge_vport *vport = hclge_get_vport(handle);
3254 	struct hclge_dev *hdev = vport->back;
3255 	int vector_id;
3256 
3257 	vector_id = hclge_get_vector_index(hdev, vector);
3258 	if (vector_id < 0) {
3259 		dev_err(&hdev->pdev->dev,
3260 			"Get vector index fail. vector_id =%d\n", vector_id);
3261 		return vector_id;
3262 	}
3263 
3264 	hclge_free_vector(hdev, vector_id);
3265 
3266 	return 0;
3267 }
3268 
3269 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3270 {
3271 	return HCLGE_RSS_KEY_SIZE;
3272 }
3273 
3274 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3275 {
3276 	return HCLGE_RSS_IND_TBL_SIZE;
3277 }
3278 
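/* hclge_set_rss_algo_key: write the RSS hash algorithm and hash key to
 * hardware. The key is split into three chunks of at most
 * HCLGE_RSS_HASH_KEY_NUM bytes, selected by the key_offset field.
 */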
3279 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3280 				  const u8 hfunc, const u8 *key)
3281 {
3282 	struct hclge_rss_config_cmd *req;
3283 	struct hclge_desc desc;
3284 	int key_offset;
3285 	int key_size;
3286 	int ret;
3287 
3288 	req = (struct hclge_rss_config_cmd *)desc.data;
3289 
3290 	for (key_offset = 0; key_offset < 3; key_offset++) {
3291 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3292 					   false);
3293 
3294 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3295 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3296 
3297 		if (key_offset == 2)
3298 			key_size =
3299 			HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3300 		else
3301 			key_size = HCLGE_RSS_HASH_KEY_NUM;
3302 
3303 		memcpy(req->hash_key,
3304 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3305 
3306 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3307 		if (ret) {
3308 			dev_err(&hdev->pdev->dev,
3309 				"Configure RSS config fail, status = %d\n",
3310 				ret);
3311 			return ret;
3312 		}
3313 	}
3314 	return 0;
3315 }
3316 
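/* hclge_set_rss_indir_table: program the RSS indirection table,
 * HCLGE_RSS_CFG_TBL_SIZE entries per command descriptor.
 */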
3317 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3318 {
3319 	struct hclge_rss_indirection_table_cmd *req;
3320 	struct hclge_desc desc;
3321 	int i, j;
3322 	int ret;
3323 
3324 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3325 
3326 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3327 		hclge_cmd_setup_basic_desc
3328 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3329 
3330 		req->start_table_index =
3331 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3332 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3333 
3334 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3335 			req->rss_result[j] =
3336 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3337 
3338 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3339 		if (ret) {
3340 			dev_err(&hdev->pdev->dev,
3341 				"Configure rss indir table fail,status = %d\n",
3342 				ret);
3343 			return ret;
3344 		}
3345 	}
3346 	return 0;
3347 }
3348 
3349 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3350 				 u16 *tc_size, u16 *tc_offset)
3351 {
3352 	struct hclge_rss_tc_mode_cmd *req;
3353 	struct hclge_desc desc;
3354 	int ret;
3355 	int i;
3356 
3357 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3358 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3359 
3360 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3361 		u16 mode = 0;
3362 
3363 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3364 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3365 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3366 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3367 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3368 
3369 		req->rss_tc_mode[i] = cpu_to_le16(mode);
3370 	}
3371 
3372 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3373 	if (ret)
3374 		dev_err(&hdev->pdev->dev,
3375 			"Configure rss tc mode fail, status = %d\n", ret);
3376 
3377 	return ret;
3378 }
3379 
3380 static void hclge_get_rss_type(struct hclge_vport *vport)
3381 {
3382 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
3383 	    vport->rss_tuple_sets.ipv4_udp_en ||
3384 	    vport->rss_tuple_sets.ipv4_sctp_en ||
3385 	    vport->rss_tuple_sets.ipv6_tcp_en ||
3386 	    vport->rss_tuple_sets.ipv6_udp_en ||
3387 	    vport->rss_tuple_sets.ipv6_sctp_en)
3388 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3389 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3390 		 vport->rss_tuple_sets.ipv6_fragment_en)
3391 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3392 	else
3393 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3394 }
3395 
3396 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3397 {
3398 	struct hclge_rss_input_tuple_cmd *req;
3399 	struct hclge_desc desc;
3400 	int ret;
3401 
3402 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3403 
3404 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3405 
3406 	/* Get the tuple cfg from pf */
3407 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3408 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3409 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3410 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3411 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3412 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3413 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3414 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3415 	hclge_get_rss_type(&hdev->vport[0]);
3416 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3417 	if (ret)
3418 		dev_err(&hdev->pdev->dev,
3419 			"Configure rss input fail, status = %d\n", ret);
3420 	return ret;
3421 }
3422 
3423 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3424 			 u8 *key, u8 *hfunc)
3425 {
3426 	struct hclge_vport *vport = hclge_get_vport(handle);
3427 	int i;
3428 
3429 	/* Get hash algorithm */
3430 	if (hfunc) {
3431 		switch (vport->rss_algo) {
3432 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3433 			*hfunc = ETH_RSS_HASH_TOP;
3434 			break;
3435 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
3436 			*hfunc = ETH_RSS_HASH_XOR;
3437 			break;
3438 		default:
3439 			*hfunc = ETH_RSS_HASH_UNKNOWN;
3440 			break;
3441 		}
3442 	}
3443 
3444 	/* Get the RSS Key required by the user */
3445 	if (key)
3446 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3447 
3448 	/* Get indirect table */
3449 	if (indir)
3450 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3451 			indir[i] = vport->rss_indirection_tbl[i];
3452 
3453 	return 0;
3454 }
3455 
3456 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3457 			 const u8 *key, const u8 hfunc)
3458 {
3459 	struct hclge_vport *vport = hclge_get_vport(handle);
3460 	struct hclge_dev *hdev = vport->back;
3461 	u8 hash_algo;
3462 	int ret, i;
3463 
3464 	/* Set the RSS Hash Key if specified by the user */
3465 	if (key) {
3466 		switch (hfunc) {
3467 		case ETH_RSS_HASH_TOP:
3468 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3469 			break;
3470 		case ETH_RSS_HASH_XOR:
3471 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3472 			break;
3473 		case ETH_RSS_HASH_NO_CHANGE:
3474 			hash_algo = vport->rss_algo;
3475 			break;
3476 		default:
3477 			return -EINVAL;
3478 		}
3479 
3480 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3481 		if (ret)
3482 			return ret;
3483 
3484 		/* Update the shadow RSS key with the user specified key */
3485 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3486 		vport->rss_algo = hash_algo;
3487 	}
3488 
3489 	/* Update the shadow RSS table with user specified qids */
3490 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3491 		vport->rss_indirection_tbl[i] = indir[i];
3492 
3493 	/* Update the hardware */
3494 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3495 }
3496 
3497 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3498 {
3499 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3500 
3501 	if (nfc->data & RXH_L4_B_2_3)
3502 		hash_sets |= HCLGE_D_PORT_BIT;
3503 	else
3504 		hash_sets &= ~HCLGE_D_PORT_BIT;
3505 
3506 	if (nfc->data & RXH_IP_SRC)
3507 		hash_sets |= HCLGE_S_IP_BIT;
3508 	else
3509 		hash_sets &= ~HCLGE_S_IP_BIT;
3510 
3511 	if (nfc->data & RXH_IP_DST)
3512 		hash_sets |= HCLGE_D_IP_BIT;
3513 	else
3514 		hash_sets &= ~HCLGE_D_IP_BIT;
3515 
3516 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3517 		hash_sets |= HCLGE_V_TAG_BIT;
3518 
3519 	return hash_sets;
3520 }
3521 
3522 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3523 			       struct ethtool_rxnfc *nfc)
3524 {
3525 	struct hclge_vport *vport = hclge_get_vport(handle);
3526 	struct hclge_dev *hdev = vport->back;
3527 	struct hclge_rss_input_tuple_cmd *req;
3528 	struct hclge_desc desc;
3529 	u8 tuple_sets;
3530 	int ret;
3531 
3532 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3533 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
3534 		return -EINVAL;
3535 
3536 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3537 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3538 
3539 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3540 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3541 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3542 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3543 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3544 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3545 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3546 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3547 
3548 	tuple_sets = hclge_get_rss_hash_bits(nfc);
3549 	switch (nfc->flow_type) {
3550 	case TCP_V4_FLOW:
3551 		req->ipv4_tcp_en = tuple_sets;
3552 		break;
3553 	case TCP_V6_FLOW:
3554 		req->ipv6_tcp_en = tuple_sets;
3555 		break;
3556 	case UDP_V4_FLOW:
3557 		req->ipv4_udp_en = tuple_sets;
3558 		break;
3559 	case UDP_V6_FLOW:
3560 		req->ipv6_udp_en = tuple_sets;
3561 		break;
3562 	case SCTP_V4_FLOW:
3563 		req->ipv4_sctp_en = tuple_sets;
3564 		break;
3565 	case SCTP_V6_FLOW:
3566 		if ((nfc->data & RXH_L4_B_0_1) ||
3567 		    (nfc->data & RXH_L4_B_2_3))
3568 			return -EINVAL;
3569 
3570 		req->ipv6_sctp_en = tuple_sets;
3571 		break;
3572 	case IPV4_FLOW:
3573 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3574 		break;
3575 	case IPV6_FLOW:
3576 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3577 		break;
3578 	default:
3579 		return -EINVAL;
3580 	}
3581 
3582 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3583 	if (ret) {
3584 		dev_err(&hdev->pdev->dev,
3585 			"Set rss tuple fail, status = %d\n", ret);
3586 		return ret;
3587 	}
3588 
3589 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3590 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3591 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3592 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3593 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3594 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3595 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3596 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3597 	hclge_get_rss_type(vport);
3598 	return 0;
3599 }
3600 
3601 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3602 			       struct ethtool_rxnfc *nfc)
3603 {
3604 	struct hclge_vport *vport = hclge_get_vport(handle);
3605 	u8 tuple_sets;
3606 
3607 	nfc->data = 0;
3608 
3609 	switch (nfc->flow_type) {
3610 	case TCP_V4_FLOW:
3611 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3612 		break;
3613 	case UDP_V4_FLOW:
3614 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3615 		break;
3616 	case TCP_V6_FLOW:
3617 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3618 		break;
3619 	case UDP_V6_FLOW:
3620 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3621 		break;
3622 	case SCTP_V4_FLOW:
3623 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3624 		break;
3625 	case SCTP_V6_FLOW:
3626 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3627 		break;
3628 	case IPV4_FLOW:
3629 	case IPV6_FLOW:
3630 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3631 		break;
3632 	default:
3633 		return -EINVAL;
3634 	}
3635 
3636 	if (!tuple_sets)
3637 		return 0;
3638 
3639 	if (tuple_sets & HCLGE_D_PORT_BIT)
3640 		nfc->data |= RXH_L4_B_2_3;
3641 	if (tuple_sets & HCLGE_S_PORT_BIT)
3642 		nfc->data |= RXH_L4_B_0_1;
3643 	if (tuple_sets & HCLGE_D_IP_BIT)
3644 		nfc->data |= RXH_IP_DST;
3645 	if (tuple_sets & HCLGE_S_IP_BIT)
3646 		nfc->data |= RXH_IP_SRC;
3647 
3648 	return 0;
3649 }
3650 
3651 static int hclge_get_tc_size(struct hnae3_handle *handle)
3652 {
3653 	struct hclge_vport *vport = hclge_get_vport(handle);
3654 	struct hclge_dev *hdev = vport->back;
3655 
3656 	return hdev->rss_size_max;
3657 }
3658 
3659 int hclge_rss_init_hw(struct hclge_dev *hdev)
3660 {
3661 	struct hclge_vport *vport = hdev->vport;
3662 	u8 *rss_indir = vport[0].rss_indirection_tbl;
3663 	u16 rss_size = vport[0].alloc_rss_size;
3664 	u8 *key = vport[0].rss_hash_key;
3665 	u8 hfunc = vport[0].rss_algo;
3666 	u16 tc_offset[HCLGE_MAX_TC_NUM];
3667 	u16 tc_valid[HCLGE_MAX_TC_NUM];
3668 	u16 tc_size[HCLGE_MAX_TC_NUM];
3669 	u16 roundup_size;
3670 	int i, ret;
3671 
3672 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
3673 	if (ret)
3674 		return ret;
3675 
3676 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3677 	if (ret)
3678 		return ret;
3679 
3680 	ret = hclge_set_rss_input_tuple(hdev);
3681 	if (ret)
3682 		return ret;
3683 
3684 	/* Each TC has the same queue size, and the tc_size set to hardware is
3685 	 * the log2 of rss_size rounded up to a power of two; the actual queue
3686 	 * size is limited by the indirection table.
3687 	 */
3688 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3689 		dev_err(&hdev->pdev->dev,
3690 			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
3691 			rss_size);
3692 		return -EINVAL;
3693 	}
3694 
3695 	roundup_size = roundup_pow_of_two(rss_size);
3696 	roundup_size = ilog2(roundup_size);
3697 
3698 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3699 		tc_valid[i] = 0;
3700 
3701 		if (!(hdev->hw_tc_map & BIT(i)))
3702 			continue;
3703 
3704 		tc_valid[i] = 1;
3705 		tc_size[i] = roundup_size;
3706 		tc_offset[i] = rss_size * i;
3707 	}
3708 
3709 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3710 }
3711 
3712 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3713 {
3714 	struct hclge_vport *vport = hdev->vport;
3715 	int i, j;
3716 
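	/* Fill each vport's indirection table round-robin: entry i maps to
	 * queue (i % alloc_rss_size).
	 */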
3717 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3718 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3719 			vport[j].rss_indirection_tbl[i] =
3720 				i % vport[j].alloc_rss_size;
3721 	}
3722 }
3723 
3724 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3725 {
3726 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3727 	struct hclge_vport *vport = hdev->vport;
3728 
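	/* Use the simple hash algorithm on revision 0x21 and later hardware,
	 * older revisions use Toeplitz.
	 */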
3729 	if (hdev->pdev->revision >= 0x21)
3730 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3731 
3732 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3733 		vport[i].rss_tuple_sets.ipv4_tcp_en =
3734 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3735 		vport[i].rss_tuple_sets.ipv4_udp_en =
3736 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3737 		vport[i].rss_tuple_sets.ipv4_sctp_en =
3738 			HCLGE_RSS_INPUT_TUPLE_SCTP;
3739 		vport[i].rss_tuple_sets.ipv4_fragment_en =
3740 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3741 		vport[i].rss_tuple_sets.ipv6_tcp_en =
3742 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3743 		vport[i].rss_tuple_sets.ipv6_udp_en =
3744 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3745 		vport[i].rss_tuple_sets.ipv6_sctp_en =
3746 			HCLGE_RSS_INPUT_TUPLE_SCTP;
3747 		vport[i].rss_tuple_sets.ipv6_fragment_en =
3748 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3749 
3750 		vport[i].rss_algo = rss_algo;
3751 
3752 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
3753 		       HCLGE_RSS_KEY_SIZE);
3754 	}
3755 
3756 	hclge_rss_indir_init_cfg(hdev);
3757 }
3758 
3759 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3760 				int vector_id, bool en,
3761 				struct hnae3_ring_chain_node *ring_chain)
3762 {
3763 	struct hclge_dev *hdev = vport->back;
3764 	struct hnae3_ring_chain_node *node;
3765 	struct hclge_desc desc;
3766 	struct hclge_ctrl_vector_chain_cmd *req
3767 		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3768 	enum hclge_cmd_status status;
3769 	enum hclge_opcode_type op;
3770 	u16 tqp_type_and_id;
3771 	int i;
3772 
3773 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3774 	hclge_cmd_setup_basic_desc(&desc, op, false);
3775 	req->int_vector_id = vector_id;
3776 
3777 	i = 0;
3778 	for (node = ring_chain; node; node = node->next) {
3779 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3780 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
3781 				HCLGE_INT_TYPE_S,
3782 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3783 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3784 				HCLGE_TQP_ID_S, node->tqp_index);
3785 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3786 				HCLGE_INT_GL_IDX_S,
3787 				hnae3_get_field(node->int_gl_idx,
3788 						HNAE3_RING_GL_IDX_M,
3789 						HNAE3_RING_GL_IDX_S));
3790 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
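		/* Each descriptor holds at most HCLGE_VECTOR_ELEMENTS_PER_CMD
		 * ring entries; when it is full, send it and start a new one.
		 */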
3791 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3792 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3793 			req->vfid = vport->vport_id;
3794 
3795 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
3796 			if (status) {
3797 				dev_err(&hdev->pdev->dev,
3798 					"Map TQP fail, status is %d.\n",
3799 					status);
3800 				return -EIO;
3801 			}
3802 			i = 0;
3803 
3804 			hclge_cmd_setup_basic_desc(&desc,
3805 						   op,
3806 						   false);
3807 			req->int_vector_id = vector_id;
3808 		}
3809 	}
3810 
3811 	if (i > 0) {
3812 		req->int_cause_num = i;
3813 		req->vfid = vport->vport_id;
3814 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
3815 		if (status) {
3816 			dev_err(&hdev->pdev->dev,
3817 				"Map TQP fail, status is %d.\n", status);
3818 			return -EIO;
3819 		}
3820 	}
3821 
3822 	return 0;
3823 }
3824 
3825 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3826 				    int vector,
3827 				    struct hnae3_ring_chain_node *ring_chain)
3828 {
3829 	struct hclge_vport *vport = hclge_get_vport(handle);
3830 	struct hclge_dev *hdev = vport->back;
3831 	int vector_id;
3832 
3833 	vector_id = hclge_get_vector_index(hdev, vector);
3834 	if (vector_id < 0) {
3835 		dev_err(&hdev->pdev->dev,
3836 			"Get vector index fail. vector_id =%d\n", vector_id);
3837 		return vector_id;
3838 	}
3839 
3840 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3841 }
3842 
3843 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3844 				       int vector,
3845 				       struct hnae3_ring_chain_node *ring_chain)
3846 {
3847 	struct hclge_vport *vport = hclge_get_vport(handle);
3848 	struct hclge_dev *hdev = vport->back;
3849 	int vector_id, ret;
3850 
3851 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3852 		return 0;
3853 
3854 	vector_id = hclge_get_vector_index(hdev, vector);
3855 	if (vector_id < 0) {
3856 		dev_err(&handle->pdev->dev,
3857 			"Get vector index fail. ret =%d\n", vector_id);
3858 		return vector_id;
3859 	}
3860 
3861 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3862 	if (ret)
3863 		dev_err(&handle->pdev->dev,
3864 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3865 			vector_id,
3866 			ret);
3867 
3868 	return ret;
3869 }
3870 
3871 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3872 			       struct hclge_promisc_param *param)
3873 {
3874 	struct hclge_promisc_cfg_cmd *req;
3875 	struct hclge_desc desc;
3876 	int ret;
3877 
3878 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3879 
3880 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
3881 	req->vf_id = param->vf_id;
3882 
3883 	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
3884 	 * pdev revision 0x20; newer revisions support them. Setting these
3885 	 * two fields does not cause an error when the driver sends the
3886 	 * command to the firmware on revision 0x20.
3887 	 */
3888 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
3889 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
3890 
3891 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3892 	if (ret)
3893 		dev_err(&hdev->pdev->dev,
3894 			"Set promisc mode fail, status is %d.\n", ret);
3895 
3896 	return ret;
3897 }
3898 
3899 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3900 			      bool en_mc, bool en_bc, int vport_id)
3901 {
3902 	if (!param)
3903 		return;
3904 
3905 	memset(param, 0, sizeof(struct hclge_promisc_param));
3906 	if (en_uc)
3907 		param->enable = HCLGE_PROMISC_EN_UC;
3908 	if (en_mc)
3909 		param->enable |= HCLGE_PROMISC_EN_MC;
3910 	if (en_bc)
3911 		param->enable |= HCLGE_PROMISC_EN_BC;
3912 	param->vf_id = vport_id;
3913 }
3914 
3915 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3916 				  bool en_mc_pmc)
3917 {
3918 	struct hclge_vport *vport = hclge_get_vport(handle);
3919 	struct hclge_dev *hdev = vport->back;
3920 	struct hclge_promisc_param param;
3921 	bool en_bc_pmc = true;
3922 
3923 	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
3924 	 * is always bypassed. So broadcast promisc should be disabled until
3925 	 * the user enables promisc mode.
3926 	 */
3927 	if (handle->pdev->revision == 0x20)
3928 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
3929 
3930 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
3931 				 vport->vport_id);
3932 	return hclge_cmd_set_promisc_mode(hdev, &param);
3933 }
3934 
3935 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
3936 {
3937 	struct hclge_get_fd_mode_cmd *req;
3938 	struct hclge_desc desc;
3939 	int ret;
3940 
3941 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
3942 
3943 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
3944 
3945 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3946 	if (ret) {
3947 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
3948 		return ret;
3949 	}
3950 
3951 	*fd_mode = req->mode;
3952 
3953 	return ret;
3954 }
3955 
3956 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
3957 				   u32 *stage1_entry_num,
3958 				   u32 *stage2_entry_num,
3959 				   u16 *stage1_counter_num,
3960 				   u16 *stage2_counter_num)
3961 {
3962 	struct hclge_get_fd_allocation_cmd *req;
3963 	struct hclge_desc desc;
3964 	int ret;
3965 
3966 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
3967 
3968 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
3969 
3970 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3971 	if (ret) {
3972 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
3973 			ret);
3974 		return ret;
3975 	}
3976 
3977 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
3978 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
3979 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
3980 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
3981 
3982 	return ret;
3983 }
3984 
3985 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
3986 {
3987 	struct hclge_set_fd_key_config_cmd *req;
3988 	struct hclge_fd_key_cfg *stage;
3989 	struct hclge_desc desc;
3990 	int ret;
3991 
3992 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
3993 
3994 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
3995 	stage = &hdev->fd_cfg.key_cfg[stage_num];
3996 	req->stage = stage_num;
3997 	req->key_select = stage->key_sel;
3998 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
3999 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4000 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4001 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4002 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4003 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4004 
4005 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4006 	if (ret)
4007 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4008 
4009 	return ret;
4010 }
4011 
4012 static int hclge_init_fd_config(struct hclge_dev *hdev)
4013 {
4014 #define LOW_2_WORDS		0x03
4015 	struct hclge_fd_key_cfg *key_cfg;
4016 	int ret;
4017 
4018 	if (!hnae3_dev_fd_supported(hdev))
4019 		return 0;
4020 
4021 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4022 	if (ret)
4023 		return ret;
4024 
4025 	switch (hdev->fd_cfg.fd_mode) {
4026 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4027 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4028 		break;
4029 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4030 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4031 		break;
4032 	default:
4033 		dev_err(&hdev->pdev->dev,
4034 			"Unsupported flow director mode %d\n",
4035 			hdev->fd_cfg.fd_mode);
4036 		return -EOPNOTSUPP;
4037 	}
4038 
4039 	hdev->fd_cfg.proto_support =
4040 		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4041 		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4042 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4043 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4044 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4045 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4046 	key_cfg->outer_sipv6_word_en = 0;
4047 	key_cfg->outer_dipv6_word_en = 0;
4048 
4049 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4050 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4051 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4052 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4053 
4054 	/* With the max 400-bit key, ether type tuples are also supported */
4055 	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4056 		hdev->fd_cfg.proto_support |= ETHER_FLOW;
4057 		key_cfg->tuple_active |=
4058 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4059 	}
4060 
4061 	/* roce_type is used to filter roce frames
4062 	 * dst_vport is used to specify the rule
4063 	 */
4064 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4065 
4066 	ret = hclge_get_fd_allocation(hdev,
4067 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4068 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4069 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4070 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4071 	if (ret)
4072 		return ret;
4073 
4074 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4075 }
4076 
4077 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4078 				int loc, u8 *key, bool is_add)
4079 {
4080 	struct hclge_fd_tcam_config_1_cmd *req1;
4081 	struct hclge_fd_tcam_config_2_cmd *req2;
4082 	struct hclge_fd_tcam_config_3_cmd *req3;
4083 	struct hclge_desc desc[3];
4084 	int ret;
4085 
4086 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4087 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4088 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4089 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4090 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4091 
4092 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4093 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4094 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4095 
4096 	req1->stage = stage;
4097 	req1->xy_sel = sel_x ? 1 : 0;
4098 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4099 	req1->index = cpu_to_le32(loc);
4100 	req1->entry_vld = sel_x ? is_add : 0;
4101 
4102 	if (key) {
4103 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4104 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4105 		       sizeof(req2->tcam_data));
4106 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4107 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4108 	}
4109 
4110 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
4111 	if (ret)
4112 		dev_err(&hdev->pdev->dev,
4113 			"config tcam key fail, ret=%d\n",
4114 			ret);
4115 
4116 	return ret;
4117 }
4118 
4119 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4120 			      struct hclge_fd_ad_data *action)
4121 {
4122 	struct hclge_fd_ad_config_cmd *req;
4123 	struct hclge_desc desc;
4124 	u64 ad_data = 0;
4125 	int ret;
4126 
4127 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4128 
4129 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
4130 	req->index = cpu_to_le32(loc);
4131 	req->stage = stage;
4132 
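	/* Build the 64-bit action data: the rule id fields are set first and
	 * then shifted into the upper 32 bits, while the drop/queue/counter
	 * fields occupy the lower 32 bits.
	 */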
4133 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4134 		      action->write_rule_id_to_bd);
4135 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4136 			action->rule_id);
4137 	ad_data <<= 32;
4138 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4139 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4140 		      action->forward_to_direct_queue);
4141 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4142 			action->queue_id);
4143 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4144 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4145 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4146 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4147 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4148 			action->counter_id);
4149 
4150 	req->ad_data = cpu_to_le64(ad_data);
4151 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4152 	if (ret)
4153 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4154 
4155 	return ret;
4156 }
4157 
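/* Convert one tuple of the rule into its TCAM x/y key representation.
 * Returns true when key bytes are consumed for this tuple (including
 * tuples marked unused, which are left zero-filled), so the caller can
 * advance its key cursor.
 */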
4158 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4159 				   struct hclge_fd_rule *rule)
4160 {
4161 	u16 tmp_x_s, tmp_y_s;
4162 	u32 tmp_x_l, tmp_y_l;
4163 	int i;
4164 
4165 	if (rule->unused_tuple & tuple_bit)
4166 		return true;
4167 
4168 	switch (tuple_bit) {
4169 	case 0:
4170 		return false;
4171 	case BIT(INNER_DST_MAC):
4172 		for (i = 0; i < 6; i++) {
4173 			calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4174 			       rule->tuples_mask.dst_mac[i]);
4175 			calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4176 			       rule->tuples_mask.dst_mac[i]);
4177 		}
4178 
4179 		return true;
4180 	case BIT(INNER_SRC_MAC):
4181 		for (i = 0; i < 6; i++) {
4182 			calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4183 			       rule->tuples_mask.src_mac[i]);
4184 			calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4185 			       rule->tuples_mask.src_mac[i]);
4186 		}
4187 
4188 		return true;
4189 	case BIT(INNER_VLAN_TAG_FST):
4190 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4191 		       rule->tuples_mask.vlan_tag1);
4192 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4193 		       rule->tuples_mask.vlan_tag1);
4194 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4195 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4196 
4197 		return true;
4198 	case BIT(INNER_ETH_TYPE):
4199 		calc_x(tmp_x_s, rule->tuples.ether_proto,
4200 		       rule->tuples_mask.ether_proto);
4201 		calc_y(tmp_y_s, rule->tuples.ether_proto,
4202 		       rule->tuples_mask.ether_proto);
4203 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4204 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4205 
4206 		return true;
4207 	case BIT(INNER_IP_TOS):
4208 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4209 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4210 
4211 		return true;
4212 	case BIT(INNER_IP_PROTO):
4213 		calc_x(*key_x, rule->tuples.ip_proto,
4214 		       rule->tuples_mask.ip_proto);
4215 		calc_y(*key_y, rule->tuples.ip_proto,
4216 		       rule->tuples_mask.ip_proto);
4217 
4218 		return true;
4219 	case BIT(INNER_SRC_IP):
4220 		calc_x(tmp_x_l, rule->tuples.src_ip[3],
4221 		       rule->tuples_mask.src_ip[3]);
4222 		calc_y(tmp_y_l, rule->tuples.src_ip[3],
4223 		       rule->tuples_mask.src_ip[3]);
4224 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4225 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4226 
4227 		return true;
4228 	case BIT(INNER_DST_IP):
4229 		calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4230 		       rule->tuples_mask.dst_ip[3]);
4231 		calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4232 		       rule->tuples_mask.dst_ip[3]);
4233 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4234 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4235 
4236 		return true;
4237 	case BIT(INNER_SRC_PORT):
4238 		calc_x(tmp_x_s, rule->tuples.src_port,
4239 		       rule->tuples_mask.src_port);
4240 		calc_y(tmp_y_s, rule->tuples.src_port,
4241 		       rule->tuples_mask.src_port);
4242 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4243 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4244 
4245 		return true;
4246 	case BIT(INNER_DST_PORT):
4247 		calc_x(tmp_x_s, rule->tuples.dst_port,
4248 		       rule->tuples_mask.dst_port);
4249 		calc_y(tmp_y_s, rule->tuples.dst_port,
4250 		       rule->tuples_mask.dst_port);
4251 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4252 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4253 
4254 		return true;
4255 	default:
4256 		return false;
4257 	}
4258 }
4259 
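/* Encode a port number for the flow director meta data: host ports are
 * identified by PF id and VF id, network ports by the physical port id.
 */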
4260 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4261 				 u8 vf_id, u8 network_port_id)
4262 {
4263 	u32 port_number = 0;
4264 
4265 	if (port_type == HOST_PORT) {
4266 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4267 				pf_id);
4268 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4269 				vf_id);
4270 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4271 	} else {
4272 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4273 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
4274 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4275 	}
4276 
4277 	return port_number;
4278 }
4279 
4280 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4281 				       __le32 *key_x, __le32 *key_y,
4282 				       struct hclge_fd_rule *rule)
4283 {
4284 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4285 	u8 cur_pos = 0, tuple_size, shift_bits;
4286 	int i;
4287 
4288 	for (i = 0; i < MAX_META_DATA; i++) {
4289 		tuple_size = meta_data_key_info[i].key_length;
4290 		tuple_bit = key_cfg->meta_data_active & BIT(i);
4291 
4292 		switch (tuple_bit) {
4293 		case BIT(ROCE_TYPE):
4294 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4295 			cur_pos += tuple_size;
4296 			break;
4297 		case BIT(DST_VPORT):
4298 			port_number = hclge_get_port_number(HOST_PORT, 0,
4299 							    rule->vf_id, 0);
4300 			hnae3_set_field(meta_data,
4301 					GENMASK(cur_pos + tuple_size - 1, cur_pos),
4302 					cur_pos, port_number);
4303 			cur_pos += tuple_size;
4304 			break;
4305 		default:
4306 			break;
4307 		}
4308 	}
4309 
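	/* The meta data occupies the most significant bits of the key word,
	 * so shift the x/y values left past the unused low bits.
	 */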
4310 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4311 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4312 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
4313 
4314 	*key_x = cpu_to_le32(tmp_x << shift_bits);
4315 	*key_y = cpu_to_le32(tmp_y << shift_bits);
4316 }
4317 
4318 /* A complete key consists of a meta data key and a tuple key.
4319  * The meta data key is stored in the MSB region and the tuple key in
4320  * the LSB region; unused bits are filled with 0.
4321  */
4322 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4323 			    struct hclge_fd_rule *rule)
4324 {
4325 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4326 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4327 	u8 *cur_key_x, *cur_key_y;
4328 	int i, ret, tuple_size;
4329 	u8 meta_data_region;
4330 
4331 	memset(key_x, 0, sizeof(key_x));
4332 	memset(key_y, 0, sizeof(key_y));
4333 	cur_key_x = key_x;
4334 	cur_key_y = key_y;
4335 
4336 	for (i = 0; i < MAX_TUPLE; i++) {
4337 		bool tuple_valid;
4338 		u32 check_tuple;
4339 
4340 		tuple_size = tuple_key_info[i].key_length / 8;
4341 		check_tuple = key_cfg->tuple_active & BIT(i);
4342 
4343 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4344 						     cur_key_y, rule);
4345 		if (tuple_valid) {
4346 			cur_key_x += tuple_size;
4347 			cur_key_y += tuple_size;
4348 		}
4349 	}
4350 
4351 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4352 			MAX_META_DATA_LENGTH / 8;
4353 
4354 	hclge_fd_convert_meta_data(key_cfg,
4355 				   (__le32 *)(key_x + meta_data_region),
4356 				   (__le32 *)(key_y + meta_data_region),
4357 				   rule);
4358 
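	/* Write the y half of the TCAM entry first; the x half is written
	 * with the valid bit set, which activates the rule.
	 */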
4359 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4360 				   true);
4361 	if (ret) {
4362 		dev_err(&hdev->pdev->dev,
4363 			"fd key_y config fail, loc=%d, ret=%d\n",
4364 			rule->location, ret);
4365 		return ret;
4366 	}
4367 
4368 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4369 				   true);
4370 	if (ret)
4371 		dev_err(&hdev->pdev->dev,
4372 			"fd key_x config fail, loc=%d, ret=%d\n",
4373 			rule->location, ret);
4374 	return ret;
4375 }
4376 
4377 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4378 			       struct hclge_fd_rule *rule)
4379 {
4380 	struct hclge_fd_ad_data ad_data;
4381 
4382 	ad_data.ad_id = rule->location;
4383 
4384 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4385 		ad_data.drop_packet = true;
4386 		ad_data.forward_to_direct_queue = false;
4387 		ad_data.queue_id = 0;
4388 	} else {
4389 		ad_data.drop_packet = false;
4390 		ad_data.forward_to_direct_queue = true;
4391 		ad_data.queue_id = rule->queue_id;
4392 	}
4393 
4394 	ad_data.use_counter = false;
4395 	ad_data.counter_id = 0;
4396 
4397 	ad_data.use_next_stage = false;
4398 	ad_data.next_input_key = 0;
4399 
4400 	ad_data.write_rule_id_to_bd = true;
4401 	ad_data.rule_id = rule->location;
4402 
4403 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4404 }
4405 
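/* Validate the ethtool flow spec against the flow director capabilities
 * and record the tuple bits this rule does not use in *unused.
 */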
4406 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4407 			       struct ethtool_rx_flow_spec *fs, u32 *unused)
4408 {
4409 	struct ethtool_tcpip4_spec *tcp_ip4_spec;
4410 	struct ethtool_usrip4_spec *usr_ip4_spec;
4411 	struct ethtool_tcpip6_spec *tcp_ip6_spec;
4412 	struct ethtool_usrip6_spec *usr_ip6_spec;
4413 	struct ethhdr *ether_spec;
4414 
4415 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4416 		return -EINVAL;
4417 
4418 	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4419 		return -EOPNOTSUPP;
4420 
4421 	if ((fs->flow_type & FLOW_EXT) &&
4422 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4423 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4424 		return -EOPNOTSUPP;
4425 	}
4426 
4427 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4428 	case SCTP_V4_FLOW:
4429 	case TCP_V4_FLOW:
4430 	case UDP_V4_FLOW:
4431 		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4432 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4433 
4434 		if (!tcp_ip4_spec->ip4src)
4435 			*unused |= BIT(INNER_SRC_IP);
4436 
4437 		if (!tcp_ip4_spec->ip4dst)
4438 			*unused |= BIT(INNER_DST_IP);
4439 
4440 		if (!tcp_ip4_spec->psrc)
4441 			*unused |= BIT(INNER_SRC_PORT);
4442 
4443 		if (!tcp_ip4_spec->pdst)
4444 			*unused |= BIT(INNER_DST_PORT);
4445 
4446 		if (!tcp_ip4_spec->tos)
4447 			*unused |= BIT(INNER_IP_TOS);
4448 
4449 		break;
4450 	case IP_USER_FLOW:
4451 		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4452 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4453 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4454 
4455 		if (!usr_ip4_spec->ip4src)
4456 			*unused |= BIT(INNER_SRC_IP);
4457 
4458 		if (!usr_ip4_spec->ip4dst)
4459 			*unused |= BIT(INNER_DST_IP);
4460 
4461 		if (!usr_ip4_spec->tos)
4462 			*unused |= BIT(INNER_IP_TOS);
4463 
4464 		if (!usr_ip4_spec->proto)
4465 			*unused |= BIT(INNER_IP_PROTO);
4466 
4467 		if (usr_ip4_spec->l4_4_bytes)
4468 			return -EOPNOTSUPP;
4469 
4470 		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4471 			return -EOPNOTSUPP;
4472 
4473 		break;
4474 	case SCTP_V6_FLOW:
4475 	case TCP_V6_FLOW:
4476 	case UDP_V6_FLOW:
4477 		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4478 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4479 			BIT(INNER_IP_TOS);
4480 
4481 		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4482 		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4483 			*unused |= BIT(INNER_SRC_IP);
4484 
4485 		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4486 		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4487 			*unused |= BIT(INNER_DST_IP);
4488 
4489 		if (!tcp_ip6_spec->psrc)
4490 			*unused |= BIT(INNER_SRC_PORT);
4491 
4492 		if (!tcp_ip6_spec->pdst)
4493 			*unused |= BIT(INNER_DST_PORT);
4494 
4495 		if (tcp_ip6_spec->tclass)
4496 			return -EOPNOTSUPP;
4497 
4498 		break;
4499 	case IPV6_USER_FLOW:
4500 		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4501 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4502 			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4503 			BIT(INNER_DST_PORT);
4504 
4505 		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4506 		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4507 			*unused |= BIT(INNER_SRC_IP);
4508 
4509 		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4510 		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4511 			*unused |= BIT(INNER_DST_IP);
4512 
4513 		if (!usr_ip6_spec->l4_proto)
4514 			*unused |= BIT(INNER_IP_PROTO);
4515 
4516 		if (usr_ip6_spec->tclass)
4517 			return -EOPNOTSUPP;
4518 
4519 		if (usr_ip6_spec->l4_4_bytes)
4520 			return -EOPNOTSUPP;
4521 
4522 		break;
4523 	case ETHER_FLOW:
4524 		ether_spec = &fs->h_u.ether_spec;
4525 		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4526 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4527 			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4528 
4529 		if (is_zero_ether_addr(ether_spec->h_source))
4530 			*unused |= BIT(INNER_SRC_MAC);
4531 
4532 		if (is_zero_ether_addr(ether_spec->h_dest))
4533 			*unused |= BIT(INNER_DST_MAC);
4534 
4535 		if (!ether_spec->h_proto)
4536 			*unused |= BIT(INNER_ETH_TYPE);
4537 
4538 		break;
4539 	default:
4540 		return -EOPNOTSUPP;
4541 	}
4542 
4543 	if ((fs->flow_type & FLOW_EXT)) {
4544 		if (fs->h_ext.vlan_etype)
4545 			return -EOPNOTSUPP;
4546 		if (!fs->h_ext.vlan_tci)
4547 			*unused |= BIT(INNER_VLAN_TAG_FST);
4548 
4549 		if (fs->m_ext.vlan_tci) {
4550 			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4551 				return -EINVAL;
4552 		}
4553 	} else {
4554 		*unused |= BIT(INNER_VLAN_TAG_FST);
4555 	}
4556 
4557 	if (fs->flow_type & FLOW_MAC_EXT) {
4558 		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4559 			return -EOPNOTSUPP;
4560 
4561 		if (is_zero_ether_addr(fs->h_ext.h_dest))
4562 			*unused |= BIT(INNER_DST_MAC);
4563 		else
4564 			*unused &= ~(BIT(INNER_DST_MAC));
4565 	}
4566 
4567 	return 0;
4568 }
4569 
4570 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4571 {
4572 	struct hclge_fd_rule *rule = NULL;
4573 	struct hlist_node *node2;
4574 
4575 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4576 		if (rule->location >= location)
4577 			break;
4578 	}
4579 
4580 	return rule && rule->location == location;
4581 }
4582 
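/* The rule list is kept sorted by location. An existing rule at the same
 * location is removed first; when is_add is true the new rule is then
 * inserted behind its predecessor (or at the head of the list).
 */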
4583 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4584 				     struct hclge_fd_rule *new_rule,
4585 				     u16 location,
4586 				     bool is_add)
4587 {
4588 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
4589 	struct hlist_node *node2;
4590 
4591 	if (is_add && !new_rule)
4592 		return -EINVAL;
4593 
4594 	hlist_for_each_entry_safe(rule, node2,
4595 				  &hdev->fd_rule_list, rule_node) {
4596 		if (rule->location >= location)
4597 			break;
4598 		parent = rule;
4599 	}
4600 
4601 	if (rule && rule->location == location) {
4602 		hlist_del(&rule->rule_node);
4603 		kfree(rule);
4604 		hdev->hclge_fd_rule_num--;
4605 
4606 		if (!is_add)
4607 			return 0;
4608 
4609 	} else if (!is_add) {
4610 		dev_err(&hdev->pdev->dev,
4611 			"delete fail, rule %d is inexistent\n",
4612 			location);
4613 		return -EINVAL;
4614 	}
4615 
4616 	INIT_HLIST_NODE(&new_rule->rule_node);
4617 
4618 	if (parent)
4619 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4620 	else
4621 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4622 
4623 	hdev->hclge_fd_rule_num++;
4624 
4625 	return 0;
4626 }
4627 
4628 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4629 			      struct ethtool_rx_flow_spec *fs,
4630 			      struct hclge_fd_rule *rule)
4631 {
4632 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4633 
4634 	switch (flow_type) {
4635 	case SCTP_V4_FLOW:
4636 	case TCP_V4_FLOW:
4637 	case UDP_V4_FLOW:
4638 		rule->tuples.src_ip[3] =
4639 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4640 		rule->tuples_mask.src_ip[3] =
4641 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4642 
4643 		rule->tuples.dst_ip[3] =
4644 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4645 		rule->tuples_mask.dst_ip[3] =
4646 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4647 
4648 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4649 		rule->tuples_mask.src_port =
4650 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4651 
4652 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4653 		rule->tuples_mask.dst_port =
4654 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4655 
4656 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4657 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4658 
4659 		rule->tuples.ether_proto = ETH_P_IP;
4660 		rule->tuples_mask.ether_proto = 0xFFFF;
4661 
4662 		break;
4663 	case IP_USER_FLOW:
4664 		rule->tuples.src_ip[3] =
4665 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4666 		rule->tuples_mask.src_ip[3] =
4667 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4668 
4669 		rule->tuples.dst_ip[3] =
4670 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4671 		rule->tuples_mask.dst_ip[3] =
4672 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4673 
4674 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4675 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4676 
4677 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4678 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4679 
4680 		rule->tuples.ether_proto = ETH_P_IP;
4681 		rule->tuples_mask.ether_proto = 0xFFFF;
4682 
4683 		break;
4684 	case SCTP_V6_FLOW:
4685 	case TCP_V6_FLOW:
4686 	case UDP_V6_FLOW:
4687 		be32_to_cpu_array(rule->tuples.src_ip,
4688 				  fs->h_u.tcp_ip6_spec.ip6src, 4);
4689 		be32_to_cpu_array(rule->tuples_mask.src_ip,
4690 				  fs->m_u.tcp_ip6_spec.ip6src, 4);
4691 
4692 		be32_to_cpu_array(rule->tuples.dst_ip,
4693 				  fs->h_u.tcp_ip6_spec.ip6dst, 4);
4694 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
4695 				  fs->m_u.tcp_ip6_spec.ip6dst, 4);
4696 
4697 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4698 		rule->tuples_mask.src_port =
4699 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4700 
4701 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4702 		rule->tuples_mask.dst_port =
4703 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4704 
4705 		rule->tuples.ether_proto = ETH_P_IPV6;
4706 		rule->tuples_mask.ether_proto = 0xFFFF;
4707 
4708 		break;
4709 	case IPV6_USER_FLOW:
4710 		be32_to_cpu_array(rule->tuples.src_ip,
4711 				  fs->h_u.usr_ip6_spec.ip6src, 4);
4712 		be32_to_cpu_array(rule->tuples_mask.src_ip,
4713 				  fs->m_u.usr_ip6_spec.ip6src, 4);
4714 
4715 		be32_to_cpu_array(rule->tuples.dst_ip,
4716 				  fs->h_u.usr_ip6_spec.ip6dst, 4);
4717 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
4718 				  fs->m_u.usr_ip6_spec.ip6dst, 4);
4719 
4720 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4721 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4722 
4723 		rule->tuples.ether_proto = ETH_P_IPV6;
4724 		rule->tuples_mask.ether_proto = 0xFFFF;
4725 
4726 		break;
4727 	case ETHER_FLOW:
4728 		ether_addr_copy(rule->tuples.src_mac,
4729 				fs->h_u.ether_spec.h_source);
4730 		ether_addr_copy(rule->tuples_mask.src_mac,
4731 				fs->m_u.ether_spec.h_source);
4732 
4733 		ether_addr_copy(rule->tuples.dst_mac,
4734 				fs->h_u.ether_spec.h_dest);
4735 		ether_addr_copy(rule->tuples_mask.dst_mac,
4736 				fs->m_u.ether_spec.h_dest);
4737 
4738 		rule->tuples.ether_proto =
4739 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
4740 		rule->tuples_mask.ether_proto =
4741 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
4742 
4743 		break;
4744 	default:
4745 		return -EOPNOTSUPP;
4746 	}
4747 
4748 	switch (flow_type) {
4749 	case SCTP_V4_FLOW:
4750 	case SCTP_V6_FLOW:
4751 		rule->tuples.ip_proto = IPPROTO_SCTP;
4752 		rule->tuples_mask.ip_proto = 0xFF;
4753 		break;
4754 	case TCP_V4_FLOW:
4755 	case TCP_V6_FLOW:
4756 		rule->tuples.ip_proto = IPPROTO_TCP;
4757 		rule->tuples_mask.ip_proto = 0xFF;
4758 		break;
4759 	case UDP_V4_FLOW:
4760 	case UDP_V6_FLOW:
4761 		rule->tuples.ip_proto = IPPROTO_UDP;
4762 		rule->tuples_mask.ip_proto = 0xFF;
4763 		break;
4764 	default:
4765 		break;
4766 	}
4767 
4768 	if ((fs->flow_type & FLOW_EXT)) {
4769 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4770 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4771 	}
4772 
4773 	if (fs->flow_type & FLOW_MAC_EXT) {
4774 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4775 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4776 	}
4777 
4778 	return 0;
4779 }
4780 
4781 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4782 			      struct ethtool_rxnfc *cmd)
4783 {
4784 	struct hclge_vport *vport = hclge_get_vport(handle);
4785 	struct hclge_dev *hdev = vport->back;
4786 	u16 dst_vport_id = 0, q_index = 0;
4787 	struct ethtool_rx_flow_spec *fs;
4788 	struct hclge_fd_rule *rule;
4789 	u32 unused = 0;
4790 	u8 action;
4791 	int ret;
4792 
4793 	if (!hnae3_dev_fd_supported(hdev))
4794 		return -EOPNOTSUPP;
4795 
4796 	if (!hdev->fd_en) {
4797 		dev_warn(&hdev->pdev->dev,
4798 			 "Please enable flow director first\n");
4799 		return -EOPNOTSUPP;
4800 	}
4801 
4802 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4803 
4804 	ret = hclge_fd_check_spec(hdev, fs, &unused);
4805 	if (ret) {
4806 		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4807 		return ret;
4808 	}
4809 
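	/* RX_CLS_FLOW_DISC means drop the packet; otherwise the ring cookie
	 * encodes the target VF and queue index.
	 */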
4810 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4811 		action = HCLGE_FD_ACTION_DROP_PACKET;
4812 	} else {
4813 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4814 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4815 		u16 tqps;
4816 
4817 		if (vf > hdev->num_req_vfs) {
4818 			dev_err(&hdev->pdev->dev,
4819 				"Error: vf id (%d) > max vf num (%d)\n",
4820 				vf, hdev->num_req_vfs);
4821 			return -EINVAL;
4822 		}
4823 
4824 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4825 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4826 
4827 		if (ring >= tqps) {
4828 			dev_err(&hdev->pdev->dev,
4829 				"Error: queue id (%d) > max tqp num (%d)\n",
4830 				ring, tqps - 1);
4831 			return -EINVAL;
4832 		}
4833 
4834 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4835 		q_index = ring;
4836 	}
4837 
4838 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4839 	if (!rule)
4840 		return -ENOMEM;
4841 
4842 	ret = hclge_fd_get_tuple(hdev, fs, rule);
4843 	if (ret)
4844 		goto free_rule;
4845 
4846 	rule->flow_type = fs->flow_type;
4847 
4848 	rule->location = fs->location;
4849 	rule->unused_tuple = unused;
4850 	rule->vf_id = dst_vport_id;
4851 	rule->queue_id = q_index;
4852 	rule->action = action;
4853 
4854 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4855 	if (ret)
4856 		goto free_rule;
4857 
4858 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4859 	if (ret)
4860 		goto free_rule;
4861 
4862 	ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4863 	if (ret)
4864 		goto free_rule;
4865 
4866 	return ret;
4867 
4868 free_rule:
4869 	kfree(rule);
4870 	return ret;
4871 }
4872 
4873 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4874 			      struct ethtool_rxnfc *cmd)
4875 {
4876 	struct hclge_vport *vport = hclge_get_vport(handle);
4877 	struct hclge_dev *hdev = vport->back;
4878 	struct ethtool_rx_flow_spec *fs;
4879 	int ret;
4880 
4881 	if (!hnae3_dev_fd_supported(hdev))
4882 		return -EOPNOTSUPP;
4883 
4884 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4885 
4886 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4887 		return -EINVAL;
4888 
4889 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
4890 		dev_err(&hdev->pdev->dev,
4891 			"Delete fail, rule %d is inexistent\n",
4892 			fs->location);
4893 		return -ENOENT;
4894 	}
4895 
4896 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4897 				   fs->location, NULL, false);
4898 	if (ret)
4899 		return ret;
4900 
4901 	return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4902 					 false);
4903 }
4904 
4905 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4906 				     bool clear_list)
4907 {
4908 	struct hclge_vport *vport = hclge_get_vport(handle);
4909 	struct hclge_dev *hdev = vport->back;
4910 	struct hclge_fd_rule *rule;
4911 	struct hlist_node *node;
4912 
4913 	if (!hnae3_dev_fd_supported(hdev))
4914 		return;
4915 
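	/* Always remove the rules from hardware; free the software list only
	 * when clear_list is set, otherwise keep it so the rules can be
	 * restored later.
	 */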
4916 	if (clear_list) {
4917 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4918 					  rule_node) {
4919 			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4920 					     rule->location, NULL, false);
4921 			hlist_del(&rule->rule_node);
4922 			kfree(rule);
4923 			hdev->hclge_fd_rule_num--;
4924 		}
4925 	} else {
4926 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4927 					  rule_node)
4928 			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4929 					     rule->location, NULL, false);
4930 	}
4931 }
4932 
4933 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4934 {
4935 	struct hclge_vport *vport = hclge_get_vport(handle);
4936 	struct hclge_dev *hdev = vport->back;
4937 	struct hclge_fd_rule *rule;
4938 	struct hlist_node *node;
4939 	int ret;
4940 
4941 	/* Return ok here, because reset error handling will check this
4942 	 * return value. If error is returned here, the reset process will
4943 	 * fail.
4944 	 */
4945 	if (!hnae3_dev_fd_supported(hdev))
4946 		return 0;
4947 
4948 	/* if fd is disabled, the rules should not be restored during reset */
4949 	if (!hdev->fd_en)
4950 		return 0;
4951 
4952 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4953 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4954 		if (!ret)
4955 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4956 
4957 		if (ret) {
4958 			dev_warn(&hdev->pdev->dev,
4959 				 "Restore rule %d failed, remove it\n",
4960 				 rule->location);
4961 			hlist_del(&rule->rule_node);
4962 			kfree(rule);
4963 			hdev->hclge_fd_rule_num--;
4964 		}
4965 	}
4966 	return 0;
4967 }
4968 
4969 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4970 				 struct ethtool_rxnfc *cmd)
4971 {
4972 	struct hclge_vport *vport = hclge_get_vport(handle);
4973 	struct hclge_dev *hdev = vport->back;
4974 
4975 	if (!hnae3_dev_fd_supported(hdev))
4976 		return -EOPNOTSUPP;
4977 
4978 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
4979 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4980 
4981 	return 0;
4982 }
4983 
4984 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
4985 				  struct ethtool_rxnfc *cmd)
4986 {
4987 	struct hclge_vport *vport = hclge_get_vport(handle);
4988 	struct hclge_fd_rule *rule = NULL;
4989 	struct hclge_dev *hdev = vport->back;
4990 	struct ethtool_rx_flow_spec *fs;
4991 	struct hlist_node *node2;
4992 
4993 	if (!hnae3_dev_fd_supported(hdev))
4994 		return -EOPNOTSUPP;
4995 
4996 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4997 
4998 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4999 		if (rule->location >= fs->location)
5000 			break;
5001 	}
5002 
5003 	if (!rule || fs->location != rule->location)
5004 		return -ENOENT;
5005 
5006 	fs->flow_type = rule->flow_type;
5007 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5008 	case SCTP_V4_FLOW:
5009 	case TCP_V4_FLOW:
5010 	case UDP_V4_FLOW:
5011 		fs->h_u.tcp_ip4_spec.ip4src =
5012 				cpu_to_be32(rule->tuples.src_ip[3]);
5013 		fs->m_u.tcp_ip4_spec.ip4src =
5014 				rule->unused_tuple & BIT(INNER_SRC_IP) ?
5015 				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5016 
5017 		fs->h_u.tcp_ip4_spec.ip4dst =
5018 				cpu_to_be32(rule->tuples.dst_ip[3]);
5019 		fs->m_u.tcp_ip4_spec.ip4dst =
5020 				rule->unused_tuple & BIT(INNER_DST_IP) ?
5021 				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5022 
5023 		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5024 		fs->m_u.tcp_ip4_spec.psrc =
5025 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5026 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5027 
5028 		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5029 		fs->m_u.tcp_ip4_spec.pdst =
5030 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5031 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5032 
5033 		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5034 		fs->m_u.tcp_ip4_spec.tos =
5035 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5036 				0 : rule->tuples_mask.ip_tos;
5037 
5038 		break;
5039 	case IP_USER_FLOW:
5040 		fs->h_u.usr_ip4_spec.ip4src =
5041 				cpu_to_be32(rule->tuples.src_ip[3]);
5042 		fs->m_u.tcp_ip4_spec.ip4src =
5043 				rule->unused_tuple & BIT(INNER_SRC_IP) ?
5044 				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5045 
5046 		fs->h_u.usr_ip4_spec.ip4dst =
5047 				cpu_to_be32(rule->tuples.dst_ip[3]);
5048 		fs->m_u.usr_ip4_spec.ip4dst =
5049 				rule->unused_tuple & BIT(INNER_DST_IP) ?
5050 				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5051 
5052 		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5053 		fs->m_u.usr_ip4_spec.tos =
5054 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5055 				0 : rule->tuples_mask.ip_tos;
5056 
5057 		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5058 		fs->m_u.usr_ip4_spec.proto =
5059 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5060 				0 : rule->tuples_mask.ip_proto;
5061 
5062 		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5063 
5064 		break;
5065 	case SCTP_V6_FLOW:
5066 	case TCP_V6_FLOW:
5067 	case UDP_V6_FLOW:
5068 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5069 				  rule->tuples.src_ip, 4);
5070 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5071 			memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5072 		else
5073 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5074 					  rule->tuples_mask.src_ip, 4);
5075 
5076 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5077 				  rule->tuples.dst_ip, 4);
5078 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5079 			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5080 		else
5081 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5082 					  rule->tuples_mask.dst_ip, 4);
5083 
5084 		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5085 		fs->m_u.tcp_ip6_spec.psrc =
5086 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5087 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5088 
5089 		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5090 		fs->m_u.tcp_ip6_spec.pdst =
5091 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5092 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5093 
5094 		break;
5095 	case IPV6_USER_FLOW:
5096 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5097 				  rule->tuples.src_ip, 4);
5098 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5099 			memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5100 		else
5101 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5102 					  rule->tuples_mask.src_ip, 4);
5103 
5104 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5105 				  rule->tuples.dst_ip, 4);
5106 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5107 			memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5108 		else
5109 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5110 					  rule->tuples_mask.dst_ip, 4);
5111 
5112 		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5113 		fs->m_u.usr_ip6_spec.l4_proto =
5114 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5115 				0 : rule->tuples_mask.ip_proto;
5116 
5117 		break;
5118 	case ETHER_FLOW:
5119 		ether_addr_copy(fs->h_u.ether_spec.h_source,
5120 				rule->tuples.src_mac);
5121 		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5122 			eth_zero_addr(fs->m_u.ether_spec.h_source);
5123 		else
5124 			ether_addr_copy(fs->m_u.ether_spec.h_source,
5125 					rule->tuples_mask.src_mac);
5126 
5127 		ether_addr_copy(fs->h_u.ether_spec.h_dest,
5128 				rule->tuples.dst_mac);
5129 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
5130 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
5131 		else
5132 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
5133 					rule->tuples_mask.dst_mac);
5134 
5135 		fs->h_u.ether_spec.h_proto =
5136 				cpu_to_be16(rule->tuples.ether_proto);
5137 		fs->m_u.ether_spec.h_proto =
5138 				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5139 				0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5140 
5141 		break;
5142 	default:
5143 		return -EOPNOTSUPP;
5144 	}
5145 
5146 	if (fs->flow_type & FLOW_EXT) {
5147 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5148 		fs->m_ext.vlan_tci =
5149 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5150 				cpu_to_be16(VLAN_VID_MASK) :
5151 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
5152 	}
5153 
5154 	if (fs->flow_type & FLOW_MAC_EXT) {
5155 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5156 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
5157 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
5158 		else
5159 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
5160 					rule->tuples_mask.dst_mac);
5161 	}
5162 
5163 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5164 		fs->ring_cookie = RX_CLS_FLOW_DISC;
5165 	} else {
5166 		u64 vf_id;
5167 
5168 		fs->ring_cookie = rule->queue_id;
5169 		vf_id = rule->vf_id;
5170 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5171 		fs->ring_cookie |= vf_id;
5172 	}
5173 
5174 	return 0;
5175 }
5176 
5177 static int hclge_get_all_rules(struct hnae3_handle *handle,
5178 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
5179 {
5180 	struct hclge_vport *vport = hclge_get_vport(handle);
5181 	struct hclge_dev *hdev = vport->back;
5182 	struct hclge_fd_rule *rule;
5183 	struct hlist_node *node2;
5184 	int cnt = 0;
5185 
5186 	if (!hnae3_dev_fd_supported(hdev))
5187 		return -EOPNOTSUPP;
5188 
5189 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5190 
5191 	hlist_for_each_entry_safe(rule, node2,
5192 				  &hdev->fd_rule_list, rule_node) {
5193 		if (cnt == cmd->rule_cnt)
5194 			return -EMSGSIZE;
5195 
5196 		rule_locs[cnt] = rule->location;
5197 		cnt++;
5198 	}
5199 
5200 	cmd->rule_cnt = cnt;
5201 
5202 	return 0;
5203 }
5204 
5205 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5206 {
5207 	struct hclge_vport *vport = hclge_get_vport(handle);
5208 	struct hclge_dev *hdev = vport->back;
5209 
5210 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5211 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5212 }
5213 
5214 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5215 {
5216 	struct hclge_vport *vport = hclge_get_vport(handle);
5217 	struct hclge_dev *hdev = vport->back;
5218 
5219 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5220 }
5221 
5222 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5223 {
5224 	struct hclge_vport *vport = hclge_get_vport(handle);
5225 	struct hclge_dev *hdev = vport->back;
5226 
5227 	return hdev->reset_count;
5228 }
5229 
5230 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5231 {
5232 	struct hclge_vport *vport = hclge_get_vport(handle);
5233 	struct hclge_dev *hdev = vport->back;
5234 
5235 	hdev->fd_en = enable;
5236 	if (!enable)
5237 		hclge_del_all_fd_entries(handle, false);
5238 	else
5239 		hclge_restore_fd_entries(handle);
5240 }
5241 
5242 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5243 {
5244 	struct hclge_desc desc;
5245 	struct hclge_config_mac_mode_cmd *req =
5246 		(struct hclge_config_mac_mode_cmd *)desc.data;
5247 	u32 loop_en = 0;
5248 	int ret;
5249 
5250 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5251 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5252 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5253 	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5254 	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5255 	hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5256 	hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5257 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5258 	hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5259 	hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5260 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5261 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5262 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5263 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5264 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5265 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5266 
5267 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5268 	if (ret)
5269 		dev_err(&hdev->pdev->dev,
5270 			"mac enable fail, ret =%d.\n", ret);
5271 }
5272 
5273 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5274 {
5275 	struct hclge_config_mac_mode_cmd *req;
5276 	struct hclge_desc desc;
5277 	u32 loop_en;
5278 	int ret;
5279 
5280 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5281 	/* 1 Read out the MAC mode config at first */
5282 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5283 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5284 	if (ret) {
5285 		dev_err(&hdev->pdev->dev,
5286 			"mac loopback get fail, ret =%d.\n", ret);
5287 		return ret;
5288 	}
5289 
5290 	/* 2 Then setup the loopback flag */
5291 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5292 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5293 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5294 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5295 
5296 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5297 
5298 	/* 3 Config mac work mode with loopback flag
5299 	 * and its original configuration parameters
5300 	 */
5301 	hclge_cmd_reuse_desc(&desc, false);
5302 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5303 	if (ret)
5304 		dev_err(&hdev->pdev->dev,
5305 			"mac loopback set fail, ret =%d.\n", ret);
5306 	return ret;
5307 }
5308 
5309 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5310 				     enum hnae3_loop loop_mode)
5311 {
5312 #define HCLGE_SERDES_RETRY_MS	10
5313 #define HCLGE_SERDES_RETRY_NUM	100
5314 
5315 #define HCLGE_MAC_LINK_STATUS_MS   20
5316 #define HCLGE_MAC_LINK_STATUS_NUM  10
5317 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5318 #define HCLGE_MAC_LINK_STATUS_UP   1
5319 
5320 	struct hclge_serdes_lb_cmd *req;
5321 	struct hclge_desc desc;
5322 	int mac_link_ret = 0;
5323 	int ret, i = 0;
5324 	u8 loop_mode_b;
5325 
5326 	req = (struct hclge_serdes_lb_cmd *)desc.data;
5327 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5328 
5329 	switch (loop_mode) {
5330 	case HNAE3_LOOP_SERIAL_SERDES:
5331 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5332 		break;
5333 	case HNAE3_LOOP_PARALLEL_SERDES:
5334 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5335 		break;
5336 	default:
5337 		dev_err(&hdev->pdev->dev,
5338 			"unsupported serdes loopback mode %d\n", loop_mode);
5339 		return -ENOTSUPP;
5340 	}
5341 
5342 	if (en) {
5343 		req->enable = loop_mode_b;
5344 		req->mask = loop_mode_b;
5345 		mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5346 	} else {
5347 		req->mask = loop_mode_b;
5348 		mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5349 	}
5350 
5351 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5352 	if (ret) {
5353 		dev_err(&hdev->pdev->dev,
5354 			"serdes loopback set fail, ret = %d\n", ret);
5355 		return ret;
5356 	}
5357 
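	/* Poll the command result until the firmware reports that the serdes
	 * loopback configuration is done, up to HCLGE_SERDES_RETRY_NUM times.
	 */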
5358 	do {
5359 		msleep(HCLGE_SERDES_RETRY_MS);
5360 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5361 					   true);
5362 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5363 		if (ret) {
5364 			dev_err(&hdev->pdev->dev,
5365 				"serdes loopback get, ret = %d\n", ret);
5366 			return ret;
5367 		}
5368 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
5369 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5370 
5371 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5372 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5373 		return -EBUSY;
5374 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5375 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5376 		return -EIO;
5377 	}
5378 
5379 	hclge_cfg_mac_mode(hdev, en);
5380 
5381 	i = 0;
5382 	do {
5383 		/* serdes internal loopback, independent of the network cable. */
5384 		msleep(HCLGE_MAC_LINK_STATUS_MS);
5385 		ret = hclge_get_mac_link_status(hdev);
5386 		if (ret == mac_link_ret)
5387 			return 0;
5388 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5389 
5390 	dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5391 
5392 	return -EBUSY;
5393 }
5394 
5395 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5396 			    int stream_id, bool enable)
5397 {
5398 	struct hclge_desc desc;
5399 	struct hclge_cfg_com_tqp_queue_cmd *req =
5400 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5401 	int ret;
5402 
5403 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5404 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5405 	req->stream_id = cpu_to_le16(stream_id);
5406 	req->enable |= enable << HCLGE_TQP_ENABLE_B;
5407 
5408 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5409 	if (ret)
5410 		dev_err(&hdev->pdev->dev,
5411 			"Tqp enable fail, status =%d.\n", ret);
5412 	return ret;
5413 }
5414 
5415 static int hclge_set_loopback(struct hnae3_handle *handle,
5416 			      enum hnae3_loop loop_mode, bool en)
5417 {
5418 	struct hclge_vport *vport = hclge_get_vport(handle);
5419 	struct hnae3_knic_private_info *kinfo;
5420 	struct hclge_dev *hdev = vport->back;
5421 	int i, ret;
5422 
5423 	switch (loop_mode) {
5424 	case HNAE3_LOOP_APP:
5425 		ret = hclge_set_app_loopback(hdev, en);
5426 		break;
5427 	case HNAE3_LOOP_SERIAL_SERDES:
5428 	case HNAE3_LOOP_PARALLEL_SERDES:
5429 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5430 		break;
5431 	default:
5432 		ret = -ENOTSUPP;
5433 		dev_err(&hdev->pdev->dev,
5434 			"loop_mode %d is not supported\n", loop_mode);
5435 		break;
5436 	}
5437 
5438 	if (ret)
5439 		return ret;
5440 
5441 	kinfo = &vport->nic.kinfo;
5442 	for (i = 0; i < kinfo->num_tqps; i++) {
5443 		ret = hclge_tqp_enable(hdev, i, 0, en);
5444 		if (ret)
5445 			return ret;
5446 	}
5447 
5448 	return 0;
5449 }
5450 
5451 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5452 {
5453 	struct hclge_vport *vport = hclge_get_vport(handle);
5454 	struct hnae3_knic_private_info *kinfo;
5455 	struct hnae3_queue *queue;
5456 	struct hclge_tqp *tqp;
5457 	int i;
5458 
5459 	kinfo = &vport->nic.kinfo;
5460 	for (i = 0; i < kinfo->num_tqps; i++) {
5461 		queue = handle->kinfo.tqp[i];
5462 		tqp = container_of(queue, struct hclge_tqp, q);
5463 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5464 	}
5465 }
5466 
5467 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
5468 {
5469 	struct hclge_vport *vport = hclge_get_vport(handle);
5470 	struct hclge_dev *hdev = vport->back;
5471 
5472 	if (enable) {
5473 		mod_timer(&hdev->service_timer, jiffies + HZ);
5474 	} else {
5475 		del_timer_sync(&hdev->service_timer);
5476 		cancel_work_sync(&hdev->service_task);
5477 		clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5478 	}
5479 }
5480 
5481 static int hclge_ae_start(struct hnae3_handle *handle)
5482 {
5483 	struct hclge_vport *vport = hclge_get_vport(handle);
5484 	struct hclge_dev *hdev = vport->back;
5485 
5486 	/* mac enable */
5487 	hclge_cfg_mac_mode(hdev, true);
5488 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5489 	hdev->hw.mac.link = 0;
5490 
5491 	/* reset tqp stats */
5492 	hclge_reset_tqp_stats(handle);
5493 
5494 	hclge_mac_start_phy(hdev);
5495 
5496 	return 0;
5497 }
5498 
5499 static void hclge_ae_stop(struct hnae3_handle *handle)
5500 {
5501 	struct hclge_vport *vport = hclge_get_vport(handle);
5502 	struct hclge_dev *hdev = vport->back;
5503 	int i;
5504 
5505 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
5506 
5507 	/* If it is not PF reset, the firmware will disable the MAC,
5508 	 * so it only needs to stop the PHY here.
5509 	 */
5510 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5511 	    hdev->reset_type != HNAE3_FUNC_RESET) {
5512 		hclge_mac_stop_phy(hdev);
5513 		return;
5514 	}
5515 
5516 	for (i = 0; i < handle->kinfo.num_tqps; i++)
5517 		hclge_reset_tqp(handle, i);
5518 
5519 	/* Mac disable */
5520 	hclge_cfg_mac_mode(hdev, false);
5521 
5522 	hclge_mac_stop_phy(hdev);
5523 
5524 	/* reset tqp stats */
5525 	hclge_reset_tqp_stats(handle);
5526 	hclge_update_link_status(hdev);
5527 }
5528 
5529 int hclge_vport_start(struct hclge_vport *vport)
5530 {
5531 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5532 	vport->last_active_jiffies = jiffies;
5533 	return 0;
5534 }
5535 
5536 void hclge_vport_stop(struct hclge_vport *vport)
5537 {
5538 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5539 }
5540 
5541 static int hclge_client_start(struct hnae3_handle *handle)
5542 {
5543 	struct hclge_vport *vport = hclge_get_vport(handle);
5544 
5545 	return hclge_vport_start(vport);
5546 }
5547 
5548 static void hclge_client_stop(struct hnae3_handle *handle)
5549 {
5550 	struct hclge_vport *vport = hclge_get_vport(handle);
5551 
5552 	hclge_vport_stop(vport);
5553 }
5554 
5555 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5556 					 u16 cmdq_resp, u8  resp_code,
5557 					 enum hclge_mac_vlan_tbl_opcode op)
5558 {
5559 	struct hclge_dev *hdev = vport->back;
5560 	int return_status = -EIO;
5561 
5562 	if (cmdq_resp) {
5563 		dev_err(&hdev->pdev->dev,
5564 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5565 			cmdq_resp);
5566 		return -EIO;
5567 	}
5568 
5569 	if (op == HCLGE_MAC_VLAN_ADD) {
5570 		if ((!resp_code) || (resp_code == 1)) {
5571 			return_status = 0;
5572 		} else if (resp_code == 2) {
5573 			return_status = -ENOSPC;
5574 			dev_err(&hdev->pdev->dev,
5575 				"add mac addr failed for uc_overflow.\n");
5576 		} else if (resp_code == 3) {
5577 			return_status = -ENOSPC;
5578 			dev_err(&hdev->pdev->dev,
5579 				"add mac addr failed for mc_overflow.\n");
5580 		} else {
5581 			dev_err(&hdev->pdev->dev,
5582 				"add mac addr failed for undefined, code=%d.\n",
5583 				resp_code);
5584 		}
5585 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
5586 		if (!resp_code) {
5587 			return_status = 0;
5588 		} else if (resp_code == 1) {
5589 			return_status = -ENOENT;
5590 			dev_dbg(&hdev->pdev->dev,
5591 				"remove mac addr failed for miss.\n");
5592 		} else {
5593 			dev_err(&hdev->pdev->dev,
5594 				"remove mac addr failed for undefined, code=%d.\n",
5595 				resp_code);
5596 		}
5597 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
5598 		if (!resp_code) {
5599 			return_status = 0;
5600 		} else if (resp_code == 1) {
5601 			return_status = -ENOENT;
5602 			dev_dbg(&hdev->pdev->dev,
5603 				"lookup mac addr failed for miss.\n");
5604 		} else {
5605 			dev_err(&hdev->pdev->dev,
5606 				"lookup mac addr failed for undefined, code=%d.\n",
5607 				resp_code);
5608 		}
5609 	} else {
5610 		return_status = -EINVAL;
5611 		dev_err(&hdev->pdev->dev,
5612 			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5613 			op);
5614 	}
5615 
5616 	return return_status;
5617 }
5618 
5619 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5620 {
5621 	int word_num;
5622 	int bit_num;
5623 
5624 	if (vfid > 255 || vfid < 0)
5625 		return -EIO;
5626 
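	/* The function id bitmap spans two descriptors: desc[1] holds
	 * vfids 0-191 (6 words x 32 bits) and desc[2] holds vfids 192-255.
	 * For example, vfid 200 maps to desc[2].data[0], bit 8.
	 */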
5627 	if (vfid >= 0 && vfid <= 191) {
5628 		word_num = vfid / 32;
5629 		bit_num  = vfid % 32;
5630 		if (clr)
5631 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5632 		else
5633 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5634 	} else {
5635 		word_num = (vfid - 192) / 32;
5636 		bit_num  = vfid % 32;
5637 		if (clr)
5638 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5639 		else
5640 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5641 	}
5642 
5643 	return 0;
5644 }
5645 
5646 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5647 {
5648 #define HCLGE_DESC_NUMBER 3
5649 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5650 	int i, j;
5651 
5652 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5653 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5654 			if (desc[i].data[j])
5655 				return false;
5656 
5657 	return true;
5658 }
5659 
5660 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5661 				   const u8 *addr, bool is_mc)
5662 {
5663 	const unsigned char *mac_addr = addr;
5664 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
5665 		       (mac_addr[0]) | (mac_addr[1] << 8);
5666 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
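	/* The 6-byte MAC address is packed with byte 0 in the low bits:
	 * bytes 0-3 form mac_addr_hi32 and bytes 4-5 form mac_addr_lo16,
	 * e.g. 00:11:22:33:44:55 gives high 0x33221100 and low 0x5544.
	 */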
5667 
5668 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5669 	if (is_mc) {
5670 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5671 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5672 	}
5673 
5674 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5675 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5676 }
5677 
5678 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5679 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
5680 {
5681 	struct hclge_dev *hdev = vport->back;
5682 	struct hclge_desc desc;
5683 	u8 resp_code;
5684 	u16 retval;
5685 	int ret;
5686 
5687 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5688 
5689 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5690 
5691 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5692 	if (ret) {
5693 		dev_err(&hdev->pdev->dev,
5694 			"del mac addr failed for cmd_send, ret =%d.\n",
5695 			ret);
5696 		return ret;
5697 	}
5698 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5699 	retval = le16_to_cpu(desc.retval);
5700 
5701 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5702 					     HCLGE_MAC_VLAN_REMOVE);
5703 }
5704 
5705 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5706 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
5707 				     struct hclge_desc *desc,
5708 				     bool is_mc)
5709 {
5710 	struct hclge_dev *hdev = vport->back;
5711 	u8 resp_code;
5712 	u16 retval;
5713 	int ret;
5714 
5715 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5716 	if (is_mc) {
5717 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5718 		memcpy(desc[0].data,
5719 		       req,
5720 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5721 		hclge_cmd_setup_basic_desc(&desc[1],
5722 					   HCLGE_OPC_MAC_VLAN_ADD,
5723 					   true);
5724 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5725 		hclge_cmd_setup_basic_desc(&desc[2],
5726 					   HCLGE_OPC_MAC_VLAN_ADD,
5727 					   true);
5728 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
5729 	} else {
5730 		memcpy(desc[0].data,
5731 		       req,
5732 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5733 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
5734 	}
5735 	if (ret) {
5736 		dev_err(&hdev->pdev->dev,
5737 			"lookup mac addr failed for cmd_send, ret =%d.\n",
5738 			ret);
5739 		return ret;
5740 	}
5741 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5742 	retval = le16_to_cpu(desc[0].retval);
5743 
5744 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5745 					     HCLGE_MAC_VLAN_LKUP);
5746 }
5747 
5748 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5749 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
5750 				  struct hclge_desc *mc_desc)
5751 {
5752 	struct hclge_dev *hdev = vport->back;
5753 	int cfg_status;
5754 	u8 resp_code;
5755 	u16 retval;
5756 	int ret;
5757 
5758 	if (!mc_desc) {
5759 		struct hclge_desc desc;
5760 
5761 		hclge_cmd_setup_basic_desc(&desc,
5762 					   HCLGE_OPC_MAC_VLAN_ADD,
5763 					   false);
5764 		memcpy(desc.data, req,
5765 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5766 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5767 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5768 		retval = le16_to_cpu(desc.retval);
5769 
5770 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5771 							   resp_code,
5772 							   HCLGE_MAC_VLAN_ADD);
5773 	} else {
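		/* A multicast entry spans three descriptors: reuse the
		 * lookup descriptors, chain the first two with
		 * HCLGE_CMD_FLAG_NEXT, clear it on the last one and send
		 * them as a single command.
		 */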
5774 		hclge_cmd_reuse_desc(&mc_desc[0], false);
5775 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5776 		hclge_cmd_reuse_desc(&mc_desc[1], false);
5777 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5778 		hclge_cmd_reuse_desc(&mc_desc[2], false);
5779 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5780 		memcpy(mc_desc[0].data, req,
5781 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5782 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5783 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5784 		retval = le16_to_cpu(mc_desc[0].retval);
5785 
5786 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5787 							   resp_code,
5788 							   HCLGE_MAC_VLAN_ADD);
5789 	}
5790 
5791 	if (ret) {
5792 		dev_err(&hdev->pdev->dev,
5793 			"add mac addr failed for cmd_send, ret =%d.\n",
5794 			ret);
5795 		return ret;
5796 	}
5797 
5798 	return cfg_status;
5799 }
5800 
5801 static int hclge_init_umv_space(struct hclge_dev *hdev)
5802 {
5803 	u16 allocated_size = 0;
5804 	int ret;
5805 
5806 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5807 				  true);
5808 	if (ret)
5809 		return ret;
5810 
5811 	if (allocated_size < hdev->wanted_umv_size)
5812 		dev_warn(&hdev->pdev->dev,
5813 			 "Alloc umv space failed, want %d, get %d\n",
5814 			 hdev->wanted_umv_size, allocated_size);
5815 
5816 	mutex_init(&hdev->umv_mutex);
5817 	hdev->max_umv_size = allocated_size;
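	/* max_umv_size is split into (num_req_vfs + 2) equal private
	 * quotas; the remainder goes to the shared pool. For example,
	 * 256 entries and 6 requested VFs give priv_umv_size = 32 and
	 * share_umv_size = 32.
	 */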
5818 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5819 	hdev->share_umv_size = hdev->priv_umv_size +
5820 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
5821 
5822 	return 0;
5823 }
5824 
5825 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5826 {
5827 	int ret;
5828 
5829 	if (hdev->max_umv_size > 0) {
5830 		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5831 					  false);
5832 		if (ret)
5833 			return ret;
5834 		hdev->max_umv_size = 0;
5835 	}
5836 	mutex_destroy(&hdev->umv_mutex);
5837 
5838 	return 0;
5839 }
5840 
5841 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5842 			       u16 *allocated_size, bool is_alloc)
5843 {
5844 	struct hclge_umv_spc_alc_cmd *req;
5845 	struct hclge_desc desc;
5846 	int ret;
5847 
5848 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5849 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5850 	hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5851 	req->space_size = cpu_to_le32(space_size);
5852 
5853 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5854 	if (ret) {
5855 		dev_err(&hdev->pdev->dev,
5856 			"%s umv space failed for cmd_send, ret =%d\n",
5857 			is_alloc ? "allocate" : "free", ret);
5858 		return ret;
5859 	}
5860 
5861 	if (is_alloc && allocated_size)
5862 		*allocated_size = le32_to_cpu(desc.data[1]);
5863 
5864 	return 0;
5865 }
5866 
5867 static void hclge_reset_umv_space(struct hclge_dev *hdev)
5868 {
5869 	struct hclge_vport *vport;
5870 	int i;
5871 
5872 	for (i = 0; i < hdev->num_alloc_vport; i++) {
5873 		vport = &hdev->vport[i];
5874 		vport->used_umv_num = 0;
5875 	}
5876 
5877 	mutex_lock(&hdev->umv_mutex);
5878 	hdev->share_umv_size = hdev->priv_umv_size +
5879 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
5880 	mutex_unlock(&hdev->umv_mutex);
5881 }
5882 
5883 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5884 {
5885 	struct hclge_dev *hdev = vport->back;
5886 	bool is_full;
5887 
5888 	mutex_lock(&hdev->umv_mutex);
5889 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5890 		   hdev->share_umv_size == 0);
5891 	mutex_unlock(&hdev->umv_mutex);
5892 
5893 	return is_full;
5894 }
5895 
5896 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5897 {
5898 	struct hclge_dev *hdev = vport->back;
5899 
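	/* A vport consumes its private quota first and then draws from
	 * the shared pool; freeing an address reverses the accounting.
	 */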
5900 	mutex_lock(&hdev->umv_mutex);
5901 	if (is_free) {
5902 		if (vport->used_umv_num > hdev->priv_umv_size)
5903 			hdev->share_umv_size++;
5904 
5905 		if (vport->used_umv_num > 0)
5906 			vport->used_umv_num--;
5907 	} else {
5908 		if (vport->used_umv_num >= hdev->priv_umv_size &&
5909 		    hdev->share_umv_size > 0)
5910 			hdev->share_umv_size--;
5911 		vport->used_umv_num++;
5912 	}
5913 	mutex_unlock(&hdev->umv_mutex);
5914 }
5915 
5916 static int hclge_add_uc_addr(struct hnae3_handle *handle,
5917 			     const unsigned char *addr)
5918 {
5919 	struct hclge_vport *vport = hclge_get_vport(handle);
5920 
5921 	return hclge_add_uc_addr_common(vport, addr);
5922 }
5923 
5924 int hclge_add_uc_addr_common(struct hclge_vport *vport,
5925 			     const unsigned char *addr)
5926 {
5927 	struct hclge_dev *hdev = vport->back;
5928 	struct hclge_mac_vlan_tbl_entry_cmd req;
5929 	struct hclge_desc desc;
5930 	u16 egress_port = 0;
5931 	int ret;
5932 
5933 	/* mac addr check */
5934 	if (is_zero_ether_addr(addr) ||
5935 	    is_broadcast_ether_addr(addr) ||
5936 	    is_multicast_ether_addr(addr)) {
5937 		dev_err(&hdev->pdev->dev,
5938 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5939 			 addr,
5940 			 is_zero_ether_addr(addr),
5941 			 is_broadcast_ether_addr(addr),
5942 			 is_multicast_ether_addr(addr));
5943 		return -EINVAL;
5944 	}
5945 
5946 	memset(&req, 0, sizeof(req));
5947 
5948 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5949 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
5950 
5951 	req.egress_port = cpu_to_le16(egress_port);
5952 
5953 	hclge_prepare_mac_addr(&req, addr, false);
5954 
5955 	/* Look up the mac address in the mac_vlan table, and add
5956 	 * it if the entry does not exist. Duplicate unicast entries
5957 	 * are not allowed in the mac_vlan table.
5958 	 */
5959 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
5960 	if (ret == -ENOENT) {
5961 		if (!hclge_is_umv_space_full(vport)) {
5962 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5963 			if (!ret)
5964 				hclge_update_umv_space(vport, false);
5965 			return ret;
5966 		}
5967 
5968 		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5969 			hdev->priv_umv_size);
5970 
5971 		return -ENOSPC;
5972 	}
5973 
5974 	/* check if we just hit the duplicate */
5975 	if (!ret) {
5976 		dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
5977 			 vport->vport_id, addr);
5978 		return 0;
5979 	}
5980 
5981 	dev_err(&hdev->pdev->dev,
5982 		"PF failed to add unicast entry(%pM) in the MAC table\n",
5983 		addr);
5984 
5985 	return ret;
5986 }
5987 
5988 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
5989 			    const unsigned char *addr)
5990 {
5991 	struct hclge_vport *vport = hclge_get_vport(handle);
5992 
5993 	return hclge_rm_uc_addr_common(vport, addr);
5994 }
5995 
5996 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
5997 			    const unsigned char *addr)
5998 {
5999 	struct hclge_dev *hdev = vport->back;
6000 	struct hclge_mac_vlan_tbl_entry_cmd req;
6001 	int ret;
6002 
6003 	/* mac addr check */
6004 	if (is_zero_ether_addr(addr) ||
6005 	    is_broadcast_ether_addr(addr) ||
6006 	    is_multicast_ether_addr(addr)) {
6007 		dev_dbg(&hdev->pdev->dev,
6008 			"Remove mac err! invalid mac:%pM.\n",
6009 			 addr);
6010 		return -EINVAL;
6011 	}
6012 
6013 	memset(&req, 0, sizeof(req));
6014 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6015 	hclge_prepare_mac_addr(&req, addr, false);
6016 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
6017 	if (!ret)
6018 		hclge_update_umv_space(vport, true);
6019 
6020 	return ret;
6021 }
6022 
6023 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6024 			     const unsigned char *addr)
6025 {
6026 	struct hclge_vport *vport = hclge_get_vport(handle);
6027 
6028 	return hclge_add_mc_addr_common(vport, addr);
6029 }
6030 
6031 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6032 			     const unsigned char *addr)
6033 {
6034 	struct hclge_dev *hdev = vport->back;
6035 	struct hclge_mac_vlan_tbl_entry_cmd req;
6036 	struct hclge_desc desc[3];
6037 	int status;
6038 
6039 	/* mac addr check */
6040 	if (!is_multicast_ether_addr(addr)) {
6041 		dev_err(&hdev->pdev->dev,
6042 			"Add mc mac err! invalid mac:%pM.\n",
6043 			 addr);
6044 		return -EINVAL;
6045 	}
6046 	memset(&req, 0, sizeof(req));
6047 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6048 	hclge_prepare_mac_addr(&req, addr, true);
6049 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6050 	if (!status) {
6051 		/* This mac addr exists, update the VFID for it */
6052 		hclge_update_desc_vfid(desc, vport->vport_id, false);
6053 		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6054 	} else {
6055 		/* This mac addr does not exist, add a new entry for it */
6056 		memset(desc[0].data, 0, sizeof(desc[0].data));
6057 		memset(desc[1].data, 0, sizeof(desc[0].data));
6058 		memset(desc[2].data, 0, sizeof(desc[0].data));
6059 		hclge_update_desc_vfid(desc, vport->vport_id, false);
6060 		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6061 	}
6062 
6063 	if (status == -ENOSPC)
6064 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6065 
6066 	return status;
6067 }
6068 
6069 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6070 			    const unsigned char *addr)
6071 {
6072 	struct hclge_vport *vport = hclge_get_vport(handle);
6073 
6074 	return hclge_rm_mc_addr_common(vport, addr);
6075 }
6076 
6077 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6078 			    const unsigned char *addr)
6079 {
6080 	struct hclge_dev *hdev = vport->back;
6081 	struct hclge_mac_vlan_tbl_entry_cmd req;
6082 	enum hclge_cmd_status status;
6083 	struct hclge_desc desc[3];
6084 
6085 	/* mac addr check */
6086 	if (!is_multicast_ether_addr(addr)) {
6087 		dev_dbg(&hdev->pdev->dev,
6088 			"Remove mc mac err! invalid mac:%pM.\n",
6089 			 addr);
6090 		return -EINVAL;
6091 	}
6092 
6093 	memset(&req, 0, sizeof(req));
6094 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6095 	hclge_prepare_mac_addr(&req, addr, true);
6096 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6097 	if (!status) {
6098 		/* This mac addr exists, remove this handle's VFID from it */
6099 		hclge_update_desc_vfid(desc, vport->vport_id, true);
6100 
6101 		if (hclge_is_all_function_id_zero(desc))
6102 			/* All the vfids are zero, so this entry needs to be deleted */
6103 			status = hclge_remove_mac_vlan_tbl(vport, &req);
6104 		else
6105 			/* Not all the vfids are zero, so just update the entry */
6106 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6107 
6108 	} else {
6109 		/* This mac address may be in the mta table, but it cannot
6110 		 * be deleted here because an mta entry represents an address
6111 		 * range rather than a specific address. The delete action on
6112 		 * all entries will take effect in update_mta_status, called
6113 		 * by hns3_nic_set_rx_mode.
6114 		 */
6115 		status = 0;
6116 	}
6117 
6118 	return status;
6119 }
6120 
6121 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6122 			       enum HCLGE_MAC_ADDR_TYPE mac_type)
6123 {
6124 	struct hclge_vport_mac_addr_cfg *mac_cfg;
6125 	struct list_head *list;
6126 
6127 	if (!vport->vport_id)
6128 		return;
6129 
6130 	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6131 	if (!mac_cfg)
6132 		return;
6133 
6134 	mac_cfg->hd_tbl_status = true;
6135 	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6136 
6137 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6138 	       &vport->uc_mac_list : &vport->mc_mac_list;
6139 
6140 	list_add_tail(&mac_cfg->node, list);
6141 }
6142 
6143 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6144 			      bool is_write_tbl,
6145 			      enum HCLGE_MAC_ADDR_TYPE mac_type)
6146 {
6147 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6148 	struct list_head *list;
6149 	bool uc_flag, mc_flag;
6150 
6151 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6152 	       &vport->uc_mac_list : &vport->mc_mac_list;
6153 
6154 	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6155 	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6156 
6157 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6158 		if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6159 			if (uc_flag && mac_cfg->hd_tbl_status)
6160 				hclge_rm_uc_addr_common(vport, mac_addr);
6161 
6162 			if (mc_flag && mac_cfg->hd_tbl_status)
6163 				hclge_rm_mc_addr_common(vport, mac_addr);
6164 
6165 			list_del(&mac_cfg->node);
6166 			kfree(mac_cfg);
6167 			break;
6168 		}
6169 	}
6170 }
6171 
6172 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6173 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
6174 {
6175 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6176 	struct list_head *list;
6177 
6178 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6179 	       &vport->uc_mac_list : &vport->mc_mac_list;
6180 
6181 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6182 		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6183 			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6184 
6185 		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6186 			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6187 
6188 		mac_cfg->hd_tbl_status = false;
6189 		if (is_del_list) {
6190 			list_del(&mac_cfg->node);
6191 			kfree(mac_cfg);
6192 		}
6193 	}
6194 }
6195 
6196 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6197 {
6198 	struct hclge_vport_mac_addr_cfg *mac, *tmp;
6199 	struct hclge_vport *vport;
6200 	int i;
6201 
6202 	mutex_lock(&hdev->vport_cfg_mutex);
6203 	for (i = 0; i < hdev->num_alloc_vport; i++) {
6204 		vport = &hdev->vport[i];
6205 		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6206 			list_del(&mac->node);
6207 			kfree(mac);
6208 		}
6209 
6210 		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6211 			list_del(&mac->node);
6212 			kfree(mac);
6213 		}
6214 	}
6215 	mutex_unlock(&hdev->vport_cfg_mutex);
6216 }
6217 
6218 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6219 					      u16 cmdq_resp, u8 resp_code)
6220 {
6221 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
6222 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
6223 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
6224 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
6225 
6226 	int return_status;
6227 
6228 	if (cmdq_resp) {
6229 		dev_err(&hdev->pdev->dev,
6230 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6231 			cmdq_resp);
6232 		return -EIO;
6233 	}
6234 
6235 	switch (resp_code) {
6236 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
6237 	case HCLGE_ETHERTYPE_ALREADY_ADD:
6238 		return_status = 0;
6239 		break;
6240 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6241 		dev_err(&hdev->pdev->dev,
6242 			"add mac ethertype failed for manager table overflow.\n");
6243 		return_status = -EIO;
6244 		break;
6245 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
6246 		dev_err(&hdev->pdev->dev,
6247 			"add mac ethertype failed for key conflict.\n");
6248 		return_status = -EIO;
6249 		break;
6250 	default:
6251 		dev_err(&hdev->pdev->dev,
6252 			"add mac ethertype failed for undefined, code=%d.\n",
6253 			resp_code);
6254 		return_status = -EIO;
6255 	}
6256 
6257 	return return_status;
6258 }
6259 
6260 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6261 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
6262 {
6263 	struct hclge_desc desc;
6264 	u8 resp_code;
6265 	u16 retval;
6266 	int ret;
6267 
6268 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6269 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6270 
6271 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6272 	if (ret) {
6273 		dev_err(&hdev->pdev->dev,
6274 			"add mac ethertype failed for cmd_send, ret =%d.\n",
6275 			ret);
6276 		return ret;
6277 	}
6278 
6279 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6280 	retval = le16_to_cpu(desc.retval);
6281 
6282 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6283 }
6284 
6285 static int init_mgr_tbl(struct hclge_dev *hdev)
6286 {
6287 	int ret;
6288 	int i;
6289 
6290 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6291 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6292 		if (ret) {
6293 			dev_err(&hdev->pdev->dev,
6294 				"add mac ethertype failed, ret =%d.\n",
6295 				ret);
6296 			return ret;
6297 		}
6298 	}
6299 
6300 	return 0;
6301 }
6302 
6303 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6304 {
6305 	struct hclge_vport *vport = hclge_get_vport(handle);
6306 	struct hclge_dev *hdev = vport->back;
6307 
6308 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
6309 }
6310 
6311 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6312 			      bool is_first)
6313 {
6314 	const unsigned char *new_addr = (const unsigned char *)p;
6315 	struct hclge_vport *vport = hclge_get_vport(handle);
6316 	struct hclge_dev *hdev = vport->back;
6317 	int ret;
6318 
6319 	/* mac addr check */
6320 	if (is_zero_ether_addr(new_addr) ||
6321 	    is_broadcast_ether_addr(new_addr) ||
6322 	    is_multicast_ether_addr(new_addr)) {
6323 		dev_err(&hdev->pdev->dev,
6324 			"Change uc mac err! invalid mac:%pM.\n",
6325 			 new_addr);
6326 		return -EINVAL;
6327 	}
6328 
6329 	if ((!is_first || is_kdump_kernel()) &&
6330 	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6331 		dev_warn(&hdev->pdev->dev,
6332 			 "remove old uc mac address fail.\n");
6333 
6334 	ret = hclge_add_uc_addr(handle, new_addr);
6335 	if (ret) {
6336 		dev_err(&hdev->pdev->dev,
6337 			"add uc mac address fail, ret =%d.\n",
6338 			ret);
6339 
6340 		if (!is_first &&
6341 		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6342 			dev_err(&hdev->pdev->dev,
6343 				"restore uc mac address fail.\n");
6344 
6345 		return -EIO;
6346 	}
6347 
6348 	ret = hclge_pause_addr_cfg(hdev, new_addr);
6349 	if (ret) {
6350 		dev_err(&hdev->pdev->dev,
6351 			"configure mac pause address fail, ret =%d.\n",
6352 			ret);
6353 		return -EIO;
6354 	}
6355 
6356 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6357 
6358 	return 0;
6359 }
6360 
6361 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6362 			  int cmd)
6363 {
6364 	struct hclge_vport *vport = hclge_get_vport(handle);
6365 	struct hclge_dev *hdev = vport->back;
6366 
6367 	if (!hdev->hw.mac.phydev)
6368 		return -EOPNOTSUPP;
6369 
6370 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6371 }
6372 
6373 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6374 				      u8 fe_type, bool filter_en, u8 vf_id)
6375 {
6376 	struct hclge_vlan_filter_ctrl_cmd *req;
6377 	struct hclge_desc desc;
6378 	int ret;
6379 
6380 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6381 
6382 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6383 	req->vlan_type = vlan_type;
6384 	req->vlan_fe = filter_en ? fe_type : 0;
6385 	req->vf_id = vf_id;
6386 
6387 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6388 	if (ret)
6389 		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6390 			ret);
6391 
6392 	return ret;
6393 }
6394 
6395 #define HCLGE_FILTER_TYPE_VF		0
6396 #define HCLGE_FILTER_TYPE_PORT		1
6397 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
6398 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
6399 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
6400 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
6401 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
6402 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
6403 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
6404 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
6405 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
6406 
6407 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6408 {
6409 	struct hclge_vport *vport = hclge_get_vport(handle);
6410 	struct hclge_dev *hdev = vport->back;
6411 
6412 	if (hdev->pdev->revision >= 0x21) {
6413 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6414 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
6415 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6416 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
6417 	} else {
6418 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6419 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
6420 					   0);
6421 	}
6422 	if (enable)
6423 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
6424 	else
6425 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
6426 }
6427 
6428 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6429 				    bool is_kill, u16 vlan, u8 qos,
6430 				    __be16 proto)
6431 {
6432 #define HCLGE_MAX_VF_BYTES  16
6433 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
6434 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
6435 	struct hclge_desc desc[2];
6436 	u8 vf_byte_val;
6437 	u8 vf_byte_off;
6438 	int ret;
6439 
6440 	hclge_cmd_setup_basic_desc(&desc[0],
6441 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6442 	hclge_cmd_setup_basic_desc(&desc[1],
6443 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6444 
6445 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6446 
6447 	vf_byte_off = vfid / 8;
6448 	vf_byte_val = 1 << (vfid % 8);
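	/* Each VF is one bit in a bitmap split across two descriptors,
	 * e.g. vfid 10 maps to byte 1, bit 2 (value 0x04).
	 */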
6449 
6450 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6451 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6452 
6453 	req0->vlan_id  = cpu_to_le16(vlan);
6454 	req0->vlan_cfg = is_kill;
6455 
6456 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6457 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6458 	else
6459 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6460 
6461 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
6462 	if (ret) {
6463 		dev_err(&hdev->pdev->dev,
6464 			"Send vf vlan command fail, ret =%d.\n",
6465 			ret);
6466 		return ret;
6467 	}
6468 
6469 	if (!is_kill) {
6470 #define HCLGE_VF_VLAN_NO_ENTRY	2
6471 		if (!req0->resp_code || req0->resp_code == 1)
6472 			return 0;
6473 
6474 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6475 			dev_warn(&hdev->pdev->dev,
6476 				 "vf vlan table is full, vf vlan filter is disabled\n");
6477 			return 0;
6478 		}
6479 
6480 		dev_err(&hdev->pdev->dev,
6481 			"Add vf vlan filter fail, ret =%d.\n",
6482 			req0->resp_code);
6483 	} else {
6484 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
6485 		if (!req0->resp_code)
6486 			return 0;
6487 
6488 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6489 			dev_warn(&hdev->pdev->dev,
6490 				 "vlan %d filter is not in vf vlan table\n",
6491 				 vlan);
6492 			return 0;
6493 		}
6494 
6495 		dev_err(&hdev->pdev->dev,
6496 			"Kill vf vlan filter fail, ret =%d.\n",
6497 			req0->resp_code);
6498 	}
6499 
6500 	return -EIO;
6501 }
6502 
6503 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6504 				      u16 vlan_id, bool is_kill)
6505 {
6506 	struct hclge_vlan_filter_pf_cfg_cmd *req;
6507 	struct hclge_desc desc;
6508 	u8 vlan_offset_byte_val;
6509 	u8 vlan_offset_byte;
6510 	u8 vlan_offset_160;
6511 	int ret;
6512 
6513 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6514 
6515 	vlan_offset_160 = vlan_id / 160;
6516 	vlan_offset_byte = (vlan_id % 160) / 8;
6517 	vlan_offset_byte_val = 1 << (vlan_id % 8);
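	/* The port VLAN filter is a bitmap split into blocks of 160 VLANs,
	 * e.g. vlan_id 1000 maps to offset block 6, byte 5, bit 0.
	 */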
6518 
6519 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6520 	req->vlan_offset = vlan_offset_160;
6521 	req->vlan_cfg = is_kill;
6522 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6523 
6524 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6525 	if (ret)
6526 		dev_err(&hdev->pdev->dev,
6527 			"port vlan command, send fail, ret =%d.\n", ret);
6528 	return ret;
6529 }
6530 
6531 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6532 				    u16 vport_id, u16 vlan_id, u8 qos,
6533 				    bool is_kill)
6534 {
6535 	u16 vport_idx, vport_num = 0;
6536 	int ret;
6537 
6538 	if (is_kill && !vlan_id)
6539 		return 0;
6540 
6541 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6542 				       0, proto);
6543 	if (ret) {
6544 		dev_err(&hdev->pdev->dev,
6545 			"Set %d vport vlan filter config fail, ret =%d.\n",
6546 			vport_id, ret);
6547 		return ret;
6548 	}
6549 
6550 	/* vlan 0 may be added twice when the 8021q module is enabled */
6551 	if (!is_kill && !vlan_id &&
6552 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
6553 		return 0;
6554 
6555 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6556 		dev_err(&hdev->pdev->dev,
6557 			"Add port vlan failed, vport %d is already in vlan %d\n",
6558 			vport_id, vlan_id);
6559 		return -EINVAL;
6560 	}
6561 
6562 	if (is_kill &&
6563 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6564 		dev_err(&hdev->pdev->dev,
6565 			"Delete port vlan failed, vport %d is not in vlan %d\n",
6566 			vport_id, vlan_id);
6567 		return -EINVAL;
6568 	}
6569 
6570 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
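	/* The port level filter entry is shared by all vports, so only
	 * touch it when the first vport joins the VLAN or the last one
	 * leaves it.
	 */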
6571 		vport_num++;
6572 
6573 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6574 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6575 						 is_kill);
6576 
6577 	return ret;
6578 }
6579 
6580 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6581 {
6582 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6583 	struct hclge_vport_vtag_tx_cfg_cmd *req;
6584 	struct hclge_dev *hdev = vport->back;
6585 	struct hclge_desc desc;
6586 	int status;
6587 
6588 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6589 
6590 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6591 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6592 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6593 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6594 		      vcfg->accept_tag1 ? 1 : 0);
6595 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6596 		      vcfg->accept_untag1 ? 1 : 0);
6597 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6598 		      vcfg->accept_tag2 ? 1 : 0);
6599 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6600 		      vcfg->accept_untag2 ? 1 : 0);
6601 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6602 		      vcfg->insert_tag1_en ? 1 : 0);
6603 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6604 		      vcfg->insert_tag2_en ? 1 : 0);
6605 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6606 
6607 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6608 	req->vf_bitmap[req->vf_offset] =
6609 		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6610 
6611 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6612 	if (status)
6613 		dev_err(&hdev->pdev->dev,
6614 			"Send port txvlan cfg command fail, ret =%d\n",
6615 			status);
6616 
6617 	return status;
6618 }
6619 
6620 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6621 {
6622 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6623 	struct hclge_vport_vtag_rx_cfg_cmd *req;
6624 	struct hclge_dev *hdev = vport->back;
6625 	struct hclge_desc desc;
6626 	int status;
6627 
6628 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6629 
6630 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6631 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6632 		      vcfg->strip_tag1_en ? 1 : 0);
6633 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6634 		      vcfg->strip_tag2_en ? 1 : 0);
6635 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6636 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
6637 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6638 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
6639 
6640 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6641 	req->vf_bitmap[req->vf_offset] =
6642 		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6643 
6644 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6645 	if (status)
6646 		dev_err(&hdev->pdev->dev,
6647 			"Send port rxvlan cfg command fail, ret =%d\n",
6648 			status);
6649 
6650 	return status;
6651 }
6652 
6653 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
6654 				  u16 port_base_vlan_state,
6655 				  u16 vlan_tag)
6656 {
6657 	int ret;
6658 
6659 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6660 		vport->txvlan_cfg.accept_tag1 = true;
6661 		vport->txvlan_cfg.insert_tag1_en = false;
6662 		vport->txvlan_cfg.default_tag1 = 0;
6663 	} else {
6664 		vport->txvlan_cfg.accept_tag1 = false;
6665 		vport->txvlan_cfg.insert_tag1_en = true;
6666 		vport->txvlan_cfg.default_tag1 = vlan_tag;
6667 	}
6668 
6669 	vport->txvlan_cfg.accept_untag1 = true;
6670 
6671 	/* accept_tag2 and accept_untag2 are not supported on
6672 	 * pdev revision 0x20; newer revisions support them, but
6673 	 * these two fields cannot be configured by the user.
6674 	 */
6675 	vport->txvlan_cfg.accept_tag2 = true;
6676 	vport->txvlan_cfg.accept_untag2 = true;
6677 	vport->txvlan_cfg.insert_tag2_en = false;
6678 	vport->txvlan_cfg.default_tag2 = 0;
6679 
6680 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6681 		vport->rxvlan_cfg.strip_tag1_en = false;
6682 		vport->rxvlan_cfg.strip_tag2_en =
6683 				vport->rxvlan_cfg.rx_vlan_offload_en;
6684 	} else {
6685 		vport->rxvlan_cfg.strip_tag1_en =
6686 				vport->rxvlan_cfg.rx_vlan_offload_en;
6687 		vport->rxvlan_cfg.strip_tag2_en = true;
6688 	}
6689 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6690 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6691 
6692 	ret = hclge_set_vlan_tx_offload_cfg(vport);
6693 	if (ret)
6694 		return ret;
6695 
6696 	return hclge_set_vlan_rx_offload_cfg(vport);
6697 }
6698 
6699 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6700 {
6701 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6702 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6703 	struct hclge_desc desc;
6704 	int status;
6705 
6706 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6707 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6708 	rx_req->ot_fst_vlan_type =
6709 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6710 	rx_req->ot_sec_vlan_type =
6711 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6712 	rx_req->in_fst_vlan_type =
6713 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6714 	rx_req->in_sec_vlan_type =
6715 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6716 
6717 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6718 	if (status) {
6719 		dev_err(&hdev->pdev->dev,
6720 			"Send rxvlan protocol type command fail, ret =%d\n",
6721 			status);
6722 		return status;
6723 	}
6724 
6725 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6726 
6727 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
6728 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6729 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6730 
6731 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6732 	if (status)
6733 		dev_err(&hdev->pdev->dev,
6734 			"Send txvlan protocol type command fail, ret =%d\n",
6735 			status);
6736 
6737 	return status;
6738 }
6739 
6740 static int hclge_init_vlan_config(struct hclge_dev *hdev)
6741 {
6742 #define HCLGE_DEF_VLAN_TYPE		0x8100
6743 
6744 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6745 	struct hclge_vport *vport;
6746 	int ret;
6747 	int i;
6748 
6749 	if (hdev->pdev->revision >= 0x21) {
6750 		/* for revision 0x21, vf vlan filter is per function */
6751 		for (i = 0; i < hdev->num_alloc_vport; i++) {
6752 			vport = &hdev->vport[i];
6753 			ret = hclge_set_vlan_filter_ctrl(hdev,
6754 							 HCLGE_FILTER_TYPE_VF,
6755 							 HCLGE_FILTER_FE_EGRESS,
6756 							 true,
6757 							 vport->vport_id);
6758 			if (ret)
6759 				return ret;
6760 		}
6761 
6762 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6763 						 HCLGE_FILTER_FE_INGRESS, true,
6764 						 0);
6765 		if (ret)
6766 			return ret;
6767 	} else {
6768 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6769 						 HCLGE_FILTER_FE_EGRESS_V1_B,
6770 						 true, 0);
6771 		if (ret)
6772 			return ret;
6773 	}
6774 
6775 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
6776 
6777 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6778 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6779 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6780 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6781 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
6782 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
6783 
6784 	ret = hclge_set_vlan_protocol_type(hdev);
6785 	if (ret)
6786 		return ret;
6787 
6788 	for (i = 0; i < hdev->num_alloc_vport; i++) {
6789 		u16 vlan_tag;
6790 
6791 		vport = &hdev->vport[i];
6792 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
6793 
6794 		ret = hclge_vlan_offload_cfg(vport,
6795 					     vport->port_base_vlan_cfg.state,
6796 					     vlan_tag);
6797 		if (ret)
6798 			return ret;
6799 	}
6800 
6801 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
6802 }
6803 
6804 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
6805 				       bool writen_to_tbl)
6806 {
6807 	struct hclge_vport_vlan_cfg *vlan;
6808 
6809 	/* vlan 0 is reserved */
6810 	if (!vlan_id)
6811 		return;
6812 
6813 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
6814 	if (!vlan)
6815 		return;
6816 
6817 	vlan->hd_tbl_status = writen_to_tbl;
6818 	vlan->vlan_id = vlan_id;
6819 
6820 	list_add_tail(&vlan->node, &vport->vlan_list);
6821 }
6822 
6823 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
6824 {
6825 	struct hclge_vport_vlan_cfg *vlan, *tmp;
6826 	struct hclge_dev *hdev = vport->back;
6827 	int ret;
6828 
6829 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6830 		if (!vlan->hd_tbl_status) {
6831 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
6832 						       vport->vport_id,
6833 						       vlan->vlan_id, 0, false);
6834 			if (ret) {
6835 				dev_err(&hdev->pdev->dev,
6836 					"restore vport vlan list failed, ret=%d\n",
6837 					ret);
6838 				return ret;
6839 			}
6840 		}
6841 		vlan->hd_tbl_status = true;
6842 	}
6843 
6844 	return 0;
6845 }
6846 
6847 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
6848 				      bool is_write_tbl)
6849 {
6850 	struct hclge_vport_vlan_cfg *vlan, *tmp;
6851 	struct hclge_dev *hdev = vport->back;
6852 
6853 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6854 		if (vlan->vlan_id == vlan_id) {
6855 			if (is_write_tbl && vlan->hd_tbl_status)
6856 				hclge_set_vlan_filter_hw(hdev,
6857 							 htons(ETH_P_8021Q),
6858 							 vport->vport_id,
6859 							 vlan_id, 0,
6860 							 true);
6861 
6862 			list_del(&vlan->node);
6863 			kfree(vlan);
6864 			break;
6865 		}
6866 	}
6867 }
6868 
6869 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
6870 {
6871 	struct hclge_vport_vlan_cfg *vlan, *tmp;
6872 	struct hclge_dev *hdev = vport->back;
6873 
6874 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6875 		if (vlan->hd_tbl_status)
6876 			hclge_set_vlan_filter_hw(hdev,
6877 						 htons(ETH_P_8021Q),
6878 						 vport->vport_id,
6879 						 vlan->vlan_id, 0,
6880 						 true);
6881 
6882 		vlan->hd_tbl_status = false;
6883 		if (is_del_list) {
6884 			list_del(&vlan->node);
6885 			kfree(vlan);
6886 		}
6887 	}
6888 }
6889 
6890 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
6891 {
6892 	struct hclge_vport_vlan_cfg *vlan, *tmp;
6893 	struct hclge_vport *vport;
6894 	int i;
6895 
6896 	mutex_lock(&hdev->vport_cfg_mutex);
6897 	for (i = 0; i < hdev->num_alloc_vport; i++) {
6898 		vport = &hdev->vport[i];
6899 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6900 			list_del(&vlan->node);
6901 			kfree(vlan);
6902 		}
6903 	}
6904 	mutex_unlock(&hdev->vport_cfg_mutex);
6905 }
6906 
6907 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
6908 {
6909 	struct hclge_vport *vport = hclge_get_vport(handle);
6910 
6911 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6912 		vport->rxvlan_cfg.strip_tag1_en = false;
6913 		vport->rxvlan_cfg.strip_tag2_en = enable;
6914 	} else {
6915 		vport->rxvlan_cfg.strip_tag1_en = enable;
6916 		vport->rxvlan_cfg.strip_tag2_en = true;
6917 	}
6918 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6919 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6920 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
6921 
6922 	return hclge_set_vlan_rx_offload_cfg(vport);
6923 }
6924 
6925 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
6926 					    u16 port_base_vlan_state,
6927 					    struct hclge_vlan_info *new_info,
6928 					    struct hclge_vlan_info *old_info)
6929 {
6930 	struct hclge_dev *hdev = vport->back;
6931 	int ret;
6932 
6933 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
6934 		hclge_rm_vport_all_vlan_table(vport, false);
6935 		return hclge_set_vlan_filter_hw(hdev,
6936 						 htons(new_info->vlan_proto),
6937 						 vport->vport_id,
6938 						 new_info->vlan_tag,
6939 						 new_info->qos, false);
6940 	}
6941 
6942 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
6943 				       vport->vport_id, old_info->vlan_tag,
6944 				       old_info->qos, true);
6945 	if (ret)
6946 		return ret;
6947 
6948 	return hclge_add_vport_all_vlan_table(vport);
6949 }
6950 
6951 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
6952 				    struct hclge_vlan_info *vlan_info)
6953 {
6954 	struct hnae3_handle *nic = &vport->nic;
6955 	struct hclge_vlan_info *old_vlan_info;
6956 	struct hclge_dev *hdev = vport->back;
6957 	int ret;
6958 
6959 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
6960 
6961 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
6962 	if (ret)
6963 		return ret;
6964 
6965 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
6966 		/* add new VLAN tag */
6967 		ret = hclge_set_vlan_filter_hw(hdev,
6968 					       htons(vlan_info->vlan_proto),
6969 					       vport->vport_id,
6970 					       vlan_info->vlan_tag,
6971 					       vlan_info->qos, false);
6972 		if (ret)
6973 			return ret;
6974 
6975 		/* remove old VLAN tag */
6976 		ret = hclge_set_vlan_filter_hw(hdev,
6977 					       htons(old_vlan_info->vlan_proto),
6978 					       vport->vport_id,
6979 					       old_vlan_info->vlan_tag,
6980 					       old_vlan_info->qos, true);
6981 		if (ret)
6982 			return ret;
6983 
6984 		goto update;
6985 	}
6986 
6987 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
6988 					       old_vlan_info);
6989 	if (ret)
6990 		return ret;
6991 
6992 	/* update state only when disable/enable port based VLAN */
6993 	vport->port_base_vlan_cfg.state = state;
6994 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
6995 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
6996 	else
6997 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
6998 
6999 update:
7000 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7001 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7002 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7003 
7004 	return 0;
7005 }
7006 
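/* Map the requested VLAN against the current port based VLAN state:
 * keep, enable, disable or modify the port based VLAN accordingly.
 */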
7007 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7008 					  enum hnae3_port_base_vlan_state state,
7009 					  u16 vlan)
7010 {
7011 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7012 		if (!vlan)
7013 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7014 		else
7015 			return HNAE3_PORT_BASE_VLAN_ENABLE;
7016 	} else {
7017 		if (!vlan)
7018 			return HNAE3_PORT_BASE_VLAN_DISABLE;
7019 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7020 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7021 		else
7022 			return HNAE3_PORT_BASE_VLAN_MODIFY;
7023 	}
7024 }
7025 
7026 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7027 				    u16 vlan, u8 qos, __be16 proto)
7028 {
7029 	struct hclge_vport *vport = hclge_get_vport(handle);
7030 	struct hclge_dev *hdev = vport->back;
7031 	struct hclge_vlan_info vlan_info;
7032 	u16 state;
7033 	int ret;
7034 
7035 	if (hdev->pdev->revision == 0x20)
7036 		return -EOPNOTSUPP;
7037 
7038 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
7039 	if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7040 		return -EINVAL;
7041 	if (proto != htons(ETH_P_8021Q))
7042 		return -EPROTONOSUPPORT;
7043 
7044 	vport = &hdev->vport[vfid];
7045 	state = hclge_get_port_base_vlan_state(vport,
7046 					       vport->port_base_vlan_cfg.state,
7047 					       vlan);
7048 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7049 		return 0;
7050 
7051 	vlan_info.vlan_tag = vlan;
7052 	vlan_info.qos = qos;
7053 	vlan_info.vlan_proto = ntohs(proto);
7054 
7055 	/* update port based VLAN for PF */
7056 	if (!vfid) {
7057 		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7058 		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7059 		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7060 
7061 		return ret;
7062 	}
7063 
7064 	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7065 		return hclge_update_port_base_vlan_cfg(vport, state,
7066 						       &vlan_info);
7067 	} else {
7068 		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7069 							(u8)vfid, state,
7070 							vlan, qos,
7071 							ntohs(proto));
7072 		return ret;
7073 	}
7074 }
7075 
7076 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7077 			  u16 vlan_id, bool is_kill)
7078 {
7079 	struct hclge_vport *vport = hclge_get_vport(handle);
7080 	struct hclge_dev *hdev = vport->back;
7081 	bool writen_to_tbl = false;
7082 	int ret = 0;
7083 
7084 	/* When port based VLAN is enabled, we use the port based VLAN as
7085 	 * the VLAN filter entry. In this case, we don't update the VLAN
7086 	 * filter table when the user adds or removes a VLAN, we just
7087 	 * update the vport VLAN list. The VLAN ids in that list won't be
7088 	 * written to the VLAN filter table until port based VLAN is disabled.
7089 	 */
7090 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7091 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7092 					       vlan_id, 0, is_kill);
7093 		writen_to_tbl = true;
7094 	}
7095 
7096 	if (ret)
7097 		return ret;
7098 
7099 	if (is_kill)
7100 		hclge_rm_vport_vlan_table(vport, vlan_id, false);
7101 	else
7102 		hclge_add_vport_vlan_table(vport, vlan_id,
7103 					   writen_to_tbl);
7104 
7105 	return 0;
7106 }
7107 
7108 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7109 {
7110 	struct hclge_config_max_frm_size_cmd *req;
7111 	struct hclge_desc desc;
7112 
7113 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7114 
7115 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7116 	req->max_frm_size = cpu_to_le16(new_mps);
7117 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7118 
7119 	return hclge_cmd_send(&hdev->hw, &desc, 1);
7120 }
7121 
7122 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7123 {
7124 	struct hclge_vport *vport = hclge_get_vport(handle);
7125 
7126 	return hclge_set_vport_mtu(vport, new_mtu);
7127 }
7128 
7129 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7130 {
7131 	struct hclge_dev *hdev = vport->back;
7132 	int i, max_frm_size, ret = 0;
7133 
7134 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
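	/* e.g. an MTU of 1500 gives a max frame size of
	 * 1500 + 14 + 4 + 8 = 1526 bytes.
	 */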
7135 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7136 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
7137 		return -EINVAL;
7138 
7139 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7140 	mutex_lock(&hdev->vport_lock);
7141 	/* VF's mps must fit within hdev->mps */
7142 	if (vport->vport_id && max_frm_size > hdev->mps) {
7143 		mutex_unlock(&hdev->vport_lock);
7144 		return -EINVAL;
7145 	} else if (vport->vport_id) {
7146 		vport->mps = max_frm_size;
7147 		mutex_unlock(&hdev->vport_lock);
7148 		return 0;
7149 	}
7150 
7151 	/* PF's mps must not be less than any VF's mps */
7152 	for (i = 1; i < hdev->num_alloc_vport; i++)
7153 		if (max_frm_size < hdev->vport[i].mps) {
7154 			mutex_unlock(&hdev->vport_lock);
7155 			return -EINVAL;
7156 		}
7157 
7158 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7159 
7160 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
7161 	if (ret) {
7162 		dev_err(&hdev->pdev->dev,
7163 			"Change mtu fail, ret =%d\n", ret);
7164 		goto out;
7165 	}
7166 
7167 	hdev->mps = max_frm_size;
7168 	vport->mps = max_frm_size;
7169 
7170 	ret = hclge_buffer_alloc(hdev);
7171 	if (ret)
7172 		dev_err(&hdev->pdev->dev,
7173 			"Allocate buffer fail, ret =%d\n", ret);
7174 
7175 out:
7176 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7177 	mutex_unlock(&hdev->vport_lock);
7178 	return ret;
7179 }
7180 
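/* Ask the firmware to assert (enable == true) or de-assert the reset of a
 * single TQP, identified by its global queue id.
 */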
7181 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7182 				    bool enable)
7183 {
7184 	struct hclge_reset_tqp_queue_cmd *req;
7185 	struct hclge_desc desc;
7186 	int ret;
7187 
7188 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7189 
7190 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7191 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7192 	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7193 
7194 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7195 	if (ret) {
7196 		dev_err(&hdev->pdev->dev,
7197 			"Send tqp reset cmd error, status =%d\n", ret);
7198 		return ret;
7199 	}
7200 
7201 	return 0;
7202 }
7203 
7204 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7205 {
7206 	struct hclge_reset_tqp_queue_cmd *req;
7207 	struct hclge_desc desc;
7208 	int ret;
7209 
7210 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7211 
7212 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7213 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7214 
7215 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7216 	if (ret) {
7217 		dev_err(&hdev->pdev->dev,
7218 			"Get reset status error, status =%d\n", ret);
7219 		return ret;
7220 	}
7221 
7222 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7223 }
7224 
7225 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7226 {
7227 	struct hnae3_queue *queue;
7228 	struct hclge_tqp *tqp;
7229 
7230 	queue = handle->kinfo.tqp[queue_id];
7231 	tqp = container_of(queue, struct hclge_tqp, q);
7232 
7233 	return tqp->index;
7234 }
7235 
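/* Reset a single TQP on behalf of the PF: disable the queue, ask the
 * firmware to assert the queue reset, poll the reset status (up to
 * HCLGE_TQP_RESET_TRY_TIMES polls, 20 ms apart) and finally de-assert the
 * reset again.
 */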
7236 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7237 {
7238 	struct hclge_vport *vport = hclge_get_vport(handle);
7239 	struct hclge_dev *hdev = vport->back;
7240 	int reset_try_times = 0;
7241 	int reset_status;
7242 	u16 queue_gid;
7243 	int ret = 0;
7244 
7245 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7246 
7247 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7248 	if (ret) {
7249 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7250 		return ret;
7251 	}
7252 
7253 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7254 	if (ret) {
7255 		dev_err(&hdev->pdev->dev,
7256 			"Send reset tqp cmd fail, ret = %d\n", ret);
7257 		return ret;
7258 	}
7259 
7260 	reset_try_times = 0;
7261 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7262 		/* Wait for tqp hw reset */
7263 		msleep(20);
7264 		reset_status = hclge_get_reset_status(hdev, queue_gid);
7265 		if (reset_status)
7266 			break;
7267 	}
7268 
7269 	if (!reset_status) {
7270 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7271 		return -ETIME;
7272 	}
7273 
7274 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7275 	if (ret)
7276 		dev_err(&hdev->pdev->dev,
7277 			"Deassert the soft reset fail, ret = %d\n", ret);
7278 
7279 	return ret;
7280 }
7281 
7282 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7283 {
7284 	struct hclge_dev *hdev = vport->back;
7285 	int reset_try_times = 0;
7286 	int reset_status;
7287 	u16 queue_gid;
7288 	int ret;
7289 
7290 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7291 
7292 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7293 	if (ret) {
7294 		dev_warn(&hdev->pdev->dev,
7295 			 "Send reset tqp cmd fail, ret = %d\n", ret);
7296 		return;
7297 	}
7298 
7299 	reset_try_times = 0;
7300 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7301 		/* Wait for tqp hw reset */
7302 		msleep(20);
7303 		reset_status = hclge_get_reset_status(hdev, queue_gid);
7304 		if (reset_status)
7305 			break;
7306 	}
7307 
7308 	if (!reset_status) {
7309 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7310 		return;
7311 	}
7312 
7313 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7314 	if (ret)
7315 		dev_warn(&hdev->pdev->dev,
7316 			 "Deassert the soft reset fail, ret = %d\n", ret);
7317 }
7318 
7319 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7320 {
7321 	struct hclge_vport *vport = hclge_get_vport(handle);
7322 	struct hclge_dev *hdev = vport->back;
7323 
7324 	return hdev->fw_version;
7325 }
7326 
7327 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7328 {
7329 	struct phy_device *phydev = hdev->hw.mac.phydev;
7330 
7331 	if (!phydev)
7332 		return;
7333 
7334 	phy_set_asym_pause(phydev, rx_en, tx_en);
7335 }
7336 
7337 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7338 {
7339 	int ret;
7340 
7341 	if (rx_en && tx_en)
7342 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
7343 	else if (rx_en && !tx_en)
7344 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7345 	else if (!rx_en && tx_en)
7346 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7347 	else
7348 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
7349 
7350 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7351 		return 0;
7352 
7353 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7354 	if (ret) {
7355 		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7356 			ret);
7357 		return ret;
7358 	}
7359 
7360 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7361 
7362 	return 0;
7363 }
7364 
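/* Resolve flow control for a PHY-attached port once the link is up with
 * autoneg enabled: combine the local and link-partner pause advertisements
 * with mii_resolve_flowctrl_fdx(), force pause off for half duplex, and
 * program the result into the MAC.
 */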
7365 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7366 {
7367 	struct phy_device *phydev = hdev->hw.mac.phydev;
7368 	u16 remote_advertising = 0;
7369 	u16 local_advertising = 0;
7370 	u32 rx_pause, tx_pause;
7371 	u8 flowctl;
7372 
7373 	if (!phydev->link || !phydev->autoneg)
7374 		return 0;
7375 
7376 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
7377 
7378 	if (phydev->pause)
7379 		remote_advertising = LPA_PAUSE_CAP;
7380 
7381 	if (phydev->asym_pause)
7382 		remote_advertising |= LPA_PAUSE_ASYM;
7383 
7384 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
7385 					   remote_advertising);
7386 	tx_pause = flowctl & FLOW_CTRL_TX;
7387 	rx_pause = flowctl & FLOW_CTRL_RX;
7388 
7389 	if (phydev->duplex == HCLGE_MAC_HALF) {
7390 		tx_pause = 0;
7391 		rx_pause = 0;
7392 	}
7393 
7394 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
7395 }
7396 
7397 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
7398 				 u32 *rx_en, u32 *tx_en)
7399 {
7400 	struct hclge_vport *vport = hclge_get_vport(handle);
7401 	struct hclge_dev *hdev = vport->back;
7402 
7403 	*auto_neg = hclge_get_autoneg(handle);
7404 
7405 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7406 		*rx_en = 0;
7407 		*tx_en = 0;
7408 		return;
7409 	}
7410 
7411 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
7412 		*rx_en = 1;
7413 		*tx_en = 0;
7414 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
7415 		*tx_en = 1;
7416 		*rx_en = 0;
7417 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
7418 		*rx_en = 1;
7419 		*tx_en = 1;
7420 	} else {
7421 		*rx_en = 0;
7422 		*tx_en = 0;
7423 	}
7424 }
7425 
7426 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
7427 				u32 rx_en, u32 tx_en)
7428 {
7429 	struct hclge_vport *vport = hclge_get_vport(handle);
7430 	struct hclge_dev *hdev = vport->back;
7431 	struct phy_device *phydev = hdev->hw.mac.phydev;
7432 	u32 fc_autoneg;
7433 
7434 	fc_autoneg = hclge_get_autoneg(handle);
7435 	if (auto_neg != fc_autoneg) {
7436 		dev_info(&hdev->pdev->dev,
7437 			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
7438 		return -EOPNOTSUPP;
7439 	}
7440 
7441 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7442 		dev_info(&hdev->pdev->dev,
7443 			 "Priority flow control enabled. Cannot set link flow control.\n");
7444 		return -EOPNOTSUPP;
7445 	}
7446 
7447 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
7448 
7449 	if (!fc_autoneg)
7450 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
7451 
7452 	/* Only support flow control negotiation for netdev with
7453 	 * phy attached for now.
7454 	 */
7455 	if (!phydev)
7456 		return -EOPNOTSUPP;
7457 
7458 	return phy_start_aneg(phydev);
7459 }
7460 
7461 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
7462 					  u8 *auto_neg, u32 *speed, u8 *duplex)
7463 {
7464 	struct hclge_vport *vport = hclge_get_vport(handle);
7465 	struct hclge_dev *hdev = vport->back;
7466 
7467 	if (speed)
7468 		*speed = hdev->hw.mac.speed;
7469 	if (duplex)
7470 		*duplex = hdev->hw.mac.duplex;
7471 	if (auto_neg)
7472 		*auto_neg = hdev->hw.mac.autoneg;
7473 }
7474 
7475 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
7476 {
7477 	struct hclge_vport *vport = hclge_get_vport(handle);
7478 	struct hclge_dev *hdev = vport->back;
7479 
7480 	if (media_type)
7481 		*media_type = hdev->hw.mac.media_type;
7482 }
7483 
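/* Report the MDI-X control and status for a copper port: switch the PHY to
 * the MDI-X register page, read the control and status registers, then
 * switch back to the copper page. Without an attached PHY both values are
 * reported as ETH_TP_MDI_INVALID.
 */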
7484 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
7485 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
7486 {
7487 	struct hclge_vport *vport = hclge_get_vport(handle);
7488 	struct hclge_dev *hdev = vport->back;
7489 	struct phy_device *phydev = hdev->hw.mac.phydev;
7490 	int mdix_ctrl, mdix, retval, is_resolved;
7491 
7492 	if (!phydev) {
7493 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7494 		*tp_mdix = ETH_TP_MDI_INVALID;
7495 		return;
7496 	}
7497 
7498 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
7499 
7500 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
7501 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
7502 				    HCLGE_PHY_MDIX_CTRL_S);
7503 
7504 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
7505 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
7506 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
7507 
7508 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
7509 
7510 	switch (mdix_ctrl) {
7511 	case 0x0:
7512 		*tp_mdix_ctrl = ETH_TP_MDI;
7513 		break;
7514 	case 0x1:
7515 		*tp_mdix_ctrl = ETH_TP_MDI_X;
7516 		break;
7517 	case 0x3:
7518 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
7519 		break;
7520 	default:
7521 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7522 		break;
7523 	}
7524 
7525 	if (!is_resolved)
7526 		*tp_mdix = ETH_TP_MDI_INVALID;
7527 	else if (mdix)
7528 		*tp_mdix = ETH_TP_MDI_X;
7529 	else
7530 		*tp_mdix = ETH_TP_MDI;
7531 }
7532 
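/* Bind a newly registered client (KNIC, UNIC or RoCE) to each vport and
 * initialize its instance. A RoCE client is only initialized once a NIC
 * client is present and the device supports RoCE; on failure the
 * corresponding client pointers are cleared again.
 */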
7533 static int hclge_init_client_instance(struct hnae3_client *client,
7534 				      struct hnae3_ae_dev *ae_dev)
7535 {
7536 	struct hclge_dev *hdev = ae_dev->priv;
7537 	struct hclge_vport *vport;
7538 	int i, ret;
7539 
7540 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7541 		vport = &hdev->vport[i];
7542 
7543 		switch (client->type) {
7544 		case HNAE3_CLIENT_KNIC:
7545 
7546 			hdev->nic_client = client;
7547 			vport->nic.client = client;
7548 			ret = client->ops->init_instance(&vport->nic);
7549 			if (ret)
7550 				goto clear_nic;
7551 
7552 			hnae3_set_client_init_flag(client, ae_dev, 1);
7553 
7554 			if (hdev->roce_client &&
7555 			    hnae3_dev_roce_supported(hdev)) {
7556 				struct hnae3_client *rc = hdev->roce_client;
7557 
7558 				ret = hclge_init_roce_base_info(vport);
7559 				if (ret)
7560 					goto clear_roce;
7561 
7562 				ret = rc->ops->init_instance(&vport->roce);
7563 				if (ret)
7564 					goto clear_roce;
7565 
7566 				hnae3_set_client_init_flag(hdev->roce_client,
7567 							   ae_dev, 1);
7568 			}
7569 
7570 			break;
7571 		case HNAE3_CLIENT_UNIC:
7572 			hdev->nic_client = client;
7573 			vport->nic.client = client;
7574 
7575 			ret = client->ops->init_instance(&vport->nic);
7576 			if (ret)
7577 				goto clear_nic;
7578 
7579 			hnae3_set_client_init_flag(client, ae_dev, 1);
7580 
7581 			break;
7582 		case HNAE3_CLIENT_ROCE:
7583 			if (hnae3_dev_roce_supported(hdev)) {
7584 				hdev->roce_client = client;
7585 				vport->roce.client = client;
7586 			}
7587 
7588 			if (hdev->roce_client && hdev->nic_client) {
7589 				ret = hclge_init_roce_base_info(vport);
7590 				if (ret)
7591 					goto clear_roce;
7592 
7593 				ret = client->ops->init_instance(&vport->roce);
7594 				if (ret)
7595 					goto clear_roce;
7596 
7597 				hnae3_set_client_init_flag(client, ae_dev, 1);
7598 			}
7599 
7600 			break;
7601 		default:
7602 			return -EINVAL;
7603 		}
7604 	}
7605 
7606 	return 0;
7607 
7608 clear_nic:
7609 	hdev->nic_client = NULL;
7610 	vport->nic.client = NULL;
7611 	return ret;
7612 clear_roce:
7613 	hdev->roce_client = NULL;
7614 	vport->roce.client = NULL;
7615 	return ret;
7616 }
7617 
7618 static void hclge_uninit_client_instance(struct hnae3_client *client,
7619 					 struct hnae3_ae_dev *ae_dev)
7620 {
7621 	struct hclge_dev *hdev = ae_dev->priv;
7622 	struct hclge_vport *vport;
7623 	int i;
7624 
7625 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7626 		vport = &hdev->vport[i];
7627 		if (hdev->roce_client) {
7628 			hdev->roce_client->ops->uninit_instance(&vport->roce,
7629 								0);
7630 			hdev->roce_client = NULL;
7631 			vport->roce.client = NULL;
7632 		}
7633 		if (client->type == HNAE3_CLIENT_ROCE)
7634 			return;
7635 		if (hdev->nic_client && client->ops->uninit_instance) {
7636 			client->ops->uninit_instance(&vport->nic, 0);
7637 			hdev->nic_client = NULL;
7638 			vport->nic.client = NULL;
7639 		}
7640 	}
7641 }
7642 
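/* PCI bring-up for the PF: enable the device, prefer a 64-bit DMA mask
 * with a 32-bit fallback, request the BARs, enable bus mastering, map
 * BAR 2 as the register space and record how many VFs the device can
 * expose.
 */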
7643 static int hclge_pci_init(struct hclge_dev *hdev)
7644 {
7645 	struct pci_dev *pdev = hdev->pdev;
7646 	struct hclge_hw *hw;
7647 	int ret;
7648 
7649 	ret = pci_enable_device(pdev);
7650 	if (ret) {
7651 		dev_err(&pdev->dev, "failed to enable PCI device\n");
7652 		return ret;
7653 	}
7654 
7655 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7656 	if (ret) {
7657 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7658 		if (ret) {
7659 			dev_err(&pdev->dev,
7660 				"can't set consistent PCI DMA\n");
7661 			goto err_disable_device;
7662 		}
7663 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
7664 	}
7665 
7666 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
7667 	if (ret) {
7668 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
7669 		goto err_disable_device;
7670 	}
7671 
7672 	pci_set_master(pdev);
7673 	hw = &hdev->hw;
7674 	hw->io_base = pcim_iomap(pdev, 2, 0);
7675 	if (!hw->io_base) {
7676 		dev_err(&pdev->dev, "Can't map configuration register space\n");
7677 		ret = -ENOMEM;
7678 		goto err_clr_master;
7679 	}
7680 
7681 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
7682 
7683 	return 0;
7684 err_clr_master:
7685 	pci_clear_master(pdev);
7686 	pci_release_regions(pdev);
7687 err_disable_device:
7688 	pci_disable_device(pdev);
7689 
7690 	return ret;
7691 }
7692 
7693 static void hclge_pci_uninit(struct hclge_dev *hdev)
7694 {
7695 	struct pci_dev *pdev = hdev->pdev;
7696 
7697 	pcim_iounmap(pdev, hdev->hw.io_base);
7698 	pci_free_irq_vectors(pdev);
7699 	pci_clear_master(pdev);
7700 	pci_release_mem_regions(pdev);
7701 	pci_disable_device(pdev);
7702 }
7703 
7704 static void hclge_state_init(struct hclge_dev *hdev)
7705 {
7706 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
7707 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7708 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
7709 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7710 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
7711 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
7712 }
7713 
7714 static void hclge_state_uninit(struct hclge_dev *hdev)
7715 {
7716 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7717 
7718 	if (hdev->service_timer.function)
7719 		del_timer_sync(&hdev->service_timer);
7720 	if (hdev->reset_timer.function)
7721 		del_timer_sync(&hdev->reset_timer);
7722 	if (hdev->service_task.func)
7723 		cancel_work_sync(&hdev->service_task);
7724 	if (hdev->rst_service_task.func)
7725 		cancel_work_sync(&hdev->rst_service_task);
7726 	if (hdev->mbx_service_task.func)
7727 		cancel_work_sync(&hdev->mbx_service_task);
7728 }
7729 
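/* Prepare for an FLR: request an HNAE3_FLR_RESET through the normal reset
 * path (hclge_reset_event()) and poll, HCLGE_FLR_WAIT_CNT times at
 * HCLGE_FLR_WAIT_MS intervals (about five seconds in total), for the reset
 * task to bring the device down before the FLR proceeds.
 */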
7730 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
7731 {
7732 #define HCLGE_FLR_WAIT_MS	100
7733 #define HCLGE_FLR_WAIT_CNT	50
7734 	struct hclge_dev *hdev = ae_dev->priv;
7735 	int cnt = 0;
7736 
7737 	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
7738 	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7739 	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
7740 	hclge_reset_event(hdev->pdev, NULL);
7741 
7742 	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
7743 	       cnt++ < HCLGE_FLR_WAIT_CNT)
7744 		msleep(HCLGE_FLR_WAIT_MS);
7745 
7746 	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
7747 		dev_err(&hdev->pdev->dev,
7748 			"flr wait down timeout: %d\n", cnt);
7749 }
7750 
7751 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
7752 {
7753 	struct hclge_dev *hdev = ae_dev->priv;
7754 
7755 	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7756 }
7757 
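/* Main probe path for the PF. Brings up, in order: PCI resources, the
 * firmware command queue, capability/configuration queries, MSI-X and the
 * misc vector, TQPs and vports, the (optional) MDIO bus, UMV space, MAC,
 * TSO/GRO, VLAN, scheduler (TM), RSS, manager and flow director tables and
 * hardware error interrupts, then arms the service/reset/mailbox tasks.
 * Errors unwind through the err_* labels in reverse order.
 */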
7758 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
7759 {
7760 	struct pci_dev *pdev = ae_dev->pdev;
7761 	struct hclge_dev *hdev;
7762 	int ret;
7763 
7764 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
7765 	if (!hdev) {
7766 		ret = -ENOMEM;
7767 		goto out;
7768 	}
7769 
7770 	hdev->pdev = pdev;
7771 	hdev->ae_dev = ae_dev;
7772 	hdev->reset_type = HNAE3_NONE_RESET;
7773 	hdev->reset_level = HNAE3_FUNC_RESET;
7774 	ae_dev->priv = hdev;
7775 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7776 
7777 	mutex_init(&hdev->vport_lock);
7778 	mutex_init(&hdev->vport_cfg_mutex);
7779 
7780 	ret = hclge_pci_init(hdev);
7781 	if (ret) {
7782 		dev_err(&pdev->dev, "PCI init failed\n");
7783 		goto out;
7784 	}
7785 
7786 	/* Initialize the firmware command queue */
7787 	ret = hclge_cmd_queue_init(hdev);
7788 	if (ret) {
7789 		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
7790 		goto err_pci_uninit;
7791 	}
7792 
7793 	/* Initialize the firmware command interface */
7794 	ret = hclge_cmd_init(hdev);
7795 	if (ret)
7796 		goto err_cmd_uninit;
7797 
7798 	ret = hclge_get_cap(hdev);
7799 	if (ret) {
7800 		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
7801 			ret);
7802 		goto err_cmd_uninit;
7803 	}
7804 
7805 	ret = hclge_configure(hdev);
7806 	if (ret) {
7807 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
7808 		goto err_cmd_uninit;
7809 	}
7810 
7811 	ret = hclge_init_msi(hdev);
7812 	if (ret) {
7813 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
7814 		goto err_cmd_uninit;
7815 	}
7816 
7817 	ret = hclge_misc_irq_init(hdev);
7818 	if (ret) {
7819 		dev_err(&pdev->dev,
7820 			"Misc IRQ(vector0) init error, ret = %d.\n",
7821 			ret);
7822 		goto err_msi_uninit;
7823 	}
7824 
7825 	ret = hclge_alloc_tqps(hdev);
7826 	if (ret) {
7827 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
7828 		goto err_msi_irq_uninit;
7829 	}
7830 
7831 	ret = hclge_alloc_vport(hdev);
7832 	if (ret) {
7833 		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
7834 		goto err_msi_irq_uninit;
7835 	}
7836 
7837 	ret = hclge_map_tqp(hdev);
7838 	if (ret) {
7839 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7840 		goto err_msi_irq_uninit;
7841 	}
7842 
7843 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
7844 		ret = hclge_mac_mdio_config(hdev);
7845 		if (ret) {
7846 			dev_err(&hdev->pdev->dev,
7847 				"mdio config fail ret=%d\n", ret);
7848 			goto err_msi_irq_uninit;
7849 		}
7850 	}
7851 
7852 	ret = hclge_init_umv_space(hdev);
7853 	if (ret) {
7854 		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
7855 		goto err_mdiobus_unreg;
7856 	}
7857 
7858 	ret = hclge_mac_init(hdev);
7859 	if (ret) {
7860 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7861 		goto err_mdiobus_unreg;
7862 	}
7863 
7864 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7865 	if (ret) {
7866 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7867 		goto err_mdiobus_unreg;
7868 	}
7869 
7870 	ret = hclge_config_gro(hdev, true);
7871 	if (ret)
7872 		goto err_mdiobus_unreg;
7873 
7874 	ret = hclge_init_vlan_config(hdev);
7875 	if (ret) {
7876 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7877 		goto err_mdiobus_unreg;
7878 	}
7879 
7880 	ret = hclge_tm_schd_init(hdev);
7881 	if (ret) {
7882 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
7883 		goto err_mdiobus_unreg;
7884 	}
7885 
7886 	hclge_rss_init_cfg(hdev);
7887 	ret = hclge_rss_init_hw(hdev);
7888 	if (ret) {
7889 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7890 		goto err_mdiobus_unreg;
7891 	}
7892 
7893 	ret = init_mgr_tbl(hdev);
7894 	if (ret) {
7895 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
7896 		goto err_mdiobus_unreg;
7897 	}
7898 
7899 	ret = hclge_init_fd_config(hdev);
7900 	if (ret) {
7901 		dev_err(&pdev->dev,
7902 			"fd table init fail, ret=%d\n", ret);
7903 		goto err_mdiobus_unreg;
7904 	}
7905 
7906 	ret = hclge_hw_error_set_state(hdev, true);
7907 	if (ret) {
7908 		dev_err(&pdev->dev,
7909 			"fail(%d) to enable hw error interrupts\n", ret);
7910 		goto err_mdiobus_unreg;
7911 	}
7912 
7913 	hclge_dcb_ops_set(hdev);
7914 
7915 	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
7916 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
7917 	INIT_WORK(&hdev->service_task, hclge_service_task);
7918 	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
7919 	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
7920 
7921 	hclge_clear_all_event_cause(hdev);
7922 
7923 	/* Enable MISC vector(vector0) */
7924 	hclge_enable_vector(&hdev->misc_vector, true);
7925 
7926 	hclge_state_init(hdev);
7927 	hdev->last_reset_time = jiffies;
7928 
7929 	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
7930 	return 0;
7931 
7932 err_mdiobus_unreg:
7933 	if (hdev->hw.mac.phydev)
7934 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
7935 err_msi_irq_uninit:
7936 	hclge_misc_irq_uninit(hdev);
7937 err_msi_uninit:
7938 	pci_free_irq_vectors(pdev);
7939 err_cmd_uninit:
7940 	hclge_cmd_uninit(hdev);
7941 err_pci_uninit:
7942 	pcim_iounmap(pdev, hdev->hw.io_base);
7943 	pci_clear_master(pdev);
7944 	pci_release_regions(pdev);
7945 	pci_disable_device(pdev);
7946 out:
7947 	return ret;
7948 }
7949 
7950 static void hclge_stats_clear(struct hclge_dev *hdev)
7951 {
7952 	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
7953 }
7954 
7955 static void hclge_reset_vport_state(struct hclge_dev *hdev)
7956 {
7957 	struct hclge_vport *vport = hdev->vport;
7958 	int i;
7959 
7960 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7961 		hclge_vport_stop(vport);
7962 		vport++;
7963 	}
7964 }
7965 
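/* Re-initialization path used after a reset. It mirrors a subset of
 * hclge_init_ae_dev(): the command queue, TQP mapping, UMV space, MAC,
 * TSO/GRO, VLAN, TM, RSS, flow director and hardware error interrupts are
 * reprogrammed on top of the already allocated software state, and the
 * vports are returned to the stopped state.
 */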
7966 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
7967 {
7968 	struct hclge_dev *hdev = ae_dev->priv;
7969 	struct pci_dev *pdev = ae_dev->pdev;
7970 	int ret;
7971 
7972 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7973 
7974 	hclge_stats_clear(hdev);
7975 	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
7976 
7977 	ret = hclge_cmd_init(hdev);
7978 	if (ret) {
7979 		dev_err(&pdev->dev, "Cmd queue init failed\n");
7980 		return ret;
7981 	}
7982 
7983 	ret = hclge_map_tqp(hdev);
7984 	if (ret) {
7985 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7986 		return ret;
7987 	}
7988 
7989 	hclge_reset_umv_space(hdev);
7990 
7991 	ret = hclge_mac_init(hdev);
7992 	if (ret) {
7993 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7994 		return ret;
7995 	}
7996 
7997 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7998 	if (ret) {
7999 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8000 		return ret;
8001 	}
8002 
8003 	ret = hclge_config_gro(hdev, true);
8004 	if (ret)
8005 		return ret;
8006 
8007 	ret = hclge_init_vlan_config(hdev);
8008 	if (ret) {
8009 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8010 		return ret;
8011 	}
8012 
8013 	ret = hclge_tm_init_hw(hdev, true);
8014 	if (ret) {
8015 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8016 		return ret;
8017 	}
8018 
8019 	ret = hclge_rss_init_hw(hdev);
8020 	if (ret) {
8021 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8022 		return ret;
8023 	}
8024 
8025 	ret = hclge_init_fd_config(hdev);
8026 	if (ret) {
8027 		dev_err(&pdev->dev,
8028 			"fd table init fail, ret=%d\n", ret);
8029 		return ret;
8030 	}
8031 
8032 	/* Re-enable the hw error interrupts because
8033 	 * the interrupts get disabled on core/global reset.
8034 	 */
8035 	ret = hclge_hw_error_set_state(hdev, true);
8036 	if (ret) {
8037 		dev_err(&pdev->dev,
8038 			"fail(%d) to re-enable HNS hw error interrupts\n", ret);
8039 		return ret;
8040 	}
8041 
8042 	hclge_reset_vport_state(hdev);
8043 
8044 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8045 		 HCLGE_DRIVER_NAME);
8046 
8047 	return 0;
8048 }
8049 
8050 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8051 {
8052 	struct hclge_dev *hdev = ae_dev->priv;
8053 	struct hclge_mac *mac = &hdev->hw.mac;
8054 
8055 	hclge_state_uninit(hdev);
8056 
8057 	if (mac->phydev)
8058 		mdiobus_unregister(mac->mdio_bus);
8059 
8060 	hclge_uninit_umv_space(hdev);
8061 
8062 	/* Disable MISC vector(vector0) */
8063 	hclge_enable_vector(&hdev->misc_vector, false);
8064 	synchronize_irq(hdev->misc_vector.vector_irq);
8065 
8066 	hclge_hw_error_set_state(hdev, false);
8067 	hclge_cmd_uninit(hdev);
8068 	hclge_misc_irq_uninit(hdev);
8069 	hclge_pci_uninit(hdev);
8070 	mutex_destroy(&hdev->vport_lock);
8071 	hclge_uninit_vport_mac_table(hdev);
8072 	hclge_uninit_vport_vlan_table(hdev);
8073 	mutex_destroy(&hdev->vport_cfg_mutex);
8074 	ae_dev->priv = NULL;
8075 }
8076 
8077 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8078 {
8079 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8080 	struct hclge_vport *vport = hclge_get_vport(handle);
8081 	struct hclge_dev *hdev = vport->back;
8082 
8083 	return min_t(u32, hdev->rss_size_max,
8084 		     vport->alloc_tqps / kinfo->num_tc);
8085 }
8086 
8087 static void hclge_get_channels(struct hnae3_handle *handle,
8088 			       struct ethtool_channels *ch)
8089 {
8090 	ch->max_combined = hclge_get_max_channels(handle);
8091 	ch->other_count = 1;
8092 	ch->max_other = 1;
8093 	ch->combined_count = handle->kinfo.rss_size;
8094 }
8095 
8096 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8097 					u16 *alloc_tqps, u16 *max_rss_size)
8098 {
8099 	struct hclge_vport *vport = hclge_get_vport(handle);
8100 	struct hclge_dev *hdev = vport->back;
8101 
8102 	*alloc_tqps = vport->alloc_tqps;
8103 	*max_rss_size = hdev->rss_size_max;
8104 }
8105 
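/* Change the number of enabled queues (the ethtool -L path). The requested
 * size is pushed through the TM vport mapping, the RSS TC mode is
 * recomputed from the resulting rss_size, and, unless the user has
 * configured the RSS indirection table explicitly, the table is rebuilt as
 * a simple round-robin over the new rss_size.
 */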
8106 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8107 			      bool rxfh_configured)
8108 {
8109 	struct hclge_vport *vport = hclge_get_vport(handle);
8110 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8111 	struct hclge_dev *hdev = vport->back;
8112 	int cur_rss_size = kinfo->rss_size;
8113 	int cur_tqps = kinfo->num_tqps;
8114 	u16 tc_offset[HCLGE_MAX_TC_NUM];
8115 	u16 tc_valid[HCLGE_MAX_TC_NUM];
8116 	u16 tc_size[HCLGE_MAX_TC_NUM];
8117 	u16 roundup_size;
8118 	u32 *rss_indir;
8119 	int ret, i;
8120 
8121 	kinfo->req_rss_size = new_tqps_num;
8122 
8123 	ret = hclge_tm_vport_map_update(hdev);
8124 	if (ret) {
8125 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
8126 		return ret;
8127 	}
8128 
8129 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
8130 	roundup_size = ilog2(roundup_size);
8131 	/* Set the RSS TC mode according to the new RSS size */
8132 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8133 		tc_valid[i] = 0;
8134 
8135 		if (!(hdev->hw_tc_map & BIT(i)))
8136 			continue;
8137 
8138 		tc_valid[i] = 1;
8139 		tc_size[i] = roundup_size;
8140 		tc_offset[i] = kinfo->rss_size * i;
8141 	}
8142 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8143 	if (ret)
8144 		return ret;
8145 
8146 	/* RSS indirection table has been configured by user */
8147 	if (rxfh_configured)
8148 		goto out;
8149 
8150 	/* Reinitialize the RSS indirection table according to the new RSS size */
8151 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8152 	if (!rss_indir)
8153 		return -ENOMEM;
8154 
8155 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8156 		rss_indir[i] = i % kinfo->rss_size;
8157 
8158 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8159 	if (ret)
8160 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8161 			ret);
8162 
8163 	kfree(rss_indir);
8164 
8165 out:
8166 	if (!ret)
8167 		dev_info(&hdev->pdev->dev,
8168 			 "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
8169 			 cur_rss_size, kinfo->rss_size,
8170 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
8171 
8172 	return ret;
8173 }
8174 
8175 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8176 			      u32 *regs_num_64_bit)
8177 {
8178 	struct hclge_desc desc;
8179 	u32 total_num;
8180 	int ret;
8181 
8182 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8183 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8184 	if (ret) {
8185 		dev_err(&hdev->pdev->dev,
8186 			"Query register number cmd failed, ret = %d.\n", ret);
8187 		return ret;
8188 	}
8189 
8190 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
8191 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
8192 
8193 	total_num = *regs_num_32_bit + *regs_num_64_bit;
8194 	if (!total_num)
8195 		return -EINVAL;
8196 
8197 	return 0;
8198 }
8199 
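/* Read regs_num 32-bit registers from the firmware into 'data'. Each
 * command descriptor returns HCLGE_32_BIT_REG_RTN_DATANUM words, except
 * that the first descriptor loses two words to the descriptor header,
 * which is why the command count is computed from regs_num + 2.
 */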
8200 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8201 				 void *data)
8202 {
8203 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8204 
8205 	struct hclge_desc *desc;
8206 	u32 *reg_val = data;
8207 	__le32 *desc_data;
8208 	int cmd_num;
8209 	int i, k, n;
8210 	int ret;
8211 
8212 	if (regs_num == 0)
8213 		return 0;
8214 
8215 	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
8216 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8217 	if (!desc)
8218 		return -ENOMEM;
8219 
8220 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8221 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8222 	if (ret) {
8223 		dev_err(&hdev->pdev->dev,
8224 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
8225 		kfree(desc);
8226 		return ret;
8227 	}
8228 
8229 	for (i = 0; i < cmd_num; i++) {
8230 		if (i == 0) {
8231 			desc_data = (__le32 *)(&desc[i].data[0]);
8232 			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8233 		} else {
8234 			desc_data = (__le32 *)(&desc[i]);
8235 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
8236 		}
8237 		for (k = 0; k < n; k++) {
8238 			*reg_val++ = le32_to_cpu(*desc_data++);
8239 
8240 			regs_num--;
8241 			if (!regs_num)
8242 				break;
8243 		}
8244 	}
8245 
8246 	kfree(desc);
8247 	return 0;
8248 }
8249 
8250 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8251 				 void *data)
8252 {
8253 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8254 
8255 	struct hclge_desc *desc;
8256 	u64 *reg_val = data;
8257 	__le64 *desc_data;
8258 	int cmd_num;
8259 	int i, k, n;
8260 	int ret;
8261 
8262 	if (regs_num == 0)
8263 		return 0;
8264 
8265 	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8266 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8267 	if (!desc)
8268 		return -ENOMEM;
8269 
8270 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8271 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8272 	if (ret) {
8273 		dev_err(&hdev->pdev->dev,
8274 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
8275 		kfree(desc);
8276 		return ret;
8277 	}
8278 
8279 	for (i = 0; i < cmd_num; i++) {
8280 		if (i == 0) {
8281 			desc_data = (__le64 *)(&desc[i].data[0]);
8282 			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8283 		} else {
8284 			desc_data = (__le64 *)(&desc[i]);
8285 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
8286 		}
8287 		for (k = 0; k < n; k++) {
8288 			*reg_val++ = le64_to_cpu(*desc_data++);
8289 
8290 			regs_num--;
8291 			if (!regs_num)
8292 				break;
8293 		}
8294 	}
8295 
8296 	kfree(desc);
8297 	return 0;
8298 }
8299 
8300 #define MAX_SEPARATE_NUM	4
8301 #define SEPARATOR_VALUE		0xFFFFFFFF
8302 #define REG_NUM_PER_LINE	4
8303 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
8304 
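/* Size of the ethtool register dump. The dump is laid out in
 * REG_LEN_PER_LINE byte lines: the cmdq and common register lists, a
 * per-TQP copy of the ring register list, a per-vector copy of the TQP
 * interrupt register list, plus the 32-bit and 64-bit register sets
 * queried from the firmware.
 */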
8305 static int hclge_get_regs_len(struct hnae3_handle *handle)
8306 {
8307 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
8308 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8309 	struct hclge_vport *vport = hclge_get_vport(handle);
8310 	struct hclge_dev *hdev = vport->back;
8311 	u32 regs_num_32_bit, regs_num_64_bit;
8312 	int ret;
8313 
8314 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8315 	if (ret) {
8316 		dev_err(&hdev->pdev->dev,
8317 			"Get register number failed, ret = %d.\n", ret);
8318 		return -EOPNOTSUPP;
8319 	}
8320 
8321 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
8322 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
8323 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
8324 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
8325 
8326 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
8327 		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
8328 		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
8329 }
8330 
8331 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
8332 			   void *data)
8333 {
8334 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8335 	struct hclge_vport *vport = hclge_get_vport(handle);
8336 	struct hclge_dev *hdev = vport->back;
8337 	u32 regs_num_32_bit, regs_num_64_bit;
8338 	int i, j, reg_um, separator_num;
8339 	u32 *reg = data;
8340 	int ret;
8341 
8342 	*version = hdev->fw_version;
8343 
8344 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8345 	if (ret) {
8346 		dev_err(&hdev->pdev->dev,
8347 			"Get register number failed, ret = %d.\n", ret);
8348 		return;
8349 	}
8350 
8351 	/* fetch per-PF register values from the PF PCIe register space */
8352 	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
8353 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8354 	for (i = 0; i < reg_um; i++)
8355 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
8356 	for (i = 0; i < separator_num; i++)
8357 		*reg++ = SEPARATOR_VALUE;
8358 
8359 	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
8360 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8361 	for (i = 0; i < reg_um; i++)
8362 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
8363 	for (i = 0; i < separator_num; i++)
8364 		*reg++ = SEPARATOR_VALUE;
8365 
8366 	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
8367 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8368 	for (j = 0; j < kinfo->num_tqps; j++) {
8369 		for (i = 0; i < reg_um; i++)
8370 			*reg++ = hclge_read_dev(&hdev->hw,
8371 						ring_reg_addr_list[i] +
8372 						0x200 * j);
8373 		for (i = 0; i < separator_num; i++)
8374 			*reg++ = SEPARATOR_VALUE;
8375 	}
8376 
8377 	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
8378 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8379 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
8380 		for (i = 0; i < reg_um; i++)
8381 			*reg++ = hclge_read_dev(&hdev->hw,
8382 						tqp_intr_reg_addr_list[i] +
8383 						4 * j);
8384 		for (i = 0; i < separator_num; i++)
8385 			*reg++ = SEPARATOR_VALUE;
8386 	}
8387 
8388 	/* fetch PF common register values from the firmware */
8389 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
8390 	if (ret) {
8391 		dev_err(&hdev->pdev->dev,
8392 			"Get 32 bit register failed, ret = %d.\n", ret);
8393 		return;
8394 	}
8395 
8396 	reg += regs_num_32_bit;
8397 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
8398 	if (ret)
8399 		dev_err(&hdev->pdev->dev,
8400 			"Get 64 bit register failed, ret = %d.\n", ret);
8401 }
8402 
8403 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
8404 {
8405 	struct hclge_set_led_state_cmd *req;
8406 	struct hclge_desc desc;
8407 	int ret;
8408 
8409 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
8410 
8411 	req = (struct hclge_set_led_state_cmd *)desc.data;
8412 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
8413 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
8414 
8415 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8416 	if (ret)
8417 		dev_err(&hdev->pdev->dev,
8418 			"Send set led state cmd error, ret =%d\n", ret);
8419 
8420 	return ret;
8421 }
8422 
8423 enum hclge_led_status {
8424 	HCLGE_LED_OFF,
8425 	HCLGE_LED_ON,
8426 	HCLGE_LED_NO_CHANGE = 0xFF,
8427 };
8428 
8429 static int hclge_set_led_id(struct hnae3_handle *handle,
8430 			    enum ethtool_phys_id_state status)
8431 {
8432 	struct hclge_vport *vport = hclge_get_vport(handle);
8433 	struct hclge_dev *hdev = vport->back;
8434 
8435 	switch (status) {
8436 	case ETHTOOL_ID_ACTIVE:
8437 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
8438 	case ETHTOOL_ID_INACTIVE:
8439 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
8440 	default:
8441 		return -EINVAL;
8442 	}
8443 }
8444 
8445 static void hclge_get_link_mode(struct hnae3_handle *handle,
8446 				unsigned long *supported,
8447 				unsigned long *advertising)
8448 {
8449 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
8450 	struct hclge_vport *vport = hclge_get_vport(handle);
8451 	struct hclge_dev *hdev = vport->back;
8452 	unsigned int idx = 0;
8453 
8454 	for (; idx < size; idx++) {
8455 		supported[idx] = hdev->hw.mac.supported[idx];
8456 		advertising[idx] = hdev->hw.mac.advertising[idx];
8457 	}
8458 }
8459 
8460 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
8461 {
8462 	struct hclge_vport *vport = hclge_get_vport(handle);
8463 	struct hclge_dev *hdev = vport->back;
8464 
8465 	return hclge_config_gro(hdev, enable);
8466 }
8467 
8468 static const struct hnae3_ae_ops hclge_ops = {
8469 	.init_ae_dev = hclge_init_ae_dev,
8470 	.uninit_ae_dev = hclge_uninit_ae_dev,
8471 	.flr_prepare = hclge_flr_prepare,
8472 	.flr_done = hclge_flr_done,
8473 	.init_client_instance = hclge_init_client_instance,
8474 	.uninit_client_instance = hclge_uninit_client_instance,
8475 	.map_ring_to_vector = hclge_map_ring_to_vector,
8476 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
8477 	.get_vector = hclge_get_vector,
8478 	.put_vector = hclge_put_vector,
8479 	.set_promisc_mode = hclge_set_promisc_mode,
8480 	.set_loopback = hclge_set_loopback,
8481 	.start = hclge_ae_start,
8482 	.stop = hclge_ae_stop,
8483 	.client_start = hclge_client_start,
8484 	.client_stop = hclge_client_stop,
8485 	.get_status = hclge_get_status,
8486 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
8487 	.update_speed_duplex_h = hclge_update_speed_duplex_h,
8488 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
8489 	.get_media_type = hclge_get_media_type,
8490 	.get_rss_key_size = hclge_get_rss_key_size,
8491 	.get_rss_indir_size = hclge_get_rss_indir_size,
8492 	.get_rss = hclge_get_rss,
8493 	.set_rss = hclge_set_rss,
8494 	.set_rss_tuple = hclge_set_rss_tuple,
8495 	.get_rss_tuple = hclge_get_rss_tuple,
8496 	.get_tc_size = hclge_get_tc_size,
8497 	.get_mac_addr = hclge_get_mac_addr,
8498 	.set_mac_addr = hclge_set_mac_addr,
8499 	.do_ioctl = hclge_do_ioctl,
8500 	.add_uc_addr = hclge_add_uc_addr,
8501 	.rm_uc_addr = hclge_rm_uc_addr,
8502 	.add_mc_addr = hclge_add_mc_addr,
8503 	.rm_mc_addr = hclge_rm_mc_addr,
8504 	.set_autoneg = hclge_set_autoneg,
8505 	.get_autoneg = hclge_get_autoneg,
8506 	.get_pauseparam = hclge_get_pauseparam,
8507 	.set_pauseparam = hclge_set_pauseparam,
8508 	.set_mtu = hclge_set_mtu,
8509 	.reset_queue = hclge_reset_tqp,
8510 	.get_stats = hclge_get_stats,
8511 	.update_stats = hclge_update_stats,
8512 	.get_strings = hclge_get_strings,
8513 	.get_sset_count = hclge_get_sset_count,
8514 	.get_fw_version = hclge_get_fw_version,
8515 	.get_mdix_mode = hclge_get_mdix_mode,
8516 	.enable_vlan_filter = hclge_enable_vlan_filter,
8517 	.set_vlan_filter = hclge_set_vlan_filter,
8518 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
8519 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
8520 	.reset_event = hclge_reset_event,
8521 	.set_default_reset_request = hclge_set_def_reset_request,
8522 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
8523 	.set_channels = hclge_set_channels,
8524 	.get_channels = hclge_get_channels,
8525 	.get_regs_len = hclge_get_regs_len,
8526 	.get_regs = hclge_get_regs,
8527 	.set_led_id = hclge_set_led_id,
8528 	.get_link_mode = hclge_get_link_mode,
8529 	.add_fd_entry = hclge_add_fd_entry,
8530 	.del_fd_entry = hclge_del_fd_entry,
8531 	.del_all_fd_entries = hclge_del_all_fd_entries,
8532 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
8533 	.get_fd_rule_info = hclge_get_fd_rule_info,
8534 	.get_fd_all_rules = hclge_get_all_rules,
8535 	.restore_fd_rules = hclge_restore_fd_entries,
8536 	.enable_fd = hclge_enable_fd,
8537 	.dbg_run_cmd = hclge_dbg_run_cmd,
8538 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
8539 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
8540 	.ae_dev_resetting = hclge_ae_dev_resetting,
8541 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
8542 	.set_gro_en = hclge_gro_en,
8543 	.get_global_queue_id = hclge_covert_handle_qid_global,
8544 	.set_timer_task = hclge_set_timer_task,
8545 	.mac_connect_phy = hclge_mac_connect_phy,
8546 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
8547 };
8548 
8549 static struct hnae3_ae_algo ae_algo = {
8550 	.ops = &hclge_ops,
8551 	.pdev_id_table = ae_algo_pci_tbl,
8552 };
8553 
8554 static int hclge_init(void)
8555 {
8556 	pr_info("%s is initializing\n", HCLGE_NAME);
8557 
8558 	hnae3_register_ae_algo(&ae_algo);
8559 
8560 	return 0;
8561 }
8562 
8563 static void hclge_exit(void)
8564 {
8565 	hnae3_unregister_ae_algo(&ae_algo);
8566 }
8567 module_init(hclge_init);
8568 module_exit(hclge_exit);
8569 
8570 MODULE_LICENSE("GPL");
8571 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
8572 MODULE_DESCRIPTION("HCLGE Driver");
8573 MODULE_VERSION(HCLGE_MOD_VERSION);
8574