xref: /linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c (revision 77380998d91dee8aafdbe42634776ba1ef692f1e)
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <net/rtnetlink.h>
16 #include "hclge_cmd.h"
17 #include "hclge_dcb.h"
18 #include "hclge_main.h"
19 #include "hclge_mbx.h"
20 #include "hclge_mdio.h"
21 #include "hclge_tm.h"
22 #include "hclge_err.h"
23 #include "hnae3.h"
24 
25 #define HCLGE_NAME			"hclge"
26 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
27 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
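/* e.g. reading one counter through the offset table:
 *	u64 pause = HCLGE_STATS_READ(&hdev->hw_stats.mac_stats,
 *		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 */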
28 
29 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
30 static int hclge_init_vlan_config(struct hclge_dev *hdev);
31 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
32 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
33 			       u16 *allocated_size, bool is_alloc);
34 
35 static struct hnae3_ae_algo ae_algo;
36 
37 static const struct pci_device_id ae_algo_pci_tbl[] = {
38 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
39 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
40 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
41 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
42 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
43 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
44 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
45 	/* required last entry */
46 	{0, }
47 };
48 
49 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
50 
51 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
52 	"App    Loopback test",
53 	"Serdes serial Loopback test",
54 	"Serdes parallel Loopback test",
55 	"Phy    Loopback test"
56 };
57 
58 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
59 	{"mac_tx_mac_pause_num",
60 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
61 	{"mac_rx_mac_pause_num",
62 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
63 	{"mac_tx_pfc_pri0_pkt_num",
64 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
65 	{"mac_tx_pfc_pri1_pkt_num",
66 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
67 	{"mac_tx_pfc_pri2_pkt_num",
68 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
69 	{"mac_tx_pfc_pri3_pkt_num",
70 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
71 	{"mac_tx_pfc_pri4_pkt_num",
72 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
73 	{"mac_tx_pfc_pri5_pkt_num",
74 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
75 	{"mac_tx_pfc_pri6_pkt_num",
76 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
77 	{"mac_tx_pfc_pri7_pkt_num",
78 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
79 	{"mac_rx_pfc_pri0_pkt_num",
80 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
81 	{"mac_rx_pfc_pri1_pkt_num",
82 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
83 	{"mac_rx_pfc_pri2_pkt_num",
84 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
85 	{"mac_rx_pfc_pri3_pkt_num",
86 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
87 	{"mac_rx_pfc_pri4_pkt_num",
88 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
89 	{"mac_rx_pfc_pri5_pkt_num",
90 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
91 	{"mac_rx_pfc_pri6_pkt_num",
92 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
93 	{"mac_rx_pfc_pri7_pkt_num",
94 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
95 	{"mac_tx_total_pkt_num",
96 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
97 	{"mac_tx_total_oct_num",
98 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
99 	{"mac_tx_good_pkt_num",
100 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
101 	{"mac_tx_bad_pkt_num",
102 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
103 	{"mac_tx_good_oct_num",
104 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
105 	{"mac_tx_bad_oct_num",
106 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
107 	{"mac_tx_uni_pkt_num",
108 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
109 	{"mac_tx_multi_pkt_num",
110 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
111 	{"mac_tx_broad_pkt_num",
112 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
113 	{"mac_tx_undersize_pkt_num",
114 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
115 	{"mac_tx_oversize_pkt_num",
116 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
117 	{"mac_tx_64_oct_pkt_num",
118 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
119 	{"mac_tx_65_127_oct_pkt_num",
120 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
121 	{"mac_tx_128_255_oct_pkt_num",
122 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
123 	{"mac_tx_256_511_oct_pkt_num",
124 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
125 	{"mac_tx_512_1023_oct_pkt_num",
126 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
127 	{"mac_tx_1024_1518_oct_pkt_num",
128 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
129 	{"mac_tx_1519_2047_oct_pkt_num",
130 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
131 	{"mac_tx_2048_4095_oct_pkt_num",
132 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
133 	{"mac_tx_4096_8191_oct_pkt_num",
134 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
135 	{"mac_tx_8192_9216_oct_pkt_num",
136 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
137 	{"mac_tx_9217_12287_oct_pkt_num",
138 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
139 	{"mac_tx_12288_16383_oct_pkt_num",
140 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
141 	{"mac_tx_1519_max_good_pkt_num",
142 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
143 	{"mac_tx_1519_max_bad_pkt_num",
144 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
145 	{"mac_rx_total_pkt_num",
146 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
147 	{"mac_rx_total_oct_num",
148 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
149 	{"mac_rx_good_pkt_num",
150 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
151 	{"mac_rx_bad_pkt_num",
152 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
153 	{"mac_rx_good_oct_num",
154 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
155 	{"mac_rx_bad_oct_num",
156 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
157 	{"mac_rx_uni_pkt_num",
158 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
159 	{"mac_rx_multi_pkt_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
161 	{"mac_rx_broad_pkt_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
163 	{"mac_rx_undersize_pkt_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
165 	{"mac_rx_oversize_pkt_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
167 	{"mac_rx_64_oct_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
169 	{"mac_rx_65_127_oct_pkt_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
171 	{"mac_rx_128_255_oct_pkt_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
173 	{"mac_rx_256_511_oct_pkt_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
175 	{"mac_rx_512_1023_oct_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
177 	{"mac_rx_1024_1518_oct_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
179 	{"mac_rx_1519_2047_oct_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
181 	{"mac_rx_2048_4095_oct_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
183 	{"mac_rx_4096_8191_oct_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
185 	{"mac_rx_8192_9216_oct_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
187 	{"mac_rx_9217_12287_oct_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
189 	{"mac_rx_12288_16383_oct_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
191 	{"mac_rx_1519_max_good_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
193 	{"mac_rx_1519_max_bad_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
195 
196 	{"mac_tx_fragment_pkt_num",
197 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
198 	{"mac_tx_undermin_pkt_num",
199 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
200 	{"mac_tx_jabber_pkt_num",
201 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
202 	{"mac_tx_err_all_pkt_num",
203 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
204 	{"mac_tx_from_app_good_pkt_num",
205 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
206 	{"mac_tx_from_app_bad_pkt_num",
207 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
208 	{"mac_rx_fragment_pkt_num",
209 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
210 	{"mac_rx_undermin_pkt_num",
211 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
212 	{"mac_rx_jabber_pkt_num",
213 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
214 	{"mac_rx_fcs_err_pkt_num",
215 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
216 	{"mac_rx_send_app_good_pkt_num",
217 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
218 	{"mac_rx_send_app_bad_pkt_num",
219 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
220 };
221 
222 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
223 	{
224 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
225 		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
226 		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
227 		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
228 		.i_port_bitmap = 0x1,
229 	},
230 };
231 
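/* Read the MAC statistics from firmware with one multi-descriptor query
 * command and accumulate the returned 64-bit counters into
 * hdev->hw_stats.mac_stats. The first descriptor carries the command
 * header, so fewer stats words are copied from it; the remaining
 * descriptors are consumed whole.
 */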
232 static int hclge_mac_update_stats(struct hclge_dev *hdev)
233 {
234 #define HCLGE_MAC_CMD_NUM 21
235 #define HCLGE_RTN_DATA_NUM 4
236 
237 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
238 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
239 	__le64 *desc_data;
240 	int i, k, n;
241 	int ret;
242 
243 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
244 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
245 	if (ret) {
246 		dev_err(&hdev->pdev->dev,
247 			"Get MAC pkt stats fail, status = %d.\n", ret);
248 
249 		return ret;
250 	}
251 
252 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
253 		if (unlikely(i == 0)) {
254 			desc_data = (__le64 *)(&desc[i].data[0]);
255 			n = HCLGE_RTN_DATA_NUM - 2;
256 		} else {
257 			desc_data = (__le64 *)(&desc[i]);
258 			n = HCLGE_RTN_DATA_NUM;
259 		}
260 		for (k = 0; k < n; k++) {
261 			*data++ += le64_to_cpu(*desc_data);
262 			desc_data++;
263 		}
264 	}
265 
266 	return 0;
267 }
268 
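/* Query the RX and then TX packet counters of every TQP, one queue per
 * command, and accumulate the results into the per-queue software stats.
 */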
269 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
270 {
271 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
272 	struct hclge_vport *vport = hclge_get_vport(handle);
273 	struct hclge_dev *hdev = vport->back;
274 	struct hnae3_queue *queue;
275 	struct hclge_desc desc[1];
276 	struct hclge_tqp *tqp;
277 	int ret, i;
278 
279 	for (i = 0; i < kinfo->num_tqps; i++) {
280 		queue = handle->kinfo.tqp[i];
281 		tqp = container_of(queue, struct hclge_tqp, q);
282 		/* command : HCLGE_OPC_QUERY_RX_STATUS */
283 		hclge_cmd_setup_basic_desc(&desc[0],
284 					   HCLGE_OPC_QUERY_RX_STATUS,
285 					   true);
286 
287 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
288 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
289 		if (ret) {
290 			dev_err(&hdev->pdev->dev,
291 				"Query tqp stat fail, status = %d, queue = %d\n",
292 				ret, i);
293 			return ret;
294 		}
295 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
296 			le32_to_cpu(desc[0].data[1]);
297 	}
298 
299 	for (i = 0; i < kinfo->num_tqps; i++) {
300 		queue = handle->kinfo.tqp[i];
301 		tqp = container_of(queue, struct hclge_tqp, q);
302 		/* command : HCLGE_OPC_QUERY_TX_STATUS */
303 		hclge_cmd_setup_basic_desc(&desc[0],
304 					   HCLGE_OPC_QUERY_TX_STATUS,
305 					   true);
306 
307 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
308 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
309 		if (ret) {
310 			dev_err(&hdev->pdev->dev,
311 				"Query tqp stat fail, status = %d, queue = %d\n",
312 				ret, i);
313 			return ret;
314 		}
315 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
316 			le32_to_cpu(desc[0].data[1]);
317 	}
318 
319 	return 0;
320 }
321 
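/* Copy the accumulated TX and then RX per-queue packet counters into the
 * ethtool statistics buffer; returns the position after the last entry.
 */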
322 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
323 {
324 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
325 	struct hclge_tqp *tqp;
326 	u64 *buff = data;
327 	int i;
328 
329 	for (i = 0; i < kinfo->num_tqps; i++) {
330 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
331 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
332 	}
333 
334 	for (i = 0; i < kinfo->num_tqps; i++) {
335 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
336 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
337 	}
338 
339 	return buff;
340 }
341 
342 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
343 {
344 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
345 
346 	return kinfo->num_tqps * 2;
347 }
348 
349 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
350 {
351 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
352 	u8 *buff = data;
353 	int i;
354 
355 	for (i = 0; i < kinfo->num_tqps; i++) {
356 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
357 			struct hclge_tqp, q);
358 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
359 			 tqp->index);
360 		buff = buff + ETH_GSTRING_LEN;
361 	}
362 
363 	for (i = 0; i < kinfo->num_tqps; i++) {
364 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
365 			struct hclge_tqp, q);
366 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
367 			 tqp->index);
368 		buff = buff + ETH_GSTRING_LEN;
369 	}
370 
371 	return buff;
372 }
373 
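/* Copy @size u64 counters out of @comm_stats using the offsets recorded
 * in the string table @strs; returns the buffer position past the copies.
 */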
374 static u64 *hclge_comm_get_stats(void *comm_stats,
375 				 const struct hclge_comm_stats_str strs[],
376 				 int size, u64 *data)
377 {
378 	u64 *buf = data;
379 	u32 i;
380 
381 	for (i = 0; i < size; i++)
382 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
383 
384 	return buf + size;
385 }
386 
387 static u8 *hclge_comm_get_strings(u32 stringset,
388 				  const struct hclge_comm_stats_str strs[],
389 				  int size, u8 *data)
390 {
391 	char *buff = (char *)data;
392 	u32 i;
393 
394 	if (stringset != ETH_SS_STATS)
395 		return buff;
396 
397 	for (i = 0; i < size; i++) {
398 		snprintf(buff, ETH_GSTRING_LEN, "%s",
399 			 strs[i].desc);
400 		buff = buff + ETH_GSTRING_LEN;
401 	}
402 
403 	return (u8 *)buff;
404 }
405 
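/* Derive the standard netdev error and multicast counters from the raw
 * MAC statistics kept in @hw_stats.
 */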
406 static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
407 				 struct net_device_stats *net_stats)
408 {
409 	net_stats->tx_dropped = 0;
410 	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
411 	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
412 	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
413 
414 	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
415 	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
416 
417 	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
418 	net_stats->rx_length_errors =
419 		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
420 	net_stats->rx_length_errors +=
421 		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
422 	net_stats->rx_over_errors =
423 		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
424 }
425 
426 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
427 {
428 	struct hnae3_handle *handle;
429 	int status;
430 
431 	handle = &hdev->vport[0].nic;
432 	if (handle->client) {
433 		status = hclge_tqps_update_stats(handle);
434 		if (status) {
435 			dev_err(&hdev->pdev->dev,
436 				"Update TQPS stats fail, status = %d.\n",
437 				status);
438 		}
439 	}
440 
441 	status = hclge_mac_update_stats(hdev);
442 	if (status)
443 		dev_err(&hdev->pdev->dev,
444 			"Update MAC stats fail, status = %d.\n", status);
445 
446 	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
447 }
448 
449 static void hclge_update_stats(struct hnae3_handle *handle,
450 			       struct net_device_stats *net_stats)
451 {
452 	struct hclge_vport *vport = hclge_get_vport(handle);
453 	struct hclge_dev *hdev = vport->back;
454 	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
455 	int status;
456 
457 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
458 		return;
459 
460 	status = hclge_mac_update_stats(hdev);
461 	if (status)
462 		dev_err(&hdev->pdev->dev,
463 			"Update MAC stats fail, status = %d.\n",
464 			status);
465 
466 	status = hclge_tqps_update_stats(handle);
467 	if (status)
468 		dev_err(&hdev->pdev->dev,
469 			"Update TQPS stats fail, status = %d.\n",
470 			status);
471 
472 	hclge_update_netstat(hw_stats, net_stats);
473 
474 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
475 }
476 
477 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
478 {
479 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
480 		HNAE3_SUPPORT_PHY_LOOPBACK |\
481 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
482 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
483 
484 	struct hclge_vport *vport = hclge_get_vport(handle);
485 	struct hclge_dev *hdev = vport->back;
486 	int count = 0;
487 
488 	/* Loopback test support rules:
489 	 * mac: only GE mode is supported
490 	 * serdes: all MAC modes are supported, including GE/XGE/LGE/CGE
491 	 * phy: only supported when a PHY device exists on the board
492 	 */
493 	if (stringset == ETH_SS_TEST) {
494 		/* clear loopback bit flags at first */
495 		handle->flags &= ~HCLGE_LOOPBACK_TEST_FLAGS;
496 		if (hdev->pdev->revision >= 0x21 ||
497 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
498 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
499 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
500 			count += 1;
501 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
502 		}
503 
504 		count += 2;
505 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
506 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
507 	} else if (stringset == ETH_SS_STATS) {
508 		count = ARRAY_SIZE(g_mac_stats_string) +
509 			hclge_tqps_get_sset_count(handle, stringset);
510 	}
511 
512 	return count;
513 }
514 
515 static void hclge_get_strings(struct hnae3_handle *handle,
516 			      u32 stringset,
517 			      u8 *data)
518 {
519 	u8 *p = data;
520 	int size;
521 
522 	if (stringset == ETH_SS_STATS) {
523 		size = ARRAY_SIZE(g_mac_stats_string);
524 		p = hclge_comm_get_strings(stringset,
525 					   g_mac_stats_string,
526 					   size,
527 					   p);
528 		p = hclge_tqps_get_strings(handle, p);
529 	} else if (stringset == ETH_SS_TEST) {
530 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
531 			memcpy(p,
532 			       hns3_nic_test_strs[HNAE3_LOOP_APP],
533 			       ETH_GSTRING_LEN);
534 			p += ETH_GSTRING_LEN;
535 		}
536 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
537 			memcpy(p,
538 			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
539 			       ETH_GSTRING_LEN);
540 			p += ETH_GSTRING_LEN;
541 		}
542 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
543 			memcpy(p,
544 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
545 			       ETH_GSTRING_LEN);
546 			p += ETH_GSTRING_LEN;
547 		}
548 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
549 			memcpy(p,
550 			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
551 			       ETH_GSTRING_LEN);
552 			p += ETH_GSTRING_LEN;
553 		}
554 	}
555 }
556 
557 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
558 {
559 	struct hclge_vport *vport = hclge_get_vport(handle);
560 	struct hclge_dev *hdev = vport->back;
561 	u64 *p;
562 
563 	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
564 				 g_mac_stats_string,
565 				 ARRAY_SIZE(g_mac_stats_string),
566 				 data);
567 	p = hclge_tqps_get_stats(handle, p);
568 }
569 
570 static int hclge_parse_func_status(struct hclge_dev *hdev,
571 				   struct hclge_func_status_cmd *status)
572 {
573 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
574 		return -EINVAL;
575 
576 	/* Record whether this PF is the main PF */
577 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
578 		hdev->flag |= HCLGE_FLAG_MAIN;
579 	else
580 		hdev->flag &= ~HCLGE_FLAG_MAIN;
581 
582 	return 0;
583 }
584 
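/* Poll the firmware for the function status, retrying several times
 * until the PF state becomes valid, then record the main-PF flag.
 */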
585 static int hclge_query_function_status(struct hclge_dev *hdev)
586 {
587 	struct hclge_func_status_cmd *req;
588 	struct hclge_desc desc;
589 	int timeout = 0;
590 	int ret;
591 
592 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
593 	req = (struct hclge_func_status_cmd *)desc.data;
594 
595 	do {
596 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
597 		if (ret) {
598 			dev_err(&hdev->pdev->dev,
599 				"query function status failed %d.\n",
600 				ret);
601 
602 			return ret;
603 		}
604 
605 		/* Check if PF reset is done */
606 		if (req->pf_state)
607 			break;
608 		usleep_range(1000, 2000);
609 	} while (timeout++ < 5);
610 
611 	ret = hclge_parse_func_status(hdev, req);
612 
613 	return ret;
614 }
615 
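/* Query the TQP number, packet buffer size and interrupt vector
 * resources assigned to this PF.
 */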
616 static int hclge_query_pf_resource(struct hclge_dev *hdev)
617 {
618 	struct hclge_pf_res_cmd *req;
619 	struct hclge_desc desc;
620 	int ret;
621 
622 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
623 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
624 	if (ret) {
625 		dev_err(&hdev->pdev->dev,
626 			"query pf resource failed %d.\n", ret);
627 		return ret;
628 	}
629 
630 	req = (struct hclge_pf_res_cmd *)desc.data;
631 	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
632 	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
633 
634 	if (hnae3_dev_roce_supported(hdev)) {
635 		hdev->roce_base_msix_offset =
636 		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
637 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
638 		hdev->num_roce_msi =
639 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
640 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
641 
642 		/* The PF owns both NIC vectors and RoCE vectors;
643 		 * the NIC vectors are laid out before the RoCE vectors.
644 		 */
645 		hdev->num_msi = hdev->num_roce_msi +
646 				hdev->roce_base_msix_offset;
647 	} else {
648 		hdev->num_msi =
649 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
650 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
651 	}
652 
653 	return 0;
654 }
655 
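/* Translate the firmware speed code into an HCLGE_MAC_SPEED_* value;
 * note the codes are not ordered by speed (6/7 select 10M/100M).
 */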
656 static int hclge_parse_speed(int speed_cmd, int *speed)
657 {
658 	switch (speed_cmd) {
659 	case 6:
660 		*speed = HCLGE_MAC_SPEED_10M;
661 		break;
662 	case 7:
663 		*speed = HCLGE_MAC_SPEED_100M;
664 		break;
665 	case 0:
666 		*speed = HCLGE_MAC_SPEED_1G;
667 		break;
668 	case 1:
669 		*speed = HCLGE_MAC_SPEED_10G;
670 		break;
671 	case 2:
672 		*speed = HCLGE_MAC_SPEED_25G;
673 		break;
674 	case 3:
675 		*speed = HCLGE_MAC_SPEED_40G;
676 		break;
677 	case 4:
678 		*speed = HCLGE_MAC_SPEED_50G;
679 		break;
680 	case 5:
681 		*speed = HCLGE_MAC_SPEED_100G;
682 		break;
683 	default:
684 		return -EINVAL;
685 	}
686 
687 	return 0;
688 }
689 
690 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
691 					u8 speed_ability)
692 {
693 	unsigned long *supported = hdev->hw.mac.supported;
694 
695 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
696 		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
697 			supported);
698 
699 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
700 		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
701 			supported);
702 
703 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
704 		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
705 			supported);
706 
707 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
708 		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
709 			supported);
710 
711 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
712 		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
713 			supported);
714 
715 	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
716 	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
717 }
718 
719 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
720 {
721 	u8 media_type = hdev->hw.mac.media_type;
722 
723 	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
724 		return;
725 
726 	hclge_parse_fiber_link_mode(hdev, speed_ability);
727 }
728 
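/* Unpack the device configuration (VMDq vport number, TC number, MAC
 * address, default speed, RSS size, UMV space, ...) from the raw
 * descriptor data returned by the get-config command.
 */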
729 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
730 {
731 	struct hclge_cfg_param_cmd *req;
732 	u64 mac_addr_tmp_high;
733 	u64 mac_addr_tmp;
734 	int i;
735 
736 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
737 
738 	/* get the configuration */
739 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
740 					      HCLGE_CFG_VMDQ_M,
741 					      HCLGE_CFG_VMDQ_S);
742 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
743 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
744 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
745 					    HCLGE_CFG_TQP_DESC_N_M,
746 					    HCLGE_CFG_TQP_DESC_N_S);
747 
748 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
749 					HCLGE_CFG_PHY_ADDR_M,
750 					HCLGE_CFG_PHY_ADDR_S);
751 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
752 					  HCLGE_CFG_MEDIA_TP_M,
753 					  HCLGE_CFG_MEDIA_TP_S);
754 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
755 					  HCLGE_CFG_RX_BUF_LEN_M,
756 					  HCLGE_CFG_RX_BUF_LEN_S);
757 	/* get mac_address */
758 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
759 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
760 					    HCLGE_CFG_MAC_ADDR_H_M,
761 					    HCLGE_CFG_MAC_ADDR_H_S);
762 
763 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
764 
765 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
766 					     HCLGE_CFG_DEFAULT_SPEED_M,
767 					     HCLGE_CFG_DEFAULT_SPEED_S);
768 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
769 					    HCLGE_CFG_RSS_SIZE_M,
770 					    HCLGE_CFG_RSS_SIZE_S);
771 
772 	for (i = 0; i < ETH_ALEN; i++)
773 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
774 
775 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
776 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
777 
778 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
779 					     HCLGE_CFG_SPEED_ABILITY_M,
780 					     HCLGE_CFG_SPEED_ABILITY_S);
781 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
782 					 HCLGE_CFG_UMV_TBL_SPACE_M,
783 					 HCLGE_CFG_UMV_TBL_SPACE_S);
784 	if (!cfg->umv_space)
785 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
786 }
787 
788 /* hclge_get_cfg: query the static configuration parameters from flash
789  * @hdev: pointer to struct hclge_dev
790  * @hcfg: the config structure to be filled
791  */
792 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
793 {
794 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
795 	struct hclge_cfg_param_cmd *req;
796 	int i, ret;
797 
798 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
799 		u32 offset = 0;
800 
801 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
802 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
803 					   true);
804 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
805 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
806 		/* The length must be in units of 4 bytes when sent to hardware */
807 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
808 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
809 		req->offset = cpu_to_le32(offset);
810 	}
811 
812 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
813 	if (ret) {
814 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
815 		return ret;
816 	}
817 
818 	hclge_parse_cfg(hcfg, desc);
819 
820 	return 0;
821 }
822 
823 static int hclge_get_cap(struct hclge_dev *hdev)
824 {
825 	int ret;
826 
827 	ret = hclge_query_function_status(hdev);
828 	if (ret) {
829 		dev_err(&hdev->pdev->dev,
830 			"query function status error %d.\n", ret);
831 		return ret;
832 	}
833 
834 	/* get pf resource */
835 	ret = hclge_query_pf_resource(hdev);
836 	if (ret)
837 		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
838 
839 	return ret;
840 }
841 
842 static int hclge_configure(struct hclge_dev *hdev)
843 {
844 	struct hclge_cfg cfg;
845 	int ret, i;
846 
847 	ret = hclge_get_cfg(hdev, &cfg);
848 	if (ret) {
849 		dev_err(&hdev->pdev->dev, "get device config failed %d.\n", ret);
850 		return ret;
851 	}
852 
853 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
854 	hdev->base_tqp_pid = 0;
855 	hdev->rss_size_max = cfg.rss_size_max;
856 	hdev->rx_buf_len = cfg.rx_buf_len;
857 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
858 	hdev->hw.mac.media_type = cfg.media_type;
859 	hdev->hw.mac.phy_addr = cfg.phy_addr;
860 	hdev->num_desc = cfg.tqp_desc_num;
861 	hdev->tm_info.num_pg = 1;
862 	hdev->tc_max = cfg.tc_num;
863 	hdev->tm_info.hw_pfc_map = 0;
864 	hdev->wanted_umv_size = cfg.umv_space;
865 
866 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
867 	if (ret) {
868 		dev_err(&hdev->pdev->dev, "failed to parse speed, ret = %d.\n", ret);
869 		return ret;
870 	}
871 
872 	hclge_parse_link_mode(hdev, cfg.speed_ability);
873 
874 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
875 	    (hdev->tc_max < 1)) {
876 		dev_warn(&hdev->pdev->dev, "invalid TC num %d, set to 1.\n",
877 			 hdev->tc_max);
878 		hdev->tc_max = 1;
879 	}
880 
881 	/* Dev does not support DCB */
882 	if (!hnae3_dev_dcb_supported(hdev)) {
883 		hdev->tc_max = 1;
884 		hdev->pfc_max = 0;
885 	} else {
886 		hdev->pfc_max = hdev->tc_max;
887 	}
888 
889 	hdev->tm_info.num_tc = hdev->tc_max;
890 
891 	/* Discontiguous TCs are currently not supported */
892 	for (i = 0; i < hdev->tm_info.num_tc; i++)
893 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
894 
895 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
896 
897 	return ret;
898 }
899 
900 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
901 			    int tso_mss_max)
902 {
903 	struct hclge_cfg_tso_status_cmd *req;
904 	struct hclge_desc desc;
905 	u16 tso_mss;
906 
907 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
908 
909 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
910 
911 	tso_mss = 0;
912 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
913 			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
914 	req->tso_mss_min = cpu_to_le16(tso_mss);
915 
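	/* the max MSS reuses the MIN mask/shift pair, as both 16-bit
	 * fields share the same in-field layout
	 */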
916 	tso_mss = 0;
917 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
918 			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
919 	req->tso_mss_max = cpu_to_le16(tso_mss);
920 
921 	return hclge_cmd_send(&hdev->hw, &desc, 1);
922 }
923 
924 static int hclge_alloc_tqps(struct hclge_dev *hdev)
925 {
926 	struct hclge_tqp *tqp;
927 	int i;
928 
929 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
930 				  sizeof(struct hclge_tqp), GFP_KERNEL);
931 	if (!hdev->htqp)
932 		return -ENOMEM;
933 
934 	tqp = hdev->htqp;
935 
936 	for (i = 0; i < hdev->num_tqps; i++) {
937 		tqp->dev = &hdev->pdev->dev;
938 		tqp->index = i;
939 
940 		tqp->q.ae_algo = &ae_algo;
941 		tqp->q.buf_size = hdev->rx_buf_len;
942 		tqp->q.desc_num = hdev->num_desc;
943 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
944 			i * HCLGE_TQP_REG_SIZE;
945 
946 		tqp++;
947 	}
948 
949 	return 0;
950 }
951 
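/* Map a physical TQP to a function: @tqp_pid is the physical queue id,
 * @tqp_vid the queue id as seen by that function (PF or VF).
 */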
952 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
953 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
954 {
955 	struct hclge_tqp_map_cmd *req;
956 	struct hclge_desc desc;
957 	int ret;
958 
959 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
960 
961 	req = (struct hclge_tqp_map_cmd *)desc.data;
962 	req->tqp_id = cpu_to_le16(tqp_pid);
963 	req->tqp_vf = func_id;
964 	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
965 			1 << HCLGE_TQP_MAP_EN_B;
966 	req->tqp_vid = cpu_to_le16(tqp_vid);
967 
968 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
969 	if (ret)
970 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
971 
972 	return ret;
973 }
974 
975 static int  hclge_assign_tqp(struct hclge_vport *vport)
976 {
977 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
978 	struct hclge_dev *hdev = vport->back;
979 	int i, alloced;
980 
981 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
982 	     alloced < kinfo->num_tqps; i++) {
983 		if (!hdev->htqp[i].alloced) {
984 			hdev->htqp[i].q.handle = &vport->nic;
985 			hdev->htqp[i].q.tqp_index = alloced;
986 			hdev->htqp[i].q.desc_num = kinfo->num_desc;
987 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
988 			hdev->htqp[i].alloced = true;
989 			alloced++;
990 		}
991 	}
992 	vport->alloc_tqps = kinfo->num_tqps;
993 
994 	return 0;
995 }
996 
997 static int hclge_knic_setup(struct hclge_vport *vport,
998 			    u16 num_tqps, u16 num_desc)
999 {
1000 	struct hnae3_handle *nic = &vport->nic;
1001 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1002 	struct hclge_dev *hdev = vport->back;
1003 	int i, ret;
1004 
1005 	kinfo->num_desc = num_desc;
1006 	kinfo->rx_buf_len = hdev->rx_buf_len;
1007 	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
1008 	kinfo->rss_size
1009 		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
1010 	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;
1011 
1012 	for (i = 0; i < HNAE3_MAX_TC; i++) {
1013 		if (hdev->hw_tc_map & BIT(i)) {
1014 			kinfo->tc_info[i].enable = true;
1015 			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
1016 			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
1017 			kinfo->tc_info[i].tc = i;
1018 		} else {
1019 			/* Set to default queue if TC is disabled */
1020 			kinfo->tc_info[i].enable = false;
1021 			kinfo->tc_info[i].tqp_offset = 0;
1022 			kinfo->tc_info[i].tqp_count = 1;
1023 			kinfo->tc_info[i].tc = 0;
1024 		}
1025 	}
1026 
1027 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
1028 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1029 	if (!kinfo->tqp)
1030 		return -ENOMEM;
1031 
1032 	ret = hclge_assign_tqp(vport);
1033 	if (ret)
1034 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1035 
1036 	return ret;
1037 }
1038 
1039 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1040 				  struct hclge_vport *vport)
1041 {
1042 	struct hnae3_handle *nic = &vport->nic;
1043 	struct hnae3_knic_private_info *kinfo;
1044 	u16 i;
1045 
1046 	kinfo = &nic->kinfo;
1047 	for (i = 0; i < kinfo->num_tqps; i++) {
1048 		struct hclge_tqp *q =
1049 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1050 		bool is_pf;
1051 		int ret;
1052 
1053 		is_pf = !(vport->vport_id);
1054 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1055 					     i, is_pf);
1056 		if (ret)
1057 			return ret;
1058 	}
1059 
1060 	return 0;
1061 }
1062 
1063 static int hclge_map_tqp(struct hclge_dev *hdev)
1064 {
1065 	struct hclge_vport *vport = hdev->vport;
1066 	u16 i, num_vport;
1067 
1068 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1069 	for (i = 0; i < num_vport; i++) {
1070 		int ret;
1071 
1072 		ret = hclge_map_tqp_to_vport(hdev, vport);
1073 		if (ret)
1074 			return ret;
1075 
1076 		vport++;
1077 	}
1078 
1079 	return 0;
1080 }
1081 
1082 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1083 {
1084 	/* this would be initialized later */
1085 }
1086 
1087 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1088 {
1089 	struct hnae3_handle *nic = &vport->nic;
1090 	struct hclge_dev *hdev = vport->back;
1091 	int ret;
1092 
1093 	nic->pdev = hdev->pdev;
1094 	nic->ae_algo = &ae_algo;
1095 	nic->numa_node_mask = hdev->numa_node_mask;
1096 
1097 	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1098 		ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
1099 		if (ret) {
1100 			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1101 				ret);
1102 			return ret;
1103 		}
1104 	} else {
1105 		hclge_unic_setup(vport, num_tqps);
1106 	}
1107 
1108 	return 0;
1109 }
1110 
1111 static int hclge_alloc_vport(struct hclge_dev *hdev)
1112 {
1113 	struct pci_dev *pdev = hdev->pdev;
1114 	struct hclge_vport *vport;
1115 	u32 tqp_main_vport;
1116 	u32 tqp_per_vport;
1117 	int num_vport, i;
1118 	int ret;
1119 
1120 	/* We need to alloc a vport for the main NIC of the PF */
1121 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1122 
1123 	if (hdev->num_tqps < num_vport) {
1124 		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)\n",
1125 			hdev->num_tqps, num_vport);
1126 		return -EINVAL;
1127 	}
1128 
1129 	/* Alloc the same number of TQPs for every vport */
1130 	tqp_per_vport = hdev->num_tqps / num_vport;
1131 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1132 
1133 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1134 			     GFP_KERNEL);
1135 	if (!vport)
1136 		return -ENOMEM;
1137 
1138 	hdev->vport = vport;
1139 	hdev->num_alloc_vport = num_vport;
1140 
1141 	if (IS_ENABLED(CONFIG_PCI_IOV))
1142 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1143 
1144 	for (i = 0; i < num_vport; i++) {
1145 		vport->back = hdev;
1146 		vport->vport_id = i;
1147 
1148 		if (i == 0)
1149 			ret = hclge_vport_setup(vport, tqp_main_vport);
1150 		else
1151 			ret = hclge_vport_setup(vport, tqp_per_vport);
1152 		if (ret) {
1153 			dev_err(&pdev->dev,
1154 				"vport setup failed for vport %d, %d\n",
1155 				i, ret);
1156 			return ret;
1157 		}
1158 
1159 		vport++;
1160 	}
1161 
1162 	return 0;
1163 }
1164 
1165 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1166 				   struct hclge_pkt_buf_alloc *buf_alloc)
1167 {
1168 /* TX buffer size is in units of 128 bytes */
1169 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1170 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1171 	struct hclge_tx_buff_alloc_cmd *req;
1172 	struct hclge_desc desc;
1173 	int ret;
1174 	u8 i;
1175 
1176 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1177 
1178 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, false);
1179 	for (i = 0; i < HCLGE_TC_NUM; i++) {
1180 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1181 
1182 		req->tx_pkt_buff[i] =
1183 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1184 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1185 	}
1186 
1187 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1188 	if (ret)
1189 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1190 			ret);
1191 
1192 	return ret;
1193 }
1194 
1195 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1196 				 struct hclge_pkt_buf_alloc *buf_alloc)
1197 {
1198 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1199 
1200 	if (ret)
1201 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1202 
1203 	return ret;
1204 }
1205 
1206 static int hclge_get_tc_num(struct hclge_dev *hdev)
1207 {
1208 	int i, cnt = 0;
1209 
1210 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1211 		if (hdev->hw_tc_map & BIT(i))
1212 			cnt++;
1213 	return cnt;
1214 }
1215 
1216 static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
1217 {
1218 	int i, cnt = 0;
1219 
1220 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1221 		if (hdev->hw_tc_map & BIT(i) &&
1222 		    hdev->tm_info.hw_pfc_map & BIT(i))
1223 			cnt++;
1224 	return cnt;
1225 }
1226 
1227 /* Get the number of PFC-enabled TCs that have a private buffer */
1228 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1229 				  struct hclge_pkt_buf_alloc *buf_alloc)
1230 {
1231 	struct hclge_priv_buf *priv;
1232 	int i, cnt = 0;
1233 
1234 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1235 		priv = &buf_alloc->priv_buf[i];
1236 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1237 		    priv->enable)
1238 			cnt++;
1239 	}
1240 
1241 	return cnt;
1242 }
1243 
1244 /* Get the number of PFC-disabled TCs that have a private buffer */
1245 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1246 				     struct hclge_pkt_buf_alloc *buf_alloc)
1247 {
1248 	struct hclge_priv_buf *priv;
1249 	int i, cnt = 0;
1250 
1251 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1252 		priv = &buf_alloc->priv_buf[i];
1253 		if (hdev->hw_tc_map & BIT(i) &&
1254 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1255 		    priv->enable)
1256 			cnt++;
1257 	}
1258 
1259 	return cnt;
1260 }
1261 
1262 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1263 {
1264 	struct hclge_priv_buf *priv;
1265 	u32 rx_priv = 0;
1266 	int i;
1267 
1268 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1269 		priv = &buf_alloc->priv_buf[i];
1270 		if (priv->enable)
1271 			rx_priv += priv->buf_size;
1272 	}
1273 	return rx_priv;
1274 }
1275 
1276 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1277 {
1278 	u32 i, total_tx_size = 0;
1279 
1280 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1281 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1282 
1283 	return total_tx_size;
1284 }
1285 
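/* Check whether @rx_all can hold both the private buffers and the
 * required shared buffer; if so, assign the remainder to the shared
 * buffer and set its overall and per-TC watermarks.
 */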
1286 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1287 			       struct hclge_pkt_buf_alloc *buf_alloc,
1288 			       u32 rx_all)
1289 {
1290 	u32 shared_buf_min, shared_buf_tc, shared_std;
1291 	int tc_num, pfc_enable_num;
1292 	u32 shared_buf;
1293 	u32 rx_priv;
1294 	int i;
1295 
1296 	tc_num = hclge_get_tc_num(hdev);
1297 	pfc_enable_num = hclge_get_pfc_enable_num(hdev);
1298 
1299 	if (hnae3_dev_dcb_supported(hdev))
1300 		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
1301 	else
1302 		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
1303 
1304 	shared_buf_tc = pfc_enable_num * hdev->mps +
1305 			(tc_num - pfc_enable_num) * hdev->mps / 2 +
1306 			hdev->mps;
1307 	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);
1308 
1309 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1310 	if (rx_all <= rx_priv + shared_std)
1311 		return false;
1312 
1313 	shared_buf = rx_all - rx_priv;
1314 	buf_alloc->s_buf.buf_size = shared_buf;
1315 	buf_alloc->s_buf.self.high = shared_buf;
1316 	buf_alloc->s_buf.self.low = 2 * hdev->mps;
1317 
1318 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1319 		if ((hdev->hw_tc_map & BIT(i)) &&
1320 		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
1321 			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
1322 			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
1323 		} else {
1324 			buf_alloc->s_buf.tc_thrd[i].low = 0;
1325 			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
1326 		}
1327 	}
1328 
1329 	return true;
1330 }
1331 
1332 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1333 				struct hclge_pkt_buf_alloc *buf_alloc)
1334 {
1335 	u32 i, total_size;
1336 
1337 	total_size = hdev->pkt_buf_size;
1338 
1339 	/* alloc tx buffer for all enabled TCs */
1340 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1341 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1342 
1343 		if (total_size < HCLGE_DEFAULT_TX_BUF)
1344 			return -ENOMEM;
1345 
1346 		if (hdev->hw_tc_map & BIT(i))
1347 			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
1348 		else
1349 			priv->tx_buf_size = 0;
1350 
1351 		total_size -= priv->tx_buf_size;
1352 	}
1353 
1354 	return 0;
1355 }
1356 
1357 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1358  * @hdev: pointer to struct hclge_dev
1359  * @buf_alloc: pointer to buffer calculation data
1360  * @return: 0 if the calculation succeeds, negative on failure
1361  */
1362 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1363 				struct hclge_pkt_buf_alloc *buf_alloc)
1364 {
1365 #define HCLGE_BUF_SIZE_UNIT	128
1366 	u32 rx_all = hdev->pkt_buf_size, aligned_mps;
1367 	int no_pfc_priv_num, pfc_priv_num;
1368 	struct hclge_priv_buf *priv;
1369 	int i;
1370 
1371 	aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1372 	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1373 
1374 	/* When DCB is not supported, rx private
1375 	 * buffer is not allocated.
1376 	 */
1377 	if (!hnae3_dev_dcb_supported(hdev)) {
1378 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1379 			return -ENOMEM;
1380 
1381 		return 0;
1382 	}
1383 
1384 	/* step 1, try to alloc private buffer for all enabled TCs */
1385 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1386 		priv = &buf_alloc->priv_buf[i];
1387 		if (hdev->hw_tc_map & BIT(i)) {
1388 			priv->enable = 1;
1389 			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1390 				priv->wl.low = aligned_mps;
1391 				priv->wl.high = priv->wl.low + aligned_mps;
1392 				priv->buf_size = priv->wl.high +
1393 						HCLGE_DEFAULT_DV;
1394 			} else {
1395 				priv->wl.low = 0;
1396 				priv->wl.high = 2 * aligned_mps;
1397 				priv->buf_size = priv->wl.high;
1398 			}
1399 		} else {
1400 			priv->enable = 0;
1401 			priv->wl.low = 0;
1402 			priv->wl.high = 0;
1403 			priv->buf_size = 0;
1404 		}
1405 	}
1406 
1407 	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1408 		return 0;
1409 
1410 	/* step 2, try to decrease the buffer size of
1411 	 * the PFC-disabled TCs' private buffers
1412 	 */
1413 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1414 		priv = &buf_alloc->priv_buf[i];
1415 
1416 		priv->enable = 0;
1417 		priv->wl.low = 0;
1418 		priv->wl.high = 0;
1419 		priv->buf_size = 0;
1420 
1421 		if (!(hdev->hw_tc_map & BIT(i)))
1422 			continue;
1423 
1424 		priv->enable = 1;
1425 
1426 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1427 			priv->wl.low = 128;
1428 			priv->wl.high = priv->wl.low + aligned_mps;
1429 			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
1430 		} else {
1431 			priv->wl.low = 0;
1432 			priv->wl.high = aligned_mps;
1433 			priv->buf_size = priv->wl.high;
1434 		}
1435 	}
1436 
1437 	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1438 		return 0;
1439 
1440 	/* step 3, try to reduce the number of PFC-disabled TCs
1441 	 * that have a private buffer
1442 	 */
1443 	/* get the number of PFC-disabled TCs that have a private buffer */
1444 	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1445 
1446 	/* clear from the last TC first */
1447 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1448 		priv = &buf_alloc->priv_buf[i];
1449 
1450 		if (hdev->hw_tc_map & BIT(i) &&
1451 		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1452 			/* Clear this PFC-disabled TC's private buffer */
1453 			priv->wl.low = 0;
1454 			priv->wl.high = 0;
1455 			priv->buf_size = 0;
1456 			priv->enable = 0;
1457 			no_pfc_priv_num--;
1458 		}
1459 
1460 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1461 		    no_pfc_priv_num == 0)
1462 			break;
1463 	}
1464 
1465 	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1466 		return 0;
1467 
1468 	/* step 4, try to reduce the number of PFC-enabled TCs
1469 	 * that have a private buffer.
1470 	 */
1471 	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1472 
1473 	/* clear from the last TC first */
1474 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1475 		priv = &buf_alloc->priv_buf[i];
1476 
1477 		if (hdev->hw_tc_map & BIT(i) &&
1478 		    hdev->tm_info.hw_pfc_map & BIT(i)) {
1479 			/* Drop this PFC-enabled TC's private buffer */
1480 			priv->wl.low = 0;
1481 			priv->enable = 0;
1482 			priv->wl.high = 0;
1483 			priv->buf_size = 0;
1484 			pfc_priv_num--;
1485 		}
1486 
1487 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1488 		    pfc_priv_num == 0)
1489 			break;
1490 	}
1491 	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1492 		return 0;
1493 
1494 	return -ENOMEM;
1495 }
1496 
1497 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1498 				   struct hclge_pkt_buf_alloc *buf_alloc)
1499 {
1500 	struct hclge_rx_priv_buff_cmd *req;
1501 	struct hclge_desc desc;
1502 	int ret;
1503 	int i;
1504 
1505 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1506 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1507 
1508 	/* Alloc private buffer TCs */
1509 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1510 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1511 
1512 		req->buf_num[i] =
1513 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1514 		req->buf_num[i] |=
1515 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1516 	}
1517 
1518 	req->shared_buf =
1519 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1520 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
1521 
1522 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1523 	if (ret)
1524 		dev_err(&hdev->pdev->dev,
1525 			"rx private buffer alloc cmd failed %d\n", ret);
1526 
1527 	return ret;
1528 }
1529 
1530 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1531 				   struct hclge_pkt_buf_alloc *buf_alloc)
1532 {
1533 	struct hclge_rx_priv_wl_buf *req;
1534 	struct hclge_priv_buf *priv;
1535 	struct hclge_desc desc[2];
1536 	int i, j;
1537 	int ret;
1538 
1539 	for (i = 0; i < 2; i++) {
1540 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1541 					   false);
1542 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1543 
1544 		/* The first descriptor sets the NEXT bit to 1 */
1545 		if (i == 0)
1546 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1547 		else
1548 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1549 
1550 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1551 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1552 
1553 			priv = &buf_alloc->priv_buf[idx];
1554 			req->tc_wl[j].high =
1555 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1556 			req->tc_wl[j].high |=
1557 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1558 			req->tc_wl[j].low =
1559 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1560 			req->tc_wl[j].low |=
1561 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1562 		}
1563 	}
1564 
1565 	/* Send 2 descriptors at one time */
1566 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1567 	if (ret)
1568 		dev_err(&hdev->pdev->dev,
1569 			"rx private waterline config cmd failed %d\n",
1570 			ret);
1571 	return ret;
1572 }
1573 
1574 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1575 				    struct hclge_pkt_buf_alloc *buf_alloc)
1576 {
1577 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1578 	struct hclge_rx_com_thrd *req;
1579 	struct hclge_desc desc[2];
1580 	struct hclge_tc_thrd *tc;
1581 	int i, j;
1582 	int ret;
1583 
1584 	for (i = 0; i < 2; i++) {
1585 		hclge_cmd_setup_basic_desc(&desc[i],
1586 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1587 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
1588 
1589 		/* The first descriptor sets the NEXT bit to 1 */
1590 		if (i == 0)
1591 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1592 		else
1593 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1594 
1595 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1596 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1597 
1598 			req->com_thrd[j].high =
1599 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1600 			req->com_thrd[j].high |=
1601 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1602 			req->com_thrd[j].low =
1603 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1604 			req->com_thrd[j].low |=
1605 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1606 		}
1607 	}
1608 
1609 	/* Send 2 descriptors at one time */
1610 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1611 	if (ret)
1612 		dev_err(&hdev->pdev->dev,
1613 			"common threshold config cmd failed %d\n", ret);
1614 	return ret;
1615 }
1616 
1617 static int hclge_common_wl_config(struct hclge_dev *hdev,
1618 				  struct hclge_pkt_buf_alloc *buf_alloc)
1619 {
1620 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1621 	struct hclge_rx_com_wl *req;
1622 	struct hclge_desc desc;
1623 	int ret;
1624 
1625 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1626 
1627 	req = (struct hclge_rx_com_wl *)desc.data;
1628 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1629 	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1630 
1631 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1632 	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1633 
1634 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1635 	if (ret)
1636 		dev_err(&hdev->pdev->dev,
1637 			"common waterline config cmd failed %d\n", ret);
1638 
1639 	return ret;
1640 }
1641 
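/* Split the packet buffer between TX and RX: calculate and program the
 * per-TC TX buffers and RX private buffers, then (on DCB-capable
 * hardware) the private waterlines and common thresholds, and finally
 * the common waterline.
 */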
1642 int hclge_buffer_alloc(struct hclge_dev *hdev)
1643 {
1644 	struct hclge_pkt_buf_alloc *pkt_buf;
1645 	int ret;
1646 
1647 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1648 	if (!pkt_buf)
1649 		return -ENOMEM;
1650 
1651 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
1652 	if (ret) {
1653 		dev_err(&hdev->pdev->dev,
1654 			"could not calc tx buffer size for all TCs %d\n", ret);
1655 		goto out;
1656 	}
1657 
1658 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
1659 	if (ret) {
1660 		dev_err(&hdev->pdev->dev,
1661 			"could not alloc tx buffers %d\n", ret);
1662 		goto out;
1663 	}
1664 
1665 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
1666 	if (ret) {
1667 		dev_err(&hdev->pdev->dev,
1668 			"could not calc rx priv buffer size for all TCs %d\n",
1669 			ret);
1670 		goto out;
1671 	}
1672 
1673 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
1674 	if (ret) {
1675 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1676 			ret);
1677 		goto out;
1678 	}
1679 
1680 	if (hnae3_dev_dcb_supported(hdev)) {
1681 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
1682 		if (ret) {
1683 			dev_err(&hdev->pdev->dev,
1684 				"could not configure rx private waterline %d\n",
1685 				ret);
1686 			goto out;
1687 		}
1688 
1689 		ret = hclge_common_thrd_config(hdev, pkt_buf);
1690 		if (ret) {
1691 			dev_err(&hdev->pdev->dev,
1692 				"could not configure common threshold %d\n",
1693 				ret);
1694 			goto out;
1695 		}
1696 	}
1697 
1698 	ret = hclge_common_wl_config(hdev, pkt_buf);
1699 	if (ret)
1700 		dev_err(&hdev->pdev->dev,
1701 			"could not configure common waterline %d\n", ret);
1702 
1703 out:
1704 	kfree(pkt_buf);
1705 	return ret;
1706 }
1707 
1708 static int hclge_init_roce_base_info(struct hclge_vport *vport)
1709 {
1710 	struct hnae3_handle *roce = &vport->roce;
1711 	struct hnae3_handle *nic = &vport->nic;
1712 
1713 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
1714 
1715 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1716 	    vport->back->num_msi_left == 0)
1717 		return -EINVAL;
1718 
1719 	roce->rinfo.base_vector = vport->back->roce_base_vector;
1720 
1721 	roce->rinfo.netdev = nic->kinfo.netdev;
1722 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
1723 
1724 	roce->pdev = nic->pdev;
1725 	roce->ae_algo = nic->ae_algo;
1726 	roce->numa_node_mask = nic->numa_node_mask;
1727 
1728 	return 0;
1729 }
1730 
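/* Allocate MSI/MSI-X vectors for the PF, tolerating fewer vectors than
 * requested, and set up the vector status/irq bookkeeping arrays.
 */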
1731 static int hclge_init_msi(struct hclge_dev *hdev)
1732 {
1733 	struct pci_dev *pdev = hdev->pdev;
1734 	int vectors;
1735 	int i;
1736 
1737 	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1738 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
1739 	if (vectors < 0) {
1740 		dev_err(&pdev->dev,
1741 			"failed(%d) to allocate MSI/MSI-X vectors\n",
1742 			vectors);
1743 		return vectors;
1744 	}
1745 	if (vectors < hdev->num_msi)
1746 		dev_warn(&hdev->pdev->dev,
1747 			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1748 			 hdev->num_msi, vectors);
1749 
1750 	hdev->num_msi = vectors;
1751 	hdev->num_msi_left = vectors;
1752 	hdev->base_msi_vector = pdev->irq;
1753 	hdev->roce_base_vector = hdev->base_msi_vector +
1754 				hdev->roce_base_msix_offset;
1755 
1756 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1757 					   sizeof(u16), GFP_KERNEL);
1758 	if (!hdev->vector_status) {
1759 		pci_free_irq_vectors(pdev);
1760 		return -ENOMEM;
1761 	}
1762 
1763 	for (i = 0; i < hdev->num_msi; i++)
1764 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1765 
1766 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1767 					sizeof(int), GFP_KERNEL);
1768 	if (!hdev->vector_irq) {
1769 		pci_free_irq_vectors(pdev);
1770 		return -ENOMEM;
1771 	}
1772 
1773 	return 0;
1774 }
1775 
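/* Only 10M and 100M support half duplex; force full duplex otherwise. */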
1776 static u8 hclge_check_speed_dup(u8 duplex, int speed)
1777 {
1779 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
1780 		duplex = HCLGE_MAC_FULL;
1781 
1782 	return duplex;
1783 }
1784 
1785 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
1786 				      u8 duplex)
1787 {
1788 	struct hclge_config_mac_speed_dup_cmd *req;
1789 	struct hclge_desc desc;
1790 	int ret;
1791 
1792 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
1793 
1794 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
1795 
1796 	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
1797 
1798 	switch (speed) {
1799 	case HCLGE_MAC_SPEED_10M:
1800 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1801 				HCLGE_CFG_SPEED_S, 6);
1802 		break;
1803 	case HCLGE_MAC_SPEED_100M:
1804 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1805 				HCLGE_CFG_SPEED_S, 7);
1806 		break;
1807 	case HCLGE_MAC_SPEED_1G:
1808 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1809 				HCLGE_CFG_SPEED_S, 0);
1810 		break;
1811 	case HCLGE_MAC_SPEED_10G:
1812 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1813 				HCLGE_CFG_SPEED_S, 1);
1814 		break;
1815 	case HCLGE_MAC_SPEED_25G:
1816 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1817 				HCLGE_CFG_SPEED_S, 2);
1818 		break;
1819 	case HCLGE_MAC_SPEED_40G:
1820 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1821 				HCLGE_CFG_SPEED_S, 3);
1822 		break;
1823 	case HCLGE_MAC_SPEED_50G:
1824 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1825 				HCLGE_CFG_SPEED_S, 4);
1826 		break;
1827 	case HCLGE_MAC_SPEED_100G:
1828 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1829 				HCLGE_CFG_SPEED_S, 5);
1830 		break;
1831 	default:
1832 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
1833 		return -EINVAL;
1834 	}
1835 
1836 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1837 		      1);
1838 
1839 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1840 	if (ret) {
1841 		dev_err(&hdev->pdev->dev,
1842 			"mac speed/duplex config cmd failed %d.\n", ret);
1843 		return ret;
1844 	}
1845 
1846 	return 0;
1847 }
1848 
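/* Configure MAC speed/duplex only when it differs from the cached value.
 * A minimal usage sketch (hypothetical caller forcing 10G full duplex):
 *
 *	ret = hclge_cfg_mac_speed_dup(hdev, HCLGE_MAC_SPEED_10G,
 *				      HCLGE_MAC_FULL);
 */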
1849 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
1850 {
1851 	int ret;
1852 
1853 	duplex = hclge_check_speed_dup(duplex, speed);
1854 	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
1855 		return 0;
1856 
1857 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
1858 	if (ret)
1859 		return ret;
1860 
1861 	hdev->hw.mac.speed = speed;
1862 	hdev->hw.mac.duplex = duplex;
1863 
1864 	return 0;
1865 }
1866 
1867 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
1868 				     u8 duplex)
1869 {
1870 	struct hclge_vport *vport = hclge_get_vport(handle);
1871 	struct hclge_dev *hdev = vport->back;
1872 
1873 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
1874 }
1875 
1876 static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
1877 					u8 *duplex)
1878 {
1879 	struct hclge_query_an_speed_dup_cmd *req;
1880 	struct hclge_desc desc;
1881 	int speed_tmp;
1882 	int ret;
1883 
1884 	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
1885 
1886 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
1887 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1888 	if (ret) {
1889 		dev_err(&hdev->pdev->dev,
1890 			"mac speed/autoneg/duplex query cmd failed %d\n",
1891 			ret);
1892 		return ret;
1893 	}
1894 
1895 	*duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
1896 	speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
1897 				    HCLGE_QUERY_SPEED_S);
1898 
1899 	ret = hclge_parse_speed(speed_tmp, speed);
1900 	if (ret)
1901 		dev_err(&hdev->pdev->dev,
1902 			"could not parse speed(=%d), %d\n", speed_tmp, ret);
1903 
1904 	return ret;
1905 }
1906 
1907 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
1908 {
1909 	struct hclge_config_auto_neg_cmd *req;
1910 	struct hclge_desc desc;
1911 	u32 flag = 0;
1912 	int ret;
1913 
1914 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
1915 
1916 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
1917 	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
1918 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
1919 
1920 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1921 	if (ret)
1922 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
1923 			ret);
1924 
1925 	return ret;
1926 }
1927 
1928 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
1929 {
1930 	struct hclge_vport *vport = hclge_get_vport(handle);
1931 	struct hclge_dev *hdev = vport->back;
1932 
1933 	return hclge_set_autoneg_en(hdev, enable);
1934 }
1935 
1936 static int hclge_get_autoneg(struct hnae3_handle *handle)
1937 {
1938 	struct hclge_vport *vport = hclge_get_vport(handle);
1939 	struct hclge_dev *hdev = vport->back;
1940 	struct phy_device *phydev = hdev->hw.mac.phydev;
1941 
1942 	if (phydev)
1943 		return phydev->autoneg;
1944 
1945 	return hdev->hw.mac.autoneg;
1946 }
1947 
1948 static int hclge_mac_init(struct hclge_dev *hdev)
1949 {
1950 	struct hnae3_handle *handle = &hdev->vport[0].nic;
1951 	struct net_device *netdev = handle->kinfo.netdev;
1952 	struct hclge_mac *mac = &hdev->hw.mac;
1953 	int mtu;
1954 	int ret;
1955 
1956 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
1957 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
1958 					 hdev->hw.mac.duplex);
1959 	if (ret) {
1960 		dev_err(&hdev->pdev->dev,
1961 			"Config mac speed dup fail ret=%d\n", ret);
1962 		return ret;
1963 	}
1964 
1965 	mac->link = 0;
1966 
1967 	if (netdev)
1968 		mtu = netdev->mtu;
1969 	else
1970 		mtu = ETH_DATA_LEN;
1971 
1972 	ret = hclge_set_mtu(handle, mtu);
1973 	if (ret)
1974 		dev_err(&hdev->pdev->dev,
1975 			"set mtu failed ret=%d\n", ret);
1976 
1977 	return ret;
1978 }
1979 
1980 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
1981 {
1982 	if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
1983 		schedule_work(&hdev->mbx_service_task);
1984 }
1985 
1986 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
1987 {
1988 	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
1989 		schedule_work(&hdev->rst_service_task);
1990 }
1991 
1992 static void hclge_task_schedule(struct hclge_dev *hdev)
1993 {
1994 	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
1995 	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
1996 	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
1997 		(void)schedule_work(&hdev->service_task);
1998 }
1999 
2000 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2001 {
2002 	struct hclge_link_status_cmd *req;
2003 	struct hclge_desc desc;
2004 	int link_status;
2005 	int ret;
2006 
2007 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2008 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2009 	if (ret) {
2010 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2011 			ret);
2012 		return ret;
2013 	}
2014 
2015 	req = (struct hclge_link_status_cmd *)desc.data;
2016 	link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2017 
2018 	return !!link_status;
2019 }
2020 
2021 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2022 {
2023 	int mac_state;
2024 	int link_stat;
2025 
2026 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2027 		return 0;
2028 
2029 	mac_state = hclge_get_mac_link_status(hdev);
2030 
2031 	if (hdev->hw.mac.phydev) {
2032 		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2033 			link_stat = mac_state &
2034 				hdev->hw.mac.phydev->link;
2035 		else
2036 			link_stat = 0;
2038 	} else {
2039 		link_stat = mac_state;
2040 	}
2041 
2042 	return !!link_stat;
2043 }
2044 
2045 static void hclge_update_link_status(struct hclge_dev *hdev)
2046 {
2047 	struct hnae3_client *client = hdev->nic_client;
2048 	struct hnae3_handle *handle;
2049 	int state;
2050 	int i;
2051 
2052 	if (!client)
2053 		return;
2054 	state = hclge_get_mac_phy_link(hdev);
2055 	if (state != hdev->hw.mac.link) {
2056 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2057 			handle = &hdev->vport[i].nic;
2058 			client->ops->link_status_change(handle, state);
2059 		}
2060 		hdev->hw.mac.link = state;
2061 	}
2062 }
2063 
2064 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2065 {
2066 	struct hclge_mac mac = hdev->hw.mac;
2067 	u8 duplex;
2068 	int speed;
2069 	int ret;
2070 
2071 	/* get the speed and duplex as the autoneg result from the mac cmd
2072 	 * when the phy doesn't exist.
2073 	 */
2074 	if (mac.phydev || !mac.autoneg)
2075 		return 0;
2076 
2077 	ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
2078 	if (ret) {
2079 		dev_err(&hdev->pdev->dev,
2080 			"mac autoneg/speed/duplex query failed %d\n", ret);
2081 		return ret;
2082 	}
2083 
2084 	ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2085 	if (ret) {
2086 		dev_err(&hdev->pdev->dev,
2087 			"mac speed/duplex config failed %d\n", ret);
2088 		return ret;
2089 	}
2090 
2091 	return 0;
2092 }
2093 
2094 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2095 {
2096 	struct hclge_vport *vport = hclge_get_vport(handle);
2097 	struct hclge_dev *hdev = vport->back;
2098 
2099 	return hclge_update_speed_duplex(hdev);
2100 }
2101 
2102 static int hclge_get_status(struct hnae3_handle *handle)
2103 {
2104 	struct hclge_vport *vport = hclge_get_vport(handle);
2105 	struct hclge_dev *hdev = vport->back;
2106 
2107 	hclge_update_link_status(hdev);
2108 
2109 	return hdev->hw.mac.link;
2110 }
2111 
2112 static void hclge_service_timer(struct timer_list *t)
2113 {
2114 	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2115 
2116 	mod_timer(&hdev->service_timer, jiffies + HZ);
2117 	hdev->hw_stats.stats_timer++;
2118 	hclge_task_schedule(hdev);
2119 }
2120 
2121 static void hclge_service_complete(struct hclge_dev *hdev)
2122 {
2123 	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2124 
2125 	/* Flush memory before next watchdog */
2126 	smp_mb__before_atomic();
2127 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2128 }
2129 
2130 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2131 {
2132 	u32 rst_src_reg;
2133 	u32 cmdq_src_reg;
2134 
2135 	/* fetch the events from their corresponding regs */
2136 	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2137 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2138 
2139 	/* Assumption: If by any chance reset and mailbox events are reported
2140 	 * together, then we will only process the reset event in this go and
2141 	 * defer the processing of the mailbox events. Since we would not have
2142 	 * cleared the RX CMDQ event this time, we would receive another
2143 	 * interrupt from H/W just for the mailbox.
2144 	 */
2145 
2146 	/* check for vector0 reset event sources */
2147 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2148 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2149 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2150 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2151 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2152 		return HCLGE_VECTOR0_EVENT_RST;
2153 	}
2154 
2155 	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2156 		dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2157 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2158 		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2159 		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2160 		return HCLGE_VECTOR0_EVENT_RST;
2161 	}
2162 
2163 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2164 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2165 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2166 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2167 		return HCLGE_VECTOR0_EVENT_RST;
2168 	}
2169 
2170 	/* check for vector0 mailbox(=CMDQ RX) event source */
2171 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2172 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2173 		*clearval = cmdq_src_reg;
2174 		return HCLGE_VECTOR0_EVENT_MBX;
2175 	}
2176 
2177 	return HCLGE_VECTOR0_EVENT_OTHER;
2178 }
2179 
2180 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2181 				    u32 regclr)
2182 {
2183 	switch (event_type) {
2184 	case HCLGE_VECTOR0_EVENT_RST:
2185 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2186 		break;
2187 	case HCLGE_VECTOR0_EVENT_MBX:
2188 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2189 		break;
2190 	default:
2191 		break;
2192 	}
2193 }
2194 
2195 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2196 {
2197 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2198 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2199 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2200 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2201 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2202 }
2203 
2204 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2205 {
2206 	writel(enable ? 1 : 0, vector->addr);
2207 }
2208 
2209 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2210 {
2211 	struct hclge_dev *hdev = data;
2212 	u32 event_cause;
2213 	u32 clearval;
2214 
2215 	hclge_enable_vector(&hdev->misc_vector, false);
2216 	event_cause = hclge_check_event_cause(hdev, &clearval);
2217 
2218 	/* vector 0 interrupt is shared with reset and mailbox source events. */
2219 	switch (event_cause) {
2220 	case HCLGE_VECTOR0_EVENT_RST:
2221 		hclge_reset_task_schedule(hdev);
2222 		break;
2223 	case HCLGE_VECTOR0_EVENT_MBX:
2224 		/* If we are here then,
2225 		 * 1. either we are not handling any mbx task and none is
2226 		 *    scheduled,
2227 		 *                        OR
2228 		 * 2. we could be handling a mbx task but nothing more is
2229 		 *    scheduled.
2230 		 * In both cases, we should schedule the mbx task as there are
2231 		 * more mbx messages reported by this interrupt.
2232 		 */
2233 		hclge_mbx_task_schedule(hdev);
2234 		break;
2235 	default:
2236 		dev_warn(&hdev->pdev->dev,
2237 			 "received unknown or unhandled event of vector0\n");
2238 		break;
2239 	}
2240 
2241 	/* clear the source of the interrupt if it is not caused by reset */
2242 	if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2243 		hclge_clear_event_cause(hdev, event_cause, clearval);
2244 		hclge_enable_vector(&hdev->misc_vector, true);
2245 	}
2246 
2247 	return IRQ_HANDLED;
2248 }
2249 
2250 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2251 {
2252 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2253 		dev_warn(&hdev->pdev->dev,
2254 			 "vector(vector_id %d) has been freed.\n", vector_id);
2255 		return;
2256 	}
2257 
2258 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2259 	hdev->num_msi_left += 1;
2260 	hdev->num_msi_used -= 1;
2261 }
2262 
2263 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2264 {
2265 	struct hclge_misc_vector *vector = &hdev->misc_vector;
2266 
2267 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2268 
2269 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2270 	hdev->vector_status[0] = 0;
2271 
2272 	hdev->num_msi_left -= 1;
2273 	hdev->num_msi_used += 1;
2274 }
2275 
2276 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2277 {
2278 	int ret;
2279 
2280 	hclge_get_misc_vector(hdev);
2281 
2282 	/* this IRQ is freed explicitly in hclge_misc_irq_uninit() */
2283 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2284 			  0, "hclge_misc", hdev);
2285 	if (ret) {
2286 		hclge_free_vector(hdev, 0);
2287 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2288 			hdev->misc_vector.vector_irq);
2289 	}
2290 
2291 	return ret;
2292 }
2293 
2294 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2295 {
2296 	free_irq(hdev->misc_vector.vector_irq, hdev);
2297 	hclge_free_vector(hdev, 0);
2298 }
2299 
2300 static int hclge_notify_client(struct hclge_dev *hdev,
2301 			       enum hnae3_reset_notify_type type)
2302 {
2303 	struct hnae3_client *client = hdev->nic_client;
2304 	u16 i;
2305 
2306 	if (!client->ops->reset_notify)
2307 		return -EOPNOTSUPP;
2308 
2309 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2310 		struct hnae3_handle *handle = &hdev->vport[i].nic;
2311 		int ret;
2312 
2313 		ret = client->ops->reset_notify(handle, type);
2314 		if (ret) {
2315 			dev_err(&hdev->pdev->dev,
2316 				"notify nic client failed %d(%d)\n", type, ret);
2317 			return ret;
2318 		}
2319 	}
2320 
2321 	return 0;
2322 }
2323 
2324 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2325 				    enum hnae3_reset_notify_type type)
2326 {
2327 	struct hnae3_client *client = hdev->roce_client;
2328 	int ret = 0;
2329 	u16 i;
2330 
2331 	if (!client)
2332 		return 0;
2333 
2334 	if (!client->ops->reset_notify)
2335 		return -EOPNOTSUPP;
2336 
2337 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2338 		struct hnae3_handle *handle = &hdev->vport[i].roce;
2339 
2340 		ret = client->ops->reset_notify(handle, type);
2341 		if (ret) {
2342 			dev_err(&hdev->pdev->dev,
2343 				"notify roce client failed %d(%d)\n",
2344 				type, ret);
2345 			return ret;
2346 		}
2347 	}
2348 
2349 	return ret;
2350 }
2351 
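/* Poll the hardware reset status register for the current reset type,
 * sleeping HCLGE_RESET_WAIT_MS between reads for at most
 * HCLGE_RESET_WAIT_CNT attempts; returns -EBUSY if hardware has not
 * finished resetting in time.
 */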
2352 static int hclge_reset_wait(struct hclge_dev *hdev)
2353 {
2354 #define HCLGE_RESET_WAIT_MS	100
2355 #define HCLGE_RESET_WAIT_CNT	5
2356 	u32 val, reg, reg_bit;
2357 	u32 cnt = 0;
2358 
2359 	switch (hdev->reset_type) {
2360 	case HNAE3_GLOBAL_RESET:
2361 		reg = HCLGE_GLOBAL_RESET_REG;
2362 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
2363 		break;
2364 	case HNAE3_CORE_RESET:
2365 		reg = HCLGE_GLOBAL_RESET_REG;
2366 		reg_bit = HCLGE_CORE_RESET_BIT;
2367 		break;
2368 	case HNAE3_FUNC_RESET:
2369 		reg = HCLGE_FUN_RST_ING;
2370 		reg_bit = HCLGE_FUN_RST_ING_B;
2371 		break;
2372 	default:
2373 		dev_err(&hdev->pdev->dev,
2374 			"Wait for unsupported reset type: %d\n",
2375 			hdev->reset_type);
2376 		return -EINVAL;
2377 	}
2378 
2379 	val = hclge_read_dev(&hdev->hw, reg);
2380 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2381 		msleep(HCLGE_RESET_WAIT_MS);
2382 		val = hclge_read_dev(&hdev->hw, reg);
2383 		cnt++;
2384 	}
2385 
2386 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
2387 		dev_warn(&hdev->pdev->dev,
2388 			 "Wait for reset timeout: %d\n", hdev->reset_type);
2389 		return -EBUSY;
2390 	}
2391 
2392 	return 0;
2393 }
2394 
2395 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2396 {
2397 	struct hclge_desc desc;
2398 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2399 	int ret;
2400 
2401 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2402 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2403 	req->fun_reset_vfid = func_id;
2404 
2405 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2406 	if (ret)
2407 		dev_err(&hdev->pdev->dev,
2408 			"send function reset cmd fail, status = %d\n", ret);
2409 
2410 	return ret;
2411 }
2412 
2413 static void hclge_do_reset(struct hclge_dev *hdev)
2414 {
2415 	struct pci_dev *pdev = hdev->pdev;
2416 	u32 val;
2417 
2418 	switch (hdev->reset_type) {
2419 	case HNAE3_GLOBAL_RESET:
2420 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2421 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2422 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2423 		dev_info(&pdev->dev, "Global Reset requested\n");
2424 		break;
2425 	case HNAE3_CORE_RESET:
2426 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2427 		hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2428 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2429 		dev_info(&pdev->dev, "Core Reset requested\n");
2430 		break;
2431 	case HNAE3_FUNC_RESET:
2432 		dev_info(&pdev->dev, "PF Reset requested\n");
2433 		/* schedule again to check later */
2434 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2435 		hclge_reset_task_schedule(hdev);
2436 		break;
2437 	default:
2438 		dev_warn(&pdev->dev,
2439 			 "Unsupported reset type: %d\n", hdev->reset_type);
2440 		break;
2441 	}
2442 }
2443 
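/* Return the highest priority pending reset in @addr (IMP > global >
 * core > func) and clear its bit together with every lower priority
 * request, since the higher level reset covers them.
 */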
2444 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2445 						   unsigned long *addr)
2446 {
2447 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2448 
2449 	/* return the highest priority reset level amongst all */
2450 	if (test_bit(HNAE3_IMP_RESET, addr)) {
2451 		rst_level = HNAE3_IMP_RESET;
2452 		clear_bit(HNAE3_IMP_RESET, addr);
2453 		clear_bit(HNAE3_GLOBAL_RESET, addr);
2454 		clear_bit(HNAE3_CORE_RESET, addr);
2455 		clear_bit(HNAE3_FUNC_RESET, addr);
2456 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
2457 		rst_level = HNAE3_GLOBAL_RESET;
2458 		clear_bit(HNAE3_GLOBAL_RESET, addr);
2459 		clear_bit(HNAE3_CORE_RESET, addr);
2460 		clear_bit(HNAE3_FUNC_RESET, addr);
2461 	} else if (test_bit(HNAE3_CORE_RESET, addr)) {
2462 		rst_level = HNAE3_CORE_RESET;
2463 		clear_bit(HNAE3_CORE_RESET, addr);
2464 		clear_bit(HNAE3_FUNC_RESET, addr);
2465 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
2466 		rst_level = HNAE3_FUNC_RESET;
2467 		clear_bit(HNAE3_FUNC_RESET, addr);
2468 	}
2469 
2470 	return rst_level;
2471 }
2472 
2473 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2474 {
2475 	u32 clearval = 0;
2476 
2477 	switch (hdev->reset_type) {
2478 	case HNAE3_IMP_RESET:
2479 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2480 		break;
2481 	case HNAE3_GLOBAL_RESET:
2482 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2483 		break;
2484 	case HNAE3_CORE_RESET:
2485 		clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2486 		break;
2487 	default:
2488 		break;
2489 	}
2490 
2491 	if (!clearval)
2492 		return;
2493 
2494 	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2495 	hclge_enable_vector(&hdev->misc_vector, true);
2496 }
2497 
2498 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
2499 {
2500 	int ret = 0;
2501 
2502 	switch (hdev->reset_type) {
2503 	case HNAE3_FUNC_RESET:
2504 		ret = hclge_func_reset_cmd(hdev, 0);
2505 		if (ret) {
2506 			dev_err(&hdev->pdev->dev,
2507 				"asserting function reset fail %d!\n", ret);
2508 			return ret;
2509 		}
2510 
2511 		/* After performing PF reset, it is not necessary to do the
2512 		 * mailbox handling or send any command to firmware, because
2513 		 * any mailbox handling or command to firmware is only valid
2514 		 * after hclge_cmd_init is called.
2515 		 */
2516 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2517 		break;
2518 	default:
2519 		break;
2520 	}
2521 
2522 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
2523 
2524 	return ret;
2525 }
2526 
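/* Decide whether a failed reset should be retried: keep waiting while a
 * reset is still pending, re-schedule on timeout, or arm reset_timer to
 * upgrade the reset level; give up after MAX_RESET_FAIL_CNT attempts.
 */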
2527 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
2528 {
2529 #define MAX_RESET_FAIL_CNT 5
2530 #define RESET_UPGRADE_DELAY_SEC 10
2531 
2532 	if (hdev->reset_pending) {
2533 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
2534 			 hdev->reset_pending);
2535 		return true;
2536 	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
2537 		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
2538 		    BIT(HCLGE_IMP_RESET_BIT))) {
2539 		dev_info(&hdev->pdev->dev,
2540 			 "reset failed because IMP Reset is pending\n");
2541 		hclge_clear_reset_cause(hdev);
2542 		return false;
2543 	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
2544 		hdev->reset_fail_cnt++;
2545 		if (is_timeout) {
2546 			set_bit(hdev->reset_type, &hdev->reset_pending);
2547 			dev_info(&hdev->pdev->dev,
2548 				 "re-schedule to wait for hw reset done\n");
2549 			return true;
2550 		}
2551 
2552 		dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
2553 		hclge_clear_reset_cause(hdev);
2554 		mod_timer(&hdev->reset_timer,
2555 			  jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
2556 
2557 		return false;
2558 	}
2559 
2560 	hclge_clear_reset_cause(hdev);
2561 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
2562 	return false;
2563 }
2564 
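/* Core reset flow: notify clients down, assert the reset, wait for
 * hardware to finish, re-initialize the ae device and bring clients back
 * up. Any failure falls through to hclge_reset_err_handle() which decides
 * on retry or upgrade.
 */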
2565 static void hclge_reset(struct hclge_dev *hdev)
2566 {
2567 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2568 	bool is_timeout = false;
2569 	int ret;
2570 
2571 	/* Initialize ae_dev reset status as well, in case enet layer wants to
2572 	 * know if device is undergoing reset
2573 	 */
2574 	ae_dev->reset_type = hdev->reset_type;
2575 	hdev->reset_count++;
2576 	hdev->last_reset_time = jiffies;
2577 	/* perform reset of the stack & ae device for a client */
2578 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
2579 	if (ret)
2580 		goto err_reset;
2581 
2582 	rtnl_lock();
2583 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2584 	if (ret)
2585 		goto err_reset_lock;
2586 
2587 	rtnl_unlock();
2588 
2589 	ret = hclge_reset_prepare_wait(hdev);
2590 	if (ret)
2591 		goto err_reset;
2592 
2593 	if (hclge_reset_wait(hdev)) {
2594 		is_timeout = true;
2595 		goto err_reset;
2596 	}
2597 
2598 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
2599 	if (ret)
2600 		goto err_reset;
2601 
2602 	rtnl_lock();
2603 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2604 	if (ret)
2605 		goto err_reset_lock;
2606 
2607 	ret = hclge_reset_ae_dev(hdev->ae_dev);
2608 	if (ret)
2609 		goto err_reset_lock;
2610 
2611 	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
2612 	if (ret)
2613 		goto err_reset_lock;
2614 
2615 	hclge_clear_reset_cause(hdev);
2616 
2617 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2618 	if (ret)
2619 		goto err_reset_lock;
2620 
2621 	rtnl_unlock();
2622 
2623 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
2624 	if (ret)
2625 		goto err_reset;
2626 
2627 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
2628 	if (ret)
2629 		goto err_reset;
2630 
2631 	return;
2632 
2633 err_reset_lock:
2634 	rtnl_unlock();
2635 err_reset:
2636 	if (hclge_reset_err_handle(hdev, is_timeout))
2637 		hclge_reset_task_schedule(hdev);
2638 }
2639 
2640 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
2641 {
2642 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
2643 	struct hclge_dev *hdev = ae_dev->priv;
2644 
2645 	/* We might end up getting called broadly because of 2 cases below:
2646 	 * 1. A recoverable error was conveyed through APEI and the only way
2647 	 *    to bring normalcy is to reset.
2648 	 * 2. A new reset request from the stack due to timeout
2649 	 *
2650 	 * For the first case, the error event might not have an ae handle
2651 	 * available. Check if this is a new reset request and we are not here
2652 	 * just because the last reset attempt did not succeed and the watchdog
2653 	 * hit us again. We know this if the last reset request did not occur
2654 	 * very recently (watchdog timer = 5*HZ, so check after a sufficiently
2655 	 * large time, say 4*5*HZ). For a new request we reset the "reset
2656 	 * level" to PF reset. If it is a repeat of the most recent request,
2657 	 * we want to make sure we throttle it; therefore, we do not allow it
2658 	 * again before 3*HZ has passed.
2659 	 */
2660 	if (!handle)
2661 		handle = &hdev->vport[0].nic;
2662 
2663 	if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
2664 		return;
2665 	else if (hdev->default_reset_request)
2666 		hdev->reset_level =
2667 			hclge_get_reset_level(hdev,
2668 					      &hdev->default_reset_request);
2669 	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
2670 		hdev->reset_level = HNAE3_FUNC_RESET;
2671 
2672 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
2673 		 hdev->reset_level);
2674 
2675 	/* request reset & schedule reset task */
2676 	set_bit(hdev->reset_level, &hdev->reset_request);
2677 	hclge_reset_task_schedule(hdev);
2678 
2679 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
2680 		hdev->reset_level++;
2681 }
2682 
2683 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
2684 					enum hnae3_reset_type rst_type)
2685 {
2686 	struct hclge_dev *hdev = ae_dev->priv;
2687 
2688 	set_bit(rst_type, &hdev->default_reset_request);
2689 }
2690 
2691 static void hclge_reset_timer(struct timer_list *t)
2692 {
2693 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
2694 
2695 	dev_info(&hdev->pdev->dev,
2696 		 "triggering global reset in reset timer\n");
2697 	set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
2698 	hclge_reset_event(hdev->pdev, NULL);
2699 }
2700 
2701 static void hclge_reset_subtask(struct hclge_dev *hdev)
2702 {
2703 	/* check if there is any ongoing reset in the hardware. This status can
2704 	 * be checked from reset_pending. If there is, we need to wait for
2705 	 * hardware to complete the reset.
2706 	 *    a. If we are able to figure out in reasonable time that hardware
2707 	 *       has fully reset, we can proceed with the driver and client
2708 	 *       reset.
2709 	 *    b. else, we can come back later to check this status, so
2710 	 *       re-schedule now.
2711 	 */
2712 	hdev->last_reset_time = jiffies;
2713 	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
2714 	if (hdev->reset_type != HNAE3_NONE_RESET)
2715 		hclge_reset(hdev);
2716 
2717 	/* check if we got any *new* reset requests to be honored */
2718 	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
2719 	if (hdev->reset_type != HNAE3_NONE_RESET)
2720 		hclge_do_reset(hdev);
2721 
2722 	hdev->reset_type = HNAE3_NONE_RESET;
2723 }
2724 
2725 static void hclge_reset_service_task(struct work_struct *work)
2726 {
2727 	struct hclge_dev *hdev =
2728 		container_of(work, struct hclge_dev, rst_service_task);
2729 
2730 	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
2731 		return;
2732 
2733 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
2734 
2735 	hclge_reset_subtask(hdev);
2736 
2737 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
2738 }
2739 
2740 static void hclge_mailbox_service_task(struct work_struct *work)
2741 {
2742 	struct hclge_dev *hdev =
2743 		container_of(work, struct hclge_dev, mbx_service_task);
2744 
2745 	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
2746 		return;
2747 
2748 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
2749 
2750 	hclge_mbx_handler(hdev);
2751 
2752 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
2753 }
2754 
2755 static void hclge_service_task(struct work_struct *work)
2756 {
2757 	struct hclge_dev *hdev =
2758 		container_of(work, struct hclge_dev, service_task);
2759 
2760 	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
2761 		hclge_update_stats_for_all(hdev);
2762 		hdev->hw_stats.stats_timer = 0;
2763 	}
2764 
2765 	hclge_update_speed_duplex(hdev);
2766 	hclge_update_link_status(hdev);
2767 	hclge_service_complete(hdev);
2768 }
2769 
2770 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
2771 {
2772 	/* VF handle has no client */
2773 	if (!handle->client)
2774 		return container_of(handle, struct hclge_vport, nic);
2775 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
2776 		return container_of(handle, struct hclge_vport, roce);
2777 	else
2778 		return container_of(handle, struct hclge_vport, nic);
2779 }
2780 
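/* Allocate up to @vector_num vectors for a vport. The scan starts at
 * index 1 because vector 0 is reserved for the misc (reset/mailbox)
 * interrupt; returns the number of vectors actually allocated.
 */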
2781 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
2782 			    struct hnae3_vector_info *vector_info)
2783 {
2784 	struct hclge_vport *vport = hclge_get_vport(handle);
2785 	struct hnae3_vector_info *vector = vector_info;
2786 	struct hclge_dev *hdev = vport->back;
2787 	int alloc = 0;
2788 	int i, j;
2789 
2790 	vector_num = min(hdev->num_msi_left, vector_num);
2791 
2792 	for (j = 0; j < vector_num; j++) {
2793 		for (i = 1; i < hdev->num_msi; i++) {
2794 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
2795 				vector->vector = pci_irq_vector(hdev->pdev, i);
2796 				vector->io_addr = hdev->hw.io_base +
2797 					HCLGE_VECTOR_REG_BASE +
2798 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
2799 					vport->vport_id *
2800 					HCLGE_VECTOR_VF_OFFSET;
2801 				hdev->vector_status[i] = vport->vport_id;
2802 				hdev->vector_irq[i] = vector->vector;
2803 
2804 				vector++;
2805 				alloc++;
2806 
2807 				break;
2808 			}
2809 		}
2810 	}
2811 	hdev->num_msi_left -= alloc;
2812 	hdev->num_msi_used += alloc;
2813 
2814 	return alloc;
2815 }
2816 
2817 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
2818 {
2819 	int i;
2820 
2821 	for (i = 0; i < hdev->num_msi; i++)
2822 		if (vector == hdev->vector_irq[i])
2823 			return i;
2824 
2825 	return -EINVAL;
2826 }
2827 
2828 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
2829 {
2830 	struct hclge_vport *vport = hclge_get_vport(handle);
2831 	struct hclge_dev *hdev = vport->back;
2832 	int vector_id;
2833 
2834 	vector_id = hclge_get_vector_index(hdev, vector);
2835 	if (vector_id < 0) {
2836 		dev_err(&hdev->pdev->dev,
2837 			"Get vector index fail. vector_id = %d\n", vector_id);
2838 		return vector_id;
2839 	}
2840 
2841 	hclge_free_vector(hdev, vector_id);
2842 
2843 	return 0;
2844 }
2845 
2846 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
2847 {
2848 	return HCLGE_RSS_KEY_SIZE;
2849 }
2850 
2851 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
2852 {
2853 	return HCLGE_RSS_IND_TBL_SIZE;
2854 }
2855 
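/* Write the RSS hash algorithm and key to hardware. The key does not fit
 * into one descriptor, so it is sent in three chunks of
 * HCLGE_RSS_HASH_KEY_NUM bytes, the last chunk carrying the remainder of
 * HCLGE_RSS_KEY_SIZE.
 */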
2856 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
2857 				  const u8 hfunc, const u8 *key)
2858 {
2859 	struct hclge_rss_config_cmd *req;
2860 	struct hclge_desc desc;
2861 	int key_offset;
2862 	int key_size;
2863 	int ret;
2864 
2865 	req = (struct hclge_rss_config_cmd *)desc.data;
2866 
2867 	for (key_offset = 0; key_offset < 3; key_offset++) {
2868 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
2869 					   false);
2870 
2871 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
2872 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
2873 
2874 		if (key_offset == 2)
2875 			key_size =
2876 			key_size = HCLGE_RSS_KEY_SIZE -
2877 				   HCLGE_RSS_HASH_KEY_NUM * 2;
2878 			key_size = HCLGE_RSS_HASH_KEY_NUM;
2879 
2880 		memcpy(req->hash_key,
2881 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
2882 
2883 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2884 		if (ret) {
2885 			dev_err(&hdev->pdev->dev,
2886 				"Configure RSS config fail, status = %d\n",
2887 				ret);
2888 			return ret;
2889 		}
2890 	}
2891 	return 0;
2892 }
2893 
2894 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
2895 {
2896 	struct hclge_rss_indirection_table_cmd *req;
2897 	struct hclge_desc desc;
2898 	int i, j;
2899 	int ret;
2900 
2901 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
2902 
2903 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
2904 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INDIR_TABLE,
2905 					   false);
2906 
2907 		req->start_table_index =
2908 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
2909 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
2910 
2911 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
2912 			req->rss_result[j] =
2913 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
2914 
2915 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2916 		if (ret) {
2917 			dev_err(&hdev->pdev->dev,
2918 				"Configure rss indir table fail,status = %d\n",
2919 				ret);
2920 			return ret;
2921 		}
2922 	}
2923 	return 0;
2924 }
2925 
2926 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
2927 				 u16 *tc_size, u16 *tc_offset)
2928 {
2929 	struct hclge_rss_tc_mode_cmd *req;
2930 	struct hclge_desc desc;
2931 	int ret;
2932 	int i;
2933 
2934 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
2935 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
2936 
2937 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2938 		u16 mode = 0;
2939 
2940 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
2941 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
2942 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
2943 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
2944 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
2945 
2946 		req->rss_tc_mode[i] = cpu_to_le16(mode);
2947 	}
2948 
2949 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2950 	if (ret)
2951 		dev_err(&hdev->pdev->dev,
2952 			"Configure rss tc mode fail, status = %d\n", ret);
2953 
2954 	return ret;
2955 }
2956 
2957 static void hclge_get_rss_type(struct hclge_vport *vport)
2958 {
2959 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
2960 	    vport->rss_tuple_sets.ipv4_udp_en ||
2961 	    vport->rss_tuple_sets.ipv4_sctp_en ||
2962 	    vport->rss_tuple_sets.ipv6_tcp_en ||
2963 	    vport->rss_tuple_sets.ipv6_udp_en ||
2964 	    vport->rss_tuple_sets.ipv6_sctp_en)
2965 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
2966 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
2967 		 vport->rss_tuple_sets.ipv6_fragment_en)
2968 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
2969 	else
2970 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
2971 }
2972 
2973 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
2974 {
2975 	struct hclge_rss_input_tuple_cmd *req;
2976 	struct hclge_desc desc;
2977 	int ret;
2978 
2979 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
2980 
2981 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
2982 
2983 	/* Get the tuple cfg from the PF */
2984 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
2985 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
2986 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
2987 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
2988 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
2989 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
2990 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
2991 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
2992 	hclge_get_rss_type(&hdev->vport[0]);
2993 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2994 	if (ret)
2995 		dev_err(&hdev->pdev->dev,
2996 			"Configure rss input fail, status = %d\n", ret);
2997 	return ret;
2998 }
2999 
3000 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3001 			 u8 *key, u8 *hfunc)
3002 {
3003 	struct hclge_vport *vport = hclge_get_vport(handle);
3004 	int i;
3005 
3006 	/* Get hash algorithm */
3007 	if (hfunc) {
3008 		switch (vport->rss_algo) {
3009 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3010 			*hfunc = ETH_RSS_HASH_TOP;
3011 			break;
3012 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
3013 			*hfunc = ETH_RSS_HASH_XOR;
3014 			break;
3015 		default:
3016 			*hfunc = ETH_RSS_HASH_UNKNOWN;
3017 			break;
3018 		}
3019 	}
3020 
3021 	/* Get the RSS Key required by the user */
3022 	if (key)
3023 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3024 
3025 	/* Get the indirection table */
3026 	if (indir)
3027 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3028 			indir[i] = vport->rss_indirection_tbl[i];
3029 
3030 	return 0;
3031 }
3032 
3033 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3034 			 const u8 *key, const u8 hfunc)
3035 {
3036 	struct hclge_vport *vport = hclge_get_vport(handle);
3037 	struct hclge_dev *hdev = vport->back;
3038 	u8 hash_algo;
3039 	int ret, i;
3040 
3041 	/* Set the RSS Hash Key if specified by the user */
3042 	if (key) {
3043 		switch (hfunc) {
3044 		case ETH_RSS_HASH_TOP:
3045 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3046 			break;
3047 		case ETH_RSS_HASH_XOR:
3048 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3049 			break;
3050 		case ETH_RSS_HASH_NO_CHANGE:
3051 			hash_algo = vport->rss_algo;
3052 			break;
3053 		default:
3054 			return -EINVAL;
3055 		}
3056 
3057 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3058 		if (ret)
3059 			return ret;
3060 
3061 		/* Update the shadow RSS key with the user specified key */
3062 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3063 		vport->rss_algo = hash_algo;
3064 	}
3065 
3066 	/* Update the shadow RSS table with user specified qids */
3067 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3068 		vport->rss_indirection_tbl[i] = indir[i];
3069 
3070 	/* Update the hardware */
3071 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3072 }
3073 
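/* Translate the ethtool RXH_* flags in @nfc into the driver's tuple
 * enable bits; SCTP flows additionally hash the verification tag
 * (HCLGE_V_TAG_BIT).
 */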
3074 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3075 {
3076 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3077 
3078 	if (nfc->data & RXH_L4_B_2_3)
3079 		hash_sets |= HCLGE_D_PORT_BIT;
3080 	else
3081 		hash_sets &= ~HCLGE_D_PORT_BIT;
3082 
3083 	if (nfc->data & RXH_IP_SRC)
3084 		hash_sets |= HCLGE_S_IP_BIT;
3085 	else
3086 		hash_sets &= ~HCLGE_S_IP_BIT;
3087 
3088 	if (nfc->data & RXH_IP_DST)
3089 		hash_sets |= HCLGE_D_IP_BIT;
3090 	else
3091 		hash_sets &= ~HCLGE_D_IP_BIT;
3092 
3093 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3094 		hash_sets |= HCLGE_V_TAG_BIT;
3095 
3096 	return hash_sets;
3097 }
3098 
3099 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3100 			       struct ethtool_rxnfc *nfc)
3101 {
3102 	struct hclge_vport *vport = hclge_get_vport(handle);
3103 	struct hclge_dev *hdev = vport->back;
3104 	struct hclge_rss_input_tuple_cmd *req;
3105 	struct hclge_desc desc;
3106 	u8 tuple_sets;
3107 	int ret;
3108 
3109 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3110 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
3111 		return -EINVAL;
3112 
3113 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3114 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3115 
3116 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3117 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3118 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3119 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3120 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3121 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3122 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3123 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3124 
3125 	tuple_sets = hclge_get_rss_hash_bits(nfc);
3126 	switch (nfc->flow_type) {
3127 	case TCP_V4_FLOW:
3128 		req->ipv4_tcp_en = tuple_sets;
3129 		break;
3130 	case TCP_V6_FLOW:
3131 		req->ipv6_tcp_en = tuple_sets;
3132 		break;
3133 	case UDP_V4_FLOW:
3134 		req->ipv4_udp_en = tuple_sets;
3135 		break;
3136 	case UDP_V6_FLOW:
3137 		req->ipv6_udp_en = tuple_sets;
3138 		break;
3139 	case SCTP_V4_FLOW:
3140 		req->ipv4_sctp_en = tuple_sets;
3141 		break;
3142 	case SCTP_V6_FLOW:
3143 		if ((nfc->data & RXH_L4_B_0_1) ||
3144 		    (nfc->data & RXH_L4_B_2_3))
3145 			return -EINVAL;
3146 
3147 		req->ipv6_sctp_en = tuple_sets;
3148 		break;
3149 	case IPV4_FLOW:
3150 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3151 		break;
3152 	case IPV6_FLOW:
3153 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3154 		break;
3155 	default:
3156 		return -EINVAL;
3157 	}
3158 
3159 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3160 	if (ret) {
3161 		dev_err(&hdev->pdev->dev,
3162 			"Set rss tuple fail, status = %d\n", ret);
3163 		return ret;
3164 	}
3165 
3166 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3167 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3168 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3169 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3170 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3171 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3172 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3173 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3174 	hclge_get_rss_type(vport);
3175 	return 0;
3176 }
3177 
3178 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3179 			       struct ethtool_rxnfc *nfc)
3180 {
3181 	struct hclge_vport *vport = hclge_get_vport(handle);
3182 	u8 tuple_sets;
3183 
3184 	nfc->data = 0;
3185 
3186 	switch (nfc->flow_type) {
3187 	case TCP_V4_FLOW:
3188 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3189 		break;
3190 	case UDP_V4_FLOW:
3191 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3192 		break;
3193 	case TCP_V6_FLOW:
3194 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3195 		break;
3196 	case UDP_V6_FLOW:
3197 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3198 		break;
3199 	case SCTP_V4_FLOW:
3200 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3201 		break;
3202 	case SCTP_V6_FLOW:
3203 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3204 		break;
3205 	case IPV4_FLOW:
3206 	case IPV6_FLOW:
3207 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3208 		break;
3209 	default:
3210 		return -EINVAL;
3211 	}
3212 
3213 	if (!tuple_sets)
3214 		return 0;
3215 
3216 	if (tuple_sets & HCLGE_D_PORT_BIT)
3217 		nfc->data |= RXH_L4_B_2_3;
3218 	if (tuple_sets & HCLGE_S_PORT_BIT)
3219 		nfc->data |= RXH_L4_B_0_1;
3220 	if (tuple_sets & HCLGE_D_IP_BIT)
3221 		nfc->data |= RXH_IP_DST;
3222 	if (tuple_sets & HCLGE_S_IP_BIT)
3223 		nfc->data |= RXH_IP_SRC;
3224 
3225 	return 0;
3226 }
3227 
3228 static int hclge_get_tc_size(struct hnae3_handle *handle)
3229 {
3230 	struct hclge_vport *vport = hclge_get_vport(handle);
3231 	struct hclge_dev *hdev = vport->back;
3232 
3233 	return hdev->rss_size_max;
3234 }
3235 
3236 int hclge_rss_init_hw(struct hclge_dev *hdev)
3237 {
3238 	struct hclge_vport *vport = hdev->vport;
3239 	u8 *rss_indir = vport[0].rss_indirection_tbl;
3240 	u16 rss_size = vport[0].alloc_rss_size;
3241 	u8 *key = vport[0].rss_hash_key;
3242 	u8 hfunc = vport[0].rss_algo;
3243 	u16 tc_offset[HCLGE_MAX_TC_NUM];
3244 	u16 tc_valid[HCLGE_MAX_TC_NUM];
3245 	u16 tc_size[HCLGE_MAX_TC_NUM];
3246 	u16 roundup_size;
3247 	int i, ret;
3248 
3249 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
3250 	if (ret)
3251 		return ret;
3252 
3253 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3254 	if (ret)
3255 		return ret;
3256 
3257 	ret = hclge_set_rss_input_tuple(hdev);
3258 	if (ret)
3259 		return ret;
3260 
3261 	/* Each TC has the same queue size, and the tc_size set to hardware is
3262 	 * the log2 of the roundup power of two of rss_size; the actual queue
3263 	 * size is limited by the indirection table.
3264 	 */
3265 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3266 		dev_err(&hdev->pdev->dev,
3267 			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
3268 			rss_size);
3269 		return -EINVAL;
3270 	}
3271 
3272 	roundup_size = roundup_pow_of_two(rss_size);
3273 	roundup_size = ilog2(roundup_size);
3274 
3275 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3276 		tc_valid[i] = 0;
3277 
3278 		if (!(hdev->hw_tc_map & BIT(i)))
3279 			continue;
3280 
3281 		tc_valid[i] = 1;
3282 		tc_size[i] = roundup_size;
3283 		tc_offset[i] = rss_size * i;
3284 	}
3285 
3286 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3287 }
3288 
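/* Spread queues evenly over the indirection table: entry i of each
 * vport's table points at queue (i % alloc_rss_size).
 */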
3289 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3290 {
3291 	struct hclge_vport *vport = hdev->vport;
3292 	int i, j;
3293 
3294 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3295 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3296 			vport[j].rss_indirection_tbl[i] =
3297 				i % vport[j].alloc_rss_size;
3298 	}
3299 }
3300 
3301 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3302 {
3303 	struct hclge_vport *vport = hdev->vport;
3304 	int i;
3305 
3306 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3307 		vport[i].rss_tuple_sets.ipv4_tcp_en =
3308 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3309 		vport[i].rss_tuple_sets.ipv4_udp_en =
3310 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3311 		vport[i].rss_tuple_sets.ipv4_sctp_en =
3312 			HCLGE_RSS_INPUT_TUPLE_SCTP;
3313 		vport[i].rss_tuple_sets.ipv4_fragment_en =
3314 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3315 		vport[i].rss_tuple_sets.ipv6_tcp_en =
3316 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3317 		vport[i].rss_tuple_sets.ipv6_udp_en =
3318 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3319 		vport[i].rss_tuple_sets.ipv6_sctp_en =
3320 			HCLGE_RSS_INPUT_TUPLE_SCTP;
3321 		vport[i].rss_tuple_sets.ipv6_fragment_en =
3322 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3323 
3324 		vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3325 
3326 		netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
3327 	}
3328 
3329 	hclge_rss_indir_init_cfg(hdev);
3330 }
3331 
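/* Map (en == true) or unmap (en == false) every ring in @ring_chain to
 * @vector_id. One descriptor holds at most HCLGE_VECTOR_ELEMENTS_PER_CMD
 * entries, so the chain is flushed to firmware in batches.
 */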
3332 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3333 				int vector_id, bool en,
3334 				struct hnae3_ring_chain_node *ring_chain)
3335 {
3336 	struct hclge_dev *hdev = vport->back;
3337 	struct hnae3_ring_chain_node *node;
3338 	struct hclge_desc desc;
3339 	struct hclge_ctrl_vector_chain_cmd *req
3340 		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3341 	enum hclge_cmd_status status;
3342 	enum hclge_opcode_type op;
3343 	u16 tqp_type_and_id;
3344 	int i;
3345 
3346 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3347 	hclge_cmd_setup_basic_desc(&desc, op, false);
3348 	req->int_vector_id = vector_id;
3349 
3350 	i = 0;
3351 	for (node = ring_chain; node; node = node->next) {
3352 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3353 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
3354 				HCLGE_INT_TYPE_S,
3355 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3356 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3357 				HCLGE_TQP_ID_S, node->tqp_index);
3358 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3359 				HCLGE_INT_GL_IDX_S,
3360 				hnae3_get_field(node->int_gl_idx,
3361 						HNAE3_RING_GL_IDX_M,
3362 						HNAE3_RING_GL_IDX_S));
3363 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3364 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3365 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3366 			req->vfid = vport->vport_id;
3367 
3368 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
3369 			if (status) {
3370 				dev_err(&hdev->pdev->dev,
3371 					"Map TQP fail, status is %d.\n",
3372 					status);
3373 				return -EIO;
3374 			}
3375 			i = 0;
3376 
3377 			hclge_cmd_setup_basic_desc(&desc, op, false);
3380 			req->int_vector_id = vector_id;
3381 		}
3382 	}
3383 
3384 	if (i > 0) {
3385 		req->int_cause_num = i;
3386 		req->vfid = vport->vport_id;
3387 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
3388 		if (status) {
3389 			dev_err(&hdev->pdev->dev,
3390 				"Map TQP fail, status is %d.\n", status);
3391 			return -EIO;
3392 		}
3393 	}
3394 
3395 	return 0;
3396 }
3397 
3398 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3399 				    int vector,
3400 				    struct hnae3_ring_chain_node *ring_chain)
3401 {
3402 	struct hclge_vport *vport = hclge_get_vport(handle);
3403 	struct hclge_dev *hdev = vport->back;
3404 	int vector_id;
3405 
3406 	vector_id = hclge_get_vector_index(hdev, vector);
3407 	if (vector_id < 0) {
3408 		dev_err(&hdev->pdev->dev,
3409 			"Get vector index fail. vector_id = %d\n", vector_id);
3410 		return vector_id;
3411 	}
3412 
3413 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3414 }
3415 
3416 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3417 				       int vector,
3418 				       struct hnae3_ring_chain_node *ring_chain)
3419 {
3420 	struct hclge_vport *vport = hclge_get_vport(handle);
3421 	struct hclge_dev *hdev = vport->back;
3422 	int vector_id, ret;
3423 
3424 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3425 		return 0;
3426 
3427 	vector_id = hclge_get_vector_index(hdev, vector);
3428 	if (vector_id < 0) {
3429 		dev_err(&handle->pdev->dev,
3430 			"Get vector index fail. ret = %d\n", vector_id);
3431 		return vector_id;
3432 	}
3433 
3434 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3435 	if (ret)
3436 		dev_err(&handle->pdev->dev,
3437 			"Unmap ring from vector fail. vector_id = %d, ret = %d\n",
3438 			vector_id, ret);
3440 
3441 	return ret;
3442 }
3443 
3444 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3445 			       struct hclge_promisc_param *param)
3446 {
3447 	struct hclge_promisc_cfg_cmd *req;
3448 	struct hclge_desc desc;
3449 	int ret;
3450 
3451 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3452 
3453 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
3454 	req->vf_id = param->vf_id;
3455 
3456 	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
3457 	 * pdev revision(0x20); newer revisions support them. Setting these
3458 	 * two fields does not cause an error when the driver sends the
3459 	 * command to the firmware on revision(0x20).
3460 	 */
3461 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
3462 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
3463 
3464 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3465 	if (ret)
3466 		dev_err(&hdev->pdev->dev,
3467 			"Set promisc mode fail, status is %d.\n", ret);
3468 
3469 	return ret;
3470 }
3471 
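/* Build a struct hclge_promisc_param for hclge_cmd_set_promisc_mode(). A
 * minimal usage sketch (hypothetical caller enabling unicast and
 * broadcast promiscuity on vport 0):
 *
 *	struct hclge_promisc_param param;
 *
 *	hclge_promisc_param_init(&param, true, false, true, 0);
 *	ret = hclge_cmd_set_promisc_mode(hdev, &param);
 */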
3472 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3473 			      bool en_mc, bool en_bc, int vport_id)
3474 {
3475 	if (!param)
3476 		return;
3477 
3478 	memset(param, 0, sizeof(struct hclge_promisc_param));
3479 	if (en_uc)
3480 		param->enable = HCLGE_PROMISC_EN_UC;
3481 	if (en_mc)
3482 		param->enable |= HCLGE_PROMISC_EN_MC;
3483 	if (en_bc)
3484 		param->enable |= HCLGE_PROMISC_EN_BC;
3485 	param->vf_id = vport_id;
3486 }
3487 
3488 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3489 				  bool en_mc_pmc)
3490 {
3491 	struct hclge_vport *vport = hclge_get_vport(handle);
3492 	struct hclge_dev *hdev = vport->back;
3493 	struct hclge_promisc_param param;
3494 
3495 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
3496 				 vport->vport_id);
3497 	return hclge_cmd_set_promisc_mode(hdev, &param);
3498 }
3499 
3500 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
3501 {
3502 	struct hclge_get_fd_mode_cmd *req;
3503 	struct hclge_desc desc;
3504 	int ret;
3505 
3506 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
3507 
3508 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
3509 
3510 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3511 	if (ret) {
3512 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
3513 		return ret;
3514 	}
3515 
3516 	*fd_mode = req->mode;
3517 
3518 	return ret;
3519 }
3520 
3521 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
3522 				   u32 *stage1_entry_num,
3523 				   u32 *stage2_entry_num,
3524 				   u16 *stage1_counter_num,
3525 				   u16 *stage2_counter_num)
3526 {
3527 	struct hclge_get_fd_allocation_cmd *req;
3528 	struct hclge_desc desc;
3529 	int ret;
3530 
3531 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
3532 
3533 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
3534 
3535 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3536 	if (ret) {
3537 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
3538 			ret);
3539 		return ret;
3540 	}
3541 
3542 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
3543 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
3544 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
3545 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
3546 
3547 	return ret;
3548 }
3549 
3550 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
3551 {
3552 	struct hclge_set_fd_key_config_cmd *req;
3553 	struct hclge_fd_key_cfg *stage;
3554 	struct hclge_desc desc;
3555 	int ret;
3556 
3557 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
3558 
3559 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
3560 	stage = &hdev->fd_cfg.key_cfg[stage_num];
3561 	req->stage = stage_num;
3562 	req->key_select = stage->key_sel;
3563 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
3564 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
3565 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
3566 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
3567 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
3568 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
3569 
3570 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3571 	if (ret)
3572 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
3573 
3574 	return ret;
3575 }
3576 
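/* Query the flow director mode and configure the stage-1 key layout:
 * which tuple fields take part in the match key and which metadata words
 * are active. The 400 bit key mode additionally leaves room for the MAC
 * address tuples.
 */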
3577 static int hclge_init_fd_config(struct hclge_dev *hdev)
3578 {
3579 #define LOW_2_WORDS		0x03
3580 	struct hclge_fd_key_cfg *key_cfg;
3581 	int ret;
3582 
3583 	if (!hnae3_dev_fd_supported(hdev))
3584 		return 0;
3585 
3586 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
3587 	if (ret)
3588 		return ret;
3589 
3590 	switch (hdev->fd_cfg.fd_mode) {
3591 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
3592 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
3593 		break;
3594 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
3595 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
3596 		break;
3597 	default:
3598 		dev_err(&hdev->pdev->dev,
3599 			"Unsupported flow director mode %d\n",
3600 			hdev->fd_cfg.fd_mode);
3601 		return -EOPNOTSUPP;
3602 	}
3603 
3604 	hdev->fd_cfg.fd_en = true;
3605 	hdev->fd_cfg.proto_support =
3606 		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
3607 		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
3608 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
3609 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
3610 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
3611 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
3612 	key_cfg->outer_sipv6_word_en = 0;
3613 	key_cfg->outer_dipv6_word_en = 0;
3614 
3615 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
3616 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
3617 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
3618 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
3619 
3620 	/* If the max 400 bit key is used, we can support tuples for ether type */
3621 	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
3622 		hdev->fd_cfg.proto_support |= ETHER_FLOW;
3623 		key_cfg->tuple_active |=
3624 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
3625 	}
3626 
3627 	/* roce_type is used to filter out RoCE frames;
3628 	 * dst_vport is used to specify the rule's destination vport
3629 	 */
3630 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
3631 
3632 	ret = hclge_get_fd_allocation(hdev,
3633 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
3634 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
3635 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
3636 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
3637 	if (ret)
3638 		return ret;
3639 
3640 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
3641 }
3642 
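/* A TCAM entry read/write spans three chained descriptors; the key is
 * split across the three tcam_data arrays in order, and sel_x picks
 * whether the X or the Y half of the X/Y-encoded key is written.
 */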
3643 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
3644 				int loc, u8 *key, bool is_add)
3645 {
3646 	struct hclge_fd_tcam_config_1_cmd *req1;
3647 	struct hclge_fd_tcam_config_2_cmd *req2;
3648 	struct hclge_fd_tcam_config_3_cmd *req3;
3649 	struct hclge_desc desc[3];
3650 	int ret;
3651 
3652 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
3653 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3654 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
3655 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3656 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
3657 
3658 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
3659 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
3660 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
3661 
3662 	req1->stage = stage;
3663 	req1->xy_sel = sel_x ? 1 : 0;
3664 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
3665 	req1->index = cpu_to_le32(loc);
3666 	req1->entry_vld = sel_x ? is_add : 0;
3667 
3668 	if (key) {
3669 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
3670 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
3671 		       sizeof(req2->tcam_data));
3672 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
3673 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
3674 	}
3675 
3676 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
3677 	if (ret)
3678 		dev_err(&hdev->pdev->dev,
3679 			"config tcam key fail, ret=%d\n",
3680 			ret);
3681 
3682 	return ret;
3683 }
3684 
3685 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
3686 			      struct hclge_fd_ad_data *action)
3687 {
3688 	struct hclge_fd_ad_config_cmd *req;
3689 	struct hclge_desc desc;
3690 	u64 ad_data = 0;
3691 	int ret;
3692 
3693 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
3694 
3695 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
3696 	req->index = cpu_to_le32(loc);
3697 	req->stage = stage;
3698 
3699 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
3700 		      action->write_rule_id_to_bd);
3701 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
3702 			action->rule_id);
3703 	ad_data <<= 32;
3704 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
3705 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
3706 		      action->forward_to_direct_queue);
3707 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
3708 			action->queue_id);
3709 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
3710 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
3711 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
3712 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
3713 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
3714 			action->counter_id);
3715 
3716 	req->ad_data = cpu_to_le64(ad_data);
3717 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3718 	if (ret)
3719 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
3720 
3721 	return ret;
3722 }
3723 
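/* Convert one tuple of a rule into its X/Y TCAM form.  In an X/Y
 * encoding each (x, y) bit pair expresses match-0, match-1 or
 * don't-care; the calc_x()/calc_y() helpers derive the two halves from
 * a tuple value and its mask.  MAC bytes are written reversed
 * (key_x[5 - i]), apparently to match the hardware key layout.
 */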
3724 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
3725 				   struct hclge_fd_rule *rule)
3726 {
3727 	u16 tmp_x_s, tmp_y_s;
3728 	u32 tmp_x_l, tmp_y_l;
3729 	int i;
3730 
3731 	if (rule->unused_tuple & tuple_bit)
3732 		return true;
3733 
3734 	switch (tuple_bit) {
3735 	case 0:
3736 		return false;
3737 	case BIT(INNER_DST_MAC):
3738 		for (i = 0; i < 6; i++) {
3739 			calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
3740 			       rule->tuples_mask.dst_mac[i]);
3741 			calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
3742 			       rule->tuples_mask.dst_mac[i]);
3743 		}
3744 
3745 		return true;
3746 	case BIT(INNER_SRC_MAC):
3747 		for (i = 0; i < 6; i++) {
3748 			calc_x(key_x[5 - i], rule->tuples.src_mac[i],
3749 			       rule->tuples_mask.src_mac[i]);
3750 			calc_y(key_y[5 - i], rule->tuples.src_mac[i],
3751 			       rule->tuples_mask.src_mac[i]);
3752 		}
3753 
3754 		return true;
3755 	case BIT(INNER_VLAN_TAG_FST):
3756 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
3757 		       rule->tuples_mask.vlan_tag1);
3758 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
3759 		       rule->tuples_mask.vlan_tag1);
3760 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3761 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3762 
3763 		return true;
3764 	case BIT(INNER_ETH_TYPE):
3765 		calc_x(tmp_x_s, rule->tuples.ether_proto,
3766 		       rule->tuples_mask.ether_proto);
3767 		calc_y(tmp_y_s, rule->tuples.ether_proto,
3768 		       rule->tuples_mask.ether_proto);
3769 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3770 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3771 
3772 		return true;
3773 	case BIT(INNER_IP_TOS):
3774 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
3775 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
3776 
3777 		return true;
3778 	case BIT(INNER_IP_PROTO):
3779 		calc_x(*key_x, rule->tuples.ip_proto,
3780 		       rule->tuples_mask.ip_proto);
3781 		calc_y(*key_y, rule->tuples.ip_proto,
3782 		       rule->tuples_mask.ip_proto);
3783 
3784 		return true;
3785 	case BIT(INNER_SRC_IP):
3786 		calc_x(tmp_x_l, rule->tuples.src_ip[3],
3787 		       rule->tuples_mask.src_ip[3]);
3788 		calc_y(tmp_y_l, rule->tuples.src_ip[3],
3789 		       rule->tuples_mask.src_ip[3]);
3790 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
3791 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
3792 
3793 		return true;
3794 	case BIT(INNER_DST_IP):
3795 		calc_x(tmp_x_l, rule->tuples.dst_ip[3],
3796 		       rule->tuples_mask.dst_ip[3]);
3797 		calc_y(tmp_y_l, rule->tuples.dst_ip[3],
3798 		       rule->tuples_mask.dst_ip[3]);
3799 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
3800 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
3801 
3802 		return true;
3803 	case BIT(INNER_SRC_PORT):
3804 		calc_x(tmp_x_s, rule->tuples.src_port,
3805 		       rule->tuples_mask.src_port);
3806 		calc_y(tmp_y_s, rule->tuples.src_port,
3807 		       rule->tuples_mask.src_port);
3808 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3809 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3810 
3811 		return true;
3812 	case BIT(INNER_DST_PORT):
3813 		calc_x(tmp_x_s, rule->tuples.dst_port,
3814 		       rule->tuples_mask.dst_port);
3815 		calc_y(tmp_y_s, rule->tuples.dst_port,
3816 		       rule->tuples_mask.dst_port);
3817 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3818 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3819 
3820 		return true;
3821 	default:
3822 		return false;
3823 	}
3824 }
3825 
3826 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
3827 				 u8 vf_id, u8 network_port_id)
3828 {
3829 	u32 port_number = 0;
3830 
3831 	if (port_type == HOST_PORT) {
3832 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
3833 				pf_id);
3834 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
3835 				vf_id);
3836 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
3837 	} else {
3838 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
3839 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
3840 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
3841 	}
3842 
3843 	return port_number;
3844 }
3845 
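/* Pack the active meta data fields (ROCE_TYPE, DST_VPORT) into one u32
 * and left-align it into the MSB region of the key: after cur_pos bits
 * are consumed, the value is shifted left by 32 - cur_pos.
 */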
3846 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
3847 				       __le32 *key_x, __le32 *key_y,
3848 				       struct hclge_fd_rule *rule)
3849 {
3850 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
3851 	u8 cur_pos = 0, tuple_size, shift_bits;
3852 	int i;
3853 
3854 	for (i = 0; i < MAX_META_DATA; i++) {
3855 		tuple_size = meta_data_key_info[i].key_length;
3856 		tuple_bit = key_cfg->meta_data_active & BIT(i);
3857 
3858 		switch (tuple_bit) {
3859 		case BIT(ROCE_TYPE):
3860 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
3861 			cur_pos += tuple_size;
3862 			break;
3863 		case BIT(DST_VPORT):
3864 			port_number = hclge_get_port_number(HOST_PORT, 0,
3865 							    rule->vf_id, 0);
3866 			hnae3_set_field(meta_data,
3867 					GENMASK(cur_pos + tuple_size, cur_pos),
3868 					cur_pos, port_number);
3869 			cur_pos += tuple_size;
3870 			break;
3871 		default:
3872 			break;
3873 		}
3874 	}
3875 
3876 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
3877 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
3878 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
3879 
3880 	*key_x = cpu_to_le32(tmp_x << shift_bits);
3881 	*key_y = cpu_to_le32(tmp_y << shift_bits);
3882 }
3883 
3884 /* A complete key consists of a meta data key and a tuple key.
3885  * The meta data key is stored in the MSB region, the tuple key in the
3886  * LSB region, and unused bits are filled with zero.
3887  */
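/* Illustrative sizing, assuming the 400-bit max key and a 32-bit
 * MAX_META_DATA_LENGTH: meta_data_region = 400 / 8 - 32 / 8 = 46, so
 * bytes 46..49 of key_x/key_y carry the meta data and bytes 0..45 the
 * packed tuples.
 */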
3888 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
3889 			    struct hclge_fd_rule *rule)
3890 {
3891 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
3892 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
3893 	u8 *cur_key_x, *cur_key_y;
3894 	int i, ret, tuple_size;
3895 	u8 meta_data_region;
3896 
3897 	memset(key_x, 0, sizeof(key_x));
3898 	memset(key_y, 0, sizeof(key_y));
3899 	cur_key_x = key_x;
3900 	cur_key_y = key_y;
3901 
3902 	for (i = 0; i < MAX_TUPLE; i++) {
3903 		bool tuple_valid;
3904 		u32 check_tuple;
3905 
3906 		tuple_size = tuple_key_info[i].key_length / 8;
3907 		check_tuple = key_cfg->tuple_active & BIT(i);
3908 
3909 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
3910 						     cur_key_y, rule);
3911 		if (tuple_valid) {
3912 			cur_key_x += tuple_size;
3913 			cur_key_y += tuple_size;
3914 		}
3915 	}
3916 
3917 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
3918 			MAX_META_DATA_LENGTH / 8;
3919 
3920 	hclge_fd_convert_meta_data(key_cfg,
3921 				   (__le32 *)(key_x + meta_data_region),
3922 				   (__le32 *)(key_y + meta_data_region),
3923 				   rule);
3924 
3925 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
3926 				   true);
3927 	if (ret) {
3928 		dev_err(&hdev->pdev->dev,
3929 			"fd key_y config fail, loc=%d, ret=%d\n",
3930 			rule->location, ret);
3931 		return ret;
3932 	}
3933 
3934 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
3935 				   true);
3936 	if (ret)
3937 		dev_err(&hdev->pdev->dev,
3938 			"fd key_x config fail, loc=%d, ret=%d\n",
3939 			rule->location, ret);
3940 	return ret;
3941 }
3942 
3943 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
3944 			       struct hclge_fd_rule *rule)
3945 {
3946 	struct hclge_fd_ad_data ad_data;
3947 
3948 	ad_data.ad_id = rule->location;
3949 
3950 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
3951 		ad_data.drop_packet = true;
3952 		ad_data.forward_to_direct_queue = false;
3953 		ad_data.queue_id = 0;
3954 	} else {
3955 		ad_data.drop_packet = false;
3956 		ad_data.forward_to_direct_queue = true;
3957 		ad_data.queue_id = rule->queue_id;
3958 	}
3959 
3960 	ad_data.use_counter = false;
3961 	ad_data.counter_id = 0;
3962 
3963 	ad_data.use_next_stage = false;
3964 	ad_data.next_input_key = 0;
3965 
3966 	ad_data.write_rule_id_to_bd = true;
3967 	ad_data.rule_id = rule->location;
3968 
3969 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
3970 }
3971 
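/* These checks back the ethtool ntuple interface.  For example, a rule
 * such as
 *
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.1 dst-port 80 \
 *           action 3 loc 1
 *
 * (illustrative device name) arrives as an ethtool_rx_flow_spec whose
 * unspecified fields are zero, which drives the unused-tuple marking
 * below.
 */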
3972 static int hclge_fd_check_spec(struct hclge_dev *hdev,
3973 			       struct ethtool_rx_flow_spec *fs, u32 *unused)
3974 {
3975 	struct ethtool_tcpip4_spec *tcp_ip4_spec;
3976 	struct ethtool_usrip4_spec *usr_ip4_spec;
3977 	struct ethtool_tcpip6_spec *tcp_ip6_spec;
3978 	struct ethtool_usrip6_spec *usr_ip6_spec;
3979 	struct ethhdr *ether_spec;
3980 
3981 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
3982 		return -EINVAL;
3983 
3984 	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
3985 		return -EOPNOTSUPP;
3986 
3987 	if ((fs->flow_type & FLOW_EXT) &&
3988 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
3989 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
3990 		return -EOPNOTSUPP;
3991 	}
3992 
3993 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
3994 	case SCTP_V4_FLOW:
3995 	case TCP_V4_FLOW:
3996 	case UDP_V4_FLOW:
3997 		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
3998 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
3999 
4000 		if (!tcp_ip4_spec->ip4src)
4001 			*unused |= BIT(INNER_SRC_IP);
4002 
4003 		if (!tcp_ip4_spec->ip4dst)
4004 			*unused |= BIT(INNER_DST_IP);
4005 
4006 		if (!tcp_ip4_spec->psrc)
4007 			*unused |= BIT(INNER_SRC_PORT);
4008 
4009 		if (!tcp_ip4_spec->pdst)
4010 			*unused |= BIT(INNER_DST_PORT);
4011 
4012 		if (!tcp_ip4_spec->tos)
4013 			*unused |= BIT(INNER_IP_TOS);
4014 
4015 		break;
4016 	case IP_USER_FLOW:
4017 		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4018 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4019 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4020 
4021 		if (!usr_ip4_spec->ip4src)
4022 			*unused |= BIT(INNER_SRC_IP);
4023 
4024 		if (!usr_ip4_spec->ip4dst)
4025 			*unused |= BIT(INNER_DST_IP);
4026 
4027 		if (!usr_ip4_spec->tos)
4028 			*unused |= BIT(INNER_IP_TOS);
4029 
4030 		if (!usr_ip4_spec->proto)
4031 			*unused |= BIT(INNER_IP_PROTO);
4032 
4033 		if (usr_ip4_spec->l4_4_bytes)
4034 			return -EOPNOTSUPP;
4035 
4036 		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4037 			return -EOPNOTSUPP;
4038 
4039 		break;
4040 	case SCTP_V6_FLOW:
4041 	case TCP_V6_FLOW:
4042 	case UDP_V6_FLOW:
4043 		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4044 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4045 			BIT(INNER_IP_TOS);
4046 
4047 		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4048 		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4049 			*unused |= BIT(INNER_SRC_IP);
4050 
4051 		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4052 		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4053 			*unused |= BIT(INNER_DST_IP);
4054 
4055 		if (!tcp_ip6_spec->psrc)
4056 			*unused |= BIT(INNER_SRC_PORT);
4057 
4058 		if (!tcp_ip6_spec->pdst)
4059 			*unused |= BIT(INNER_DST_PORT);
4060 
4061 		if (tcp_ip6_spec->tclass)
4062 			return -EOPNOTSUPP;
4063 
4064 		break;
4065 	case IPV6_USER_FLOW:
4066 		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4067 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4068 			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4069 			BIT(INNER_DST_PORT);
4070 
4071 		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4072 		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4073 			*unused |= BIT(INNER_SRC_IP);
4074 
4075 		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4076 		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4077 			*unused |= BIT(INNER_DST_IP);
4078 
4079 		if (!usr_ip6_spec->l4_proto)
4080 			*unused |= BIT(INNER_IP_PROTO);
4081 
4082 		if (usr_ip6_spec->tclass)
4083 			return -EOPNOTSUPP;
4084 
4085 		if (usr_ip6_spec->l4_4_bytes)
4086 			return -EOPNOTSUPP;
4087 
4088 		break;
4089 	case ETHER_FLOW:
4090 		ether_spec = &fs->h_u.ether_spec;
4091 		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4092 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4093 			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4094 
4095 		if (is_zero_ether_addr(ether_spec->h_source))
4096 			*unused |= BIT(INNER_SRC_MAC);
4097 
4098 		if (is_zero_ether_addr(ether_spec->h_dest))
4099 			*unused |= BIT(INNER_DST_MAC);
4100 
4101 		if (!ether_spec->h_proto)
4102 			*unused |= BIT(INNER_ETH_TYPE);
4103 
4104 		break;
4105 	default:
4106 		return -EOPNOTSUPP;
4107 	}
4108 
4109 	if ((fs->flow_type & FLOW_EXT)) {
4110 		if (fs->h_ext.vlan_etype)
4111 			return -EOPNOTSUPP;
4112 		if (!fs->h_ext.vlan_tci)
4113 			*unused |= BIT(INNER_VLAN_TAG_FST);
4114 
4115 		if (fs->m_ext.vlan_tci) {
4116 			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4117 				return -EINVAL;
4118 		}
4119 	} else {
4120 		*unused |= BIT(INNER_VLAN_TAG_FST);
4121 	}
4122 
4123 	if (fs->flow_type & FLOW_MAC_EXT) {
4124 		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4125 			return -EOPNOTSUPP;
4126 
4127 		if (is_zero_ether_addr(fs->h_ext.h_dest))
4128 			*unused |= BIT(INNER_DST_MAC);
4129 		else
4130 			*unused &= ~(BIT(INNER_DST_MAC));
4131 	}
4132 
4133 	return 0;
4134 }
4135 
4136 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4137 {
4138 	struct hclge_fd_rule *rule = NULL;
4139 	struct hlist_node *node2;
4140 
4141 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4142 		if (rule->location >= location)
4143 			break;
4144 	}
4145 
4146 	return rule && rule->location == location;
4147 }
4148 
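/* hdev->fd_rule_list is kept sorted by rule location: walk to the
 * first node whose location is >= the requested one, drop any entry
 * already occupying the slot, and (for an add) link the new rule
 * behind its predecessor.
 */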
4149 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4150 				     struct hclge_fd_rule *new_rule,
4151 				     u16 location,
4152 				     bool is_add)
4153 {
4154 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
4155 	struct hlist_node *node2;
4156 
4157 	if (is_add && !new_rule)
4158 		return -EINVAL;
4159 
4160 	hlist_for_each_entry_safe(rule, node2,
4161 				  &hdev->fd_rule_list, rule_node) {
4162 		if (rule->location >= location)
4163 			break;
4164 		parent = rule;
4165 	}
4166 
4167 	if (rule && rule->location == location) {
4168 		hlist_del(&rule->rule_node);
4169 		kfree(rule);
4170 		hdev->hclge_fd_rule_num--;
4171 
4172 		if (!is_add)
4173 			return 0;
4174 
4175 	} else if (!is_add) {
4176 		dev_err(&hdev->pdev->dev,
4177 			"delete fail, rule %d is inexistent\n",
4178 			location);
4179 		return -EINVAL;
4180 	}
4181 
4182 	INIT_HLIST_NODE(&new_rule->rule_node);
4183 
4184 	if (parent)
4185 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4186 	else
4187 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4188 
4189 	hdev->hclge_fd_rule_num++;
4190 
4191 	return 0;
4192 }
4193 
4194 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4195 			      struct ethtool_rx_flow_spec *fs,
4196 			      struct hclge_fd_rule *rule)
4197 {
4198 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4199 
4200 	switch (flow_type) {
4201 	case SCTP_V4_FLOW:
4202 	case TCP_V4_FLOW:
4203 	case UDP_V4_FLOW:
4204 		rule->tuples.src_ip[3] =
4205 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4206 		rule->tuples_mask.src_ip[3] =
4207 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4208 
4209 		rule->tuples.dst_ip[3] =
4210 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4211 		rule->tuples_mask.dst_ip[3] =
4212 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4213 
4214 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4215 		rule->tuples_mask.src_port =
4216 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4217 
4218 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4219 		rule->tuples_mask.dst_port =
4220 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4221 
4222 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4223 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4224 
4225 		rule->tuples.ether_proto = ETH_P_IP;
4226 		rule->tuples_mask.ether_proto = 0xFFFF;
4227 
4228 		break;
4229 	case IP_USER_FLOW:
4230 		rule->tuples.src_ip[3] =
4231 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4232 		rule->tuples_mask.src_ip[3] =
4233 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4234 
4235 		rule->tuples.dst_ip[3] =
4236 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4237 		rule->tuples_mask.dst_ip[3] =
4238 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4239 
4240 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4241 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4242 
4243 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4244 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4245 
4246 		rule->tuples.ether_proto = ETH_P_IP;
4247 		rule->tuples_mask.ether_proto = 0xFFFF;
4248 
4249 		break;
4250 	case SCTP_V6_FLOW:
4251 	case TCP_V6_FLOW:
4252 	case UDP_V6_FLOW:
4253 		be32_to_cpu_array(rule->tuples.src_ip,
4254 				  fs->h_u.tcp_ip6_spec.ip6src, 4);
4255 		be32_to_cpu_array(rule->tuples_mask.src_ip,
4256 				  fs->m_u.tcp_ip6_spec.ip6src, 4);
4257 
4258 		be32_to_cpu_array(rule->tuples.dst_ip,
4259 				  fs->h_u.tcp_ip6_spec.ip6dst, 4);
4260 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
4261 				  fs->m_u.tcp_ip6_spec.ip6dst, 4);
4262 
4263 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4264 		rule->tuples_mask.src_port =
4265 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4266 
4267 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4268 		rule->tuples_mask.dst_port =
4269 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4270 
4271 		rule->tuples.ether_proto = ETH_P_IPV6;
4272 		rule->tuples_mask.ether_proto = 0xFFFF;
4273 
4274 		break;
4275 	case IPV6_USER_FLOW:
4276 		be32_to_cpu_array(rule->tuples.src_ip,
4277 				  fs->h_u.usr_ip6_spec.ip6src, 4);
4278 		be32_to_cpu_array(rule->tuples_mask.src_ip,
4279 				  fs->m_u.usr_ip6_spec.ip6src, 4);
4280 
4281 		be32_to_cpu_array(rule->tuples.dst_ip,
4282 				  fs->h_u.usr_ip6_spec.ip6dst, 4);
4283 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
4284 				  fs->m_u.usr_ip6_spec.ip6dst, 4);
4285 
4286 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4287 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4288 
4289 		rule->tuples.ether_proto = ETH_P_IPV6;
4290 		rule->tuples_mask.ether_proto = 0xFFFF;
4291 
4292 		break;
4293 	case ETHER_FLOW:
4294 		ether_addr_copy(rule->tuples.src_mac,
4295 				fs->h_u.ether_spec.h_source);
4296 		ether_addr_copy(rule->tuples_mask.src_mac,
4297 				fs->m_u.ether_spec.h_source);
4298 
4299 		ether_addr_copy(rule->tuples.dst_mac,
4300 				fs->h_u.ether_spec.h_dest);
4301 		ether_addr_copy(rule->tuples_mask.dst_mac,
4302 				fs->m_u.ether_spec.h_dest);
4303 
4304 		rule->tuples.ether_proto =
4305 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
4306 		rule->tuples_mask.ether_proto =
4307 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
4308 
4309 		break;
4310 	default:
4311 		return -EOPNOTSUPP;
4312 	}
4313 
4314 	switch (flow_type) {
4315 	case SCTP_V4_FLOW:
4316 	case SCTP_V6_FLOW:
4317 		rule->tuples.ip_proto = IPPROTO_SCTP;
4318 		rule->tuples_mask.ip_proto = 0xFF;
4319 		break;
4320 	case TCP_V4_FLOW:
4321 	case TCP_V6_FLOW:
4322 		rule->tuples.ip_proto = IPPROTO_TCP;
4323 		rule->tuples_mask.ip_proto = 0xFF;
4324 		break;
4325 	case UDP_V4_FLOW:
4326 	case UDP_V6_FLOW:
4327 		rule->tuples.ip_proto = IPPROTO_UDP;
4328 		rule->tuples_mask.ip_proto = 0xFF;
4329 		break;
4330 	default:
4331 		break;
4332 	}
4333 
4334 	if ((fs->flow_type & FLOW_EXT)) {
4335 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4336 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4337 	}
4338 
4339 	if (fs->flow_type & FLOW_MAC_EXT) {
4340 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4341 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4342 	}
4343 
4344 	return 0;
4345 }
4346 
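/* Adding an entry: validate the ethtool spec, convert it into driver
 * tuples, program the action (AD) table and the TCAM key, then record
 * the rule in the sorted list.  Any failure frees the rule without
 * touching the list.
 */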
4347 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4348 			      struct ethtool_rxnfc *cmd)
4349 {
4350 	struct hclge_vport *vport = hclge_get_vport(handle);
4351 	struct hclge_dev *hdev = vport->back;
4352 	u16 dst_vport_id = 0, q_index = 0;
4353 	struct ethtool_rx_flow_spec *fs;
4354 	struct hclge_fd_rule *rule;
4355 	u32 unused = 0;
4356 	u8 action;
4357 	int ret;
4358 
4359 	if (!hnae3_dev_fd_supported(hdev))
4360 		return -EOPNOTSUPP;
4361 
4362 	if (!hdev->fd_cfg.fd_en) {
4363 		dev_warn(&hdev->pdev->dev,
4364 			 "Please enable flow director first\n");
4365 		return -EOPNOTSUPP;
4366 	}
4367 
4368 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4369 
4370 	ret = hclge_fd_check_spec(hdev, fs, &unused);
4371 	if (ret) {
4372 		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4373 		return ret;
4374 	}
4375 
4376 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4377 		action = HCLGE_FD_ACTION_DROP_PACKET;
4378 	} else {
4379 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4380 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4381 		u16 tqps;
4382 
4383 		if (vf > hdev->num_req_vfs) {
4384 			dev_err(&hdev->pdev->dev,
4385 				"Error: vf id (%d) > max vf num (%d)\n",
4386 				vf, hdev->num_req_vfs);
4387 			return -EINVAL;
4388 		}
4389 
4390 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4391 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4392 
4393 		if (ring >= tqps) {
4394 			dev_err(&hdev->pdev->dev,
4395 				"Error: queue id (%d) > max tqp num (%d)\n",
4396 				ring, tqps - 1);
4397 			return -EINVAL;
4398 		}
4399 
4400 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4401 		q_index = ring;
4402 	}
4403 
4404 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4405 	if (!rule)
4406 		return -ENOMEM;
4407 
4408 	ret = hclge_fd_get_tuple(hdev, fs, rule);
4409 	if (ret)
4410 		goto free_rule;
4411 
4412 	rule->flow_type = fs->flow_type;
4413 
4414 	rule->location = fs->location;
4415 	rule->unused_tuple = unused;
4416 	rule->vf_id = dst_vport_id;
4417 	rule->queue_id = q_index;
4418 	rule->action = action;
4419 
4420 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4421 	if (ret)
4422 		goto free_rule;
4423 
4424 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4425 	if (ret)
4426 		goto free_rule;
4427 
4428 	ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4429 	if (ret)
4430 		goto free_rule;
4431 
4432 	return ret;
4433 
4434 free_rule:
4435 	kfree(rule);
4436 	return ret;
4437 }
4438 
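/* Counterpart of the add path, reached via e.g. "ethtool -N eth0
 * delete 1" (illustrative device name): the TCAM entry is invalidated
 * first, then the rule is removed from the software list.
 */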
4439 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4440 			      struct ethtool_rxnfc *cmd)
4441 {
4442 	struct hclge_vport *vport = hclge_get_vport(handle);
4443 	struct hclge_dev *hdev = vport->back;
4444 	struct ethtool_rx_flow_spec *fs;
4445 	int ret;
4446 
4447 	if (!hnae3_dev_fd_supported(hdev))
4448 		return -EOPNOTSUPP;
4449 
4450 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4451 
4452 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4453 		return -EINVAL;
4454 
4455 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
4456 		dev_err(&hdev->pdev->dev,
4457 			"Delete fail, rule %d is inexistent\n",
4458 			fs->location);
4459 		return -ENOENT;
4460 	}
4461 
4462 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4463 				   fs->location, NULL, false);
4464 	if (ret)
4465 		return ret;
4466 
4467 	return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4468 					 false);
4469 }
4470 
4471 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4472 				     bool clear_list)
4473 {
4474 	struct hclge_vport *vport = hclge_get_vport(handle);
4475 	struct hclge_dev *hdev = vport->back;
4476 	struct hclge_fd_rule *rule;
4477 	struct hlist_node *node;
4478 
4479 	if (!hnae3_dev_fd_supported(hdev))
4480 		return;
4481 
4482 	if (clear_list) {
4483 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4484 					  rule_node) {
4485 			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4486 					     rule->location, NULL, false);
4487 			hlist_del(&rule->rule_node);
4488 			kfree(rule);
4489 			hdev->hclge_fd_rule_num--;
4490 		}
4491 	} else {
4492 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4493 					  rule_node)
4494 			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4495 					     rule->location, NULL, false);
4496 	}
4497 }
4498 
4499 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4500 {
4501 	struct hclge_vport *vport = hclge_get_vport(handle);
4502 	struct hclge_dev *hdev = vport->back;
4503 	struct hclge_fd_rule *rule;
4504 	struct hlist_node *node;
4505 	int ret;
4506 
4507 	/* Return 0 here, because the reset error handling will check this
4508 	 * return value. If an error is returned here, the reset process
4509 	 * will fail.
4510 	 */
4511 	if (!hnae3_dev_fd_supported(hdev))
4512 		return 0;
4513 
4514 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4515 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4516 		if (!ret)
4517 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4518 
4519 		if (ret) {
4520 			dev_warn(&hdev->pdev->dev,
4521 				 "Restore rule %d failed, remove it\n",
4522 				 rule->location);
4523 			hlist_del(&rule->rule_node);
4524 			kfree(rule);
4525 			hdev->hclge_fd_rule_num--;
4526 		}
4527 	}
4528 	return 0;
4529 }
4530 
4531 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4532 				 struct ethtool_rxnfc *cmd)
4533 {
4534 	struct hclge_vport *vport = hclge_get_vport(handle);
4535 	struct hclge_dev *hdev = vport->back;
4536 
4537 	if (!hnae3_dev_fd_supported(hdev))
4538 		return -EOPNOTSUPP;
4539 
4540 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
4541 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4542 
4543 	return 0;
4544 }
4545 
4546 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
4547 				  struct ethtool_rxnfc *cmd)
4548 {
4549 	struct hclge_vport *vport = hclge_get_vport(handle);
4550 	struct hclge_fd_rule *rule = NULL;
4551 	struct hclge_dev *hdev = vport->back;
4552 	struct ethtool_rx_flow_spec *fs;
4553 	struct hlist_node *node2;
4554 
4555 	if (!hnae3_dev_fd_supported(hdev))
4556 		return -EOPNOTSUPP;
4557 
4558 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4559 
4560 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4561 		if (rule->location >= fs->location)
4562 			break;
4563 	}
4564 
4565 	if (!rule || fs->location != rule->location)
4566 		return -ENOENT;
4567 
4568 	fs->flow_type = rule->flow_type;
4569 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4570 	case SCTP_V4_FLOW:
4571 	case TCP_V4_FLOW:
4572 	case UDP_V4_FLOW:
4573 		fs->h_u.tcp_ip4_spec.ip4src =
4574 				cpu_to_be32(rule->tuples.src_ip[3]);
4575 		fs->m_u.tcp_ip4_spec.ip4src =
4576 				rule->unused_tuple & BIT(INNER_SRC_IP) ?
4577 				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4578 
4579 		fs->h_u.tcp_ip4_spec.ip4dst =
4580 				cpu_to_be32(rule->tuples.dst_ip[3]);
4581 		fs->m_u.tcp_ip4_spec.ip4dst =
4582 				rule->unused_tuple & BIT(INNER_DST_IP) ?
4583 				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4584 
4585 		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
4586 		fs->m_u.tcp_ip4_spec.psrc =
4587 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
4588 				0 : cpu_to_be16(rule->tuples_mask.src_port);
4589 
4590 		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
4591 		fs->m_u.tcp_ip4_spec.pdst =
4592 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
4593 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
4594 
4595 		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
4596 		fs->m_u.tcp_ip4_spec.tos =
4597 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
4598 				0 : rule->tuples_mask.ip_tos;
4599 
4600 		break;
4601 	case IP_USER_FLOW:
4602 		fs->h_u.usr_ip4_spec.ip4src =
4603 				cpu_to_be32(rule->tuples.src_ip[3]);
4604 		fs->m_u.usr_ip4_spec.ip4src =
4605 				rule->unused_tuple & BIT(INNER_SRC_IP) ?
4606 				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4607 
4608 		fs->h_u.usr_ip4_spec.ip4dst =
4609 				cpu_to_be32(rule->tuples.dst_ip[3]);
4610 		fs->m_u.usr_ip4_spec.ip4dst =
4611 				rule->unused_tuple & BIT(INNER_DST_IP) ?
4612 				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4613 
4614 		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
4615 		fs->m_u.usr_ip4_spec.tos =
4616 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
4617 				0 : rule->tuples_mask.ip_tos;
4618 
4619 		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
4620 		fs->m_u.usr_ip4_spec.proto =
4621 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
4622 				0 : rule->tuples_mask.ip_proto;
4623 
4624 		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
4625 
4626 		break;
4627 	case SCTP_V6_FLOW:
4628 	case TCP_V6_FLOW:
4629 	case UDP_V6_FLOW:
4630 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
4631 				  rule->tuples.src_ip, 4);
4632 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
4633 			memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
4634 		else
4635 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
4636 					  rule->tuples_mask.src_ip, 4);
4637 
4638 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
4639 				  rule->tuples.dst_ip, 4);
4640 		if (rule->unused_tuple & BIT(INNER_DST_IP))
4641 			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
4642 		else
4643 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
4644 					  rule->tuples_mask.dst_ip, 4);
4645 
4646 		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
4647 		fs->m_u.tcp_ip6_spec.psrc =
4648 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
4649 				0 : cpu_to_be16(rule->tuples_mask.src_port);
4650 
4651 		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
4652 		fs->m_u.tcp_ip6_spec.pdst =
4653 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
4654 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
4655 
4656 		break;
4657 	case IPV6_USER_FLOW:
4658 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
4659 				  rule->tuples.src_ip, 4);
4660 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
4661 			memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
4662 		else
4663 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
4664 					  rule->tuples_mask.src_ip, 4);
4665 
4666 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
4667 				  rule->tuples.dst_ip, 4);
4668 		if (rule->unused_tuple & BIT(INNER_DST_IP))
4669 			memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
4670 		else
4671 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
4672 					  rule->tuples_mask.dst_ip, 4);
4673 
4674 		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
4675 		fs->m_u.usr_ip6_spec.l4_proto =
4676 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
4677 				0 : rule->tuples_mask.ip_proto;
4678 
4679 		break;
4680 	case ETHER_FLOW:
4681 		ether_addr_copy(fs->h_u.ether_spec.h_source,
4682 				rule->tuples.src_mac);
4683 		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
4684 			eth_zero_addr(fs->m_u.ether_spec.h_source);
4685 		else
4686 			ether_addr_copy(fs->m_u.ether_spec.h_source,
4687 					rule->tuples_mask.src_mac);
4688 
4689 		ether_addr_copy(fs->h_u.ether_spec.h_dest,
4690 				rule->tuples.dst_mac);
4691 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
4692 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
4693 		else
4694 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
4695 					rule->tuples_mask.dst_mac);
4696 
4697 		fs->h_u.ether_spec.h_proto =
4698 				cpu_to_be16(rule->tuples.ether_proto);
4699 		fs->m_u.ether_spec.h_proto =
4700 				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
4701 				0 : cpu_to_be16(rule->tuples_mask.ether_proto);
4702 
4703 		break;
4704 	default:
4705 		return -EOPNOTSUPP;
4706 	}
4707 
4708 	if (fs->flow_type & FLOW_EXT) {
4709 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
4710 		fs->m_ext.vlan_tci =
4711 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
4712 				0 :
4713 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
4714 	}
4715 
4716 	if (fs->flow_type & FLOW_MAC_EXT) {
4717 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
4718 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
4719 			eth_zero_addr(fs->m_ext.h_dest);
4720 		else
4721 			ether_addr_copy(fs->m_ext.h_dest,
4722 					rule->tuples_mask.dst_mac);
4723 	}
4724 
4725 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4726 		fs->ring_cookie = RX_CLS_FLOW_DISC;
4727 	} else {
4728 		u64 vf_id;
4729 
4730 		fs->ring_cookie = rule->queue_id;
4731 		vf_id = rule->vf_id;
4732 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
4733 		fs->ring_cookie |= vf_id;
4734 	}
4735 
4736 	return 0;
4737 }
4738 
4739 static int hclge_get_all_rules(struct hnae3_handle *handle,
4740 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
4741 {
4742 	struct hclge_vport *vport = hclge_get_vport(handle);
4743 	struct hclge_dev *hdev = vport->back;
4744 	struct hclge_fd_rule *rule;
4745 	struct hlist_node *node2;
4746 	int cnt = 0;
4747 
4748 	if (!hnae3_dev_fd_supported(hdev))
4749 		return -EOPNOTSUPP;
4750 
4751 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4752 
4753 	hlist_for_each_entry_safe(rule, node2,
4754 				  &hdev->fd_rule_list, rule_node) {
4755 		if (cnt == cmd->rule_cnt)
4756 			return -EMSGSIZE;
4757 
4758 		rule_locs[cnt] = rule->location;
4759 		cnt++;
4760 	}
4761 
4762 	cmd->rule_cnt = cnt;
4763 
4764 	return 0;
4765 }
4766 
4767 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
4768 {
4769 	struct hclge_vport *vport = hclge_get_vport(handle);
4770 	struct hclge_dev *hdev = vport->back;
4771 
4772 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
4773 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
4774 }
4775 
4776 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
4777 {
4778 	struct hclge_vport *vport = hclge_get_vport(handle);
4779 	struct hclge_dev *hdev = vport->back;
4780 
4781 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4782 }
4783 
4784 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
4785 {
4786 	struct hclge_vport *vport = hclge_get_vport(handle);
4787 	struct hclge_dev *hdev = vport->back;
4788 
4789 	return hdev->reset_count;
4790 }
4791 
4792 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
4793 {
4794 	struct hclge_vport *vport = hclge_get_vport(handle);
4795 	struct hclge_dev *hdev = vport->back;
4796 
4797 	hdev->fd_cfg.fd_en = enable;
4798 	if (!enable)
4799 		hclge_del_all_fd_entries(handle, false);
4800 	else
4801 		hclge_restore_fd_entries(handle);
4802 }
4803 
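/* loop_en below doubles as the MAC enable word: the TX/RX, padding,
 * FCS and truncation bits all track the enable flag, while the 1588
 * and loopback bits stay cleared; the app loopback path re-reads this
 * word and flips only the loopback and TX/RX enable bits.
 */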
4804 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
4805 {
4806 	struct hclge_desc desc;
4807 	struct hclge_config_mac_mode_cmd *req =
4808 		(struct hclge_config_mac_mode_cmd *)desc.data;
4809 	u32 loop_en = 0;
4810 	int ret;
4811 
4812 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
4813 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
4814 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
4815 	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
4816 	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
4817 	hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
4818 	hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
4819 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
4820 	hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
4821 	hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
4822 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
4823 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
4824 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
4825 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
4826 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
4827 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
4828 
4829 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4830 	if (ret)
4831 		dev_err(&hdev->pdev->dev,
4832 			"mac enable fail, ret =%d.\n", ret);
4833 }
4834 
4835 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
4836 {
4837 	struct hclge_config_mac_mode_cmd *req;
4838 	struct hclge_desc desc;
4839 	u32 loop_en;
4840 	int ret;
4841 
4842 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
4843 	/* 1 Read out the MAC mode config at first */
4844 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
4845 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4846 	if (ret) {
4847 		dev_err(&hdev->pdev->dev,
4848 			"mac loopback get fail, ret =%d.\n", ret);
4849 		return ret;
4850 	}
4851 
4852 	/* 2 Then setup the loopback flag */
4853 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
4854 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
4855 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
4856 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
4857 
4858 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
4859 
4860 	/* 3 Config mac work mode with loopback flag
4861 	 * and its original configuration parameters
4862 	 */
4863 	hclge_cmd_reuse_desc(&desc, false);
4864 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4865 	if (ret)
4866 		dev_err(&hdev->pdev->dev,
4867 			"mac loopback set fail, ret =%d.\n", ret);
4868 	return ret;
4869 }
4870 
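/* Serdes loopback is applied asynchronously: write the request, then
 * poll the same opcode until the firmware reports DONE (at most
 * HCLGE_SERDES_RETRY_NUM * HCLGE_SERDES_RETRY_MS = 100 * 10 ms = 1 s)
 * and check SUCCESS.  The MAC is then re-enabled or disabled to match
 * the loopback state.
 */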
4871 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
4872 				     enum hnae3_loop loop_mode)
4873 {
4874 #define HCLGE_SERDES_RETRY_MS	10
4875 #define HCLGE_SERDES_RETRY_NUM	100
4876 	struct hclge_serdes_lb_cmd *req;
4877 	struct hclge_desc desc;
4878 	int ret, i = 0;
4879 	u8 loop_mode_b;
4880 
4881 	req = (struct hclge_serdes_lb_cmd *)desc.data;
4882 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
4883 
4884 	switch (loop_mode) {
4885 	case HNAE3_LOOP_SERIAL_SERDES:
4886 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
4887 		break;
4888 	case HNAE3_LOOP_PARALLEL_SERDES:
4889 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
4890 		break;
4891 	default:
4892 		dev_err(&hdev->pdev->dev,
4893 			"unsupported serdes loopback mode %d\n", loop_mode);
4894 		return -ENOTSUPP;
4895 	}
4896 
4897 	if (en) {
4898 		req->enable = loop_mode_b;
4899 		req->mask = loop_mode_b;
4900 	} else {
4901 		req->mask = loop_mode_b;
4902 	}
4903 
4904 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4905 	if (ret) {
4906 		dev_err(&hdev->pdev->dev,
4907 			"serdes loopback set fail, ret = %d\n", ret);
4908 		return ret;
4909 	}
4910 
4911 	do {
4912 		msleep(HCLGE_SERDES_RETRY_MS);
4913 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
4914 					   true);
4915 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4916 		if (ret) {
4917 			dev_err(&hdev->pdev->dev,
4918 				"serdes loopback get, ret = %d\n", ret);
4919 			return ret;
4920 		}
4921 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
4922 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
4923 
4924 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
4925 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
4926 		return -EBUSY;
4927 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
4928 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
4929 		return -EIO;
4930 	}
4931 
4932 	hclge_cfg_mac_mode(hdev, en);
4933 	return 0;
4934 }
4935 
4936 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
4937 			    int stream_id, bool enable)
4938 {
4939 	struct hclge_desc desc;
4940 	struct hclge_cfg_com_tqp_queue_cmd *req =
4941 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
4942 	int ret;
4943 
4944 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
4945 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
4946 	req->stream_id = cpu_to_le16(stream_id);
4947 	req->enable |= enable << HCLGE_TQP_ENABLE_B;
4948 
4949 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4950 	if (ret)
4951 		dev_err(&hdev->pdev->dev,
4952 			"Tqp enable fail, status =%d.\n", ret);
4953 	return ret;
4954 }
4955 
4956 static int hclge_set_loopback(struct hnae3_handle *handle,
4957 			      enum hnae3_loop loop_mode, bool en)
4958 {
4959 	struct hclge_vport *vport = hclge_get_vport(handle);
4960 	struct hclge_dev *hdev = vport->back;
4961 	int i, ret;
4962 
4963 	switch (loop_mode) {
4964 	case HNAE3_LOOP_APP:
4965 		ret = hclge_set_app_loopback(hdev, en);
4966 		break;
4967 	case HNAE3_LOOP_SERIAL_SERDES:
4968 	case HNAE3_LOOP_PARALLEL_SERDES:
4969 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
4970 		break;
4971 	default:
4972 		ret = -ENOTSUPP;
4973 		dev_err(&hdev->pdev->dev,
4974 			"loop_mode %d is not supported\n", loop_mode);
4975 		break;
4976 	}

	if (ret)
		return ret;

4978 	for (i = 0; i < vport->alloc_tqps; i++) {
4979 		ret = hclge_tqp_enable(hdev, i, 0, en);
4980 		if (ret)
4981 			return ret;
4982 	}
4983 
4984 	return 0;
4985 }
4986 
4987 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
4988 {
4989 	struct hclge_vport *vport = hclge_get_vport(handle);
4990 	struct hnae3_queue *queue;
4991 	struct hclge_tqp *tqp;
4992 	int i;
4993 
4994 	for (i = 0; i < vport->alloc_tqps; i++) {
4995 		queue = handle->kinfo.tqp[i];
4996 		tqp = container_of(queue, struct hclge_tqp, q);
4997 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
4998 	}
4999 }
5000 
5001 static int hclge_ae_start(struct hnae3_handle *handle)
5002 {
5003 	struct hclge_vport *vport = hclge_get_vport(handle);
5004 	struct hclge_dev *hdev = vport->back;
5005 
5006 	/* mac enable */
5007 	hclge_cfg_mac_mode(hdev, true);
5008 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5009 	mod_timer(&hdev->service_timer, jiffies + HZ);
5010 	hdev->hw.mac.link = 0;
5011 
5012 	/* reset tqp stats */
5013 	hclge_reset_tqp_stats(handle);
5014 
5015 	hclge_mac_start_phy(hdev);
5016 
5017 	return 0;
5018 }
5019 
5020 static void hclge_ae_stop(struct hnae3_handle *handle)
5021 {
5022 	struct hclge_vport *vport = hclge_get_vport(handle);
5023 	struct hclge_dev *hdev = vport->back;
5024 
5025 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
5026 
5027 	del_timer_sync(&hdev->service_timer);
5028 	cancel_work_sync(&hdev->service_task);
5029 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5030 
5031 	/* If it is not PF reset, the firmware will disable the MAC,
5032 	 * so it only needs to stop the PHY here.
5033 	 */
5034 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5035 	    hdev->reset_type != HNAE3_FUNC_RESET) {
5036 		hclge_mac_stop_phy(hdev);
5037 		return;
5038 	}
5039 
5040 	/* Mac disable */
5041 	hclge_cfg_mac_mode(hdev, false);
5042 
5043 	hclge_mac_stop_phy(hdev);
5044 
5045 	/* reset tqp stats */
5046 	hclge_reset_tqp_stats(handle);
5047 	del_timer_sync(&hdev->service_timer);
5048 	cancel_work_sync(&hdev->service_task);
5049 	hclge_update_link_status(hdev);
5050 }
5051 
5052 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5053 					 u16 cmdq_resp, u8  resp_code,
5054 					 enum hclge_mac_vlan_tbl_opcode op)
5055 {
5056 	struct hclge_dev *hdev = vport->back;
5057 	int return_status = -EIO;
5058 
5059 	if (cmdq_resp) {
5060 		dev_err(&hdev->pdev->dev,
5061 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5062 			cmdq_resp);
5063 		return -EIO;
5064 	}
5065 
5066 	if (op == HCLGE_MAC_VLAN_ADD) {
5067 		if ((!resp_code) || (resp_code == 1)) {
5068 			return_status = 0;
5069 		} else if (resp_code == 2) {
5070 			return_status = -ENOSPC;
5071 			dev_err(&hdev->pdev->dev,
5072 				"add mac addr failed for uc_overflow.\n");
5073 		} else if (resp_code == 3) {
5074 			return_status = -ENOSPC;
5075 			dev_err(&hdev->pdev->dev,
5076 				"add mac addr failed for mc_overflow.\n");
5077 		} else {
5078 			dev_err(&hdev->pdev->dev,
5079 				"add mac addr failed for undefined, code=%d.\n",
5080 				resp_code);
5081 		}
5082 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
5083 		if (!resp_code) {
5084 			return_status = 0;
5085 		} else if (resp_code == 1) {
5086 			return_status = -ENOENT;
5087 			dev_dbg(&hdev->pdev->dev,
5088 				"remove mac addr failed for miss.\n");
5089 		} else {
5090 			dev_err(&hdev->pdev->dev,
5091 				"remove mac addr failed for undefined, code=%d.\n",
5092 				resp_code);
5093 		}
5094 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
5095 		if (!resp_code) {
5096 			return_status = 0;
5097 		} else if (resp_code == 1) {
5098 			return_status = -ENOENT;
5099 			dev_dbg(&hdev->pdev->dev,
5100 				"lookup mac addr failed for miss.\n");
5101 		} else {
5102 			dev_err(&hdev->pdev->dev,
5103 				"lookup mac addr failed for undefined, code=%d.\n",
5104 				resp_code);
5105 		}
5106 	} else {
5107 		return_status = -EINVAL;
5108 		dev_err(&hdev->pdev->dev,
5109 			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5110 			op);
5111 	}
5112 
5113 	return return_status;
5114 }
5115 
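/* A MAC/VLAN table entry carries a 256-bit function bitmap split
 * across descriptors: functions 0-191 live in desc[1] (six u32 words)
 * and functions 192-255 in desc[2], hence the word/bit arithmetic
 * below.
 */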
5116 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5117 {
5118 	int word_num;
5119 	int bit_num;
5120 
5121 	if (vfid > 255 || vfid < 0)
5122 		return -EIO;
5123 
5124 	if (vfid >= 0 && vfid <= 191) {
5125 		word_num = vfid / 32;
5126 		bit_num  = vfid % 32;
5127 		if (clr)
5128 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5129 		else
5130 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5131 	} else {
5132 		word_num = (vfid - 192) / 32;
5133 		bit_num  = vfid % 32;
5134 		if (clr)
5135 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5136 		else
5137 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5138 	}
5139 
5140 	return 0;
5141 }
5142 
5143 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5144 {
5145 #define HCLGE_DESC_NUMBER 3
5146 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5147 	int i, j;
5148 
5149 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5150 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5151 			if (desc[i].data[j])
5152 				return false;
5153 
5154 	return true;
5155 }
5156 
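/* Pack a MAC address into the entry's hi32/lo16 fields in
 * little-endian byte order, e.g. 00:11:22:33:44:55 becomes
 * mac_addr_hi32 = 0x33221100 and mac_addr_lo16 = 0x5544.
 */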
5157 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5158 				   const u8 *addr)
5159 {
5160 	const unsigned char *mac_addr = addr;
5161 	u32 high_val = (mac_addr[3] << 24) | (mac_addr[2] << 16) |
5162 		       (mac_addr[1] << 8) | mac_addr[0];
5163 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
5164 
5165 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5166 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5167 }
5168 
5169 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5170 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
5171 {
5172 	struct hclge_dev *hdev = vport->back;
5173 	struct hclge_desc desc;
5174 	u8 resp_code;
5175 	u16 retval;
5176 	int ret;
5177 
5178 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5179 
5180 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5181 
5182 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5183 	if (ret) {
5184 		dev_err(&hdev->pdev->dev,
5185 			"del mac addr failed for cmd_send, ret =%d.\n",
5186 			ret);
5187 		return ret;
5188 	}
5189 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5190 	retval = le16_to_cpu(desc.retval);
5191 
5192 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5193 					     HCLGE_MAC_VLAN_REMOVE);
5194 }
5195 
5196 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5197 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
5198 				     struct hclge_desc *desc,
5199 				     bool is_mc)
5200 {
5201 	struct hclge_dev *hdev = vport->back;
5202 	u8 resp_code;
5203 	u16 retval;
5204 	int ret;
5205 
5206 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5207 	if (is_mc) {
5208 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5209 		memcpy(desc[0].data,
5210 		       req,
5211 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5212 		hclge_cmd_setup_basic_desc(&desc[1],
5213 					   HCLGE_OPC_MAC_VLAN_ADD,
5214 					   true);
5215 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5216 		hclge_cmd_setup_basic_desc(&desc[2],
5217 					   HCLGE_OPC_MAC_VLAN_ADD,
5218 					   true);
5219 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
5220 	} else {
5221 		memcpy(desc[0].data,
5222 		       req,
5223 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5224 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
5225 	}
5226 	if (ret) {
5227 		dev_err(&hdev->pdev->dev,
5228 			"lookup mac addr failed for cmd_send, ret =%d.\n",
5229 			ret);
5230 		return ret;
5231 	}
5232 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5233 	retval = le16_to_cpu(desc[0].retval);
5234 
5235 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5236 					     HCLGE_MAC_VLAN_LKUP);
5237 }
5238 
5239 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5240 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
5241 				  struct hclge_desc *mc_desc)
5242 {
5243 	struct hclge_dev *hdev = vport->back;
5244 	int cfg_status;
5245 	u8 resp_code;
5246 	u16 retval;
5247 	int ret;
5248 
5249 	if (!mc_desc) {
5250 		struct hclge_desc desc;
5251 
5252 		hclge_cmd_setup_basic_desc(&desc,
5253 					   HCLGE_OPC_MAC_VLAN_ADD,
5254 					   false);
5255 		memcpy(desc.data, req,
5256 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5257 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5258 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5259 		retval = le16_to_cpu(desc.retval);
5260 
5261 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5262 							   resp_code,
5263 							   HCLGE_MAC_VLAN_ADD);
5264 	} else {
5265 		hclge_cmd_reuse_desc(&mc_desc[0], false);
5266 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5267 		hclge_cmd_reuse_desc(&mc_desc[1], false);
5268 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5269 		hclge_cmd_reuse_desc(&mc_desc[2], false);
5270 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5271 		memcpy(mc_desc[0].data, req,
5272 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5273 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5274 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5275 		retval = le16_to_cpu(mc_desc[0].retval);
5276 
5277 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5278 							   resp_code,
5279 							   HCLGE_MAC_VLAN_ADD);
5280 	}
5281 
5282 	if (ret) {
5283 		dev_err(&hdev->pdev->dev,
5284 			"add mac addr failed for cmd_send, ret =%d.\n",
5285 			ret);
5286 		return ret;
5287 	}
5288 
5289 	return cfg_status;
5290 }
5291 
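/* Unicast MAC VLAN (UMV) space is split into a private quota per
 * function plus one shared pool.  With illustrative numbers, 256
 * allocated entries and 6 requested VFs give priv_umv_size =
 * 256 / (6 + 2) = 32 per function and share_umv_size = 32 + 256 % 8 =
 * 32 entries.
 */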
5292 static int hclge_init_umv_space(struct hclge_dev *hdev)
5293 {
5294 	u16 allocated_size = 0;
5295 	int ret;
5296 
5297 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5298 				  true);
5299 	if (ret)
5300 		return ret;
5301 
5302 	if (allocated_size < hdev->wanted_umv_size)
5303 		dev_warn(&hdev->pdev->dev,
5304 			 "Alloc umv space failed, want %d, get %d\n",
5305 			 hdev->wanted_umv_size, allocated_size);
5306 
5307 	mutex_init(&hdev->umv_mutex);
5308 	hdev->max_umv_size = allocated_size;
5309 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5310 	hdev->share_umv_size = hdev->priv_umv_size +
5311 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
5312 
5313 	return 0;
5314 }
5315 
5316 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5317 {
5318 	int ret;
5319 
5320 	if (hdev->max_umv_size > 0) {
5321 		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5322 					  false);
5323 		if (ret)
5324 			return ret;
5325 		hdev->max_umv_size = 0;
5326 	}
5327 	mutex_destroy(&hdev->umv_mutex);
5328 
5329 	return 0;
5330 }
5331 
5332 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5333 			       u16 *allocated_size, bool is_alloc)
5334 {
5335 	struct hclge_umv_spc_alc_cmd *req;
5336 	struct hclge_desc desc;
5337 	int ret;
5338 
5339 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5340 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5341 	hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5342 	req->space_size = cpu_to_le32(space_size);
5343 
5344 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5345 	if (ret) {
5346 		dev_err(&hdev->pdev->dev,
5347 			"%s umv space failed for cmd_send, ret =%d\n",
5348 			is_alloc ? "allocate" : "free", ret);
5349 		return ret;
5350 	}
5351 
5352 	if (is_alloc && allocated_size)
5353 		*allocated_size = le32_to_cpu(desc.data[1]);
5354 
5355 	return 0;
5356 }
5357 
5358 static void hclge_reset_umv_space(struct hclge_dev *hdev)
5359 {
5360 	struct hclge_vport *vport;
5361 	int i;
5362 
5363 	for (i = 0; i < hdev->num_alloc_vport; i++) {
5364 		vport = &hdev->vport[i];
5365 		vport->used_umv_num = 0;
5366 	}
5367 
5368 	mutex_lock(&hdev->umv_mutex);
5369 	hdev->share_umv_size = hdev->priv_umv_size +
5370 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
5371 	mutex_unlock(&hdev->umv_mutex);
5372 }
5373 
5374 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5375 {
5376 	struct hclge_dev *hdev = vport->back;
5377 	bool is_full;
5378 
5379 	mutex_lock(&hdev->umv_mutex);
5380 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5381 		   hdev->share_umv_size == 0);
5382 	mutex_unlock(&hdev->umv_mutex);
5383 
5384 	return is_full;
5385 }
5386 
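/* Consumption mirrors the split above: a function first uses its
 * private quota and only then draws from the shared pool, so the
 * shared counter moves only while used_umv_num is at or beyond
 * priv_umv_size.
 */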
5387 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5388 {
5389 	struct hclge_dev *hdev = vport->back;
5390 
5391 	mutex_lock(&hdev->umv_mutex);
5392 	if (is_free) {
5393 		if (vport->used_umv_num > hdev->priv_umv_size)
5394 			hdev->share_umv_size++;
5395 		vport->used_umv_num--;
5396 	} else {
5397 		if (vport->used_umv_num >= hdev->priv_umv_size)
5398 			hdev->share_umv_size--;
5399 		vport->used_umv_num++;
5400 	}
5401 	mutex_unlock(&hdev->umv_mutex);
5402 }
5403 
5404 static int hclge_add_uc_addr(struct hnae3_handle *handle,
5405 			     const unsigned char *addr)
5406 {
5407 	struct hclge_vport *vport = hclge_get_vport(handle);
5408 
5409 	return hclge_add_uc_addr_common(vport, addr);
5410 }
5411 
5412 int hclge_add_uc_addr_common(struct hclge_vport *vport,
5413 			     const unsigned char *addr)
5414 {
5415 	struct hclge_dev *hdev = vport->back;
5416 	struct hclge_mac_vlan_tbl_entry_cmd req;
5417 	struct hclge_desc desc;
5418 	u16 egress_port = 0;
5419 	int ret;
5420 
5421 	/* mac addr check */
5422 	if (is_zero_ether_addr(addr) ||
5423 	    is_broadcast_ether_addr(addr) ||
5424 	    is_multicast_ether_addr(addr)) {
5425 		dev_err(&hdev->pdev->dev,
5426 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5427 			 addr,
5428 			 is_zero_ether_addr(addr),
5429 			 is_broadcast_ether_addr(addr),
5430 			 is_multicast_ether_addr(addr));
5431 		return -EINVAL;
5432 	}
5433 
5434 	memset(&req, 0, sizeof(req));
5435 	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5436 
5437 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5438 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
5439 
5440 	req.egress_port = cpu_to_le16(egress_port);
5441 
5442 	hclge_prepare_mac_addr(&req, addr);
5443 
5444 	/* Look up the mac address in the mac_vlan table, and add
5445 	 * it if the entry does not exist. Duplicate unicast entries
5446 	 * are not allowed in the mac vlan table.
5447 	 */
5448 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
5449 	if (ret == -ENOENT) {
5450 		if (!hclge_is_umv_space_full(vport)) {
5451 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5452 			if (!ret)
5453 				hclge_update_umv_space(vport, false);
5454 			return ret;
5455 		}
5456 
5457 		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5458 			hdev->priv_umv_size);
5459 
5460 		return -ENOSPC;
5461 	}
5462 
5463 	/* check if we just hit a duplicate entry */
5464 	if (!ret)
5465 		ret = -EINVAL;
5466 
5467 	dev_err(&hdev->pdev->dev,
5468 		"PF failed to add unicast entry(%pM) in the MAC table\n",
5469 		addr);
5470 
5471 	return ret;
5472 }
5473 
5474 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
5475 			    const unsigned char *addr)
5476 {
5477 	struct hclge_vport *vport = hclge_get_vport(handle);
5478 
5479 	return hclge_rm_uc_addr_common(vport, addr);
5480 }
5481 
5482 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
5483 			    const unsigned char *addr)
5484 {
5485 	struct hclge_dev *hdev = vport->back;
5486 	struct hclge_mac_vlan_tbl_entry_cmd req;
5487 	int ret;
5488 
5489 	/* mac addr check */
5490 	if (is_zero_ether_addr(addr) ||
5491 	    is_broadcast_ether_addr(addr) ||
5492 	    is_multicast_ether_addr(addr)) {
5493 		dev_dbg(&hdev->pdev->dev,
5494 			"Remove mac err! invalid mac:%pM.\n",
5495 			 addr);
5496 		return -EINVAL;
5497 	}
5498 
5499 	memset(&req, 0, sizeof(req));
5500 	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5501 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5502 	hclge_prepare_mac_addr(&req, addr);
5503 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
5504 	if (!ret)
5505 		hclge_update_umv_space(vport, true);
5506 
5507 	return ret;
5508 }
5509 
5510 static int hclge_add_mc_addr(struct hnae3_handle *handle,
5511 			     const unsigned char *addr)
5512 {
5513 	struct hclge_vport *vport = hclge_get_vport(handle);
5514 
5515 	return hclge_add_mc_addr_common(vport, addr);
5516 }
5517 
5518 int hclge_add_mc_addr_common(struct hclge_vport *vport,
5519 			     const unsigned char *addr)
5520 {
5521 	struct hclge_dev *hdev = vport->back;
5522 	struct hclge_mac_vlan_tbl_entry_cmd req;
5523 	struct hclge_desc desc[3];
5524 	int status;
5525 
5526 	/* mac addr check */
5527 	if (!is_multicast_ether_addr(addr)) {
5528 		dev_err(&hdev->pdev->dev,
5529 			"Add mc mac err! invalid mac:%pM.\n",
5530 			 addr);
5531 		return -EINVAL;
5532 	}
5533 	memset(&req, 0, sizeof(req));
5534 	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5535 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5536 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5537 	hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5538 	hclge_prepare_mac_addr(&req, addr);
5539 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5540 	if (!status) {
5541 		/* This mac addr exists, update the VFID for it */
5542 		hclge_update_desc_vfid(desc, vport->vport_id, false);
5543 		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5544 	} else {
5545 		/* This mac addr does not exist, add a new entry for it */
5546 		memset(desc[0].data, 0, sizeof(desc[0].data));
5547 		memset(desc[1].data, 0, sizeof(desc[1].data));
5548 		memset(desc[2].data, 0, sizeof(desc[2].data));
5549 		hclge_update_desc_vfid(desc, vport->vport_id, false);
5550 		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5551 	}
5552 
5553 	if (status == -ENOSPC)
5554 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
5555 
5556 	return status;
5557 }
5558 
5559 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
5560 			    const unsigned char *addr)
5561 {
5562 	struct hclge_vport *vport = hclge_get_vport(handle);
5563 
5564 	return hclge_rm_mc_addr_common(vport, addr);
5565 }
5566 
5567 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
5568 			    const unsigned char *addr)
5569 {
5570 	struct hclge_dev *hdev = vport->back;
5571 	struct hclge_mac_vlan_tbl_entry_cmd req;
5572 	enum hclge_cmd_status status;
5573 	struct hclge_desc desc[3];
5574 
5575 	/* mac addr check */
5576 	if (!is_multicast_ether_addr(addr)) {
5577 		dev_dbg(&hdev->pdev->dev,
5578 			"Remove mc mac err! invalid mac:%pM.\n",
5579 			 addr);
5580 		return -EINVAL;
5581 	}
5582 
5583 	memset(&req, 0, sizeof(req));
5584 	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5585 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5586 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5587 	hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5588 	hclge_prepare_mac_addr(&req, addr);
5589 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5590 	if (!status) {
5591 		/* This mac addr exists, remove this handle's VFID for it */
5592 		hclge_update_desc_vfid(desc, vport->vport_id, true);
5593 
5594 		if (hclge_is_all_function_id_zero(desc))
5595 			/* All the vfids are zero, so delete this entry */
5596 			status = hclge_remove_mac_vlan_tbl(vport, &req);
5597 		else
5598 			/* Not all the vfids are zero, so update the vfids */
5599 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5600 
5601 	} else {
5602 		/* This mac address may be in the mta table, but it cannot
5603 		 * be deleted here because an mta entry represents an address
5604 		 * range rather than a specific address. The delete action on
5605 		 * all entries will take effect in update_mta_status, called
5606 		 * by hns3_nic_set_rx_mode.
5607 		 */
5608 		status = 0;
5609 	}
5610 
5611 	return status;
5612 }
5613 
5614 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
5615 					      u16 cmdq_resp, u8 resp_code)
5616 {
5617 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
5618 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
5619 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
5620 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
5621 
5622 	int return_status;
5623 
5624 	if (cmdq_resp) {
5625 		dev_err(&hdev->pdev->dev,
5626 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
5627 			cmdq_resp);
5628 		return -EIO;
5629 	}
5630 
5631 	switch (resp_code) {
5632 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
5633 	case HCLGE_ETHERTYPE_ALREADY_ADD:
5634 		return_status = 0;
5635 		break;
5636 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
5637 		dev_err(&hdev->pdev->dev,
5638 			"add mac ethertype failed for manager table overflow.\n");
5639 		return_status = -EIO;
5640 		break;
5641 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
5642 		dev_err(&hdev->pdev->dev,
5643 			"add mac ethertype failed for key conflict.\n");
5644 		return_status = -EIO;
5645 		break;
5646 	default:
5647 		dev_err(&hdev->pdev->dev,
5648 			"add mac ethertype failed for undefined reason, code=%d.\n",
5649 			resp_code);
5650 		return_status = -EIO;
5651 	}
5652 
5653 	return return_status;
5654 }
5655 
5656 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
5657 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
5658 {
5659 	struct hclge_desc desc;
5660 	u8 resp_code;
5661 	u16 retval;
5662 	int ret;
5663 
5664 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
5665 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
5666 
5667 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5668 	if (ret) {
5669 		dev_err(&hdev->pdev->dev,
5670 			"add mac ethertype failed for cmd_send, ret =%d.\n",
5671 			ret);
5672 		return ret;
5673 	}
5674 
5675 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5676 	retval = le16_to_cpu(desc.retval);
5677 
5678 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
5679 }
5680 
5681 static int init_mgr_tbl(struct hclge_dev *hdev)
5682 {
5683 	int ret;
5684 	int i;
5685 
5686 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
5687 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
5688 		if (ret) {
5689 			dev_err(&hdev->pdev->dev,
5690 				"add mac ethertype failed, ret =%d.\n",
5691 				ret);
5692 			return ret;
5693 		}
5694 	}
5695 
5696 	return 0;
5697 }
5698 
5699 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
5700 {
5701 	struct hclge_vport *vport = hclge_get_vport(handle);
5702 	struct hclge_dev *hdev = vport->back;
5703 
5704 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
5705 }
5706 
5707 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
5708 			      bool is_first)
5709 {
5710 	const unsigned char *new_addr = (const unsigned char *)p;
5711 	struct hclge_vport *vport = hclge_get_vport(handle);
5712 	struct hclge_dev *hdev = vport->back;
5713 	int ret;
5714 
5715 	/* mac addr check */
5716 	if (is_zero_ether_addr(new_addr) ||
5717 	    is_broadcast_ether_addr(new_addr) ||
5718 	    is_multicast_ether_addr(new_addr)) {
5719 		dev_err(&hdev->pdev->dev,
5720 			"Change uc mac err! invalid mac:%pM.\n",
5721 			 new_addr);
5722 		return -EINVAL;
5723 	}
5724 
5725 	if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
5726 		dev_warn(&hdev->pdev->dev,
5727 			 "remove old uc mac address fail.\n");
5728 
5729 	ret = hclge_add_uc_addr(handle, new_addr);
5730 	if (ret) {
5731 		dev_err(&hdev->pdev->dev,
5732 			"add uc mac address fail, ret =%d.\n",
5733 			ret);
5734 
5735 		if (!is_first &&
5736 		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
5737 			dev_err(&hdev->pdev->dev,
5738 				"restore uc mac address fail.\n");
5739 
5740 		return -EIO;
5741 	}
5742 
5743 	ret = hclge_pause_addr_cfg(hdev, new_addr);
5744 	if (ret) {
5745 		dev_err(&hdev->pdev->dev,
5746 			"configure mac pause address fail, ret =%d.\n",
5747 			ret);
5748 		return -EIO;
5749 	}
5750 
5751 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
5752 
5753 	return 0;
5754 }
5755 
5756 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
5757 			  int cmd)
5758 {
5759 	struct hclge_vport *vport = hclge_get_vport(handle);
5760 	struct hclge_dev *hdev = vport->back;
5761 
5762 	if (!hdev->hw.mac.phydev)
5763 		return -EOPNOTSUPP;
5764 
5765 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
5766 }
5767 
5768 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
5769 				      u8 fe_type, bool filter_en)
5770 {
5771 	struct hclge_vlan_filter_ctrl_cmd *req;
5772 	struct hclge_desc desc;
5773 	int ret;
5774 
5775 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
5776 
5777 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
5778 	req->vlan_type = vlan_type;
5779 	req->vlan_fe = filter_en ? fe_type : 0;
5780 
5781 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5782 	if (ret)
5783 		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
5784 			ret);
5785 
5786 	return ret;
5787 }
5788 
5789 #define HCLGE_FILTER_TYPE_VF		0
5790 #define HCLGE_FILTER_TYPE_PORT		1
5791 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
5792 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
5793 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
5794 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
5795 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
5796 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
5797 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
5798 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
5799 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
5800 
5801 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
5802 {
5803 	struct hclge_vport *vport = hclge_get_vport(handle);
5804 	struct hclge_dev *hdev = vport->back;
5805 
5806 	if (hdev->pdev->revision >= 0x21) {
5807 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
5808 					   HCLGE_FILTER_FE_EGRESS, enable);
5809 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
5810 					   HCLGE_FILTER_FE_INGRESS, enable);
5811 	} else {
5812 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
5813 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable);
5814 	}
5815 	if (enable)
5816 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
5817 	else
5818 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
5819 }
5820 
5821 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
5822 				    bool is_kill, u16 vlan, u8 qos,
5823 				    __be16 proto)
5824 {
5825 #define HCLGE_MAX_VF_BYTES  16
5826 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
5827 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
5828 	struct hclge_desc desc[2];
5829 	u8 vf_byte_val;
5830 	u8 vf_byte_off;
5831 	int ret;
5832 
5833 	hclge_cmd_setup_basic_desc(&desc[0],
5834 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
5835 	hclge_cmd_setup_basic_desc(&desc[1],
5836 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
5837 
5838 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5839 
5840 	vf_byte_off = vfid / 8;
5841 	vf_byte_val = 1 << (vfid % 8);
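	/* e.g. a hypothetical vfid of 18 maps to byte 18 / 8 = 2 with bit
	 * value 1 << (18 % 8) = 0x04; bytes 0..15 land in desc[0] (req0)
	 * and bytes 16..31 in desc[1] (req1).
	 */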
5842 
5843 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
5844 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
5845 
5846 	req0->vlan_id  = cpu_to_le16(vlan);
5847 	req0->vlan_cfg = is_kill;
5848 
5849 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
5850 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
5851 	else
5852 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
5853 
5854 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
5855 	if (ret) {
5856 		dev_err(&hdev->pdev->dev,
5857 			"Send vf vlan command fail, ret =%d.\n",
5858 			ret);
5859 		return ret;
5860 	}
5861 
5862 	if (!is_kill) {
5863 #define HCLGE_VF_VLAN_NO_ENTRY	2
5864 		if (!req0->resp_code || req0->resp_code == 1)
5865 			return 0;
5866 
5867 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
5868 			dev_warn(&hdev->pdev->dev,
5869 				 "vf vlan table is full, vf vlan filter is disabled\n");
5870 			return 0;
5871 		}
5872 
5873 		dev_err(&hdev->pdev->dev,
5874 			"Add vf vlan filter fail, ret =%d.\n",
5875 			req0->resp_code);
5876 	} else {
5877 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
5878 		if (!req0->resp_code)
5879 			return 0;
5880 
5881 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
5882 			dev_warn(&hdev->pdev->dev,
5883 				 "vlan %d filter is not in vf vlan table\n",
5884 				 vlan);
5885 			return 0;
5886 		}
5887 
5888 		dev_err(&hdev->pdev->dev,
5889 			"Kill vf vlan filter fail, ret =%d.\n",
5890 			req0->resp_code);
5891 	}
5892 
5893 	return -EIO;
5894 }
5895 
5896 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
5897 				      u16 vlan_id, bool is_kill)
5898 {
5899 	struct hclge_vlan_filter_pf_cfg_cmd *req;
5900 	struct hclge_desc desc;
5901 	u8 vlan_offset_byte_val;
5902 	u8 vlan_offset_byte;
5903 	u8 vlan_offset_160;
5904 	int ret;
5905 
5906 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
5907 
5908 	vlan_offset_160 = vlan_id / 160;
5909 	vlan_offset_byte = (vlan_id % 160) / 8;
5910 	vlan_offset_byte_val = 1 << (vlan_id % 8);
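	/* e.g. a hypothetical vlan_id of 1000 selects row 1000 / 160 = 6,
	 * byte (1000 % 160) / 8 = 5 and bit 1 << (1000 % 8) = 0x01.
	 */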
5911 
5912 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
5913 	req->vlan_offset = vlan_offset_160;
5914 	req->vlan_cfg = is_kill;
5915 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
5916 
5917 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5918 	if (ret)
5919 		dev_err(&hdev->pdev->dev,
5920 			"port vlan command, send fail, ret =%d.\n", ret);
5921 	return ret;
5922 }
5923 
5924 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
5925 				    u16 vport_id, u16 vlan_id, u8 qos,
5926 				    bool is_kill)
5927 {
5928 	u16 vport_idx, vport_num = 0;
5929 	int ret;
5930 
5931 	if (is_kill && !vlan_id)
5932 		return 0;
5933 
5934 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
5935 				       0, proto);
5936 	if (ret) {
5937 		dev_err(&hdev->pdev->dev,
5938 			"Set %d vport vlan filter config fail, ret =%d.\n",
5939 			vport_id, ret);
5940 		return ret;
5941 	}
5942 
5943 	/* vlan 0 may be added twice when 8021q module is enabled */
5944 	if (!is_kill && !vlan_id &&
5945 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
5946 		return 0;
5947 
5948 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
5949 		dev_err(&hdev->pdev->dev,
5950 			"Add port vlan failed, vport %d is already in vlan %d\n",
5951 			vport_id, vlan_id);
5952 		return -EINVAL;
5953 	}
5954 
5955 	if (is_kill &&
5956 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
5957 		dev_err(&hdev->pdev->dev,
5958 			"Delete port vlan failed, vport %d is not in vlan %d\n",
5959 			vport_id, vlan_id);
5960 		return -EINVAL;
5961 	}
5962 
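	/* The port-level vlan filter is effectively reference-counted
	 * across vports: it is only programmed when the first vport joins
	 * the vlan (vport_num == 1 after adding) or the last one leaves
	 * (vport_num == 0 after deleting).
	 */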
5963 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
5964 		vport_num++;
5965 
5966 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
5967 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
5968 						 is_kill);
5969 
5970 	return ret;
5971 }
5972 
5973 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
5974 			  u16 vlan_id, bool is_kill)
5975 {
5976 	struct hclge_vport *vport = hclge_get_vport(handle);
5977 	struct hclge_dev *hdev = vport->back;
5978 
5979 	return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
5980 					0, is_kill);
5981 }
5982 
5983 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
5984 				    u16 vlan, u8 qos, __be16 proto)
5985 {
5986 	struct hclge_vport *vport = hclge_get_vport(handle);
5987 	struct hclge_dev *hdev = vport->back;
5988 
5989 	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
5990 		return -EINVAL;
5991 	if (proto != htons(ETH_P_8021Q))
5992 		return -EPROTONOSUPPORT;
5993 
5994 	return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
5995 }
5996 
5997 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
5998 {
5999 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6000 	struct hclge_vport_vtag_tx_cfg_cmd *req;
6001 	struct hclge_dev *hdev = vport->back;
6002 	struct hclge_desc desc;
6003 	int status;
6004 
6005 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6006 
6007 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6008 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6009 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6010 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6011 		      vcfg->accept_tag1 ? 1 : 0);
6012 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6013 		      vcfg->accept_untag1 ? 1 : 0);
6014 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6015 		      vcfg->accept_tag2 ? 1 : 0);
6016 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6017 		      vcfg->accept_untag2 ? 1 : 0);
6018 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6019 		      vcfg->insert_tag1_en ? 1 : 0);
6020 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6021 		      vcfg->insert_tag2_en ? 1 : 0);
6022 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6023 
6024 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6025 	req->vf_bitmap[req->vf_offset] =
6026 		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6027 
6028 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6029 	if (status)
6030 		dev_err(&hdev->pdev->dev,
6031 			"Send port txvlan cfg command fail, ret =%d\n",
6032 			status);
6033 
6034 	return status;
6035 }
6036 
6037 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6038 {
6039 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6040 	struct hclge_vport_vtag_rx_cfg_cmd *req;
6041 	struct hclge_dev *hdev = vport->back;
6042 	struct hclge_desc desc;
6043 	int status;
6044 
6045 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6046 
6047 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6048 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6049 		      vcfg->strip_tag1_en ? 1 : 0);
6050 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6051 		      vcfg->strip_tag2_en ? 1 : 0);
6052 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6053 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
6054 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6055 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
6056 
6057 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6058 	req->vf_bitmap[req->vf_offset] =
6059 		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6060 
6061 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6062 	if (status)
6063 		dev_err(&hdev->pdev->dev,
6064 			"Send port rxvlan cfg command fail, ret =%d\n",
6065 			status);
6066 
6067 	return status;
6068 }
6069 
6070 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6071 {
6072 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6073 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6074 	struct hclge_desc desc;
6075 	int status;
6076 
6077 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6078 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6079 	rx_req->ot_fst_vlan_type =
6080 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6081 	rx_req->ot_sec_vlan_type =
6082 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6083 	rx_req->in_fst_vlan_type =
6084 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6085 	rx_req->in_sec_vlan_type =
6086 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6087 
6088 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6089 	if (status) {
6090 		dev_err(&hdev->pdev->dev,
6091 			"Send rxvlan protocol type command fail, ret =%d\n",
6092 			status);
6093 		return status;
6094 	}
6095 
6096 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6097 
6098 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
6099 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6100 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6101 
6102 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6103 	if (status)
6104 		dev_err(&hdev->pdev->dev,
6105 			"Send txvlan protocol type command fail, ret =%d\n",
6106 			status);
6107 
6108 	return status;
6109 }
6110 
6111 static int hclge_init_vlan_config(struct hclge_dev *hdev)
6112 {
6113 #define HCLGE_DEF_VLAN_TYPE		0x8100
6114 
6115 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6116 	struct hclge_vport *vport;
6117 	int ret;
6118 	int i;
6119 
6120 	if (hdev->pdev->revision >= 0x21) {
6121 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6122 						 HCLGE_FILTER_FE_EGRESS, true);
6123 		if (ret)
6124 			return ret;
6125 
6126 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6127 						 HCLGE_FILTER_FE_INGRESS, true);
6128 		if (ret)
6129 			return ret;
6130 	} else {
6131 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6132 						 HCLGE_FILTER_FE_EGRESS_V1_B,
6133 						 true);
6134 		if (ret)
6135 			return ret;
6136 	}
6137 
6138 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
6139 
6140 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6141 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6142 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6143 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6144 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
6145 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
6146 
6147 	ret = hclge_set_vlan_protocol_type(hdev);
6148 	if (ret)
6149 		return ret;
6150 
6151 	for (i = 0; i < hdev->num_alloc_vport; i++) {
6152 		vport = &hdev->vport[i];
6153 		vport->txvlan_cfg.accept_tag1 = true;
6154 		vport->txvlan_cfg.accept_untag1 = true;
6155 
6156 		/* accept_tag2 and accept_untag2 are not supported on
6157 		 * pdev revision(0x20); newer revisions support them. Setting
6158 		 * these two fields does not return an error when the driver
6159 		 * sends the command to the firmware on revision(0x20).
6160 		 * These two fields cannot be configured by the user.
6161 		 */
6162 		vport->txvlan_cfg.accept_tag2 = true;
6163 		vport->txvlan_cfg.accept_untag2 = true;
6164 
6165 		vport->txvlan_cfg.insert_tag1_en = false;
6166 		vport->txvlan_cfg.insert_tag2_en = false;
6167 		vport->txvlan_cfg.default_tag1 = 0;
6168 		vport->txvlan_cfg.default_tag2 = 0;
6169 
6170 		ret = hclge_set_vlan_tx_offload_cfg(vport);
6171 		if (ret)
6172 			return ret;
6173 
6174 		vport->rxvlan_cfg.strip_tag1_en = false;
6175 		vport->rxvlan_cfg.strip_tag2_en = true;
6176 		vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6177 		vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6178 
6179 		ret = hclge_set_vlan_rx_offload_cfg(vport);
6180 		if (ret)
6181 			return ret;
6182 	}
6183 
6184 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
6185 }
6186 
6187 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
6188 {
6189 	struct hclge_vport *vport = hclge_get_vport(handle);
6190 
6191 	vport->rxvlan_cfg.strip_tag1_en = false;
6192 	vport->rxvlan_cfg.strip_tag2_en = enable;
6193 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6194 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6195 
6196 	return hclge_set_vlan_rx_offload_cfg(vport);
6197 }
6198 
6199 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
6200 {
6201 	struct hclge_config_max_frm_size_cmd *req;
6202 	struct hclge_desc desc;
6203 	int max_frm_size;
6204 	int ret;
6205 
6206 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
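	/* e.g. a standard new_mtu of 1500 gives 1500 + 14 (ETH_HLEN) +
	 * 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes on the wire.
	 */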
6207 
6208 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
6209 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
6210 		return -EINVAL;
6211 
6212 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
6213 
6214 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
6215 
6216 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
6217 	req->max_frm_size = cpu_to_le16(max_frm_size);
6218 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
6219 
6220 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6221 	if (ret)
6222 		dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
6223 	else
6224 		hdev->mps = max_frm_size;
6225 
6226 	return ret;
6227 }
6228 
6229 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
6230 {
6231 	struct hclge_vport *vport = hclge_get_vport(handle);
6232 	struct hclge_dev *hdev = vport->back;
6233 	int ret;
6234 
6235 	ret = hclge_set_mac_mtu(hdev, new_mtu);
6236 	if (ret) {
6237 		dev_err(&hdev->pdev->dev,
6238 			"Change mtu fail, ret =%d\n", ret);
6239 		return ret;
6240 	}
6241 
6242 	ret = hclge_buffer_alloc(hdev);
6243 	if (ret)
6244 		dev_err(&hdev->pdev->dev,
6245 			"Allocate buffer fail, ret =%d\n", ret);
6246 
6247 	return ret;
6248 }
6249 
6250 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
6251 				    bool enable)
6252 {
6253 	struct hclge_reset_tqp_queue_cmd *req;
6254 	struct hclge_desc desc;
6255 	int ret;
6256 
6257 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
6258 
6259 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6260 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6261 	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
6262 
6263 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6264 	if (ret) {
6265 		dev_err(&hdev->pdev->dev,
6266 			"Send tqp reset cmd error, status =%d\n", ret);
6267 		return ret;
6268 	}
6269 
6270 	return 0;
6271 }
6272 
6273 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
6274 {
6275 	struct hclge_reset_tqp_queue_cmd *req;
6276 	struct hclge_desc desc;
6277 	int ret;
6278 
6279 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
6280 
6281 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6282 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6283 
6284 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6285 	if (ret) {
6286 		dev_err(&hdev->pdev->dev,
6287 			"Get reset status error, status =%d\n", ret);
6288 		return ret;
6289 	}
6290 
6291 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
6292 }
6293 
6294 static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
6295 					  u16 queue_id)
6296 {
6297 	struct hnae3_queue *queue;
6298 	struct hclge_tqp *tqp;
6299 
6300 	queue = handle->kinfo.tqp[queue_id];
6301 	tqp = container_of(queue, struct hclge_tqp, q);
6302 
6303 	return tqp->index;
6304 }
6305 
6306 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
6307 {
6308 	struct hclge_vport *vport = hclge_get_vport(handle);
6309 	struct hclge_dev *hdev = vport->back;
6310 	int reset_try_times = 0;
6311 	int reset_status;
6312 	u16 queue_gid;
6313 	int ret = 0;
6314 
6315 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
6316 
6317 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
6318 	if (ret) {
6319 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
6320 		return ret;
6321 	}
6322 
6323 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
6324 	if (ret) {
6325 		dev_err(&hdev->pdev->dev,
6326 			"Send reset tqp cmd fail, ret = %d\n", ret);
6327 		return ret;
6328 	}
6329 
6330 	reset_try_times = 0;
6331 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
6332 		/* Wait for tqp hw reset */
6333 		msleep(20);
6334 		reset_status = hclge_get_reset_status(hdev, queue_gid);
6335 		if (reset_status)
6336 			break;
6337 	}
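	/* Each poll sleeps ~20 ms, so the hardware gets roughly
	 * HCLGE_TQP_RESET_TRY_TIMES * 20 ms to report ready before the
	 * reset is treated as failed.
	 */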
6338 
6339 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
6340 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
6341 		return -ETIME;
6342 	}
6343 
6344 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
6345 	if (ret)
6346 		dev_err(&hdev->pdev->dev,
6347 			"Deassert the soft reset fail, ret = %d\n", ret);
6348 
6349 	return ret;
6350 }
6351 
6352 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
6353 {
6354 	struct hclge_dev *hdev = vport->back;
6355 	int reset_try_times = 0;
6356 	int reset_status;
6357 	u16 queue_gid;
6358 	int ret;
6359 
6360 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
6361 
6362 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
6363 	if (ret) {
6364 		dev_warn(&hdev->pdev->dev,
6365 			 "Send reset tqp cmd fail, ret = %d\n", ret);
6366 		return;
6367 	}
6368 
6369 	reset_try_times = 0;
6370 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
6371 		/* Wait for tqp hw reset */
6372 		msleep(20);
6373 		reset_status = hclge_get_reset_status(hdev, queue_gid);
6374 		if (reset_status)
6375 			break;
6376 	}
6377 
6378 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
6379 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
6380 		return;
6381 	}
6382 
6383 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
6384 	if (ret)
6385 		dev_warn(&hdev->pdev->dev,
6386 			 "Deassert the soft reset fail, ret = %d\n", ret);
6387 }
6388 
6389 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
6390 {
6391 	struct hclge_vport *vport = hclge_get_vport(handle);
6392 	struct hclge_dev *hdev = vport->back;
6393 
6394 	return hdev->fw_version;
6395 }
6396 
6397 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
6398 {
6399 	struct phy_device *phydev = hdev->hw.mac.phydev;
6400 
6401 	if (!phydev)
6402 		return;
6403 
6404 	phy_set_asym_pause(phydev, rx_en, tx_en);
6405 }
6406 
6407 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
6408 {
6409 	int ret;
6410 
6411 	if (rx_en && tx_en)
6412 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
6413 	else if (rx_en && !tx_en)
6414 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
6415 	else if (!rx_en && tx_en)
6416 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
6417 	else
6418 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
6419 
6420 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
6421 		return 0;
6422 
6423 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
6424 	if (ret) {
6425 		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
6426 			ret);
6427 		return ret;
6428 	}
6429 
6430 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
6431 
6432 	return 0;
6433 }
6434 
6435 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
6436 {
6437 	struct phy_device *phydev = hdev->hw.mac.phydev;
6438 	u16 remote_advertising = 0;
6439 	u16 local_advertising = 0;
6440 	u32 rx_pause, tx_pause;
6441 	u8 flowctl;
6442 
6443 	if (!phydev->link || !phydev->autoneg)
6444 		return 0;
6445 
6446 	local_advertising = ethtool_adv_to_lcl_adv_t(phydev->advertising);
6447 
6448 	if (phydev->pause)
6449 		remote_advertising = LPA_PAUSE_CAP;
6450 
6451 	if (phydev->asym_pause)
6452 		remote_advertising |= LPA_PAUSE_ASYM;
6453 
6454 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
6455 					   remote_advertising);
6456 	tx_pause = flowctl & FLOW_CTRL_TX;
6457 	rx_pause = flowctl & FLOW_CTRL_RX;
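	/* e.g. if both ends advertise symmetric pause (hypothetical link
	 * partner), mii_resolve_flowctrl_fdx() resolves to
	 * FLOW_CTRL_TX | FLOW_CTRL_RX and pause runs in both directions.
	 */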
6458 
6459 	if (phydev->duplex == HCLGE_MAC_HALF) {
6460 		tx_pause = 0;
6461 		rx_pause = 0;
6462 	}
6463 
6464 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
6465 }
6466 
6467 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
6468 				 u32 *rx_en, u32 *tx_en)
6469 {
6470 	struct hclge_vport *vport = hclge_get_vport(handle);
6471 	struct hclge_dev *hdev = vport->back;
6472 
6473 	*auto_neg = hclge_get_autoneg(handle);
6474 
6475 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
6476 		*rx_en = 0;
6477 		*tx_en = 0;
6478 		return;
6479 	}
6480 
6481 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
6482 		*rx_en = 1;
6483 		*tx_en = 0;
6484 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
6485 		*tx_en = 1;
6486 		*rx_en = 0;
6487 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
6488 		*rx_en = 1;
6489 		*tx_en = 1;
6490 	} else {
6491 		*rx_en = 0;
6492 		*tx_en = 0;
6493 	}
6494 }
6495 
6496 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
6497 				u32 rx_en, u32 tx_en)
6498 {
6499 	struct hclge_vport *vport = hclge_get_vport(handle);
6500 	struct hclge_dev *hdev = vport->back;
6501 	struct phy_device *phydev = hdev->hw.mac.phydev;
6502 	u32 fc_autoneg;
6503 
6504 	fc_autoneg = hclge_get_autoneg(handle);
6505 	if (auto_neg != fc_autoneg) {
6506 		dev_info(&hdev->pdev->dev,
6507 			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
6508 		return -EOPNOTSUPP;
6509 	}
6510 
6511 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
6512 		dev_info(&hdev->pdev->dev,
6513 			 "Priority flow control enabled. Cannot set link flow control.\n");
6514 		return -EOPNOTSUPP;
6515 	}
6516 
6517 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
6518 
6519 	if (!fc_autoneg)
6520 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
6521 
6522 	/* Only support flow control negotiation for netdev with
6523 	 * phy attached for now.
6524 	 */
6525 	if (!phydev)
6526 		return -EOPNOTSUPP;
6527 
6528 	return phy_start_aneg(phydev);
6529 }
6530 
6531 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
6532 					  u8 *auto_neg, u32 *speed, u8 *duplex)
6533 {
6534 	struct hclge_vport *vport = hclge_get_vport(handle);
6535 	struct hclge_dev *hdev = vport->back;
6536 
6537 	if (speed)
6538 		*speed = hdev->hw.mac.speed;
6539 	if (duplex)
6540 		*duplex = hdev->hw.mac.duplex;
6541 	if (auto_neg)
6542 		*auto_neg = hdev->hw.mac.autoneg;
6543 }
6544 
6545 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
6546 {
6547 	struct hclge_vport *vport = hclge_get_vport(handle);
6548 	struct hclge_dev *hdev = vport->back;
6549 
6550 	if (media_type)
6551 		*media_type = hdev->hw.mac.media_type;
6552 }
6553 
6554 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
6555 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
6556 {
6557 	struct hclge_vport *vport = hclge_get_vport(handle);
6558 	struct hclge_dev *hdev = vport->back;
6559 	struct phy_device *phydev = hdev->hw.mac.phydev;
6560 	int mdix_ctrl, mdix, retval, is_resolved;
6561 
6562 	if (!phydev) {
6563 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
6564 		*tp_mdix = ETH_TP_MDI_INVALID;
6565 		return;
6566 	}
6567 
6568 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
6569 
6570 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
6571 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
6572 				    HCLGE_PHY_MDIX_CTRL_S);
6573 
6574 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
6575 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
6576 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
6577 
6578 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
6579 
6580 	switch (mdix_ctrl) {
6581 	case 0x0:
6582 		*tp_mdix_ctrl = ETH_TP_MDI;
6583 		break;
6584 	case 0x1:
6585 		*tp_mdix_ctrl = ETH_TP_MDI_X;
6586 		break;
6587 	case 0x3:
6588 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
6589 		break;
6590 	default:
6591 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
6592 		break;
6593 	}
6594 
6595 	if (!is_resolved)
6596 		*tp_mdix = ETH_TP_MDI_INVALID;
6597 	else if (mdix)
6598 		*tp_mdix = ETH_TP_MDI_X;
6599 	else
6600 		*tp_mdix = ETH_TP_MDI;
6601 }
6602 
6603 static int hclge_init_instance_hw(struct hclge_dev *hdev)
6604 {
6605 	return hclge_mac_connect_phy(hdev);
6606 }
6607 
6608 static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
6609 {
6610 	hclge_mac_disconnect_phy(hdev);
6611 }
6612 
6613 static int hclge_init_client_instance(struct hnae3_client *client,
6614 				      struct hnae3_ae_dev *ae_dev)
6615 {
6616 	struct hclge_dev *hdev = ae_dev->priv;
6617 	struct hclge_vport *vport;
6618 	int i, ret;
6619 
6620 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
6621 		vport = &hdev->vport[i];
6622 
6623 		switch (client->type) {
6624 		case HNAE3_CLIENT_KNIC:
6626 			hdev->nic_client = client;
6627 			vport->nic.client = client;
6628 			ret = client->ops->init_instance(&vport->nic);
6629 			if (ret)
6630 				goto clear_nic;
6631 
6632 			ret = hclge_init_instance_hw(hdev);
6633 			if (ret) {
6634 				client->ops->uninit_instance(&vport->nic,
6635 							     0);
6636 				goto clear_nic;
6637 			}
6638 
6639 			hnae3_set_client_init_flag(client, ae_dev, 1);
6640 
6641 			if (hdev->roce_client &&
6642 			    hnae3_dev_roce_supported(hdev)) {
6643 				struct hnae3_client *rc = hdev->roce_client;
6644 
6645 				ret = hclge_init_roce_base_info(vport);
6646 				if (ret)
6647 					goto clear_roce;
6648 
6649 				ret = rc->ops->init_instance(&vport->roce);
6650 				if (ret)
6651 					goto clear_roce;
6652 
6653 				hnae3_set_client_init_flag(hdev->roce_client,
6654 							   ae_dev, 1);
6655 			}
6656 
6657 			break;
6658 		case HNAE3_CLIENT_UNIC:
6659 			hdev->nic_client = client;
6660 			vport->nic.client = client;
6661 
6662 			ret = client->ops->init_instance(&vport->nic);
6663 			if (ret)
6664 				goto clear_nic;
6665 
6666 			hnae3_set_client_init_flag(client, ae_dev, 1);
6667 
6668 			break;
6669 		case HNAE3_CLIENT_ROCE:
6670 			if (hnae3_dev_roce_supported(hdev)) {
6671 				hdev->roce_client = client;
6672 				vport->roce.client = client;
6673 			}
6674 
6675 			if (hdev->roce_client && hdev->nic_client) {
6676 				ret = hclge_init_roce_base_info(vport);
6677 				if (ret)
6678 					goto clear_roce;
6679 
6680 				ret = client->ops->init_instance(&vport->roce);
6681 				if (ret)
6682 					goto clear_roce;
6683 
6684 				hnae3_set_client_init_flag(client, ae_dev, 1);
6685 			}
6686 
6687 			break;
6688 		default:
6689 			return -EINVAL;
6690 		}
6691 	}
6692 
6693 	return 0;
6694 
6695 clear_nic:
6696 	hdev->nic_client = NULL;
6697 	vport->nic.client = NULL;
6698 	return ret;
6699 clear_roce:
6700 	hdev->roce_client = NULL;
6701 	vport->roce.client = NULL;
6702 	return ret;
6703 }
6704 
6705 static void hclge_uninit_client_instance(struct hnae3_client *client,
6706 					 struct hnae3_ae_dev *ae_dev)
6707 {
6708 	struct hclge_dev *hdev = ae_dev->priv;
6709 	struct hclge_vport *vport;
6710 	int i;
6711 
6712 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
6713 		vport = &hdev->vport[i];
6714 		if (hdev->roce_client) {
6715 			hdev->roce_client->ops->uninit_instance(&vport->roce,
6716 								0);
6717 			hdev->roce_client = NULL;
6718 			vport->roce.client = NULL;
6719 		}
6720 		if (client->type == HNAE3_CLIENT_ROCE)
6721 			return;
6722 		if (hdev->nic_client && client->ops->uninit_instance) {
6723 			hclge_uninit_instance_hw(hdev);
6724 			client->ops->uninit_instance(&vport->nic, 0);
6725 			hdev->nic_client = NULL;
6726 			vport->nic.client = NULL;
6727 		}
6728 	}
6729 }
6730 
6731 static int hclge_pci_init(struct hclge_dev *hdev)
6732 {
6733 	struct pci_dev *pdev = hdev->pdev;
6734 	struct hclge_hw *hw;
6735 	int ret;
6736 
6737 	ret = pci_enable_device(pdev);
6738 	if (ret) {
6739 		dev_err(&pdev->dev, "failed to enable PCI device\n");
6740 		return ret;
6741 	}
6742 
6743 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6744 	if (ret) {
6745 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6746 		if (ret) {
6747 			dev_err(&pdev->dev,
6748 				"can't set consistent PCI DMA\n");
6749 			goto err_disable_device;
6750 		}
6751 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
6752 	}
6753 
6754 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
6755 	if (ret) {
6756 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
6757 		goto err_disable_device;
6758 	}
6759 
6760 	pci_set_master(pdev);
6761 	hw = &hdev->hw;
6762 	hw->io_base = pcim_iomap(pdev, 2, 0);
6763 	if (!hw->io_base) {
6764 		dev_err(&pdev->dev, "Can't map configuration register space\n");
6765 		ret = -ENOMEM;
6766 		goto err_clr_master;
6767 	}
6768 
6769 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
6770 
6771 	return 0;
6772 err_clr_master:
6773 	pci_clear_master(pdev);
6774 	pci_release_regions(pdev);
6775 err_disable_device:
6776 	pci_disable_device(pdev);
6777 
6778 	return ret;
6779 }
6780 
6781 static void hclge_pci_uninit(struct hclge_dev *hdev)
6782 {
6783 	struct pci_dev *pdev = hdev->pdev;
6784 
6785 	pcim_iounmap(pdev, hdev->hw.io_base);
6786 	pci_free_irq_vectors(pdev);
6787 	pci_clear_master(pdev);
6788 	pci_release_mem_regions(pdev);
6789 	pci_disable_device(pdev);
6790 }
6791 
6792 static void hclge_state_init(struct hclge_dev *hdev)
6793 {
6794 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
6795 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
6796 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
6797 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6798 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
6799 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
6800 }
6801 
6802 static void hclge_state_uninit(struct hclge_dev *hdev)
6803 {
6804 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
6805 
6806 	if (hdev->service_timer.function)
6807 		del_timer_sync(&hdev->service_timer);
6808 	if (hdev->reset_timer.function)
6809 		del_timer_sync(&hdev->reset_timer);
6810 	if (hdev->service_task.func)
6811 		cancel_work_sync(&hdev->service_task);
6812 	if (hdev->rst_service_task.func)
6813 		cancel_work_sync(&hdev->rst_service_task);
6814 	if (hdev->mbx_service_task.func)
6815 		cancel_work_sync(&hdev->mbx_service_task);
6816 }
6817 
6818 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
6819 {
6820 	struct pci_dev *pdev = ae_dev->pdev;
6821 	struct hclge_dev *hdev;
6822 	int ret;
6823 
6824 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
6825 	if (!hdev) {
6826 		ret = -ENOMEM;
6827 		goto out;
6828 	}
6829 
6830 	hdev->pdev = pdev;
6831 	hdev->ae_dev = ae_dev;
6832 	hdev->reset_type = HNAE3_NONE_RESET;
6833 	hdev->reset_level = HNAE3_FUNC_RESET;
6834 	ae_dev->priv = hdev;
6835 
6836 	ret = hclge_pci_init(hdev);
6837 	if (ret) {
6838 		dev_err(&pdev->dev, "PCI init failed\n");
6839 		goto out;
6840 	}
6841 
6842 	/* Firmware command queue initialization */
6843 	ret = hclge_cmd_queue_init(hdev);
6844 	if (ret) {
6845 		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
6846 		goto err_pci_uninit;
6847 	}
6848 
6849 	/* Firmware command initialization */
6850 	ret = hclge_cmd_init(hdev);
6851 	if (ret)
6852 		goto err_cmd_uninit;
6853 
6854 	ret = hclge_get_cap(hdev);
6855 	if (ret) {
6856 		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
6857 			ret);
6858 		goto err_cmd_uninit;
6859 	}
6860 
6861 	ret = hclge_configure(hdev);
6862 	if (ret) {
6863 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
6864 		goto err_cmd_uninit;
6865 	}
6866 
6867 	ret = hclge_init_msi(hdev);
6868 	if (ret) {
6869 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
6870 		goto err_cmd_uninit;
6871 	}
6872 
6873 	ret = hclge_misc_irq_init(hdev);
6874 	if (ret) {
6875 		dev_err(&pdev->dev,
6876 			"Misc IRQ(vector0) init error, ret = %d.\n",
6877 			ret);
6878 		goto err_msi_uninit;
6879 	}
6880 
6881 	ret = hclge_alloc_tqps(hdev);
6882 	if (ret) {
6883 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
6884 		goto err_msi_irq_uninit;
6885 	}
6886 
6887 	ret = hclge_alloc_vport(hdev);
6888 	if (ret) {
6889 		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
6890 		goto err_msi_irq_uninit;
6891 	}
6892 
6893 	ret = hclge_map_tqp(hdev);
6894 	if (ret) {
6895 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
6896 		goto err_msi_irq_uninit;
6897 	}
6898 
6899 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
6900 		ret = hclge_mac_mdio_config(hdev);
6901 		if (ret) {
6902 			dev_err(&hdev->pdev->dev,
6903 				"mdio config fail ret=%d\n", ret);
6904 			goto err_msi_irq_uninit;
6905 		}
6906 	}
6907 
6908 	ret = hclge_init_umv_space(hdev);
6909 	if (ret) {
6910 		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
6911 		goto err_msi_irq_uninit;
6912 	}
6913 
6914 	ret = hclge_mac_init(hdev);
6915 	if (ret) {
6916 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
6917 		goto err_mdiobus_unreg;
6918 	}
6919 
6920 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
6921 	if (ret) {
6922 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
6923 		goto err_mdiobus_unreg;
6924 	}
6925 
6926 	ret = hclge_init_vlan_config(hdev);
6927 	if (ret) {
6928 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
6929 		goto err_mdiobus_unreg;
6930 	}
6931 
6932 	ret = hclge_tm_schd_init(hdev);
6933 	if (ret) {
6934 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
6935 		goto err_mdiobus_unreg;
6936 	}
6937 
6938 	hclge_rss_init_cfg(hdev);
6939 	ret = hclge_rss_init_hw(hdev);
6940 	if (ret) {
6941 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
6942 		goto err_mdiobus_unreg;
6943 	}
6944 
6945 	ret = init_mgr_tbl(hdev);
6946 	if (ret) {
6947 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
6948 		goto err_mdiobus_unreg;
6949 	}
6950 
6951 	ret = hclge_init_fd_config(hdev);
6952 	if (ret) {
6953 		dev_err(&pdev->dev,
6954 			"fd table init fail, ret=%d\n", ret);
6955 		goto err_mdiobus_unreg;
6956 	}
6957 
6958 	ret = hclge_hw_error_set_state(hdev, true);
6959 	if (ret) {
6960 		dev_err(&pdev->dev,
6961 			"hw error interrupts enable failed, ret =%d\n", ret);
6962 		goto err_mdiobus_unreg;
6963 	}
6964 
6965 	hclge_dcb_ops_set(hdev);
6966 
6967 	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
6968 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
6969 	INIT_WORK(&hdev->service_task, hclge_service_task);
6970 	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
6971 	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
6972 
6973 	hclge_clear_all_event_cause(hdev);
6974 
6975 	/* Enable MISC vector(vector0) */
6976 	hclge_enable_vector(&hdev->misc_vector, true);
6977 
6978 	hclge_state_init(hdev);
6979 	hdev->last_reset_time = jiffies;
6980 
6981 	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
6982 	return 0;
6983 
6984 err_mdiobus_unreg:
6985 	if (hdev->hw.mac.phydev)
6986 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
6987 err_msi_irq_uninit:
6988 	hclge_misc_irq_uninit(hdev);
6989 err_msi_uninit:
6990 	pci_free_irq_vectors(pdev);
6991 err_cmd_uninit:
6992 	hclge_destroy_cmd_queue(&hdev->hw);
6993 err_pci_uninit:
6994 	pcim_iounmap(pdev, hdev->hw.io_base);
6995 	pci_clear_master(pdev);
6996 	pci_release_regions(pdev);
6997 	pci_disable_device(pdev);
6998 out:
6999 	return ret;
7000 }
7001 
7002 static void hclge_stats_clear(struct hclge_dev *hdev)
7003 {
7004 	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
7005 }
7006 
7007 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
7008 {
7009 	struct hclge_dev *hdev = ae_dev->priv;
7010 	struct pci_dev *pdev = ae_dev->pdev;
7011 	int ret;
7012 
7013 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7014 
7015 	hclge_stats_clear(hdev);
7016 	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
7017 
7018 	ret = hclge_cmd_init(hdev);
7019 	if (ret) {
7020 		dev_err(&pdev->dev, "Cmd init failed, ret = %d\n", ret);
7021 		return ret;
7022 	}
7023 
7024 	ret = hclge_get_cap(hdev);
7025 	if (ret) {
7026 		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
7027 			ret);
7028 		return ret;
7029 	}
7030 
7031 	ret = hclge_configure(hdev);
7032 	if (ret) {
7033 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
7034 		return ret;
7035 	}
7036 
7037 	ret = hclge_map_tqp(hdev);
7038 	if (ret) {
7039 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7040 		return ret;
7041 	}
7042 
7043 	hclge_reset_umv_space(hdev);
7044 
7045 	ret = hclge_mac_init(hdev);
7046 	if (ret) {
7047 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7048 		return ret;
7049 	}
7050 
7051 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7052 	if (ret) {
7053 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7054 		return ret;
7055 	}
7056 
7057 	ret = hclge_init_vlan_config(hdev);
7058 	if (ret) {
7059 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7060 		return ret;
7061 	}
7062 
7063 	ret = hclge_tm_init_hw(hdev);
7064 	if (ret) {
7065 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
7066 		return ret;
7067 	}
7068 
7069 	ret = hclge_rss_init_hw(hdev);
7070 	if (ret) {
7071 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7072 		return ret;
7073 	}
7074 
7075 	ret = hclge_init_fd_config(hdev);
7076 	if (ret) {
7077 		dev_err(&pdev->dev,
7078 			"fd table init fail, ret=%d\n", ret);
7079 		return ret;
7080 	}
7081 
7082 	/* Re-enable the TM hw error interrupts because
7083 	 * they get disabled on core/global reset.
7084 	 */
7085 	if (hclge_enable_tm_hw_error(hdev, true))
7086 		dev_err(&pdev->dev, "failed to enable TM hw error interrupts\n");
7087 
7088 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
7089 		 HCLGE_DRIVER_NAME);
7090 
7091 	return 0;
7092 }
7093 
7094 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
7095 {
7096 	struct hclge_dev *hdev = ae_dev->priv;
7097 	struct hclge_mac *mac = &hdev->hw.mac;
7098 
7099 	hclge_state_uninit(hdev);
7100 
7101 	if (mac->phydev)
7102 		mdiobus_unregister(mac->mdio_bus);
7103 
7104 	hclge_uninit_umv_space(hdev);
7105 
7106 	/* Disable MISC vector(vector0) */
7107 	hclge_enable_vector(&hdev->misc_vector, false);
7108 	synchronize_irq(hdev->misc_vector.vector_irq);
7109 
7110 	hclge_hw_error_set_state(hdev, false);
7111 	hclge_destroy_cmd_queue(&hdev->hw);
7112 	hclge_misc_irq_uninit(hdev);
7113 	hclge_pci_uninit(hdev);
7114 	ae_dev->priv = NULL;
7115 }
7116 
7117 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
7118 {
7119 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
7120 	struct hclge_vport *vport = hclge_get_vport(handle);
7121 	struct hclge_dev *hdev = vport->back;
7122 
7123 	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
7124 }
7125 
7126 static void hclge_get_channels(struct hnae3_handle *handle,
7127 			       struct ethtool_channels *ch)
7128 {
7129 	struct hclge_vport *vport = hclge_get_vport(handle);
7130 
7131 	ch->max_combined = hclge_get_max_channels(handle);
7132 	ch->other_count = 1;
7133 	ch->max_other = 1;
7134 	ch->combined_count = vport->alloc_tqps;
7135 }
7136 
7137 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
7138 					u16 *alloc_tqps, u16 *max_rss_size)
7139 {
7140 	struct hclge_vport *vport = hclge_get_vport(handle);
7141 	struct hclge_dev *hdev = vport->back;
7142 
7143 	*alloc_tqps = vport->alloc_tqps;
7144 	*max_rss_size = hdev->rss_size_max;
7145 }
7146 
7147 static void hclge_release_tqp(struct hclge_vport *vport)
7148 {
7149 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
7150 	struct hclge_dev *hdev = vport->back;
7151 	int i;
7152 
7153 	for (i = 0; i < kinfo->num_tqps; i++) {
7154 		struct hclge_tqp *tqp =
7155 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
7156 
7157 		tqp->q.handle = NULL;
7158 		tqp->q.tqp_index = 0;
7159 		tqp->alloced = false;
7160 	}
7161 
7162 	devm_kfree(&hdev->pdev->dev, kinfo->tqp);
7163 	kinfo->tqp = NULL;
7164 }
7165 
7166 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
7167 {
7168 	struct hclge_vport *vport = hclge_get_vport(handle);
7169 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
7170 	struct hclge_dev *hdev = vport->back;
7171 	int cur_rss_size = kinfo->rss_size;
7172 	int cur_tqps = kinfo->num_tqps;
7173 	u16 tc_offset[HCLGE_MAX_TC_NUM];
7174 	u16 tc_valid[HCLGE_MAX_TC_NUM];
7175 	u16 tc_size[HCLGE_MAX_TC_NUM];
7176 	u16 roundup_size;
7177 	u32 *rss_indir;
7178 	int ret, i;
7179 
7180 	/* Free old tqps, then reallocate to match the new tqp number */
7181 	hclge_release_tqp(vport);
7182 
7183 	ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc);
7184 	if (ret) {
7185 		dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
7186 		return ret;
7187 	}
7188 
7189 	ret = hclge_map_tqp_to_vport(hdev, vport);
7190 	if (ret) {
7191 		dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
7192 		return ret;
7193 	}
7194 
7195 	ret = hclge_tm_schd_init(hdev);
7196 	if (ret) {
7197 		dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
7198 		return ret;
7199 	}
7200 
7201 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
7202 	roundup_size = ilog2(roundup_size);
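	/* e.g. a hypothetical rss_size of 24: roundup_pow_of_two(24) = 32
	 * and ilog2(32) = 5, so tc_size is programmed as the log2 of the
	 * per-TC queue span.
	 */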
7203 	/* Set the RSS TC mode according to the new RSS size */
7204 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
7205 		tc_valid[i] = 0;
7206 
7207 		if (!(hdev->hw_tc_map & BIT(i)))
7208 			continue;
7209 
7210 		tc_valid[i] = 1;
7211 		tc_size[i] = roundup_size;
7212 		tc_offset[i] = kinfo->rss_size * i;
7213 	}
7214 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
7215 	if (ret)
7216 		return ret;
7217 
7218 	/* Reinitialize the RSS indirection table for the new RSS size */
7219 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
7220 	if (!rss_indir)
7221 		return -ENOMEM;
7222 
7223 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
7224 		rss_indir[i] = i % kinfo->rss_size;
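	/* e.g. a hypothetical rss_size of 4 fills the table with the
	 * repeating pattern 0, 1, 2, 3 so flows hash evenly across the
	 * active queues.
	 */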
7225 
7226 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
7227 	if (ret)
7228 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
7229 			ret);
7230 
7231 	kfree(rss_indir);
7232 
7233 	if (!ret)
7234 		dev_info(&hdev->pdev->dev,
7235 			 "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
7236 			 cur_rss_size, kinfo->rss_size,
7237 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
7238 
7239 	return ret;
7240 }
7241 
7242 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
7243 			      u32 *regs_num_64_bit)
7244 {
7245 	struct hclge_desc desc;
7246 	u32 total_num;
7247 	int ret;
7248 
7249 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
7250 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7251 	if (ret) {
7252 		dev_err(&hdev->pdev->dev,
7253 			"Query register number cmd failed, ret = %d.\n", ret);
7254 		return ret;
7255 	}
7256 
7257 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
7258 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
7259 
7260 	total_num = *regs_num_32_bit + *regs_num_64_bit;
7261 	if (!total_num)
7262 		return -EINVAL;
7263 
7264 	return 0;
7265 }

static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	/* The first descriptor loses two 32-bit words to the command
	 * header, hence the "+ 2" when computing the descriptor count.
	 */
	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			/* Skip the two header words of the first descriptor */
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
		} else {
			/* Subsequent descriptors are consumed as data in full */
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
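
/* Descriptor-count arithmetic above, illustrated with invented numbers:
 * for regs_num = 100, cmd_num = DIV_ROUND_UP(100 + 2, 8) = 13. The
 * first descriptor yields 8 - 2 = 6 register values (two 32-bit words
 * go to the command header) and each later one up to 8, so the 13
 * descriptors cover 6 + 12 * 8 = 102 >= 100 values.
 */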

static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	/* The command header occupies one 64-bit word of the first
	 * descriptor, hence the "+ 1" when computing the descriptor count.
	 */
	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			/* Skip the header word of the first descriptor */
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
		} else {
			/* Subsequent descriptors are consumed as data in full */
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
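
/* The same arithmetic for the 64-bit path (invented numbers): for
 * regs_num = 10, cmd_num = DIV_ROUND_UP(10 + 1, 4) = 3; the first
 * descriptor yields 4 - 1 = 3 values (one 64-bit word of header) and
 * the other two up to 4 each, covering 3 + 2 * 4 = 11 >= 10 values.
 */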

static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}
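
/* Sizing example (invented counts): if the firmware reports 128 32-bit
 * and 64 64-bit registers, the ethtool core is told to allocate
 * 128 * 4 + 64 * 8 = 1024 bytes for the register dump buffer.
 */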

static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	/* The 64-bit values follow the 32-bit values in the same buffer */
	data = (u32 *)data + regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, data);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}
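
/* Resulting dump layout, matching the length reported by
 * hclge_get_regs_len() (schematic only):
 *
 *	data: [ reg32[0] .. reg32[n32 - 1] | reg64[0] .. reg64[n64 - 1] ]
 *
 * where n32/n64 are the counts returned by hclge_get_regs_num().
 */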

static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret = %d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
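
/* This backs ethtool's "identify NIC" operation; from user space,
 * something like
 *
 *	ethtool --identify eth0 10
 *
 * (interface name illustrative) asks the port LED to signal for ten
 * seconds, which the ethtool core turns into ETHTOOL_ID_ACTIVE and
 * ETHTOOL_ID_INACTIVE calls into this handler.
 */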

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}
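
/* Both arrays are ethtool link-mode bitmaps (ETHTOOL_LINK_MODE_* bit
 * numbers packed into longs); callers typically copy them into a
 * struct ethtool_link_ksettings. For example, a set
 * ETHTOOL_LINK_MODE_10000baseKR_Full_BIT in supported[] would mean the
 * MAC reported that mode at init time (bit chosen for illustration).
 */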

static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.process_hw_error = hclge_process_ras_hw_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};
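
/* Registration sketch: hnae3_register_ae_algo() records this algo and
 * matches it against already-discovered ae devices via
 * ae_algo_pci_tbl; on a match, the hclge_ops callbacks above service
 * the device. The authoritative matching logic lives in hnae3.c, so
 * treat this note as a summary, not a contract.
 */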

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);