xref: /linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c (revision 02ff58dcf70ad7d11b01523dc404166ed11021da)
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <net/rtnetlink.h>
16 #include "hclge_cmd.h"
17 #include "hclge_dcb.h"
18 #include "hclge_main.h"
19 #include "hclge_mbx.h"
20 #include "hclge_mdio.h"
21 #include "hclge_tm.h"
22 #include "hclge_err.h"
23 #include "hnae3.h"
24 
25 #define HCLGE_NAME			"hclge"
26 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
27 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
28 
29 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
30 static int hclge_init_vlan_config(struct hclge_dev *hdev);
31 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
32 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
33 			       u16 *allocated_size, bool is_alloc);
34 
35 static struct hnae3_ae_algo ae_algo;
36 
37 static const struct pci_device_id ae_algo_pci_tbl[] = {
38 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
39 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
40 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
41 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
42 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
43 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
44 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
45 	/* required last entry */
46 	{0, }
47 };
48 
49 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
50 
51 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
52 					 HCLGE_CMDQ_TX_ADDR_H_REG,
53 					 HCLGE_CMDQ_TX_DEPTH_REG,
54 					 HCLGE_CMDQ_TX_TAIL_REG,
55 					 HCLGE_CMDQ_TX_HEAD_REG,
56 					 HCLGE_CMDQ_RX_ADDR_L_REG,
57 					 HCLGE_CMDQ_RX_ADDR_H_REG,
58 					 HCLGE_CMDQ_RX_DEPTH_REG,
59 					 HCLGE_CMDQ_RX_TAIL_REG,
60 					 HCLGE_CMDQ_RX_HEAD_REG,
61 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
62 					 HCLGE_CMDQ_INTR_STS_REG,
63 					 HCLGE_CMDQ_INTR_EN_REG,
64 					 HCLGE_CMDQ_INTR_GEN_REG};
65 
66 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
67 					   HCLGE_VECTOR0_OTER_EN_REG,
68 					   HCLGE_MISC_RESET_STS_REG,
69 					   HCLGE_MISC_VECTOR_INT_STS,
70 					   HCLGE_GLOBAL_RESET_REG,
71 					   HCLGE_FUN_RST_ING,
72 					   HCLGE_GRO_EN_REG};
73 
74 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
75 					 HCLGE_RING_RX_ADDR_H_REG,
76 					 HCLGE_RING_RX_BD_NUM_REG,
77 					 HCLGE_RING_RX_BD_LENGTH_REG,
78 					 HCLGE_RING_RX_MERGE_EN_REG,
79 					 HCLGE_RING_RX_TAIL_REG,
80 					 HCLGE_RING_RX_HEAD_REG,
81 					 HCLGE_RING_RX_FBD_NUM_REG,
82 					 HCLGE_RING_RX_OFFSET_REG,
83 					 HCLGE_RING_RX_FBD_OFFSET_REG,
84 					 HCLGE_RING_RX_STASH_REG,
85 					 HCLGE_RING_RX_BD_ERR_REG,
86 					 HCLGE_RING_TX_ADDR_L_REG,
87 					 HCLGE_RING_TX_ADDR_H_REG,
88 					 HCLGE_RING_TX_BD_NUM_REG,
89 					 HCLGE_RING_TX_PRIORITY_REG,
90 					 HCLGE_RING_TX_TC_REG,
91 					 HCLGE_RING_TX_MERGE_EN_REG,
92 					 HCLGE_RING_TX_TAIL_REG,
93 					 HCLGE_RING_TX_HEAD_REG,
94 					 HCLGE_RING_TX_FBD_NUM_REG,
95 					 HCLGE_RING_TX_OFFSET_REG,
96 					 HCLGE_RING_TX_EBD_NUM_REG,
97 					 HCLGE_RING_TX_EBD_OFFSET_REG,
98 					 HCLGE_RING_TX_BD_ERR_REG,
99 					 HCLGE_RING_EN_REG};
100 
101 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
102 					     HCLGE_TQP_INTR_GL0_REG,
103 					     HCLGE_TQP_INTR_GL1_REG,
104 					     HCLGE_TQP_INTR_GL2_REG,
105 					     HCLGE_TQP_INTR_RL_REG};
106 
107 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
108 	"App    Loopback test",
109 	"Serdes serial Loopback test",
110 	"Serdes parallel Loopback test",
111 	"Phy    Loopback test"
112 };
113 
114 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
115 	{"mac_tx_mac_pause_num",
116 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
117 	{"mac_rx_mac_pause_num",
118 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
119 	{"mac_tx_pfc_pri0_pkt_num",
120 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
121 	{"mac_tx_pfc_pri1_pkt_num",
122 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
123 	{"mac_tx_pfc_pri2_pkt_num",
124 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
125 	{"mac_tx_pfc_pri3_pkt_num",
126 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
127 	{"mac_tx_pfc_pri4_pkt_num",
128 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
129 	{"mac_tx_pfc_pri5_pkt_num",
130 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
131 	{"mac_tx_pfc_pri6_pkt_num",
132 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
133 	{"mac_tx_pfc_pri7_pkt_num",
134 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
135 	{"mac_rx_pfc_pri0_pkt_num",
136 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
137 	{"mac_rx_pfc_pri1_pkt_num",
138 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
139 	{"mac_rx_pfc_pri2_pkt_num",
140 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
141 	{"mac_rx_pfc_pri3_pkt_num",
142 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
143 	{"mac_rx_pfc_pri4_pkt_num",
144 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
145 	{"mac_rx_pfc_pri5_pkt_num",
146 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
147 	{"mac_rx_pfc_pri6_pkt_num",
148 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
149 	{"mac_rx_pfc_pri7_pkt_num",
150 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
151 	{"mac_tx_total_pkt_num",
152 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
153 	{"mac_tx_total_oct_num",
154 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
155 	{"mac_tx_good_pkt_num",
156 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
157 	{"mac_tx_bad_pkt_num",
158 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
159 	{"mac_tx_good_oct_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
161 	{"mac_tx_bad_oct_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
163 	{"mac_tx_uni_pkt_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
165 	{"mac_tx_multi_pkt_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
167 	{"mac_tx_broad_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
169 	{"mac_tx_undersize_pkt_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
171 	{"mac_tx_oversize_pkt_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
173 	{"mac_tx_64_oct_pkt_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
175 	{"mac_tx_65_127_oct_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
177 	{"mac_tx_128_255_oct_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
179 	{"mac_tx_256_511_oct_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
181 	{"mac_tx_512_1023_oct_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
183 	{"mac_tx_1024_1518_oct_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
185 	{"mac_tx_1519_2047_oct_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
187 	{"mac_tx_2048_4095_oct_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
189 	{"mac_tx_4096_8191_oct_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
191 	{"mac_tx_8192_9216_oct_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
193 	{"mac_tx_9217_12287_oct_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
195 	{"mac_tx_12288_16383_oct_pkt_num",
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
197 	{"mac_tx_1519_max_good_pkt_num",
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
199 	{"mac_tx_1519_max_bad_pkt_num",
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
201 	{"mac_rx_total_pkt_num",
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
203 	{"mac_rx_total_oct_num",
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
205 	{"mac_rx_good_pkt_num",
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
207 	{"mac_rx_bad_pkt_num",
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
209 	{"mac_rx_good_oct_num",
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
211 	{"mac_rx_bad_oct_num",
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
213 	{"mac_rx_uni_pkt_num",
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
215 	{"mac_rx_multi_pkt_num",
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
217 	{"mac_rx_broad_pkt_num",
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
219 	{"mac_rx_undersize_pkt_num",
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
221 	{"mac_rx_oversize_pkt_num",
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
223 	{"mac_rx_64_oct_pkt_num",
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
225 	{"mac_rx_65_127_oct_pkt_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
227 	{"mac_rx_128_255_oct_pkt_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
229 	{"mac_rx_256_511_oct_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
231 	{"mac_rx_512_1023_oct_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
233 	{"mac_rx_1024_1518_oct_pkt_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
235 	{"mac_rx_1519_2047_oct_pkt_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
237 	{"mac_rx_2048_4095_oct_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
239 	{"mac_rx_4096_8191_oct_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
241 	{"mac_rx_8192_9216_oct_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
243 	{"mac_rx_9217_12287_oct_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
245 	{"mac_rx_12288_16383_oct_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
247 	{"mac_rx_1519_max_good_pkt_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
249 	{"mac_rx_1519_max_bad_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
251 
252 	{"mac_tx_fragment_pkt_num",
253 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
254 	{"mac_tx_undermin_pkt_num",
255 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
256 	{"mac_tx_jabber_pkt_num",
257 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
258 	{"mac_tx_err_all_pkt_num",
259 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
260 	{"mac_tx_from_app_good_pkt_num",
261 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
262 	{"mac_tx_from_app_bad_pkt_num",
263 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
264 	{"mac_rx_fragment_pkt_num",
265 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
266 	{"mac_rx_undermin_pkt_num",
267 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
268 	{"mac_rx_jabber_pkt_num",
269 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
270 	{"mac_rx_fcs_err_pkt_num",
271 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
272 	{"mac_rx_send_app_good_pkt_num",
273 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
274 	{"mac_rx_send_app_bad_pkt_num",
275 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
276 };
277 
278 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
279 	{
280 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
281 		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
282 		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
283 		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
284 		.i_port_bitmap = 0x1,
285 	},
286 };
287 
288 static int hclge_mac_update_stats(struct hclge_dev *hdev)
289 {
290 #define HCLGE_MAC_CMD_NUM 21
291 #define HCLGE_RTN_DATA_NUM 4
292 
293 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
294 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
295 	__le64 *desc_data;
296 	int i, k, n;
297 	int ret;
298 
299 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
300 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
301 	if (ret) {
302 		dev_err(&hdev->pdev->dev,
303 			"Get MAC pkt stats fail, status = %d.\n", ret);
304 
305 		return ret;
306 	}
307 
308 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
309 		if (unlikely(i == 0)) {
310 			desc_data = (__le64 *)(&desc[i].data[0]);
311 			n = HCLGE_RTN_DATA_NUM - 2;
312 		} else {
313 			desc_data = (__le64 *)(&desc[i]);
314 			n = HCLGE_RTN_DATA_NUM;
315 		}
316 		for (k = 0; k < n; k++) {
317 			*data++ += le64_to_cpu(*desc_data);
318 			desc_data++;
319 		}
320 	}
321 
322 	return 0;
323 }
324 
325 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
326 {
327 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
328 	struct hclge_vport *vport = hclge_get_vport(handle);
329 	struct hclge_dev *hdev = vport->back;
330 	struct hnae3_queue *queue;
331 	struct hclge_desc desc[1];
332 	struct hclge_tqp *tqp;
333 	int ret, i;
334 
335 	for (i = 0; i < kinfo->num_tqps; i++) {
336 		queue = handle->kinfo.tqp[i];
337 		tqp = container_of(queue, struct hclge_tqp, q);
338 		/* command : HCLGE_OPC_QUERY_IGU_STAT */
339 		hclge_cmd_setup_basic_desc(&desc[0],
340 					   HCLGE_OPC_QUERY_RX_STATUS,
341 					   true);
342 
343 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
344 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
345 		if (ret) {
346 			dev_err(&hdev->pdev->dev,
347 				"Query tqp stat fail, status = %d,queue = %d\n",
348 				ret,	i);
349 			return ret;
350 		}
351 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
352 			le32_to_cpu(desc[0].data[1]);
353 	}
354 
355 	for (i = 0; i < kinfo->num_tqps; i++) {
356 		queue = handle->kinfo.tqp[i];
357 		tqp = container_of(queue, struct hclge_tqp, q);
358 		/* command : HCLGE_OPC_QUERY_IGU_STAT */
359 		hclge_cmd_setup_basic_desc(&desc[0],
360 					   HCLGE_OPC_QUERY_TX_STATUS,
361 					   true);
362 
363 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
364 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
365 		if (ret) {
366 			dev_err(&hdev->pdev->dev,
367 				"Query tqp stat fail, status = %d,queue = %d\n",
368 				ret, i);
369 			return ret;
370 		}
371 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
372 			le32_to_cpu(desc[0].data[1]);
373 	}
374 
375 	return 0;
376 }
377 
378 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
379 {
380 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
381 	struct hclge_tqp *tqp;
382 	u64 *buff = data;
383 	int i;
384 
385 	for (i = 0; i < kinfo->num_tqps; i++) {
386 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
387 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
388 	}
389 
390 	for (i = 0; i < kinfo->num_tqps; i++) {
391 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
392 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
393 	}
394 
395 	return buff;
396 }
397 
398 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
399 {
400 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
401 
402 	return kinfo->num_tqps * (2);
403 }
404 
405 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
406 {
407 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
408 	u8 *buff = data;
409 	int i = 0;
410 
411 	for (i = 0; i < kinfo->num_tqps; i++) {
412 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
413 			struct hclge_tqp, q);
414 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
415 			 tqp->index);
416 		buff = buff + ETH_GSTRING_LEN;
417 	}
418 
419 	for (i = 0; i < kinfo->num_tqps; i++) {
420 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
421 			struct hclge_tqp, q);
422 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
423 			 tqp->index);
424 		buff = buff + ETH_GSTRING_LEN;
425 	}
426 
427 	return buff;
428 }
429 
430 static u64 *hclge_comm_get_stats(void *comm_stats,
431 				 const struct hclge_comm_stats_str strs[],
432 				 int size, u64 *data)
433 {
434 	u64 *buf = data;
435 	u32 i;
436 
437 	for (i = 0; i < size; i++)
438 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
439 
440 	return buf + size;
441 }
442 
443 static u8 *hclge_comm_get_strings(u32 stringset,
444 				  const struct hclge_comm_stats_str strs[],
445 				  int size, u8 *data)
446 {
447 	char *buff = (char *)data;
448 	u32 i;
449 
450 	if (stringset != ETH_SS_STATS)
451 		return buff;
452 
453 	for (i = 0; i < size; i++) {
454 		snprintf(buff, ETH_GSTRING_LEN,
455 			 strs[i].desc);
456 		buff = buff + ETH_GSTRING_LEN;
457 	}
458 
459 	return (u8 *)buff;
460 }
461 
462 static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
463 				 struct net_device_stats *net_stats)
464 {
465 	net_stats->tx_dropped = 0;
466 	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
467 	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
468 	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
469 
470 	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
471 	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
472 
473 	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
474 	net_stats->rx_length_errors =
475 		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
476 	net_stats->rx_length_errors +=
477 		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
478 	net_stats->rx_over_errors =
479 		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
480 }
481 
482 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
483 {
484 	struct hnae3_handle *handle;
485 	int status;
486 
487 	handle = &hdev->vport[0].nic;
488 	if (handle->client) {
489 		status = hclge_tqps_update_stats(handle);
490 		if (status) {
491 			dev_err(&hdev->pdev->dev,
492 				"Update TQPS stats fail, status = %d.\n",
493 				status);
494 		}
495 	}
496 
497 	status = hclge_mac_update_stats(hdev);
498 	if (status)
499 		dev_err(&hdev->pdev->dev,
500 			"Update MAC stats fail, status = %d.\n", status);
501 
502 	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
503 }
504 
505 static void hclge_update_stats(struct hnae3_handle *handle,
506 			       struct net_device_stats *net_stats)
507 {
508 	struct hclge_vport *vport = hclge_get_vport(handle);
509 	struct hclge_dev *hdev = vport->back;
510 	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
511 	int status;
512 
513 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
514 		return;
515 
516 	status = hclge_mac_update_stats(hdev);
517 	if (status)
518 		dev_err(&hdev->pdev->dev,
519 			"Update MAC stats fail, status = %d.\n",
520 			status);
521 
522 	status = hclge_tqps_update_stats(handle);
523 	if (status)
524 		dev_err(&hdev->pdev->dev,
525 			"Update TQPS stats fail, status = %d.\n",
526 			status);
527 
528 	hclge_update_netstat(hw_stats, net_stats);
529 
530 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
531 }
532 
533 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
534 {
535 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
536 		HNAE3_SUPPORT_PHY_LOOPBACK |\
537 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
538 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
539 
540 	struct hclge_vport *vport = hclge_get_vport(handle);
541 	struct hclge_dev *hdev = vport->back;
542 	int count = 0;
543 
544 	/* Loopback test support rules:
545 	 * mac: only GE mode support
546 	 * serdes: all mac mode will support include GE/XGE/LGE/CGE
547 	 * phy: only support when phy device exist on board
548 	 */
549 	if (stringset == ETH_SS_TEST) {
550 		/* clear loopback bit flags at first */
551 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
552 		if (hdev->pdev->revision >= 0x21 ||
553 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
554 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
555 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
556 			count += 1;
557 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
558 		}
559 
560 		count += 2;
561 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
562 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
563 	} else if (stringset == ETH_SS_STATS) {
564 		count = ARRAY_SIZE(g_mac_stats_string) +
565 			hclge_tqps_get_sset_count(handle, stringset);
566 	}
567 
568 	return count;
569 }
570 
571 static void hclge_get_strings(struct hnae3_handle *handle,
572 			      u32 stringset,
573 			      u8 *data)
574 {
575 	u8 *p = (char *)data;
576 	int size;
577 
578 	if (stringset == ETH_SS_STATS) {
579 		size = ARRAY_SIZE(g_mac_stats_string);
580 		p = hclge_comm_get_strings(stringset,
581 					   g_mac_stats_string,
582 					   size,
583 					   p);
584 		p = hclge_tqps_get_strings(handle, p);
585 	} else if (stringset == ETH_SS_TEST) {
586 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
587 			memcpy(p,
588 			       hns3_nic_test_strs[HNAE3_LOOP_APP],
589 			       ETH_GSTRING_LEN);
590 			p += ETH_GSTRING_LEN;
591 		}
592 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
593 			memcpy(p,
594 			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
595 			       ETH_GSTRING_LEN);
596 			p += ETH_GSTRING_LEN;
597 		}
598 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
599 			memcpy(p,
600 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
601 			       ETH_GSTRING_LEN);
602 			p += ETH_GSTRING_LEN;
603 		}
604 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
605 			memcpy(p,
606 			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
607 			       ETH_GSTRING_LEN);
608 			p += ETH_GSTRING_LEN;
609 		}
610 	}
611 }
612 
613 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
614 {
615 	struct hclge_vport *vport = hclge_get_vport(handle);
616 	struct hclge_dev *hdev = vport->back;
617 	u64 *p;
618 
619 	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
620 				 g_mac_stats_string,
621 				 ARRAY_SIZE(g_mac_stats_string),
622 				 data);
623 	p = hclge_tqps_get_stats(handle, p);
624 }
625 
626 static int hclge_parse_func_status(struct hclge_dev *hdev,
627 				   struct hclge_func_status_cmd *status)
628 {
629 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
630 		return -EINVAL;
631 
632 	/* Set the pf to main pf */
633 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
634 		hdev->flag |= HCLGE_FLAG_MAIN;
635 	else
636 		hdev->flag &= ~HCLGE_FLAG_MAIN;
637 
638 	return 0;
639 }
640 
641 static int hclge_query_function_status(struct hclge_dev *hdev)
642 {
643 	struct hclge_func_status_cmd *req;
644 	struct hclge_desc desc;
645 	int timeout = 0;
646 	int ret;
647 
648 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
649 	req = (struct hclge_func_status_cmd *)desc.data;
650 
651 	do {
652 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
653 		if (ret) {
654 			dev_err(&hdev->pdev->dev,
655 				"query function status failed %d.\n",
656 				ret);
657 
658 			return ret;
659 		}
660 
661 		/* Check pf reset is done */
662 		if (req->pf_state)
663 			break;
664 		usleep_range(1000, 2000);
665 	} while (timeout++ < 5);
666 
667 	ret = hclge_parse_func_status(hdev, req);
668 
669 	return ret;
670 }
671 
672 static int hclge_query_pf_resource(struct hclge_dev *hdev)
673 {
674 	struct hclge_pf_res_cmd *req;
675 	struct hclge_desc desc;
676 	int ret;
677 
678 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
679 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
680 	if (ret) {
681 		dev_err(&hdev->pdev->dev,
682 			"query pf resource failed %d.\n", ret);
683 		return ret;
684 	}
685 
686 	req = (struct hclge_pf_res_cmd *)desc.data;
687 	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
688 	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
689 
690 	if (hnae3_dev_roce_supported(hdev)) {
691 		hdev->roce_base_msix_offset =
692 		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
693 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
694 		hdev->num_roce_msi =
695 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
696 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
697 
698 		/* PF should have NIC vectors and Roce vectors,
699 		 * NIC vectors are queued before Roce vectors.
700 		 */
701 		hdev->num_msi = hdev->num_roce_msi  +
702 				hdev->roce_base_msix_offset;
703 	} else {
704 		hdev->num_msi =
705 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
706 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
707 	}
708 
709 	return 0;
710 }
711 
712 static int hclge_parse_speed(int speed_cmd, int *speed)
713 {
714 	switch (speed_cmd) {
715 	case 6:
716 		*speed = HCLGE_MAC_SPEED_10M;
717 		break;
718 	case 7:
719 		*speed = HCLGE_MAC_SPEED_100M;
720 		break;
721 	case 0:
722 		*speed = HCLGE_MAC_SPEED_1G;
723 		break;
724 	case 1:
725 		*speed = HCLGE_MAC_SPEED_10G;
726 		break;
727 	case 2:
728 		*speed = HCLGE_MAC_SPEED_25G;
729 		break;
730 	case 3:
731 		*speed = HCLGE_MAC_SPEED_40G;
732 		break;
733 	case 4:
734 		*speed = HCLGE_MAC_SPEED_50G;
735 		break;
736 	case 5:
737 		*speed = HCLGE_MAC_SPEED_100G;
738 		break;
739 	default:
740 		return -EINVAL;
741 	}
742 
743 	return 0;
744 }
745 
746 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
747 					u8 speed_ability)
748 {
749 	unsigned long *supported = hdev->hw.mac.supported;
750 
751 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
752 		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
753 			supported);
754 
755 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
756 		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
757 			supported);
758 
759 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
760 		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
761 			supported);
762 
763 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
764 		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
765 			supported);
766 
767 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
768 		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
769 			supported);
770 
771 	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
772 	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
773 }
774 
775 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
776 {
777 	u8 media_type = hdev->hw.mac.media_type;
778 
779 	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
780 		return;
781 
782 	hclge_parse_fiber_link_mode(hdev, speed_ability);
783 }
784 
785 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
786 {
787 	struct hclge_cfg_param_cmd *req;
788 	u64 mac_addr_tmp_high;
789 	u64 mac_addr_tmp;
790 	int i;
791 
792 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
793 
794 	/* get the configuration */
795 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
796 					      HCLGE_CFG_VMDQ_M,
797 					      HCLGE_CFG_VMDQ_S);
798 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
799 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
800 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
801 					    HCLGE_CFG_TQP_DESC_N_M,
802 					    HCLGE_CFG_TQP_DESC_N_S);
803 
804 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
805 					HCLGE_CFG_PHY_ADDR_M,
806 					HCLGE_CFG_PHY_ADDR_S);
807 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
808 					  HCLGE_CFG_MEDIA_TP_M,
809 					  HCLGE_CFG_MEDIA_TP_S);
810 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
811 					  HCLGE_CFG_RX_BUF_LEN_M,
812 					  HCLGE_CFG_RX_BUF_LEN_S);
813 	/* get mac_address */
814 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
815 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
816 					    HCLGE_CFG_MAC_ADDR_H_M,
817 					    HCLGE_CFG_MAC_ADDR_H_S);
818 
819 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
820 
821 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
822 					     HCLGE_CFG_DEFAULT_SPEED_M,
823 					     HCLGE_CFG_DEFAULT_SPEED_S);
824 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
825 					    HCLGE_CFG_RSS_SIZE_M,
826 					    HCLGE_CFG_RSS_SIZE_S);
827 
828 	for (i = 0; i < ETH_ALEN; i++)
829 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
830 
831 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
832 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
833 
834 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
835 					     HCLGE_CFG_SPEED_ABILITY_M,
836 					     HCLGE_CFG_SPEED_ABILITY_S);
837 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
838 					 HCLGE_CFG_UMV_TBL_SPACE_M,
839 					 HCLGE_CFG_UMV_TBL_SPACE_S);
840 	if (!cfg->umv_space)
841 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
842 }
843 
844 /* hclge_get_cfg: query the static parameter from flash
845  * @hdev: pointer to struct hclge_dev
846  * @hcfg: the config structure to be getted
847  */
848 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
849 {
850 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
851 	struct hclge_cfg_param_cmd *req;
852 	int i, ret;
853 
854 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
855 		u32 offset = 0;
856 
857 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
858 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
859 					   true);
860 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
861 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
862 		/* Len should be united by 4 bytes when send to hardware */
863 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
864 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
865 		req->offset = cpu_to_le32(offset);
866 	}
867 
868 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
869 	if (ret) {
870 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
871 		return ret;
872 	}
873 
874 	hclge_parse_cfg(hcfg, desc);
875 
876 	return 0;
877 }
878 
879 static int hclge_get_cap(struct hclge_dev *hdev)
880 {
881 	int ret;
882 
883 	ret = hclge_query_function_status(hdev);
884 	if (ret) {
885 		dev_err(&hdev->pdev->dev,
886 			"query function status error %d.\n", ret);
887 		return ret;
888 	}
889 
890 	/* get pf resource */
891 	ret = hclge_query_pf_resource(hdev);
892 	if (ret)
893 		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
894 
895 	return ret;
896 }
897 
898 static int hclge_configure(struct hclge_dev *hdev)
899 {
900 	struct hclge_cfg cfg;
901 	int ret, i;
902 
903 	ret = hclge_get_cfg(hdev, &cfg);
904 	if (ret) {
905 		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
906 		return ret;
907 	}
908 
909 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
910 	hdev->base_tqp_pid = 0;
911 	hdev->rss_size_max = cfg.rss_size_max;
912 	hdev->rx_buf_len = cfg.rx_buf_len;
913 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
914 	hdev->hw.mac.media_type = cfg.media_type;
915 	hdev->hw.mac.phy_addr = cfg.phy_addr;
916 	hdev->num_desc = cfg.tqp_desc_num;
917 	hdev->tm_info.num_pg = 1;
918 	hdev->tc_max = cfg.tc_num;
919 	hdev->tm_info.hw_pfc_map = 0;
920 	hdev->wanted_umv_size = cfg.umv_space;
921 
922 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
923 	if (ret) {
924 		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
925 		return ret;
926 	}
927 
928 	hclge_parse_link_mode(hdev, cfg.speed_ability);
929 
930 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
931 	    (hdev->tc_max < 1)) {
932 		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
933 			 hdev->tc_max);
934 		hdev->tc_max = 1;
935 	}
936 
937 	/* Dev does not support DCB */
938 	if (!hnae3_dev_dcb_supported(hdev)) {
939 		hdev->tc_max = 1;
940 		hdev->pfc_max = 0;
941 	} else {
942 		hdev->pfc_max = hdev->tc_max;
943 	}
944 
945 	hdev->tm_info.num_tc = hdev->tc_max;
946 
947 	/* Currently not support uncontiuous tc */
948 	for (i = 0; i < hdev->tm_info.num_tc; i++)
949 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
950 
951 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
952 
953 	return ret;
954 }
955 
956 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
957 			    int tso_mss_max)
958 {
959 	struct hclge_cfg_tso_status_cmd *req;
960 	struct hclge_desc desc;
961 	u16 tso_mss;
962 
963 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
964 
965 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
966 
967 	tso_mss = 0;
968 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
969 			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
970 	req->tso_mss_min = cpu_to_le16(tso_mss);
971 
972 	tso_mss = 0;
973 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
974 			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
975 	req->tso_mss_max = cpu_to_le16(tso_mss);
976 
977 	return hclge_cmd_send(&hdev->hw, &desc, 1);
978 }
979 
980 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
981 {
982 	struct hclge_cfg_gro_status_cmd *req;
983 	struct hclge_desc desc;
984 	int ret;
985 
986 	if (!hnae3_dev_gro_supported(hdev))
987 		return 0;
988 
989 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
990 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
991 
992 	req->gro_en = cpu_to_le16(en ? 1 : 0);
993 
994 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
995 	if (ret)
996 		dev_err(&hdev->pdev->dev,
997 			"GRO hardware config cmd failed, ret = %d\n", ret);
998 
999 	return ret;
1000 }
1001 
1002 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1003 {
1004 	struct hclge_tqp *tqp;
1005 	int i;
1006 
1007 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1008 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1009 	if (!hdev->htqp)
1010 		return -ENOMEM;
1011 
1012 	tqp = hdev->htqp;
1013 
1014 	for (i = 0; i < hdev->num_tqps; i++) {
1015 		tqp->dev = &hdev->pdev->dev;
1016 		tqp->index = i;
1017 
1018 		tqp->q.ae_algo = &ae_algo;
1019 		tqp->q.buf_size = hdev->rx_buf_len;
1020 		tqp->q.desc_num = hdev->num_desc;
1021 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1022 			i * HCLGE_TQP_REG_SIZE;
1023 
1024 		tqp++;
1025 	}
1026 
1027 	return 0;
1028 }
1029 
1030 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1031 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1032 {
1033 	struct hclge_tqp_map_cmd *req;
1034 	struct hclge_desc desc;
1035 	int ret;
1036 
1037 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1038 
1039 	req = (struct hclge_tqp_map_cmd *)desc.data;
1040 	req->tqp_id = cpu_to_le16(tqp_pid);
1041 	req->tqp_vf = func_id;
1042 	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1043 			1 << HCLGE_TQP_MAP_EN_B;
1044 	req->tqp_vid = cpu_to_le16(tqp_vid);
1045 
1046 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1047 	if (ret)
1048 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1049 
1050 	return ret;
1051 }
1052 
1053 static int  hclge_assign_tqp(struct hclge_vport *vport)
1054 {
1055 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1056 	struct hclge_dev *hdev = vport->back;
1057 	int i, alloced;
1058 
1059 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1060 	     alloced < kinfo->num_tqps; i++) {
1061 		if (!hdev->htqp[i].alloced) {
1062 			hdev->htqp[i].q.handle = &vport->nic;
1063 			hdev->htqp[i].q.tqp_index = alloced;
1064 			hdev->htqp[i].q.desc_num = kinfo->num_desc;
1065 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1066 			hdev->htqp[i].alloced = true;
1067 			alloced++;
1068 		}
1069 	}
1070 	vport->alloc_tqps = kinfo->num_tqps;
1071 
1072 	return 0;
1073 }
1074 
1075 static int hclge_knic_setup(struct hclge_vport *vport,
1076 			    u16 num_tqps, u16 num_desc)
1077 {
1078 	struct hnae3_handle *nic = &vport->nic;
1079 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1080 	struct hclge_dev *hdev = vport->back;
1081 	int i, ret;
1082 
1083 	kinfo->num_desc = num_desc;
1084 	kinfo->rx_buf_len = hdev->rx_buf_len;
1085 	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
1086 	kinfo->rss_size
1087 		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
1088 	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;
1089 
1090 	for (i = 0; i < HNAE3_MAX_TC; i++) {
1091 		if (hdev->hw_tc_map & BIT(i)) {
1092 			kinfo->tc_info[i].enable = true;
1093 			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
1094 			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
1095 			kinfo->tc_info[i].tc = i;
1096 		} else {
1097 			/* Set to default queue if TC is disable */
1098 			kinfo->tc_info[i].enable = false;
1099 			kinfo->tc_info[i].tqp_offset = 0;
1100 			kinfo->tc_info[i].tqp_count = 1;
1101 			kinfo->tc_info[i].tc = 0;
1102 		}
1103 	}
1104 
1105 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
1106 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1107 	if (!kinfo->tqp)
1108 		return -ENOMEM;
1109 
1110 	ret = hclge_assign_tqp(vport);
1111 	if (ret)
1112 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1113 
1114 	return ret;
1115 }
1116 
1117 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1118 				  struct hclge_vport *vport)
1119 {
1120 	struct hnae3_handle *nic = &vport->nic;
1121 	struct hnae3_knic_private_info *kinfo;
1122 	u16 i;
1123 
1124 	kinfo = &nic->kinfo;
1125 	for (i = 0; i < kinfo->num_tqps; i++) {
1126 		struct hclge_tqp *q =
1127 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1128 		bool is_pf;
1129 		int ret;
1130 
1131 		is_pf = !(vport->vport_id);
1132 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1133 					     i, is_pf);
1134 		if (ret)
1135 			return ret;
1136 	}
1137 
1138 	return 0;
1139 }
1140 
1141 static int hclge_map_tqp(struct hclge_dev *hdev)
1142 {
1143 	struct hclge_vport *vport = hdev->vport;
1144 	u16 i, num_vport;
1145 
1146 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1147 	for (i = 0; i < num_vport; i++)	{
1148 		int ret;
1149 
1150 		ret = hclge_map_tqp_to_vport(hdev, vport);
1151 		if (ret)
1152 			return ret;
1153 
1154 		vport++;
1155 	}
1156 
1157 	return 0;
1158 }
1159 
1160 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1161 {
1162 	/* this would be initialized later */
1163 }
1164 
1165 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1166 {
1167 	struct hnae3_handle *nic = &vport->nic;
1168 	struct hclge_dev *hdev = vport->back;
1169 	int ret;
1170 
1171 	nic->pdev = hdev->pdev;
1172 	nic->ae_algo = &ae_algo;
1173 	nic->numa_node_mask = hdev->numa_node_mask;
1174 
1175 	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1176 		ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
1177 		if (ret) {
1178 			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1179 				ret);
1180 			return ret;
1181 		}
1182 	} else {
1183 		hclge_unic_setup(vport, num_tqps);
1184 	}
1185 
1186 	return 0;
1187 }
1188 
1189 static int hclge_alloc_vport(struct hclge_dev *hdev)
1190 {
1191 	struct pci_dev *pdev = hdev->pdev;
1192 	struct hclge_vport *vport;
1193 	u32 tqp_main_vport;
1194 	u32 tqp_per_vport;
1195 	int num_vport, i;
1196 	int ret;
1197 
1198 	/* We need to alloc a vport for main NIC of PF */
1199 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1200 
1201 	if (hdev->num_tqps < num_vport) {
1202 		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1203 			hdev->num_tqps, num_vport);
1204 		return -EINVAL;
1205 	}
1206 
1207 	/* Alloc the same number of TQPs for every vport */
1208 	tqp_per_vport = hdev->num_tqps / num_vport;
1209 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1210 
1211 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1212 			     GFP_KERNEL);
1213 	if (!vport)
1214 		return -ENOMEM;
1215 
1216 	hdev->vport = vport;
1217 	hdev->num_alloc_vport = num_vport;
1218 
1219 	if (IS_ENABLED(CONFIG_PCI_IOV))
1220 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1221 
1222 	for (i = 0; i < num_vport; i++) {
1223 		vport->back = hdev;
1224 		vport->vport_id = i;
1225 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1226 
1227 		if (i == 0)
1228 			ret = hclge_vport_setup(vport, tqp_main_vport);
1229 		else
1230 			ret = hclge_vport_setup(vport, tqp_per_vport);
1231 		if (ret) {
1232 			dev_err(&pdev->dev,
1233 				"vport setup failed for vport %d, %d\n",
1234 				i, ret);
1235 			return ret;
1236 		}
1237 
1238 		vport++;
1239 	}
1240 
1241 	return 0;
1242 }
1243 
1244 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1245 				    struct hclge_pkt_buf_alloc *buf_alloc)
1246 {
1247 /* TX buffer size is unit by 128 byte */
1248 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1249 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1250 	struct hclge_tx_buff_alloc_cmd *req;
1251 	struct hclge_desc desc;
1252 	int ret;
1253 	u8 i;
1254 
1255 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1256 
1257 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1258 	for (i = 0; i < HCLGE_TC_NUM; i++) {
1259 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1260 
1261 		req->tx_pkt_buff[i] =
1262 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1263 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1264 	}
1265 
1266 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1267 	if (ret)
1268 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1269 			ret);
1270 
1271 	return ret;
1272 }
1273 
1274 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1275 				 struct hclge_pkt_buf_alloc *buf_alloc)
1276 {
1277 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1278 
1279 	if (ret)
1280 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1281 
1282 	return ret;
1283 }
1284 
1285 static int hclge_get_tc_num(struct hclge_dev *hdev)
1286 {
1287 	int i, cnt = 0;
1288 
1289 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1290 		if (hdev->hw_tc_map & BIT(i))
1291 			cnt++;
1292 	return cnt;
1293 }
1294 
1295 static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
1296 {
1297 	int i, cnt = 0;
1298 
1299 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1300 		if (hdev->hw_tc_map & BIT(i) &&
1301 		    hdev->tm_info.hw_pfc_map & BIT(i))
1302 			cnt++;
1303 	return cnt;
1304 }
1305 
1306 /* Get the number of pfc enabled TCs, which have private buffer */
1307 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1308 				  struct hclge_pkt_buf_alloc *buf_alloc)
1309 {
1310 	struct hclge_priv_buf *priv;
1311 	int i, cnt = 0;
1312 
1313 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1314 		priv = &buf_alloc->priv_buf[i];
1315 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1316 		    priv->enable)
1317 			cnt++;
1318 	}
1319 
1320 	return cnt;
1321 }
1322 
1323 /* Get the number of pfc disabled TCs, which have private buffer */
1324 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1325 				     struct hclge_pkt_buf_alloc *buf_alloc)
1326 {
1327 	struct hclge_priv_buf *priv;
1328 	int i, cnt = 0;
1329 
1330 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1331 		priv = &buf_alloc->priv_buf[i];
1332 		if (hdev->hw_tc_map & BIT(i) &&
1333 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1334 		    priv->enable)
1335 			cnt++;
1336 	}
1337 
1338 	return cnt;
1339 }
1340 
1341 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1342 {
1343 	struct hclge_priv_buf *priv;
1344 	u32 rx_priv = 0;
1345 	int i;
1346 
1347 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1348 		priv = &buf_alloc->priv_buf[i];
1349 		if (priv->enable)
1350 			rx_priv += priv->buf_size;
1351 	}
1352 	return rx_priv;
1353 }
1354 
1355 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1356 {
1357 	u32 i, total_tx_size = 0;
1358 
1359 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1360 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1361 
1362 	return total_tx_size;
1363 }
1364 
1365 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1366 				struct hclge_pkt_buf_alloc *buf_alloc,
1367 				u32 rx_all)
1368 {
1369 	u32 shared_buf_min, shared_buf_tc, shared_std;
1370 	int tc_num, pfc_enable_num;
1371 	u32 shared_buf;
1372 	u32 rx_priv;
1373 	int i;
1374 
1375 	tc_num = hclge_get_tc_num(hdev);
1376 	pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
1377 
1378 	if (hnae3_dev_dcb_supported(hdev))
1379 		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
1380 	else
1381 		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
1382 
1383 	shared_buf_tc = pfc_enable_num * hdev->mps +
1384 			(tc_num - pfc_enable_num) * hdev->mps / 2 +
1385 			hdev->mps;
1386 	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);
1387 
1388 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1389 	if (rx_all <= rx_priv + shared_std)
1390 		return false;
1391 
1392 	shared_buf = rx_all - rx_priv;
1393 	buf_alloc->s_buf.buf_size = shared_buf;
1394 	buf_alloc->s_buf.self.high = shared_buf;
1395 	buf_alloc->s_buf.self.low =  2 * hdev->mps;
1396 
1397 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1398 		if ((hdev->hw_tc_map & BIT(i)) &&
1399 		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
1400 			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
1401 			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
1402 		} else {
1403 			buf_alloc->s_buf.tc_thrd[i].low = 0;
1404 			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
1405 		}
1406 	}
1407 
1408 	return true;
1409 }
1410 
1411 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1412 				struct hclge_pkt_buf_alloc *buf_alloc)
1413 {
1414 	u32 i, total_size;
1415 
1416 	total_size = hdev->pkt_buf_size;
1417 
1418 	/* alloc tx buffer for all enabled tc */
1419 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1420 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1421 
1422 		if (total_size < HCLGE_DEFAULT_TX_BUF)
1423 			return -ENOMEM;
1424 
1425 		if (hdev->hw_tc_map & BIT(i))
1426 			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
1427 		else
1428 			priv->tx_buf_size = 0;
1429 
1430 		total_size -= priv->tx_buf_size;
1431 	}
1432 
1433 	return 0;
1434 }
1435 
1436 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1437  * @hdev: pointer to struct hclge_dev
1438  * @buf_alloc: pointer to buffer calculation data
1439  * @return: 0: calculate sucessful, negative: fail
1440  */
1441 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1442 				struct hclge_pkt_buf_alloc *buf_alloc)
1443 {
1444 #define HCLGE_BUF_SIZE_UNIT	128
1445 	u32 rx_all = hdev->pkt_buf_size, aligned_mps;
1446 	int no_pfc_priv_num, pfc_priv_num;
1447 	struct hclge_priv_buf *priv;
1448 	int i;
1449 
1450 	aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1451 	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1452 
1453 	/* When DCB is not supported, rx private
1454 	 * buffer is not allocated.
1455 	 */
1456 	if (!hnae3_dev_dcb_supported(hdev)) {
1457 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1458 			return -ENOMEM;
1459 
1460 		return 0;
1461 	}
1462 
1463 	/* step 1, try to alloc private buffer for all enabled tc */
1464 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1465 		priv = &buf_alloc->priv_buf[i];
1466 		if (hdev->hw_tc_map & BIT(i)) {
1467 			priv->enable = 1;
1468 			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1469 				priv->wl.low = aligned_mps;
1470 				priv->wl.high = priv->wl.low + aligned_mps;
1471 				priv->buf_size = priv->wl.high +
1472 						HCLGE_DEFAULT_DV;
1473 			} else {
1474 				priv->wl.low = 0;
1475 				priv->wl.high = 2 * aligned_mps;
1476 				priv->buf_size = priv->wl.high;
1477 			}
1478 		} else {
1479 			priv->enable = 0;
1480 			priv->wl.low = 0;
1481 			priv->wl.high = 0;
1482 			priv->buf_size = 0;
1483 		}
1484 	}
1485 
1486 	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1487 		return 0;
1488 
1489 	/* step 2, try to decrease the buffer size of
1490 	 * no pfc TC's private buffer
1491 	 */
1492 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1493 		priv = &buf_alloc->priv_buf[i];
1494 
1495 		priv->enable = 0;
1496 		priv->wl.low = 0;
1497 		priv->wl.high = 0;
1498 		priv->buf_size = 0;
1499 
1500 		if (!(hdev->hw_tc_map & BIT(i)))
1501 			continue;
1502 
1503 		priv->enable = 1;
1504 
1505 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1506 			priv->wl.low = 128;
1507 			priv->wl.high = priv->wl.low + aligned_mps;
1508 			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
1509 		} else {
1510 			priv->wl.low = 0;
1511 			priv->wl.high = aligned_mps;
1512 			priv->buf_size = priv->wl.high;
1513 		}
1514 	}
1515 
1516 	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1517 		return 0;
1518 
1519 	/* step 3, try to reduce the number of pfc disabled TCs,
1520 	 * which have private buffer
1521 	 */
1522 	/* get the total no pfc enable TC number, which have private buffer */
1523 	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1524 
1525 	/* let the last to be cleared first */
1526 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1527 		priv = &buf_alloc->priv_buf[i];
1528 
1529 		if (hdev->hw_tc_map & BIT(i) &&
1530 		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1531 			/* Clear the no pfc TC private buffer */
1532 			priv->wl.low = 0;
1533 			priv->wl.high = 0;
1534 			priv->buf_size = 0;
1535 			priv->enable = 0;
1536 			no_pfc_priv_num--;
1537 		}
1538 
1539 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1540 		    no_pfc_priv_num == 0)
1541 			break;
1542 	}
1543 
1544 	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1545 		return 0;
1546 
1547 	/* step 4, try to reduce the number of pfc enabled TCs
1548 	 * which have private buffer.
1549 	 */
1550 	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1551 
1552 	/* let the last to be cleared first */
1553 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1554 		priv = &buf_alloc->priv_buf[i];
1555 
1556 		if (hdev->hw_tc_map & BIT(i) &&
1557 		    hdev->tm_info.hw_pfc_map & BIT(i)) {
1558 			/* Reduce the number of pfc TC with private buffer */
1559 			priv->wl.low = 0;
1560 			priv->enable = 0;
1561 			priv->wl.high = 0;
1562 			priv->buf_size = 0;
1563 			pfc_priv_num--;
1564 		}
1565 
1566 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1567 		    pfc_priv_num == 0)
1568 			break;
1569 	}
1570 	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1571 		return 0;
1572 
1573 	return -ENOMEM;
1574 }
1575 
1576 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1577 				   struct hclge_pkt_buf_alloc *buf_alloc)
1578 {
1579 	struct hclge_rx_priv_buff_cmd *req;
1580 	struct hclge_desc desc;
1581 	int ret;
1582 	int i;
1583 
1584 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1585 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1586 
1587 	/* Alloc private buffer TCs */
1588 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1589 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1590 
1591 		req->buf_num[i] =
1592 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1593 		req->buf_num[i] |=
1594 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1595 	}
1596 
1597 	req->shared_buf =
1598 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1599 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
1600 
1601 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1602 	if (ret)
1603 		dev_err(&hdev->pdev->dev,
1604 			"rx private buffer alloc cmd failed %d\n", ret);
1605 
1606 	return ret;
1607 }
1608 
1609 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1610 				   struct hclge_pkt_buf_alloc *buf_alloc)
1611 {
1612 	struct hclge_rx_priv_wl_buf *req;
1613 	struct hclge_priv_buf *priv;
1614 	struct hclge_desc desc[2];
1615 	int i, j;
1616 	int ret;
1617 
1618 	for (i = 0; i < 2; i++) {
1619 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1620 					   false);
1621 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1622 
1623 		/* The first descriptor set the NEXT bit to 1 */
1624 		if (i == 0)
1625 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1626 		else
1627 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1628 
1629 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1630 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1631 
1632 			priv = &buf_alloc->priv_buf[idx];
1633 			req->tc_wl[j].high =
1634 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1635 			req->tc_wl[j].high |=
1636 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1637 			req->tc_wl[j].low =
1638 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1639 			req->tc_wl[j].low |=
1640 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1641 		}
1642 	}
1643 
1644 	/* Send 2 descriptor at one time */
1645 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1646 	if (ret)
1647 		dev_err(&hdev->pdev->dev,
1648 			"rx private waterline config cmd failed %d\n",
1649 			ret);
1650 	return ret;
1651 }
1652 
1653 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1654 				    struct hclge_pkt_buf_alloc *buf_alloc)
1655 {
1656 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1657 	struct hclge_rx_com_thrd *req;
1658 	struct hclge_desc desc[2];
1659 	struct hclge_tc_thrd *tc;
1660 	int i, j;
1661 	int ret;
1662 
1663 	for (i = 0; i < 2; i++) {
1664 		hclge_cmd_setup_basic_desc(&desc[i],
1665 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1666 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
1667 
1668 		/* The first descriptor set the NEXT bit to 1 */
1669 		if (i == 0)
1670 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1671 		else
1672 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1673 
1674 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1675 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1676 
1677 			req->com_thrd[j].high =
1678 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1679 			req->com_thrd[j].high |=
1680 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1681 			req->com_thrd[j].low =
1682 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1683 			req->com_thrd[j].low |=
1684 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1685 		}
1686 	}
1687 
1688 	/* Send 2 descriptors at one time */
1689 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1690 	if (ret)
1691 		dev_err(&hdev->pdev->dev,
1692 			"common threshold config cmd failed %d\n", ret);
1693 	return ret;
1694 }
1695 
1696 static int hclge_common_wl_config(struct hclge_dev *hdev,
1697 				  struct hclge_pkt_buf_alloc *buf_alloc)
1698 {
1699 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1700 	struct hclge_rx_com_wl *req;
1701 	struct hclge_desc desc;
1702 	int ret;
1703 
1704 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1705 
1706 	req = (struct hclge_rx_com_wl *)desc.data;
1707 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1708 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1709 
1710 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1711 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1712 
1713 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1714 	if (ret)
1715 		dev_err(&hdev->pdev->dev,
1716 			"common waterline config cmd failed %d\n", ret);
1717 
1718 	return ret;
1719 }
1720 
1721 int hclge_buffer_alloc(struct hclge_dev *hdev)
1722 {
1723 	struct hclge_pkt_buf_alloc *pkt_buf;
1724 	int ret;
1725 
1726 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1727 	if (!pkt_buf)
1728 		return -ENOMEM;
1729 
1730 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
1731 	if (ret) {
1732 		dev_err(&hdev->pdev->dev,
1733 			"could not calc tx buffer size for all TCs %d\n", ret);
1734 		goto out;
1735 	}
1736 
1737 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
1738 	if (ret) {
1739 		dev_err(&hdev->pdev->dev,
1740 			"could not alloc tx buffers %d\n", ret);
1741 		goto out;
1742 	}
1743 
1744 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
1745 	if (ret) {
1746 		dev_err(&hdev->pdev->dev,
1747 			"could not calc rx priv buffer size for all TCs %d\n",
1748 			ret);
1749 		goto out;
1750 	}
1751 
1752 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
1753 	if (ret) {
1754 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1755 			ret);
1756 		goto out;
1757 	}
1758 
1759 	if (hnae3_dev_dcb_supported(hdev)) {
1760 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
1761 		if (ret) {
1762 			dev_err(&hdev->pdev->dev,
1763 				"could not configure rx private waterline %d\n",
1764 				ret);
1765 			goto out;
1766 		}
1767 
1768 		ret = hclge_common_thrd_config(hdev, pkt_buf);
1769 		if (ret) {
1770 			dev_err(&hdev->pdev->dev,
1771 				"could not configure common threshold %d\n",
1772 				ret);
1773 			goto out;
1774 		}
1775 	}
1776 
1777 	ret = hclge_common_wl_config(hdev, pkt_buf);
1778 	if (ret)
1779 		dev_err(&hdev->pdev->dev,
1780 			"could not configure common waterline %d\n", ret);
1781 
1782 out:
1783 	kfree(pkt_buf);
1784 	return ret;
1785 }
1786 
1787 static int hclge_init_roce_base_info(struct hclge_vport *vport)
1788 {
1789 	struct hnae3_handle *roce = &vport->roce;
1790 	struct hnae3_handle *nic = &vport->nic;
1791 
1792 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
1793 
1794 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1795 	    vport->back->num_msi_left == 0)
1796 		return -EINVAL;
1797 
1798 	roce->rinfo.base_vector = vport->back->roce_base_vector;
1799 
1800 	roce->rinfo.netdev = nic->kinfo.netdev;
1801 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
1802 
1803 	roce->pdev = nic->pdev;
1804 	roce->ae_algo = nic->ae_algo;
1805 	roce->numa_node_mask = nic->numa_node_mask;
1806 
1807 	return 0;
1808 }
1809 
1810 static int hclge_init_msi(struct hclge_dev *hdev)
1811 {
1812 	struct pci_dev *pdev = hdev->pdev;
1813 	int vectors;
1814 	int i;
1815 
1816 	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1817 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
1818 	if (vectors < 0) {
1819 		dev_err(&pdev->dev,
1820 			"failed(%d) to allocate MSI/MSI-X vectors\n",
1821 			vectors);
1822 		return vectors;
1823 	}
1824 	if (vectors < hdev->num_msi)
1825 		dev_warn(&hdev->pdev->dev,
1826 			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1827 			 hdev->num_msi, vectors);
1828 
1829 	hdev->num_msi = vectors;
1830 	hdev->num_msi_left = vectors;
1831 	hdev->base_msi_vector = pdev->irq;
1832 	hdev->roce_base_vector = hdev->base_msi_vector +
1833 				hdev->roce_base_msix_offset;
1834 
1835 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1836 					   sizeof(u16), GFP_KERNEL);
1837 	if (!hdev->vector_status) {
1838 		pci_free_irq_vectors(pdev);
1839 		return -ENOMEM;
1840 	}
1841 
1842 	for (i = 0; i < hdev->num_msi; i++)
1843 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1844 
1845 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1846 					sizeof(int), GFP_KERNEL);
1847 	if (!hdev->vector_irq) {
1848 		pci_free_irq_vectors(pdev);
1849 		return -ENOMEM;
1850 	}
1851 
1852 	return 0;
1853 }
1854 
1855 static u8 hclge_check_speed_dup(u8 duplex, int speed)
1856 {
1857 
1858 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
1859 		duplex = HCLGE_MAC_FULL;
1860 
1861 	return duplex;
1862 }
1863 
1864 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
1865 				      u8 duplex)
1866 {
1867 	struct hclge_config_mac_speed_dup_cmd *req;
1868 	struct hclge_desc desc;
1869 	int ret;
1870 
1871 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
1872 
1873 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
1874 
1875 	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
1876 
1877 	switch (speed) {
1878 	case HCLGE_MAC_SPEED_10M:
1879 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1880 				HCLGE_CFG_SPEED_S, 6);
1881 		break;
1882 	case HCLGE_MAC_SPEED_100M:
1883 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1884 				HCLGE_CFG_SPEED_S, 7);
1885 		break;
1886 	case HCLGE_MAC_SPEED_1G:
1887 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1888 				HCLGE_CFG_SPEED_S, 0);
1889 		break;
1890 	case HCLGE_MAC_SPEED_10G:
1891 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1892 				HCLGE_CFG_SPEED_S, 1);
1893 		break;
1894 	case HCLGE_MAC_SPEED_25G:
1895 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1896 				HCLGE_CFG_SPEED_S, 2);
1897 		break;
1898 	case HCLGE_MAC_SPEED_40G:
1899 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1900 				HCLGE_CFG_SPEED_S, 3);
1901 		break;
1902 	case HCLGE_MAC_SPEED_50G:
1903 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1904 				HCLGE_CFG_SPEED_S, 4);
1905 		break;
1906 	case HCLGE_MAC_SPEED_100G:
1907 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1908 				HCLGE_CFG_SPEED_S, 5);
1909 		break;
1910 	default:
1911 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
1912 		return -EINVAL;
1913 	}
1914 
1915 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1916 		      1);
1917 
1918 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1919 	if (ret) {
1920 		dev_err(&hdev->pdev->dev,
1921 			"mac speed/duplex config cmd failed %d.\n", ret);
1922 		return ret;
1923 	}
1924 
1925 	return 0;
1926 }
1927 
1928 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
1929 {
1930 	int ret;
1931 
1932 	duplex = hclge_check_speed_dup(duplex, speed);
1933 	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
1934 		return 0;
1935 
1936 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
1937 	if (ret)
1938 		return ret;
1939 
1940 	hdev->hw.mac.speed = speed;
1941 	hdev->hw.mac.duplex = duplex;
1942 
1943 	return 0;
1944 }
1945 
1946 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
1947 				     u8 duplex)
1948 {
1949 	struct hclge_vport *vport = hclge_get_vport(handle);
1950 	struct hclge_dev *hdev = vport->back;
1951 
1952 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
1953 }
1954 
1955 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
1956 {
1957 	struct hclge_config_auto_neg_cmd *req;
1958 	struct hclge_desc desc;
1959 	u32 flag = 0;
1960 	int ret;
1961 
1962 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
1963 
1964 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
1965 	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
1966 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
1967 
1968 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1969 	if (ret)
1970 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
1971 			ret);
1972 
1973 	return ret;
1974 }
1975 
1976 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
1977 {
1978 	struct hclge_vport *vport = hclge_get_vport(handle);
1979 	struct hclge_dev *hdev = vport->back;
1980 
1981 	return hclge_set_autoneg_en(hdev, enable);
1982 }
1983 
1984 static int hclge_get_autoneg(struct hnae3_handle *handle)
1985 {
1986 	struct hclge_vport *vport = hclge_get_vport(handle);
1987 	struct hclge_dev *hdev = vport->back;
1988 	struct phy_device *phydev = hdev->hw.mac.phydev;
1989 
1990 	if (phydev)
1991 		return phydev->autoneg;
1992 
1993 	return hdev->hw.mac.autoneg;
1994 }
1995 
1996 static int hclge_mac_init(struct hclge_dev *hdev)
1997 {
1998 	struct hclge_mac *mac = &hdev->hw.mac;
1999 	int ret;
2000 
2001 	hdev->support_sfp_query = true;
2002 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2003 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2004 					 hdev->hw.mac.duplex);
2005 	if (ret) {
2006 		dev_err(&hdev->pdev->dev,
2007 			"Config mac speed dup fail ret=%d\n", ret);
2008 		return ret;
2009 	}
2010 
2011 	mac->link = 0;
2012 
2013 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2014 	if (ret) {
2015 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2016 		return ret;
2017 	}
2018 
2019 	ret = hclge_buffer_alloc(hdev);
2020 	if (ret)
2021 		dev_err(&hdev->pdev->dev,
2022 			"allocate buffer fail, ret=%d\n", ret);
2023 
2024 	return ret;
2025 }
2026 
2027 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2028 {
2029 	if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2030 		schedule_work(&hdev->mbx_service_task);
2031 }
2032 
2033 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2034 {
2035 	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2036 		schedule_work(&hdev->rst_service_task);
2037 }
2038 
2039 static void hclge_task_schedule(struct hclge_dev *hdev)
2040 {
2041 	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2042 	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2043 	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2044 		(void)schedule_work(&hdev->service_task);
2045 }
2046 
2047 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2048 {
2049 	struct hclge_link_status_cmd *req;
2050 	struct hclge_desc desc;
2051 	int link_status;
2052 	int ret;
2053 
2054 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2055 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2056 	if (ret) {
2057 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2058 			ret);
2059 		return ret;
2060 	}
2061 
2062 	req = (struct hclge_link_status_cmd *)desc.data;
2063 	link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2064 
2065 	return !!link_status;
2066 }
2067 
2068 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2069 {
2070 	int mac_state;
2071 	int link_stat;
2072 
2073 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2074 		return 0;
2075 
2076 	mac_state = hclge_get_mac_link_status(hdev);
2077 
2078 	if (hdev->hw.mac.phydev) {
2079 		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2080 			link_stat = mac_state &
2081 				hdev->hw.mac.phydev->link;
2082 		else
2083 			link_stat = 0;
2084 
2085 	} else {
2086 		link_stat = mac_state;
2087 	}
2088 
2089 	return !!link_stat;
2090 }
2091 
2092 static void hclge_update_link_status(struct hclge_dev *hdev)
2093 {
2094 	struct hnae3_client *client = hdev->nic_client;
2095 	struct hnae3_handle *handle;
2096 	int state;
2097 	int i;
2098 
2099 	if (!client)
2100 		return;
2101 	state = hclge_get_mac_phy_link(hdev);
2102 	if (state != hdev->hw.mac.link) {
2103 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2104 			handle = &hdev->vport[i].nic;
2105 			client->ops->link_status_change(handle, state);
2106 		}
2107 		hdev->hw.mac.link = state;
2108 	}
2109 }
2110 
2111 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2112 {
2113 	struct hclge_sfp_speed_cmd *resp = NULL;
2114 	struct hclge_desc desc;
2115 	int ret;
2116 
2117 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
2118 	resp = (struct hclge_sfp_speed_cmd *)desc.data;
2119 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2120 	if (ret == -EOPNOTSUPP) {
2121 		dev_warn(&hdev->pdev->dev,
2122 			 "IMP do not support get SFP speed %d\n", ret);
2123 		return ret;
2124 	} else if (ret) {
2125 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2126 		return ret;
2127 	}
2128 
2129 	*speed = resp->sfp_speed;
2130 
2131 	return 0;
2132 }
2133 
2134 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2135 {
2136 	struct hclge_mac mac = hdev->hw.mac;
2137 	int speed;
2138 	int ret;
2139 
2140 	/* get the speed from SFP cmd when phy
2141 	 * doesn't exit.
2142 	 */
2143 	if (mac.phydev)
2144 		return 0;
2145 
2146 	/* if IMP does not support get SFP/qSFP speed, return directly */
2147 	if (!hdev->support_sfp_query)
2148 		return 0;
2149 
2150 	ret = hclge_get_sfp_speed(hdev, &speed);
2151 	if (ret == -EOPNOTSUPP) {
2152 		hdev->support_sfp_query = false;
2153 		return ret;
2154 	} else if (ret) {
2155 		return ret;
2156 	}
2157 
2158 	if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2159 		return 0; /* do nothing if no SFP */
2160 
2161 	/* must config full duplex for SFP */
2162 	return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2163 }
2164 
2165 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2166 {
2167 	struct hclge_vport *vport = hclge_get_vport(handle);
2168 	struct hclge_dev *hdev = vport->back;
2169 
2170 	return hclge_update_speed_duplex(hdev);
2171 }
2172 
2173 static int hclge_get_status(struct hnae3_handle *handle)
2174 {
2175 	struct hclge_vport *vport = hclge_get_vport(handle);
2176 	struct hclge_dev *hdev = vport->back;
2177 
2178 	hclge_update_link_status(hdev);
2179 
2180 	return hdev->hw.mac.link;
2181 }
2182 
2183 static void hclge_service_timer(struct timer_list *t)
2184 {
2185 	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2186 
2187 	mod_timer(&hdev->service_timer, jiffies + HZ);
2188 	hdev->hw_stats.stats_timer++;
2189 	hclge_task_schedule(hdev);
2190 }
2191 
2192 static void hclge_service_complete(struct hclge_dev *hdev)
2193 {
2194 	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2195 
2196 	/* Flush memory before next watchdog */
2197 	smp_mb__before_atomic();
2198 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2199 }
2200 
2201 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2202 {
2203 	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2204 
2205 	/* fetch the events from their corresponding regs */
2206 	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2207 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2208 	msix_src_reg = hclge_read_dev(&hdev->hw,
2209 				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2210 
2211 	/* Assumption: If by any chance reset and mailbox events are reported
2212 	 * together then we will only process reset event in this go and will
2213 	 * defer the processing of the mailbox events. Since, we would have not
2214 	 * cleared RX CMDQ event this time we would receive again another
2215 	 * interrupt from H/W just for the mailbox.
2216 	 */
2217 
2218 	/* check for vector0 reset event sources */
2219 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2220 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2221 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2222 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2223 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2224 		return HCLGE_VECTOR0_EVENT_RST;
2225 	}
2226 
2227 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2228 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2229 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2230 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2231 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2232 		return HCLGE_VECTOR0_EVENT_RST;
2233 	}
2234 
2235 	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2236 		dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2237 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2238 		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2239 		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2240 		return HCLGE_VECTOR0_EVENT_RST;
2241 	}
2242 
2243 	/* check for vector0 msix event source */
2244 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
2245 		return HCLGE_VECTOR0_EVENT_ERR;
2246 
2247 	/* check for vector0 mailbox(=CMDQ RX) event source */
2248 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2249 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2250 		*clearval = cmdq_src_reg;
2251 		return HCLGE_VECTOR0_EVENT_MBX;
2252 	}
2253 
2254 	return HCLGE_VECTOR0_EVENT_OTHER;
2255 }
2256 
2257 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2258 				    u32 regclr)
2259 {
2260 	switch (event_type) {
2261 	case HCLGE_VECTOR0_EVENT_RST:
2262 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2263 		break;
2264 	case HCLGE_VECTOR0_EVENT_MBX:
2265 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2266 		break;
2267 	default:
2268 		break;
2269 	}
2270 }
2271 
2272 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2273 {
2274 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2275 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2276 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2277 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2278 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2279 }
2280 
2281 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2282 {
2283 	writel(enable ? 1 : 0, vector->addr);
2284 }
2285 
2286 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2287 {
2288 	struct hclge_dev *hdev = data;
2289 	u32 event_cause;
2290 	u32 clearval;
2291 
2292 	hclge_enable_vector(&hdev->misc_vector, false);
2293 	event_cause = hclge_check_event_cause(hdev, &clearval);
2294 
2295 	/* vector 0 interrupt is shared with reset and mailbox source events.*/
2296 	switch (event_cause) {
2297 	case HCLGE_VECTOR0_EVENT_ERR:
2298 		/* we do not know what type of reset is required now. This could
2299 		 * only be decided after we fetch the type of errors which
2300 		 * caused this event. Therefore, we will do below for now:
2301 		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2302 		 *    have defered type of reset to be used.
2303 		 * 2. Schedule the reset serivce task.
2304 		 * 3. When service task receives  HNAE3_UNKNOWN_RESET type it
2305 		 *    will fetch the correct type of reset.  This would be done
2306 		 *    by first decoding the types of errors.
2307 		 */
2308 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2309 		/* fall through */
2310 	case HCLGE_VECTOR0_EVENT_RST:
2311 		hclge_reset_task_schedule(hdev);
2312 		break;
2313 	case HCLGE_VECTOR0_EVENT_MBX:
2314 		/* If we are here then,
2315 		 * 1. Either we are not handling any mbx task and we are not
2316 		 *    scheduled as well
2317 		 *                        OR
2318 		 * 2. We could be handling a mbx task but nothing more is
2319 		 *    scheduled.
2320 		 * In both cases, we should schedule mbx task as there are more
2321 		 * mbx messages reported by this interrupt.
2322 		 */
2323 		hclge_mbx_task_schedule(hdev);
2324 		break;
2325 	default:
2326 		dev_warn(&hdev->pdev->dev,
2327 			 "received unknown or unhandled event of vector0\n");
2328 		break;
2329 	}
2330 
2331 	/* clear the source of interrupt if it is not cause by reset */
2332 	if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2333 		hclge_clear_event_cause(hdev, event_cause, clearval);
2334 		hclge_enable_vector(&hdev->misc_vector, true);
2335 	}
2336 
2337 	return IRQ_HANDLED;
2338 }
2339 
2340 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2341 {
2342 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2343 		dev_warn(&hdev->pdev->dev,
2344 			 "vector(vector_id %d) has been freed.\n", vector_id);
2345 		return;
2346 	}
2347 
2348 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2349 	hdev->num_msi_left += 1;
2350 	hdev->num_msi_used -= 1;
2351 }
2352 
2353 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2354 {
2355 	struct hclge_misc_vector *vector = &hdev->misc_vector;
2356 
2357 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2358 
2359 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2360 	hdev->vector_status[0] = 0;
2361 
2362 	hdev->num_msi_left -= 1;
2363 	hdev->num_msi_used += 1;
2364 }
2365 
2366 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2367 {
2368 	int ret;
2369 
2370 	hclge_get_misc_vector(hdev);
2371 
2372 	/* this would be explicitly freed in the end */
2373 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2374 			  0, "hclge_misc", hdev);
2375 	if (ret) {
2376 		hclge_free_vector(hdev, 0);
2377 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2378 			hdev->misc_vector.vector_irq);
2379 	}
2380 
2381 	return ret;
2382 }
2383 
2384 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2385 {
2386 	free_irq(hdev->misc_vector.vector_irq, hdev);
2387 	hclge_free_vector(hdev, 0);
2388 }
2389 
2390 static int hclge_notify_client(struct hclge_dev *hdev,
2391 			       enum hnae3_reset_notify_type type)
2392 {
2393 	struct hnae3_client *client = hdev->nic_client;
2394 	u16 i;
2395 
2396 	if (!client->ops->reset_notify)
2397 		return -EOPNOTSUPP;
2398 
2399 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2400 		struct hnae3_handle *handle = &hdev->vport[i].nic;
2401 		int ret;
2402 
2403 		ret = client->ops->reset_notify(handle, type);
2404 		if (ret) {
2405 			dev_err(&hdev->pdev->dev,
2406 				"notify nic client failed %d(%d)\n", type, ret);
2407 			return ret;
2408 		}
2409 	}
2410 
2411 	return 0;
2412 }
2413 
2414 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2415 				    enum hnae3_reset_notify_type type)
2416 {
2417 	struct hnae3_client *client = hdev->roce_client;
2418 	int ret = 0;
2419 	u16 i;
2420 
2421 	if (!client)
2422 		return 0;
2423 
2424 	if (!client->ops->reset_notify)
2425 		return -EOPNOTSUPP;
2426 
2427 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2428 		struct hnae3_handle *handle = &hdev->vport[i].roce;
2429 
2430 		ret = client->ops->reset_notify(handle, type);
2431 		if (ret) {
2432 			dev_err(&hdev->pdev->dev,
2433 				"notify roce client failed %d(%d)",
2434 				type, ret);
2435 			return ret;
2436 		}
2437 	}
2438 
2439 	return ret;
2440 }
2441 
2442 static int hclge_reset_wait(struct hclge_dev *hdev)
2443 {
2444 #define HCLGE_RESET_WATI_MS	100
2445 #define HCLGE_RESET_WAIT_CNT	200
2446 	u32 val, reg, reg_bit;
2447 	u32 cnt = 0;
2448 
2449 	switch (hdev->reset_type) {
2450 	case HNAE3_IMP_RESET:
2451 		reg = HCLGE_GLOBAL_RESET_REG;
2452 		reg_bit = HCLGE_IMP_RESET_BIT;
2453 		break;
2454 	case HNAE3_GLOBAL_RESET:
2455 		reg = HCLGE_GLOBAL_RESET_REG;
2456 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
2457 		break;
2458 	case HNAE3_CORE_RESET:
2459 		reg = HCLGE_GLOBAL_RESET_REG;
2460 		reg_bit = HCLGE_CORE_RESET_BIT;
2461 		break;
2462 	case HNAE3_FUNC_RESET:
2463 		reg = HCLGE_FUN_RST_ING;
2464 		reg_bit = HCLGE_FUN_RST_ING_B;
2465 		break;
2466 	case HNAE3_FLR_RESET:
2467 		break;
2468 	default:
2469 		dev_err(&hdev->pdev->dev,
2470 			"Wait for unsupported reset type: %d\n",
2471 			hdev->reset_type);
2472 		return -EINVAL;
2473 	}
2474 
2475 	if (hdev->reset_type == HNAE3_FLR_RESET) {
2476 		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2477 		       cnt++ < HCLGE_RESET_WAIT_CNT)
2478 			msleep(HCLGE_RESET_WATI_MS);
2479 
2480 		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2481 			dev_err(&hdev->pdev->dev,
2482 				"flr wait timeout: %d\n", cnt);
2483 			return -EBUSY;
2484 		}
2485 
2486 		return 0;
2487 	}
2488 
2489 	val = hclge_read_dev(&hdev->hw, reg);
2490 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2491 		msleep(HCLGE_RESET_WATI_MS);
2492 		val = hclge_read_dev(&hdev->hw, reg);
2493 		cnt++;
2494 	}
2495 
2496 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
2497 		dev_warn(&hdev->pdev->dev,
2498 			 "Wait for reset timeout: %d\n", hdev->reset_type);
2499 		return -EBUSY;
2500 	}
2501 
2502 	return 0;
2503 }
2504 
2505 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2506 {
2507 	struct hclge_vf_rst_cmd *req;
2508 	struct hclge_desc desc;
2509 
2510 	req = (struct hclge_vf_rst_cmd *)desc.data;
2511 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2512 	req->dest_vfid = func_id;
2513 
2514 	if (reset)
2515 		req->vf_rst = 0x1;
2516 
2517 	return hclge_cmd_send(&hdev->hw, &desc, 1);
2518 }
2519 
2520 int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2521 {
2522 	int i;
2523 
2524 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2525 		struct hclge_vport *vport = &hdev->vport[i];
2526 		int ret;
2527 
2528 		/* Send cmd to set/clear VF's FUNC_RST_ING */
2529 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2530 		if (ret) {
2531 			dev_err(&hdev->pdev->dev,
2532 				"set vf(%d) rst failed %d!\n",
2533 				vport->vport_id, ret);
2534 			return ret;
2535 		}
2536 
2537 		if (!reset)
2538 			continue;
2539 
2540 		/* Inform VF to process the reset.
2541 		 * hclge_inform_reset_assert_to_vf may fail if VF
2542 		 * driver is not loaded.
2543 		 */
2544 		ret = hclge_inform_reset_assert_to_vf(vport);
2545 		if (ret)
2546 			dev_warn(&hdev->pdev->dev,
2547 				 "inform reset to vf(%d) failed %d!\n",
2548 				 vport->vport_id, ret);
2549 	}
2550 
2551 	return 0;
2552 }
2553 
2554 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2555 {
2556 	struct hclge_desc desc;
2557 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2558 	int ret;
2559 
2560 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2561 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2562 	req->fun_reset_vfid = func_id;
2563 
2564 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2565 	if (ret)
2566 		dev_err(&hdev->pdev->dev,
2567 			"send function reset cmd fail, status =%d\n", ret);
2568 
2569 	return ret;
2570 }
2571 
2572 static void hclge_do_reset(struct hclge_dev *hdev)
2573 {
2574 	struct pci_dev *pdev = hdev->pdev;
2575 	u32 val;
2576 
2577 	switch (hdev->reset_type) {
2578 	case HNAE3_GLOBAL_RESET:
2579 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2580 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2581 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2582 		dev_info(&pdev->dev, "Global Reset requested\n");
2583 		break;
2584 	case HNAE3_CORE_RESET:
2585 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2586 		hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2587 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2588 		dev_info(&pdev->dev, "Core Reset requested\n");
2589 		break;
2590 	case HNAE3_FUNC_RESET:
2591 		dev_info(&pdev->dev, "PF Reset requested\n");
2592 		/* schedule again to check later */
2593 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2594 		hclge_reset_task_schedule(hdev);
2595 		break;
2596 	case HNAE3_FLR_RESET:
2597 		dev_info(&pdev->dev, "FLR requested\n");
2598 		/* schedule again to check later */
2599 		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2600 		hclge_reset_task_schedule(hdev);
2601 		break;
2602 	default:
2603 		dev_warn(&pdev->dev,
2604 			 "Unsupported reset type: %d\n", hdev->reset_type);
2605 		break;
2606 	}
2607 }
2608 
2609 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2610 						   unsigned long *addr)
2611 {
2612 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2613 
2614 	/* first, resolve any unknown reset type to the known type(s) */
2615 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
2616 		/* we will intentionally ignore any errors from this function
2617 		 *  as we will end up in *some* reset request in any case
2618 		 */
2619 		hclge_handle_hw_msix_error(hdev, addr);
2620 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
2621 		/* We defered the clearing of the error event which caused
2622 		 * interrupt since it was not posssible to do that in
2623 		 * interrupt context (and this is the reason we introduced
2624 		 * new UNKNOWN reset type). Now, the errors have been
2625 		 * handled and cleared in hardware we can safely enable
2626 		 * interrupts. This is an exception to the norm.
2627 		 */
2628 		hclge_enable_vector(&hdev->misc_vector, true);
2629 	}
2630 
2631 	/* return the highest priority reset level amongst all */
2632 	if (test_bit(HNAE3_IMP_RESET, addr)) {
2633 		rst_level = HNAE3_IMP_RESET;
2634 		clear_bit(HNAE3_IMP_RESET, addr);
2635 		clear_bit(HNAE3_GLOBAL_RESET, addr);
2636 		clear_bit(HNAE3_CORE_RESET, addr);
2637 		clear_bit(HNAE3_FUNC_RESET, addr);
2638 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
2639 		rst_level = HNAE3_GLOBAL_RESET;
2640 		clear_bit(HNAE3_GLOBAL_RESET, addr);
2641 		clear_bit(HNAE3_CORE_RESET, addr);
2642 		clear_bit(HNAE3_FUNC_RESET, addr);
2643 	} else if (test_bit(HNAE3_CORE_RESET, addr)) {
2644 		rst_level = HNAE3_CORE_RESET;
2645 		clear_bit(HNAE3_CORE_RESET, addr);
2646 		clear_bit(HNAE3_FUNC_RESET, addr);
2647 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
2648 		rst_level = HNAE3_FUNC_RESET;
2649 		clear_bit(HNAE3_FUNC_RESET, addr);
2650 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
2651 		rst_level = HNAE3_FLR_RESET;
2652 		clear_bit(HNAE3_FLR_RESET, addr);
2653 	}
2654 
2655 	return rst_level;
2656 }
2657 
2658 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2659 {
2660 	u32 clearval = 0;
2661 
2662 	switch (hdev->reset_type) {
2663 	case HNAE3_IMP_RESET:
2664 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2665 		break;
2666 	case HNAE3_GLOBAL_RESET:
2667 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2668 		break;
2669 	case HNAE3_CORE_RESET:
2670 		clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2671 		break;
2672 	default:
2673 		break;
2674 	}
2675 
2676 	if (!clearval)
2677 		return;
2678 
2679 	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2680 	hclge_enable_vector(&hdev->misc_vector, true);
2681 }
2682 
2683 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
2684 {
2685 	int ret = 0;
2686 
2687 	switch (hdev->reset_type) {
2688 	case HNAE3_FUNC_RESET:
2689 		/* fall through */
2690 	case HNAE3_FLR_RESET:
2691 		ret = hclge_set_all_vf_rst(hdev, true);
2692 		break;
2693 	default:
2694 		break;
2695 	}
2696 
2697 	return ret;
2698 }
2699 
2700 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
2701 {
2702 	u32 reg_val;
2703 	int ret = 0;
2704 
2705 	switch (hdev->reset_type) {
2706 	case HNAE3_FUNC_RESET:
2707 		/* There is no mechanism for PF to know if VF has stopped IO
2708 		 * for now, just wait 100 ms for VF to stop IO
2709 		 */
2710 		msleep(100);
2711 		ret = hclge_func_reset_cmd(hdev, 0);
2712 		if (ret) {
2713 			dev_err(&hdev->pdev->dev,
2714 				"asserting function reset fail %d!\n", ret);
2715 			return ret;
2716 		}
2717 
2718 		/* After performaning pf reset, it is not necessary to do the
2719 		 * mailbox handling or send any command to firmware, because
2720 		 * any mailbox handling or command to firmware is only valid
2721 		 * after hclge_cmd_init is called.
2722 		 */
2723 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2724 		break;
2725 	case HNAE3_FLR_RESET:
2726 		/* There is no mechanism for PF to know if VF has stopped IO
2727 		 * for now, just wait 100 ms for VF to stop IO
2728 		 */
2729 		msleep(100);
2730 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2731 		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
2732 		break;
2733 	case HNAE3_IMP_RESET:
2734 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
2735 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
2736 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
2737 		break;
2738 	default:
2739 		break;
2740 	}
2741 
2742 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
2743 
2744 	return ret;
2745 }
2746 
2747 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
2748 {
2749 #define MAX_RESET_FAIL_CNT 5
2750 #define RESET_UPGRADE_DELAY_SEC 10
2751 
2752 	if (hdev->reset_pending) {
2753 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
2754 			 hdev->reset_pending);
2755 		return true;
2756 	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
2757 		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
2758 		    BIT(HCLGE_IMP_RESET_BIT))) {
2759 		dev_info(&hdev->pdev->dev,
2760 			 "reset failed because IMP Reset is pending\n");
2761 		hclge_clear_reset_cause(hdev);
2762 		return false;
2763 	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
2764 		hdev->reset_fail_cnt++;
2765 		if (is_timeout) {
2766 			set_bit(hdev->reset_type, &hdev->reset_pending);
2767 			dev_info(&hdev->pdev->dev,
2768 				 "re-schedule to wait for hw reset done\n");
2769 			return true;
2770 		}
2771 
2772 		dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
2773 		hclge_clear_reset_cause(hdev);
2774 		mod_timer(&hdev->reset_timer,
2775 			  jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
2776 
2777 		return false;
2778 	}
2779 
2780 	hclge_clear_reset_cause(hdev);
2781 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
2782 	return false;
2783 }
2784 
2785 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
2786 {
2787 	int ret = 0;
2788 
2789 	switch (hdev->reset_type) {
2790 	case HNAE3_FUNC_RESET:
2791 		/* fall through */
2792 	case HNAE3_FLR_RESET:
2793 		ret = hclge_set_all_vf_rst(hdev, false);
2794 		break;
2795 	default:
2796 		break;
2797 	}
2798 
2799 	return ret;
2800 }
2801 
2802 static void hclge_reset(struct hclge_dev *hdev)
2803 {
2804 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2805 	bool is_timeout = false;
2806 	int ret;
2807 
2808 	/* Initialize ae_dev reset status as well, in case enet layer wants to
2809 	 * know if device is undergoing reset
2810 	 */
2811 	ae_dev->reset_type = hdev->reset_type;
2812 	hdev->reset_count++;
2813 	hdev->last_reset_time = jiffies;
2814 	/* perform reset of the stack & ae device for a client */
2815 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
2816 	if (ret)
2817 		goto err_reset;
2818 
2819 	ret = hclge_reset_prepare_down(hdev);
2820 	if (ret)
2821 		goto err_reset;
2822 
2823 	rtnl_lock();
2824 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2825 	if (ret)
2826 		goto err_reset_lock;
2827 
2828 	rtnl_unlock();
2829 
2830 	ret = hclge_reset_prepare_wait(hdev);
2831 	if (ret)
2832 		goto err_reset;
2833 
2834 	if (hclge_reset_wait(hdev)) {
2835 		is_timeout = true;
2836 		goto err_reset;
2837 	}
2838 
2839 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
2840 	if (ret)
2841 		goto err_reset;
2842 
2843 	rtnl_lock();
2844 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2845 	if (ret)
2846 		goto err_reset_lock;
2847 
2848 	ret = hclge_reset_ae_dev(hdev->ae_dev);
2849 	if (ret)
2850 		goto err_reset_lock;
2851 
2852 	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
2853 	if (ret)
2854 		goto err_reset_lock;
2855 
2856 	hclge_clear_reset_cause(hdev);
2857 
2858 	ret = hclge_reset_prepare_up(hdev);
2859 	if (ret)
2860 		goto err_reset_lock;
2861 
2862 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2863 	if (ret)
2864 		goto err_reset_lock;
2865 
2866 	rtnl_unlock();
2867 
2868 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
2869 	if (ret)
2870 		goto err_reset;
2871 
2872 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
2873 	if (ret)
2874 		goto err_reset;
2875 
2876 	return;
2877 
2878 err_reset_lock:
2879 	rtnl_unlock();
2880 err_reset:
2881 	if (hclge_reset_err_handle(hdev, is_timeout))
2882 		hclge_reset_task_schedule(hdev);
2883 }
2884 
2885 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
2886 {
2887 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
2888 	struct hclge_dev *hdev = ae_dev->priv;
2889 
2890 	/* We might end up getting called broadly because of 2 below cases:
2891 	 * 1. Recoverable error was conveyed through APEI and only way to bring
2892 	 *    normalcy is to reset.
2893 	 * 2. A new reset request from the stack due to timeout
2894 	 *
2895 	 * For the first case,error event might not have ae handle available.
2896 	 * check if this is a new reset request and we are not here just because
2897 	 * last reset attempt did not succeed and watchdog hit us again. We will
2898 	 * know this if last reset request did not occur very recently (watchdog
2899 	 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz)
2900 	 * In case of new request we reset the "reset level" to PF reset.
2901 	 * And if it is a repeat reset request of the most recent one then we
2902 	 * want to make sure we throttle the reset request. Therefore, we will
2903 	 * not allow it again before 3*HZ times.
2904 	 */
2905 	if (!handle)
2906 		handle = &hdev->vport[0].nic;
2907 
2908 	if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
2909 		return;
2910 	else if (hdev->default_reset_request)
2911 		hdev->reset_level =
2912 			hclge_get_reset_level(hdev,
2913 					      &hdev->default_reset_request);
2914 	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
2915 		hdev->reset_level = HNAE3_FUNC_RESET;
2916 
2917 	dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
2918 		 hdev->reset_level);
2919 
2920 	/* request reset & schedule reset task */
2921 	set_bit(hdev->reset_level, &hdev->reset_request);
2922 	hclge_reset_task_schedule(hdev);
2923 
2924 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
2925 		hdev->reset_level++;
2926 }
2927 
2928 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
2929 					enum hnae3_reset_type rst_type)
2930 {
2931 	struct hclge_dev *hdev = ae_dev->priv;
2932 
2933 	set_bit(rst_type, &hdev->default_reset_request);
2934 }
2935 
2936 static void hclge_reset_timer(struct timer_list *t)
2937 {
2938 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
2939 
2940 	dev_info(&hdev->pdev->dev,
2941 		 "triggering global reset in reset timer\n");
2942 	set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
2943 	hclge_reset_event(hdev->pdev, NULL);
2944 }
2945 
2946 static void hclge_reset_subtask(struct hclge_dev *hdev)
2947 {
2948 	/* check if there is any ongoing reset in the hardware. This status can
2949 	 * be checked from reset_pending. If there is then, we need to wait for
2950 	 * hardware to complete reset.
2951 	 *    a. If we are able to figure out in reasonable time that hardware
2952 	 *       has fully resetted then, we can proceed with driver, client
2953 	 *       reset.
2954 	 *    b. else, we can come back later to check this status so re-sched
2955 	 *       now.
2956 	 */
2957 	hdev->last_reset_time = jiffies;
2958 	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
2959 	if (hdev->reset_type != HNAE3_NONE_RESET)
2960 		hclge_reset(hdev);
2961 
2962 	/* check if we got any *new* reset requests to be honored */
2963 	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
2964 	if (hdev->reset_type != HNAE3_NONE_RESET)
2965 		hclge_do_reset(hdev);
2966 
2967 	hdev->reset_type = HNAE3_NONE_RESET;
2968 }
2969 
2970 static void hclge_reset_service_task(struct work_struct *work)
2971 {
2972 	struct hclge_dev *hdev =
2973 		container_of(work, struct hclge_dev, rst_service_task);
2974 
2975 	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
2976 		return;
2977 
2978 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
2979 
2980 	hclge_reset_subtask(hdev);
2981 
2982 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
2983 }
2984 
2985 static void hclge_mailbox_service_task(struct work_struct *work)
2986 {
2987 	struct hclge_dev *hdev =
2988 		container_of(work, struct hclge_dev, mbx_service_task);
2989 
2990 	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
2991 		return;
2992 
2993 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
2994 
2995 	hclge_mbx_handler(hdev);
2996 
2997 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
2998 }
2999 
3000 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3001 {
3002 	int i;
3003 
3004 	/* start from vport 1 for PF is always alive */
3005 	for (i = 1; i < hdev->num_alloc_vport; i++) {
3006 		struct hclge_vport *vport = &hdev->vport[i];
3007 
3008 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3009 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3010 
3011 		/* If vf is not alive, set to default value */
3012 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3013 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3014 	}
3015 }
3016 
3017 static void hclge_service_task(struct work_struct *work)
3018 {
3019 	struct hclge_dev *hdev =
3020 		container_of(work, struct hclge_dev, service_task);
3021 
3022 	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3023 		hclge_update_stats_for_all(hdev);
3024 		hdev->hw_stats.stats_timer = 0;
3025 	}
3026 
3027 	hclge_update_speed_duplex(hdev);
3028 	hclge_update_link_status(hdev);
3029 	hclge_update_vport_alive(hdev);
3030 	hclge_service_complete(hdev);
3031 }
3032 
3033 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3034 {
3035 	/* VF handle has no client */
3036 	if (!handle->client)
3037 		return container_of(handle, struct hclge_vport, nic);
3038 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
3039 		return container_of(handle, struct hclge_vport, roce);
3040 	else
3041 		return container_of(handle, struct hclge_vport, nic);
3042 }
3043 
3044 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3045 			    struct hnae3_vector_info *vector_info)
3046 {
3047 	struct hclge_vport *vport = hclge_get_vport(handle);
3048 	struct hnae3_vector_info *vector = vector_info;
3049 	struct hclge_dev *hdev = vport->back;
3050 	int alloc = 0;
3051 	int i, j;
3052 
3053 	vector_num = min(hdev->num_msi_left, vector_num);
3054 
3055 	for (j = 0; j < vector_num; j++) {
3056 		for (i = 1; i < hdev->num_msi; i++) {
3057 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3058 				vector->vector = pci_irq_vector(hdev->pdev, i);
3059 				vector->io_addr = hdev->hw.io_base +
3060 					HCLGE_VECTOR_REG_BASE +
3061 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
3062 					vport->vport_id *
3063 					HCLGE_VECTOR_VF_OFFSET;
3064 				hdev->vector_status[i] = vport->vport_id;
3065 				hdev->vector_irq[i] = vector->vector;
3066 
3067 				vector++;
3068 				alloc++;
3069 
3070 				break;
3071 			}
3072 		}
3073 	}
3074 	hdev->num_msi_left -= alloc;
3075 	hdev->num_msi_used += alloc;
3076 
3077 	return alloc;
3078 }
3079 
3080 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3081 {
3082 	int i;
3083 
3084 	for (i = 0; i < hdev->num_msi; i++)
3085 		if (vector == hdev->vector_irq[i])
3086 			return i;
3087 
3088 	return -EINVAL;
3089 }
3090 
3091 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3092 {
3093 	struct hclge_vport *vport = hclge_get_vport(handle);
3094 	struct hclge_dev *hdev = vport->back;
3095 	int vector_id;
3096 
3097 	vector_id = hclge_get_vector_index(hdev, vector);
3098 	if (vector_id < 0) {
3099 		dev_err(&hdev->pdev->dev,
3100 			"Get vector index fail. vector_id =%d\n", vector_id);
3101 		return vector_id;
3102 	}
3103 
3104 	hclge_free_vector(hdev, vector_id);
3105 
3106 	return 0;
3107 }
3108 
3109 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3110 {
3111 	return HCLGE_RSS_KEY_SIZE;
3112 }
3113 
3114 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3115 {
3116 	return HCLGE_RSS_IND_TBL_SIZE;
3117 }
3118 
3119 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3120 				  const u8 hfunc, const u8 *key)
3121 {
3122 	struct hclge_rss_config_cmd *req;
3123 	struct hclge_desc desc;
3124 	int key_offset;
3125 	int key_size;
3126 	int ret;
3127 
3128 	req = (struct hclge_rss_config_cmd *)desc.data;
3129 
3130 	for (key_offset = 0; key_offset < 3; key_offset++) {
3131 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3132 					   false);
3133 
3134 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3135 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3136 
3137 		if (key_offset == 2)
3138 			key_size =
3139 			HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3140 		else
3141 			key_size = HCLGE_RSS_HASH_KEY_NUM;
3142 
3143 		memcpy(req->hash_key,
3144 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3145 
3146 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3147 		if (ret) {
3148 			dev_err(&hdev->pdev->dev,
3149 				"Configure RSS config fail, status = %d\n",
3150 				ret);
3151 			return ret;
3152 		}
3153 	}
3154 	return 0;
3155 }
3156 
3157 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3158 {
3159 	struct hclge_rss_indirection_table_cmd *req;
3160 	struct hclge_desc desc;
3161 	int i, j;
3162 	int ret;
3163 
3164 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3165 
3166 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3167 		hclge_cmd_setup_basic_desc
3168 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3169 
3170 		req->start_table_index =
3171 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3172 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3173 
3174 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3175 			req->rss_result[j] =
3176 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3177 
3178 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3179 		if (ret) {
3180 			dev_err(&hdev->pdev->dev,
3181 				"Configure rss indir table fail,status = %d\n",
3182 				ret);
3183 			return ret;
3184 		}
3185 	}
3186 	return 0;
3187 }
3188 
3189 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3190 				 u16 *tc_size, u16 *tc_offset)
3191 {
3192 	struct hclge_rss_tc_mode_cmd *req;
3193 	struct hclge_desc desc;
3194 	int ret;
3195 	int i;
3196 
3197 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3198 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3199 
3200 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3201 		u16 mode = 0;
3202 
3203 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3204 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3205 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3206 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3207 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3208 
3209 		req->rss_tc_mode[i] = cpu_to_le16(mode);
3210 	}
3211 
3212 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3213 	if (ret)
3214 		dev_err(&hdev->pdev->dev,
3215 			"Configure rss tc mode fail, status = %d\n", ret);
3216 
3217 	return ret;
3218 }
3219 
3220 static void hclge_get_rss_type(struct hclge_vport *vport)
3221 {
3222 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
3223 	    vport->rss_tuple_sets.ipv4_udp_en ||
3224 	    vport->rss_tuple_sets.ipv4_sctp_en ||
3225 	    vport->rss_tuple_sets.ipv6_tcp_en ||
3226 	    vport->rss_tuple_sets.ipv6_udp_en ||
3227 	    vport->rss_tuple_sets.ipv6_sctp_en)
3228 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3229 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3230 		 vport->rss_tuple_sets.ipv6_fragment_en)
3231 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3232 	else
3233 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3234 }
3235 
3236 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3237 {
3238 	struct hclge_rss_input_tuple_cmd *req;
3239 	struct hclge_desc desc;
3240 	int ret;
3241 
3242 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3243 
3244 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3245 
3246 	/* Get the tuple cfg from pf */
3247 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3248 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3249 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3250 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3251 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3252 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3253 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3254 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3255 	hclge_get_rss_type(&hdev->vport[0]);
3256 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3257 	if (ret)
3258 		dev_err(&hdev->pdev->dev,
3259 			"Configure rss input fail, status = %d\n", ret);
3260 	return ret;
3261 }
3262 
3263 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3264 			 u8 *key, u8 *hfunc)
3265 {
3266 	struct hclge_vport *vport = hclge_get_vport(handle);
3267 	int i;
3268 
3269 	/* Get hash algorithm */
3270 	if (hfunc) {
3271 		switch (vport->rss_algo) {
3272 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3273 			*hfunc = ETH_RSS_HASH_TOP;
3274 			break;
3275 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
3276 			*hfunc = ETH_RSS_HASH_XOR;
3277 			break;
3278 		default:
3279 			*hfunc = ETH_RSS_HASH_UNKNOWN;
3280 			break;
3281 		}
3282 	}
3283 
3284 	/* Get the RSS Key required by the user */
3285 	if (key)
3286 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3287 
3288 	/* Get indirect table */
3289 	if (indir)
3290 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3291 			indir[i] =  vport->rss_indirection_tbl[i];
3292 
3293 	return 0;
3294 }
3295 
3296 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3297 			 const  u8 *key, const  u8 hfunc)
3298 {
3299 	struct hclge_vport *vport = hclge_get_vport(handle);
3300 	struct hclge_dev *hdev = vport->back;
3301 	u8 hash_algo;
3302 	int ret, i;
3303 
3304 	/* Set the RSS Hash Key if specififed by the user */
3305 	if (key) {
3306 		switch (hfunc) {
3307 		case ETH_RSS_HASH_TOP:
3308 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3309 			break;
3310 		case ETH_RSS_HASH_XOR:
3311 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3312 			break;
3313 		case ETH_RSS_HASH_NO_CHANGE:
3314 			hash_algo = vport->rss_algo;
3315 			break;
3316 		default:
3317 			return -EINVAL;
3318 		}
3319 
3320 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3321 		if (ret)
3322 			return ret;
3323 
3324 		/* Update the shadow RSS key with user specified qids */
3325 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3326 		vport->rss_algo = hash_algo;
3327 	}
3328 
3329 	/* Update the shadow RSS table with user specified qids */
3330 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3331 		vport->rss_indirection_tbl[i] = indir[i];
3332 
3333 	/* Update the hardware */
3334 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3335 }
3336 
3337 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3338 {
3339 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3340 
3341 	if (nfc->data & RXH_L4_B_2_3)
3342 		hash_sets |= HCLGE_D_PORT_BIT;
3343 	else
3344 		hash_sets &= ~HCLGE_D_PORT_BIT;
3345 
3346 	if (nfc->data & RXH_IP_SRC)
3347 		hash_sets |= HCLGE_S_IP_BIT;
3348 	else
3349 		hash_sets &= ~HCLGE_S_IP_BIT;
3350 
3351 	if (nfc->data & RXH_IP_DST)
3352 		hash_sets |= HCLGE_D_IP_BIT;
3353 	else
3354 		hash_sets &= ~HCLGE_D_IP_BIT;
3355 
3356 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3357 		hash_sets |= HCLGE_V_TAG_BIT;
3358 
3359 	return hash_sets;
3360 }
3361 
3362 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3363 			       struct ethtool_rxnfc *nfc)
3364 {
3365 	struct hclge_vport *vport = hclge_get_vport(handle);
3366 	struct hclge_dev *hdev = vport->back;
3367 	struct hclge_rss_input_tuple_cmd *req;
3368 	struct hclge_desc desc;
3369 	u8 tuple_sets;
3370 	int ret;
3371 
3372 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3373 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
3374 		return -EINVAL;
3375 
3376 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3377 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3378 
3379 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3380 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3381 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3382 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3383 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3384 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3385 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3386 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3387 
3388 	tuple_sets = hclge_get_rss_hash_bits(nfc);
3389 	switch (nfc->flow_type) {
3390 	case TCP_V4_FLOW:
3391 		req->ipv4_tcp_en = tuple_sets;
3392 		break;
3393 	case TCP_V6_FLOW:
3394 		req->ipv6_tcp_en = tuple_sets;
3395 		break;
3396 	case UDP_V4_FLOW:
3397 		req->ipv4_udp_en = tuple_sets;
3398 		break;
3399 	case UDP_V6_FLOW:
3400 		req->ipv6_udp_en = tuple_sets;
3401 		break;
3402 	case SCTP_V4_FLOW:
3403 		req->ipv4_sctp_en = tuple_sets;
3404 		break;
3405 	case SCTP_V6_FLOW:
3406 		if ((nfc->data & RXH_L4_B_0_1) ||
3407 		    (nfc->data & RXH_L4_B_2_3))
3408 			return -EINVAL;
3409 
3410 		req->ipv6_sctp_en = tuple_sets;
3411 		break;
3412 	case IPV4_FLOW:
3413 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3414 		break;
3415 	case IPV6_FLOW:
3416 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3417 		break;
3418 	default:
3419 		return -EINVAL;
3420 	}
3421 
3422 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3423 	if (ret) {
3424 		dev_err(&hdev->pdev->dev,
3425 			"Set rss tuple fail, status = %d\n", ret);
3426 		return ret;
3427 	}
3428 
3429 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3430 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3431 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3432 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3433 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3434 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3435 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3436 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3437 	hclge_get_rss_type(vport);
3438 	return 0;
3439 }
3440 
3441 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3442 			       struct ethtool_rxnfc *nfc)
3443 {
3444 	struct hclge_vport *vport = hclge_get_vport(handle);
3445 	u8 tuple_sets;
3446 
3447 	nfc->data = 0;
3448 
3449 	switch (nfc->flow_type) {
3450 	case TCP_V4_FLOW:
3451 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3452 		break;
3453 	case UDP_V4_FLOW:
3454 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3455 		break;
3456 	case TCP_V6_FLOW:
3457 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3458 		break;
3459 	case UDP_V6_FLOW:
3460 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3461 		break;
3462 	case SCTP_V4_FLOW:
3463 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3464 		break;
3465 	case SCTP_V6_FLOW:
3466 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3467 		break;
3468 	case IPV4_FLOW:
3469 	case IPV6_FLOW:
3470 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3471 		break;
3472 	default:
3473 		return -EINVAL;
3474 	}
3475 
3476 	if (!tuple_sets)
3477 		return 0;
3478 
3479 	if (tuple_sets & HCLGE_D_PORT_BIT)
3480 		nfc->data |= RXH_L4_B_2_3;
3481 	if (tuple_sets & HCLGE_S_PORT_BIT)
3482 		nfc->data |= RXH_L4_B_0_1;
3483 	if (tuple_sets & HCLGE_D_IP_BIT)
3484 		nfc->data |= RXH_IP_DST;
3485 	if (tuple_sets & HCLGE_S_IP_BIT)
3486 		nfc->data |= RXH_IP_SRC;
3487 
3488 	return 0;
3489 }
3490 
3491 static int hclge_get_tc_size(struct hnae3_handle *handle)
3492 {
3493 	struct hclge_vport *vport = hclge_get_vport(handle);
3494 	struct hclge_dev *hdev = vport->back;
3495 
3496 	return hdev->rss_size_max;
3497 }
3498 
3499 int hclge_rss_init_hw(struct hclge_dev *hdev)
3500 {
3501 	struct hclge_vport *vport = hdev->vport;
3502 	u8 *rss_indir = vport[0].rss_indirection_tbl;
3503 	u16 rss_size = vport[0].alloc_rss_size;
3504 	u8 *key = vport[0].rss_hash_key;
3505 	u8 hfunc = vport[0].rss_algo;
3506 	u16 tc_offset[HCLGE_MAX_TC_NUM];
3507 	u16 tc_valid[HCLGE_MAX_TC_NUM];
3508 	u16 tc_size[HCLGE_MAX_TC_NUM];
3509 	u16 roundup_size;
3510 	int i, ret;
3511 
3512 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
3513 	if (ret)
3514 		return ret;
3515 
3516 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3517 	if (ret)
3518 		return ret;
3519 
3520 	ret = hclge_set_rss_input_tuple(hdev);
3521 	if (ret)
3522 		return ret;
3523 
3524 	/* Each TC have the same queue size, and tc_size set to hardware is
3525 	 * the log2 of roundup power of two of rss_size, the acutal queue
3526 	 * size is limited by indirection table.
3527 	 */
3528 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3529 		dev_err(&hdev->pdev->dev,
3530 			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
3531 			rss_size);
3532 		return -EINVAL;
3533 	}
3534 
3535 	roundup_size = roundup_pow_of_two(rss_size);
3536 	roundup_size = ilog2(roundup_size);
3537 
3538 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3539 		tc_valid[i] = 0;
3540 
3541 		if (!(hdev->hw_tc_map & BIT(i)))
3542 			continue;
3543 
3544 		tc_valid[i] = 1;
3545 		tc_size[i] = roundup_size;
3546 		tc_offset[i] = rss_size * i;
3547 	}
3548 
3549 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3550 }
3551 
3552 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3553 {
3554 	struct hclge_vport *vport = hdev->vport;
3555 	int i, j;
3556 
3557 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3558 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3559 			vport[j].rss_indirection_tbl[i] =
3560 				i % vport[j].alloc_rss_size;
3561 	}
3562 }
3563 
3564 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3565 {
3566 	struct hclge_vport *vport = hdev->vport;
3567 	int i;
3568 
3569 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3570 		vport[i].rss_tuple_sets.ipv4_tcp_en =
3571 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3572 		vport[i].rss_tuple_sets.ipv4_udp_en =
3573 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3574 		vport[i].rss_tuple_sets.ipv4_sctp_en =
3575 			HCLGE_RSS_INPUT_TUPLE_SCTP;
3576 		vport[i].rss_tuple_sets.ipv4_fragment_en =
3577 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3578 		vport[i].rss_tuple_sets.ipv6_tcp_en =
3579 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3580 		vport[i].rss_tuple_sets.ipv6_udp_en =
3581 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3582 		vport[i].rss_tuple_sets.ipv6_sctp_en =
3583 			HCLGE_RSS_INPUT_TUPLE_SCTP;
3584 		vport[i].rss_tuple_sets.ipv6_fragment_en =
3585 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3586 
3587 		vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3588 
3589 		netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
3590 	}
3591 
3592 	hclge_rss_indir_init_cfg(hdev);
3593 }
3594 
3595 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3596 				int vector_id, bool en,
3597 				struct hnae3_ring_chain_node *ring_chain)
3598 {
3599 	struct hclge_dev *hdev = vport->back;
3600 	struct hnae3_ring_chain_node *node;
3601 	struct hclge_desc desc;
3602 	struct hclge_ctrl_vector_chain_cmd *req
3603 		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3604 	enum hclge_cmd_status status;
3605 	enum hclge_opcode_type op;
3606 	u16 tqp_type_and_id;
3607 	int i;
3608 
3609 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3610 	hclge_cmd_setup_basic_desc(&desc, op, false);
3611 	req->int_vector_id = vector_id;
3612 
3613 	i = 0;
3614 	for (node = ring_chain; node; node = node->next) {
3615 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3616 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
3617 				HCLGE_INT_TYPE_S,
3618 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3619 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3620 				HCLGE_TQP_ID_S, node->tqp_index);
3621 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3622 				HCLGE_INT_GL_IDX_S,
3623 				hnae3_get_field(node->int_gl_idx,
3624 						HNAE3_RING_GL_IDX_M,
3625 						HNAE3_RING_GL_IDX_S));
3626 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3627 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3628 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3629 			req->vfid = vport->vport_id;
3630 
3631 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
3632 			if (status) {
3633 				dev_err(&hdev->pdev->dev,
3634 					"Map TQP fail, status is %d.\n",
3635 					status);
3636 				return -EIO;
3637 			}
3638 			i = 0;
3639 
3640 			hclge_cmd_setup_basic_desc(&desc,
3641 						   op,
3642 						   false);
3643 			req->int_vector_id = vector_id;
3644 		}
3645 	}
3646 
3647 	if (i > 0) {
3648 		req->int_cause_num = i;
3649 		req->vfid = vport->vport_id;
3650 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
3651 		if (status) {
3652 			dev_err(&hdev->pdev->dev,
3653 				"Map TQP fail, status is %d.\n", status);
3654 			return -EIO;
3655 		}
3656 	}
3657 
3658 	return 0;
3659 }
3660 
3661 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3662 				    int vector,
3663 				    struct hnae3_ring_chain_node *ring_chain)
3664 {
3665 	struct hclge_vport *vport = hclge_get_vport(handle);
3666 	struct hclge_dev *hdev = vport->back;
3667 	int vector_id;
3668 
3669 	vector_id = hclge_get_vector_index(hdev, vector);
3670 	if (vector_id < 0) {
3671 		dev_err(&hdev->pdev->dev,
3672 			"Get vector index fail. vector_id =%d\n", vector_id);
3673 		return vector_id;
3674 	}
3675 
3676 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3677 }
3678 
3679 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3680 				       int vector,
3681 				       struct hnae3_ring_chain_node *ring_chain)
3682 {
3683 	struct hclge_vport *vport = hclge_get_vport(handle);
3684 	struct hclge_dev *hdev = vport->back;
3685 	int vector_id, ret;
3686 
3687 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3688 		return 0;
3689 
3690 	vector_id = hclge_get_vector_index(hdev, vector);
3691 	if (vector_id < 0) {
3692 		dev_err(&handle->pdev->dev,
3693 			"Get vector index fail. ret =%d\n", vector_id);
3694 		return vector_id;
3695 	}
3696 
3697 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3698 	if (ret)
3699 		dev_err(&handle->pdev->dev,
3700 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3701 			vector_id,
3702 			ret);
3703 
3704 	return ret;
3705 }
3706 
3707 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3708 			       struct hclge_promisc_param *param)
3709 {
3710 	struct hclge_promisc_cfg_cmd *req;
3711 	struct hclge_desc desc;
3712 	int ret;
3713 
3714 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3715 
3716 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
3717 	req->vf_id = param->vf_id;
3718 
3719 	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
3720 	 * pdev revision(0x20), new revision support them. The
3721 	 * value of this two fields will not return error when driver
3722 	 * send command to fireware in revision(0x20).
3723 	 */
3724 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
3725 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
3726 
3727 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3728 	if (ret)
3729 		dev_err(&hdev->pdev->dev,
3730 			"Set promisc mode fail, status is %d.\n", ret);
3731 
3732 	return ret;
3733 }
3734 
3735 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3736 			      bool en_mc, bool en_bc, int vport_id)
3737 {
3738 	if (!param)
3739 		return;
3740 
3741 	memset(param, 0, sizeof(struct hclge_promisc_param));
3742 	if (en_uc)
3743 		param->enable = HCLGE_PROMISC_EN_UC;
3744 	if (en_mc)
3745 		param->enable |= HCLGE_PROMISC_EN_MC;
3746 	if (en_bc)
3747 		param->enable |= HCLGE_PROMISC_EN_BC;
3748 	param->vf_id = vport_id;
3749 }
3750 
3751 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3752 				  bool en_mc_pmc)
3753 {
3754 	struct hclge_vport *vport = hclge_get_vport(handle);
3755 	struct hclge_dev *hdev = vport->back;
3756 	struct hclge_promisc_param param;
3757 
3758 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
3759 				 vport->vport_id);
3760 	return hclge_cmd_set_promisc_mode(hdev, &param);
3761 }
3762 
3763 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
3764 {
3765 	struct hclge_get_fd_mode_cmd *req;
3766 	struct hclge_desc desc;
3767 	int ret;
3768 
3769 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
3770 
3771 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
3772 
3773 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3774 	if (ret) {
3775 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
3776 		return ret;
3777 	}
3778 
3779 	*fd_mode = req->mode;
3780 
3781 	return ret;
3782 }
3783 
3784 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
3785 				   u32 *stage1_entry_num,
3786 				   u32 *stage2_entry_num,
3787 				   u16 *stage1_counter_num,
3788 				   u16 *stage2_counter_num)
3789 {
3790 	struct hclge_get_fd_allocation_cmd *req;
3791 	struct hclge_desc desc;
3792 	int ret;
3793 
3794 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
3795 
3796 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
3797 
3798 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3799 	if (ret) {
3800 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
3801 			ret);
3802 		return ret;
3803 	}
3804 
3805 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
3806 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
3807 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
3808 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
3809 
3810 	return ret;
3811 }
3812 
3813 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
3814 {
3815 	struct hclge_set_fd_key_config_cmd *req;
3816 	struct hclge_fd_key_cfg *stage;
3817 	struct hclge_desc desc;
3818 	int ret;
3819 
3820 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
3821 
3822 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
3823 	stage = &hdev->fd_cfg.key_cfg[stage_num];
3824 	req->stage = stage_num;
3825 	req->key_select = stage->key_sel;
3826 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
3827 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
3828 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
3829 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
3830 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
3831 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
3832 
3833 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3834 	if (ret)
3835 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
3836 
3837 	return ret;
3838 }
3839 
3840 static int hclge_init_fd_config(struct hclge_dev *hdev)
3841 {
3842 #define LOW_2_WORDS		0x03
3843 	struct hclge_fd_key_cfg *key_cfg;
3844 	int ret;
3845 
3846 	if (!hnae3_dev_fd_supported(hdev))
3847 		return 0;
3848 
3849 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
3850 	if (ret)
3851 		return ret;
3852 
3853 	switch (hdev->fd_cfg.fd_mode) {
3854 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
3855 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
3856 		break;
3857 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
3858 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
3859 		break;
3860 	default:
3861 		dev_err(&hdev->pdev->dev,
3862 			"Unsupported flow director mode %d\n",
3863 			hdev->fd_cfg.fd_mode);
3864 		return -EOPNOTSUPP;
3865 	}
3866 
3867 	hdev->fd_cfg.fd_en = true;
3868 	hdev->fd_cfg.proto_support =
3869 		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
3870 		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
3871 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
3872 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE,
3873 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
3874 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
3875 	key_cfg->outer_sipv6_word_en = 0;
3876 	key_cfg->outer_dipv6_word_en = 0;
3877 
3878 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
3879 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
3880 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
3881 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
3882 
3883 	/* If use max 400bit key, we can support tuples for ether type */
3884 	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
3885 		hdev->fd_cfg.proto_support |= ETHER_FLOW;
3886 		key_cfg->tuple_active |=
3887 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
3888 	}
3889 
3890 	/* roce_type is used to filter roce frames
3891 	 * dst_vport is used to specify the rule
3892 	 */
3893 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
3894 
3895 	ret = hclge_get_fd_allocation(hdev,
3896 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
3897 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
3898 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
3899 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
3900 	if (ret)
3901 		return ret;
3902 
3903 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
3904 }
3905 
3906 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
3907 				int loc, u8 *key, bool is_add)
3908 {
3909 	struct hclge_fd_tcam_config_1_cmd *req1;
3910 	struct hclge_fd_tcam_config_2_cmd *req2;
3911 	struct hclge_fd_tcam_config_3_cmd *req3;
3912 	struct hclge_desc desc[3];
3913 	int ret;
3914 
3915 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
3916 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3917 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
3918 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3919 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
3920 
3921 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
3922 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
3923 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
3924 
3925 	req1->stage = stage;
3926 	req1->xy_sel = sel_x ? 1 : 0;
3927 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
3928 	req1->index = cpu_to_le32(loc);
3929 	req1->entry_vld = sel_x ? is_add : 0;
3930 
3931 	if (key) {
3932 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
3933 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
3934 		       sizeof(req2->tcam_data));
3935 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
3936 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
3937 	}
3938 
3939 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
3940 	if (ret)
3941 		dev_err(&hdev->pdev->dev,
3942 			"config tcam key fail, ret=%d\n",
3943 			ret);
3944 
3945 	return ret;
3946 }
3947 
3948 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
3949 			      struct hclge_fd_ad_data *action)
3950 {
3951 	struct hclge_fd_ad_config_cmd *req;
3952 	struct hclge_desc desc;
3953 	u64 ad_data = 0;
3954 	int ret;
3955 
3956 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
3957 
3958 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
3959 	req->index = cpu_to_le32(loc);
3960 	req->stage = stage;
3961 
3962 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
3963 		      action->write_rule_id_to_bd);
3964 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
3965 			action->rule_id);
3966 	ad_data <<= 32;
3967 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
3968 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
3969 		      action->forward_to_direct_queue);
3970 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
3971 			action->queue_id);
3972 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
3973 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
3974 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
3975 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
3976 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
3977 			action->counter_id);
3978 
3979 	req->ad_data = cpu_to_le64(ad_data);
3980 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3981 	if (ret)
3982 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
3983 
3984 	return ret;
3985 }
3986 
3987 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
3988 				   struct hclge_fd_rule *rule)
3989 {
3990 	u16 tmp_x_s, tmp_y_s;
3991 	u32 tmp_x_l, tmp_y_l;
3992 	int i;
3993 
3994 	if (rule->unused_tuple & tuple_bit)
3995 		return true;
3996 
3997 	switch (tuple_bit) {
3998 	case 0:
3999 		return false;
4000 	case BIT(INNER_DST_MAC):
4001 		for (i = 0; i < 6; i++) {
4002 			calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4003 			       rule->tuples_mask.dst_mac[i]);
4004 			calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4005 			       rule->tuples_mask.dst_mac[i]);
4006 		}
4007 
4008 		return true;
4009 	case BIT(INNER_SRC_MAC):
4010 		for (i = 0; i < 6; i++) {
4011 			calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4012 			       rule->tuples.src_mac[i]);
4013 			calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4014 			       rule->tuples.src_mac[i]);
4015 		}
4016 
4017 		return true;
4018 	case BIT(INNER_VLAN_TAG_FST):
4019 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4020 		       rule->tuples_mask.vlan_tag1);
4021 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4022 		       rule->tuples_mask.vlan_tag1);
4023 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4024 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4025 
4026 		return true;
4027 	case BIT(INNER_ETH_TYPE):
4028 		calc_x(tmp_x_s, rule->tuples.ether_proto,
4029 		       rule->tuples_mask.ether_proto);
4030 		calc_y(tmp_y_s, rule->tuples.ether_proto,
4031 		       rule->tuples_mask.ether_proto);
4032 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4033 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4034 
4035 		return true;
4036 	case BIT(INNER_IP_TOS):
4037 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4038 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4039 
4040 		return true;
4041 	case BIT(INNER_IP_PROTO):
4042 		calc_x(*key_x, rule->tuples.ip_proto,
4043 		       rule->tuples_mask.ip_proto);
4044 		calc_y(*key_y, rule->tuples.ip_proto,
4045 		       rule->tuples_mask.ip_proto);
4046 
4047 		return true;
4048 	case BIT(INNER_SRC_IP):
4049 		calc_x(tmp_x_l, rule->tuples.src_ip[3],
4050 		       rule->tuples_mask.src_ip[3]);
4051 		calc_y(tmp_y_l, rule->tuples.src_ip[3],
4052 		       rule->tuples_mask.src_ip[3]);
4053 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4054 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4055 
4056 		return true;
4057 	case BIT(INNER_DST_IP):
4058 		calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4059 		       rule->tuples_mask.dst_ip[3]);
4060 		calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4061 		       rule->tuples_mask.dst_ip[3]);
4062 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4063 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4064 
4065 		return true;
4066 	case BIT(INNER_SRC_PORT):
4067 		calc_x(tmp_x_s, rule->tuples.src_port,
4068 		       rule->tuples_mask.src_port);
4069 		calc_y(tmp_y_s, rule->tuples.src_port,
4070 		       rule->tuples_mask.src_port);
4071 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4072 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4073 
4074 		return true;
4075 	case BIT(INNER_DST_PORT):
4076 		calc_x(tmp_x_s, rule->tuples.dst_port,
4077 		       rule->tuples_mask.dst_port);
4078 		calc_y(tmp_y_s, rule->tuples.dst_port,
4079 		       rule->tuples_mask.dst_port);
4080 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4081 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4082 
4083 		return true;
4084 	default:
4085 		return false;
4086 	}
4087 }
4088 
4089 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4090 				 u8 vf_id, u8 network_port_id)
4091 {
4092 	u32 port_number = 0;
4093 
4094 	if (port_type == HOST_PORT) {
4095 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4096 				pf_id);
4097 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4098 				vf_id);
4099 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4100 	} else {
4101 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4102 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
4103 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4104 	}
4105 
4106 	return port_number;
4107 }
4108 
4109 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4110 				       __le32 *key_x, __le32 *key_y,
4111 				       struct hclge_fd_rule *rule)
4112 {
4113 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4114 	u8 cur_pos = 0, tuple_size, shift_bits;
4115 	int i;
4116 
4117 	for (i = 0; i < MAX_META_DATA; i++) {
4118 		tuple_size = meta_data_key_info[i].key_length;
4119 		tuple_bit = key_cfg->meta_data_active & BIT(i);
4120 
4121 		switch (tuple_bit) {
4122 		case BIT(ROCE_TYPE):
4123 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4124 			cur_pos += tuple_size;
4125 			break;
4126 		case BIT(DST_VPORT):
4127 			port_number = hclge_get_port_number(HOST_PORT, 0,
4128 							    rule->vf_id, 0);
4129 			hnae3_set_field(meta_data,
4130 					GENMASK(cur_pos + tuple_size, cur_pos),
4131 					cur_pos, port_number);
4132 			cur_pos += tuple_size;
4133 			break;
4134 		default:
4135 			break;
4136 		}
4137 	}
4138 
4139 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4140 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4141 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
4142 
4143 	*key_x = cpu_to_le32(tmp_x << shift_bits);
4144 	*key_y = cpu_to_le32(tmp_y << shift_bits);
4145 }
4146 
4147 /* A complete key is combined with meta data key and tuple key.
4148  * Meta data key is stored at the MSB region, and tuple key is stored at
4149  * the LSB region, unused bits will be filled 0.
4150  */
4151 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4152 			    struct hclge_fd_rule *rule)
4153 {
4154 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4155 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4156 	u8 *cur_key_x, *cur_key_y;
4157 	int i, ret, tuple_size;
4158 	u8 meta_data_region;
4159 
4160 	memset(key_x, 0, sizeof(key_x));
4161 	memset(key_y, 0, sizeof(key_y));
4162 	cur_key_x = key_x;
4163 	cur_key_y = key_y;
4164 
4165 	for (i = 0 ; i < MAX_TUPLE; i++) {
4166 		bool tuple_valid;
4167 		u32 check_tuple;
4168 
4169 		tuple_size = tuple_key_info[i].key_length / 8;
4170 		check_tuple = key_cfg->tuple_active & BIT(i);
4171 
4172 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4173 						     cur_key_y, rule);
4174 		if (tuple_valid) {
4175 			cur_key_x += tuple_size;
4176 			cur_key_y += tuple_size;
4177 		}
4178 	}
4179 
4180 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4181 			MAX_META_DATA_LENGTH / 8;
4182 
4183 	hclge_fd_convert_meta_data(key_cfg,
4184 				   (__le32 *)(key_x + meta_data_region),
4185 				   (__le32 *)(key_y + meta_data_region),
4186 				   rule);
4187 
4188 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4189 				   true);
4190 	if (ret) {
4191 		dev_err(&hdev->pdev->dev,
4192 			"fd key_y config fail, loc=%d, ret=%d\n",
4193 			rule->queue_id, ret);
4194 		return ret;
4195 	}
4196 
4197 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4198 				   true);
4199 	if (ret)
4200 		dev_err(&hdev->pdev->dev,
4201 			"fd key_x config fail, loc=%d, ret=%d\n",
4202 			rule->queue_id, ret);
4203 	return ret;
4204 }
4205 
4206 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4207 			       struct hclge_fd_rule *rule)
4208 {
4209 	struct hclge_fd_ad_data ad_data;
4210 
4211 	ad_data.ad_id = rule->location;
4212 
4213 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4214 		ad_data.drop_packet = true;
4215 		ad_data.forward_to_direct_queue = false;
4216 		ad_data.queue_id = 0;
4217 	} else {
4218 		ad_data.drop_packet = false;
4219 		ad_data.forward_to_direct_queue = true;
4220 		ad_data.queue_id = rule->queue_id;
4221 	}
4222 
4223 	ad_data.use_counter = false;
4224 	ad_data.counter_id = 0;
4225 
4226 	ad_data.use_next_stage = false;
4227 	ad_data.next_input_key = 0;
4228 
4229 	ad_data.write_rule_id_to_bd = true;
4230 	ad_data.rule_id = rule->location;
4231 
4232 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4233 }
4234 
4235 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4236 			       struct ethtool_rx_flow_spec *fs, u32 *unused)
4237 {
4238 	struct ethtool_tcpip4_spec *tcp_ip4_spec;
4239 	struct ethtool_usrip4_spec *usr_ip4_spec;
4240 	struct ethtool_tcpip6_spec *tcp_ip6_spec;
4241 	struct ethtool_usrip6_spec *usr_ip6_spec;
4242 	struct ethhdr *ether_spec;
4243 
4244 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4245 		return -EINVAL;
4246 
4247 	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4248 		return -EOPNOTSUPP;
4249 
4250 	if ((fs->flow_type & FLOW_EXT) &&
4251 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4252 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4253 		return -EOPNOTSUPP;
4254 	}
4255 
4256 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4257 	case SCTP_V4_FLOW:
4258 	case TCP_V4_FLOW:
4259 	case UDP_V4_FLOW:
4260 		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4261 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4262 
4263 		if (!tcp_ip4_spec->ip4src)
4264 			*unused |= BIT(INNER_SRC_IP);
4265 
4266 		if (!tcp_ip4_spec->ip4dst)
4267 			*unused |= BIT(INNER_DST_IP);
4268 
4269 		if (!tcp_ip4_spec->psrc)
4270 			*unused |= BIT(INNER_SRC_PORT);
4271 
4272 		if (!tcp_ip4_spec->pdst)
4273 			*unused |= BIT(INNER_DST_PORT);
4274 
4275 		if (!tcp_ip4_spec->tos)
4276 			*unused |= BIT(INNER_IP_TOS);
4277 
4278 		break;
4279 	case IP_USER_FLOW:
4280 		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4281 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4282 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4283 
4284 		if (!usr_ip4_spec->ip4src)
4285 			*unused |= BIT(INNER_SRC_IP);
4286 
4287 		if (!usr_ip4_spec->ip4dst)
4288 			*unused |= BIT(INNER_DST_IP);
4289 
4290 		if (!usr_ip4_spec->tos)
4291 			*unused |= BIT(INNER_IP_TOS);
4292 
4293 		if (!usr_ip4_spec->proto)
4294 			*unused |= BIT(INNER_IP_PROTO);
4295 
4296 		if (usr_ip4_spec->l4_4_bytes)
4297 			return -EOPNOTSUPP;
4298 
4299 		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4300 			return -EOPNOTSUPP;
4301 
4302 		break;
4303 	case SCTP_V6_FLOW:
4304 	case TCP_V6_FLOW:
4305 	case UDP_V6_FLOW:
4306 		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4307 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4308 			BIT(INNER_IP_TOS);
4309 
4310 		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4311 		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4312 			*unused |= BIT(INNER_SRC_IP);
4313 
4314 		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4315 		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4316 			*unused |= BIT(INNER_DST_IP);
4317 
4318 		if (!tcp_ip6_spec->psrc)
4319 			*unused |= BIT(INNER_SRC_PORT);
4320 
4321 		if (!tcp_ip6_spec->pdst)
4322 			*unused |= BIT(INNER_DST_PORT);
4323 
4324 		if (tcp_ip6_spec->tclass)
4325 			return -EOPNOTSUPP;
4326 
4327 		break;
4328 	case IPV6_USER_FLOW:
4329 		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4330 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4331 			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4332 			BIT(INNER_DST_PORT);
4333 
4334 		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4335 		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4336 			*unused |= BIT(INNER_SRC_IP);
4337 
4338 		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4339 		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4340 			*unused |= BIT(INNER_DST_IP);
4341 
4342 		if (!usr_ip6_spec->l4_proto)
4343 			*unused |= BIT(INNER_IP_PROTO);
4344 
4345 		if (usr_ip6_spec->tclass)
4346 			return -EOPNOTSUPP;
4347 
4348 		if (usr_ip6_spec->l4_4_bytes)
4349 			return -EOPNOTSUPP;
4350 
4351 		break;
4352 	case ETHER_FLOW:
4353 		ether_spec = &fs->h_u.ether_spec;
4354 		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4355 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4356 			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4357 
4358 		if (is_zero_ether_addr(ether_spec->h_source))
4359 			*unused |= BIT(INNER_SRC_MAC);
4360 
4361 		if (is_zero_ether_addr(ether_spec->h_dest))
4362 			*unused |= BIT(INNER_DST_MAC);
4363 
4364 		if (!ether_spec->h_proto)
4365 			*unused |= BIT(INNER_ETH_TYPE);
4366 
4367 		break;
4368 	default:
4369 		return -EOPNOTSUPP;
4370 	}
4371 
4372 	if ((fs->flow_type & FLOW_EXT)) {
4373 		if (fs->h_ext.vlan_etype)
4374 			return -EOPNOTSUPP;
4375 		if (!fs->h_ext.vlan_tci)
4376 			*unused |= BIT(INNER_VLAN_TAG_FST);
4377 
4378 		if (fs->m_ext.vlan_tci) {
4379 			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4380 				return -EINVAL;
4381 		}
4382 	} else {
4383 		*unused |= BIT(INNER_VLAN_TAG_FST);
4384 	}
4385 
4386 	if (fs->flow_type & FLOW_MAC_EXT) {
4387 		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4388 			return -EOPNOTSUPP;
4389 
4390 		if (is_zero_ether_addr(fs->h_ext.h_dest))
4391 			*unused |= BIT(INNER_DST_MAC);
4392 		else
4393 			*unused &= ~(BIT(INNER_DST_MAC));
4394 	}
4395 
4396 	return 0;
4397 }
4398 
4399 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4400 {
4401 	struct hclge_fd_rule *rule = NULL;
4402 	struct hlist_node *node2;
4403 
4404 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4405 		if (rule->location >= location)
4406 			break;
4407 	}
4408 
4409 	return  rule && rule->location == location;
4410 }
4411 
4412 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4413 				     struct hclge_fd_rule *new_rule,
4414 				     u16 location,
4415 				     bool is_add)
4416 {
4417 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
4418 	struct hlist_node *node2;
4419 
4420 	if (is_add && !new_rule)
4421 		return -EINVAL;
4422 
4423 	hlist_for_each_entry_safe(rule, node2,
4424 				  &hdev->fd_rule_list, rule_node) {
4425 		if (rule->location >= location)
4426 			break;
4427 		parent = rule;
4428 	}
4429 
4430 	if (rule && rule->location == location) {
4431 		hlist_del(&rule->rule_node);
4432 		kfree(rule);
4433 		hdev->hclge_fd_rule_num--;
4434 
4435 		if (!is_add)
4436 			return 0;
4437 
4438 	} else if (!is_add) {
4439 		dev_err(&hdev->pdev->dev,
4440 			"delete fail, rule %d is inexistent\n",
4441 			location);
4442 		return -EINVAL;
4443 	}
4444 
4445 	INIT_HLIST_NODE(&new_rule->rule_node);
4446 
4447 	if (parent)
4448 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4449 	else
4450 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4451 
4452 	hdev->hclge_fd_rule_num++;
4453 
4454 	return 0;
4455 }
4456 
4457 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4458 			      struct ethtool_rx_flow_spec *fs,
4459 			      struct hclge_fd_rule *rule)
4460 {
4461 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4462 
4463 	switch (flow_type) {
4464 	case SCTP_V4_FLOW:
4465 	case TCP_V4_FLOW:
4466 	case UDP_V4_FLOW:
4467 		rule->tuples.src_ip[3] =
4468 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4469 		rule->tuples_mask.src_ip[3] =
4470 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4471 
4472 		rule->tuples.dst_ip[3] =
4473 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4474 		rule->tuples_mask.dst_ip[3] =
4475 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4476 
4477 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4478 		rule->tuples_mask.src_port =
4479 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4480 
4481 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4482 		rule->tuples_mask.dst_port =
4483 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4484 
4485 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4486 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4487 
4488 		rule->tuples.ether_proto = ETH_P_IP;
4489 		rule->tuples_mask.ether_proto = 0xFFFF;
4490 
4491 		break;
4492 	case IP_USER_FLOW:
4493 		rule->tuples.src_ip[3] =
4494 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4495 		rule->tuples_mask.src_ip[3] =
4496 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4497 
4498 		rule->tuples.dst_ip[3] =
4499 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4500 		rule->tuples_mask.dst_ip[3] =
4501 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4502 
4503 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4504 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4505 
4506 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4507 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4508 
4509 		rule->tuples.ether_proto = ETH_P_IP;
4510 		rule->tuples_mask.ether_proto = 0xFFFF;
4511 
4512 		break;
4513 	case SCTP_V6_FLOW:
4514 	case TCP_V6_FLOW:
4515 	case UDP_V6_FLOW:
4516 		be32_to_cpu_array(rule->tuples.src_ip,
4517 				  fs->h_u.tcp_ip6_spec.ip6src, 4);
4518 		be32_to_cpu_array(rule->tuples_mask.src_ip,
4519 				  fs->m_u.tcp_ip6_spec.ip6src, 4);
4520 
4521 		be32_to_cpu_array(rule->tuples.dst_ip,
4522 				  fs->h_u.tcp_ip6_spec.ip6dst, 4);
4523 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
4524 				  fs->m_u.tcp_ip6_spec.ip6dst, 4);
4525 
4526 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4527 		rule->tuples_mask.src_port =
4528 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4529 
4530 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4531 		rule->tuples_mask.dst_port =
4532 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4533 
4534 		rule->tuples.ether_proto = ETH_P_IPV6;
4535 		rule->tuples_mask.ether_proto = 0xFFFF;
4536 
4537 		break;
4538 	case IPV6_USER_FLOW:
4539 		be32_to_cpu_array(rule->tuples.src_ip,
4540 				  fs->h_u.usr_ip6_spec.ip6src, 4);
4541 		be32_to_cpu_array(rule->tuples_mask.src_ip,
4542 				  fs->m_u.usr_ip6_spec.ip6src, 4);
4543 
4544 		be32_to_cpu_array(rule->tuples.dst_ip,
4545 				  fs->h_u.usr_ip6_spec.ip6dst, 4);
4546 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
4547 				  fs->m_u.usr_ip6_spec.ip6dst, 4);
4548 
4549 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4550 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4551 
4552 		rule->tuples.ether_proto = ETH_P_IPV6;
4553 		rule->tuples_mask.ether_proto = 0xFFFF;
4554 
4555 		break;
4556 	case ETHER_FLOW:
4557 		ether_addr_copy(rule->tuples.src_mac,
4558 				fs->h_u.ether_spec.h_source);
4559 		ether_addr_copy(rule->tuples_mask.src_mac,
4560 				fs->m_u.ether_spec.h_source);
4561 
4562 		ether_addr_copy(rule->tuples.dst_mac,
4563 				fs->h_u.ether_spec.h_dest);
4564 		ether_addr_copy(rule->tuples_mask.dst_mac,
4565 				fs->m_u.ether_spec.h_dest);
4566 
4567 		rule->tuples.ether_proto =
4568 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
4569 		rule->tuples_mask.ether_proto =
4570 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
4571 
4572 		break;
4573 	default:
4574 		return -EOPNOTSUPP;
4575 	}
4576 
4577 	switch (flow_type) {
4578 	case SCTP_V4_FLOW:
4579 	case SCTP_V6_FLOW:
4580 		rule->tuples.ip_proto = IPPROTO_SCTP;
4581 		rule->tuples_mask.ip_proto = 0xFF;
4582 		break;
4583 	case TCP_V4_FLOW:
4584 	case TCP_V6_FLOW:
4585 		rule->tuples.ip_proto = IPPROTO_TCP;
4586 		rule->tuples_mask.ip_proto = 0xFF;
4587 		break;
4588 	case UDP_V4_FLOW:
4589 	case UDP_V6_FLOW:
4590 		rule->tuples.ip_proto = IPPROTO_UDP;
4591 		rule->tuples_mask.ip_proto = 0xFF;
4592 		break;
4593 	default:
4594 		break;
4595 	}
4596 
4597 	if ((fs->flow_type & FLOW_EXT)) {
4598 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4599 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4600 	}
4601 
4602 	if (fs->flow_type & FLOW_MAC_EXT) {
4603 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4604 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4605 	}
4606 
4607 	return 0;
4608 }
4609 
4610 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4611 			      struct ethtool_rxnfc *cmd)
4612 {
4613 	struct hclge_vport *vport = hclge_get_vport(handle);
4614 	struct hclge_dev *hdev = vport->back;
4615 	u16 dst_vport_id = 0, q_index = 0;
4616 	struct ethtool_rx_flow_spec *fs;
4617 	struct hclge_fd_rule *rule;
4618 	u32 unused = 0;
4619 	u8 action;
4620 	int ret;
4621 
4622 	if (!hnae3_dev_fd_supported(hdev))
4623 		return -EOPNOTSUPP;
4624 
4625 	if (!hdev->fd_cfg.fd_en) {
4626 		dev_warn(&hdev->pdev->dev,
4627 			 "Please enable flow director first\n");
4628 		return -EOPNOTSUPP;
4629 	}
4630 
4631 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4632 
4633 	ret = hclge_fd_check_spec(hdev, fs, &unused);
4634 	if (ret) {
4635 		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4636 		return ret;
4637 	}
4638 
4639 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4640 		action = HCLGE_FD_ACTION_DROP_PACKET;
4641 	} else {
4642 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4643 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4644 		u16 tqps;
4645 
4646 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4647 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4648 
4649 		if (ring >= tqps) {
4650 			dev_err(&hdev->pdev->dev,
4651 				"Error: queue id (%d) > max tqp num (%d)\n",
4652 				ring, tqps - 1);
4653 			return -EINVAL;
4654 		}
4655 
4656 		if (vf > hdev->num_req_vfs) {
4657 			dev_err(&hdev->pdev->dev,
4658 				"Error: vf id (%d) > max vf num (%d)\n",
4659 				vf, hdev->num_req_vfs);
4660 			return -EINVAL;
4661 		}
4662 
4663 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4664 		q_index = ring;
4665 	}
4666 
4667 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4668 	if (!rule)
4669 		return -ENOMEM;
4670 
4671 	ret = hclge_fd_get_tuple(hdev, fs, rule);
4672 	if (ret)
4673 		goto free_rule;
4674 
4675 	rule->flow_type = fs->flow_type;
4676 
4677 	rule->location = fs->location;
4678 	rule->unused_tuple = unused;
4679 	rule->vf_id = dst_vport_id;
4680 	rule->queue_id = q_index;
4681 	rule->action = action;
4682 
4683 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4684 	if (ret)
4685 		goto free_rule;
4686 
4687 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4688 	if (ret)
4689 		goto free_rule;
4690 
4691 	ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4692 	if (ret)
4693 		goto free_rule;
4694 
4695 	return ret;
4696 
4697 free_rule:
4698 	kfree(rule);
4699 	return ret;
4700 }
4701 
4702 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4703 			      struct ethtool_rxnfc *cmd)
4704 {
4705 	struct hclge_vport *vport = hclge_get_vport(handle);
4706 	struct hclge_dev *hdev = vport->back;
4707 	struct ethtool_rx_flow_spec *fs;
4708 	int ret;
4709 
4710 	if (!hnae3_dev_fd_supported(hdev))
4711 		return -EOPNOTSUPP;
4712 
4713 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4714 
4715 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4716 		return -EINVAL;
4717 
4718 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
4719 		dev_err(&hdev->pdev->dev,
4720 			"Delete fail, rule %d is inexistent\n",
4721 			fs->location);
4722 		return -ENOENT;
4723 	}
4724 
4725 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4726 				   fs->location, NULL, false);
4727 	if (ret)
4728 		return ret;
4729 
4730 	return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4731 					 false);
4732 }
4733 
4734 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4735 				     bool clear_list)
4736 {
4737 	struct hclge_vport *vport = hclge_get_vport(handle);
4738 	struct hclge_dev *hdev = vport->back;
4739 	struct hclge_fd_rule *rule;
4740 	struct hlist_node *node;
4741 
4742 	if (!hnae3_dev_fd_supported(hdev))
4743 		return;
4744 
4745 	if (clear_list) {
4746 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4747 					  rule_node) {
4748 			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4749 					     rule->location, NULL, false);
4750 			hlist_del(&rule->rule_node);
4751 			kfree(rule);
4752 			hdev->hclge_fd_rule_num--;
4753 		}
4754 	} else {
4755 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4756 					  rule_node)
4757 			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4758 					     rule->location, NULL, false);
4759 	}
4760 }
4761 
4762 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4763 {
4764 	struct hclge_vport *vport = hclge_get_vport(handle);
4765 	struct hclge_dev *hdev = vport->back;
4766 	struct hclge_fd_rule *rule;
4767 	struct hlist_node *node;
4768 	int ret;
4769 
4770 	/* Return ok here, because reset error handling will check this
4771 	 * return value. If error is returned here, the reset process will
4772 	 * fail.
4773 	 */
4774 	if (!hnae3_dev_fd_supported(hdev))
4775 		return 0;
4776 
4777 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4778 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4779 		if (!ret)
4780 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4781 
4782 		if (ret) {
4783 			dev_warn(&hdev->pdev->dev,
4784 				 "Restore rule %d failed, remove it\n",
4785 				 rule->location);
4786 			hlist_del(&rule->rule_node);
4787 			kfree(rule);
4788 			hdev->hclge_fd_rule_num--;
4789 		}
4790 	}
4791 	return 0;
4792 }
4793 
4794 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4795 				 struct ethtool_rxnfc *cmd)
4796 {
4797 	struct hclge_vport *vport = hclge_get_vport(handle);
4798 	struct hclge_dev *hdev = vport->back;
4799 
4800 	if (!hnae3_dev_fd_supported(hdev))
4801 		return -EOPNOTSUPP;
4802 
4803 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
4804 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4805 
4806 	return 0;
4807 }
4808 
4809 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
4810 				  struct ethtool_rxnfc *cmd)
4811 {
4812 	struct hclge_vport *vport = hclge_get_vport(handle);
4813 	struct hclge_fd_rule *rule = NULL;
4814 	struct hclge_dev *hdev = vport->back;
4815 	struct ethtool_rx_flow_spec *fs;
4816 	struct hlist_node *node2;
4817 
4818 	if (!hnae3_dev_fd_supported(hdev))
4819 		return -EOPNOTSUPP;
4820 
4821 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4822 
4823 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4824 		if (rule->location >= fs->location)
4825 			break;
4826 	}
4827 
4828 	if (!rule || fs->location != rule->location)
4829 		return -ENOENT;
4830 
4831 	fs->flow_type = rule->flow_type;
4832 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4833 	case SCTP_V4_FLOW:
4834 	case TCP_V4_FLOW:
4835 	case UDP_V4_FLOW:
4836 		fs->h_u.tcp_ip4_spec.ip4src =
4837 				cpu_to_be32(rule->tuples.src_ip[3]);
4838 		fs->m_u.tcp_ip4_spec.ip4src =
4839 				rule->unused_tuple & BIT(INNER_SRC_IP) ?
4840 				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4841 
4842 		fs->h_u.tcp_ip4_spec.ip4dst =
4843 				cpu_to_be32(rule->tuples.dst_ip[3]);
4844 		fs->m_u.tcp_ip4_spec.ip4dst =
4845 				rule->unused_tuple & BIT(INNER_DST_IP) ?
4846 				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4847 
4848 		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
4849 		fs->m_u.tcp_ip4_spec.psrc =
4850 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
4851 				0 : cpu_to_be16(rule->tuples_mask.src_port);
4852 
4853 		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
4854 		fs->m_u.tcp_ip4_spec.pdst =
4855 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
4856 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
4857 
4858 		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
4859 		fs->m_u.tcp_ip4_spec.tos =
4860 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
4861 				0 : rule->tuples_mask.ip_tos;
4862 
4863 		break;
4864 	case IP_USER_FLOW:
4865 		fs->h_u.usr_ip4_spec.ip4src =
4866 				cpu_to_be32(rule->tuples.src_ip[3]);
4867 		fs->m_u.tcp_ip4_spec.ip4src =
4868 				rule->unused_tuple & BIT(INNER_SRC_IP) ?
4869 				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4870 
4871 		fs->h_u.usr_ip4_spec.ip4dst =
4872 				cpu_to_be32(rule->tuples.dst_ip[3]);
4873 		fs->m_u.usr_ip4_spec.ip4dst =
4874 				rule->unused_tuple & BIT(INNER_DST_IP) ?
4875 				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4876 
4877 		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
4878 		fs->m_u.usr_ip4_spec.tos =
4879 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
4880 				0 : rule->tuples_mask.ip_tos;
4881 
4882 		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
4883 		fs->m_u.usr_ip4_spec.proto =
4884 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
4885 				0 : rule->tuples_mask.ip_proto;
4886 
4887 		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
4888 
4889 		break;
4890 	case SCTP_V6_FLOW:
4891 	case TCP_V6_FLOW:
4892 	case UDP_V6_FLOW:
4893 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
4894 				  rule->tuples.src_ip, 4);
4895 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
4896 			memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
4897 		else
4898 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
4899 					  rule->tuples_mask.src_ip, 4);
4900 
4901 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
4902 				  rule->tuples.dst_ip, 4);
4903 		if (rule->unused_tuple & BIT(INNER_DST_IP))
4904 			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
4905 		else
4906 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
4907 					  rule->tuples_mask.dst_ip, 4);
4908 
4909 		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
4910 		fs->m_u.tcp_ip6_spec.psrc =
4911 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
4912 				0 : cpu_to_be16(rule->tuples_mask.src_port);
4913 
4914 		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
4915 		fs->m_u.tcp_ip6_spec.pdst =
4916 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
4917 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
4918 
4919 		break;
4920 	case IPV6_USER_FLOW:
4921 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
4922 				  rule->tuples.src_ip, 4);
4923 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
4924 			memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
4925 		else
4926 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
4927 					  rule->tuples_mask.src_ip, 4);
4928 
4929 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
4930 				  rule->tuples.dst_ip, 4);
4931 		if (rule->unused_tuple & BIT(INNER_DST_IP))
4932 			memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
4933 		else
4934 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
4935 					  rule->tuples_mask.dst_ip, 4);
4936 
4937 		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
4938 		fs->m_u.usr_ip6_spec.l4_proto =
4939 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
4940 				0 : rule->tuples_mask.ip_proto;
4941 
4942 		break;
4943 	case ETHER_FLOW:
4944 		ether_addr_copy(fs->h_u.ether_spec.h_source,
4945 				rule->tuples.src_mac);
4946 		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
4947 			eth_zero_addr(fs->m_u.ether_spec.h_source);
4948 		else
4949 			ether_addr_copy(fs->m_u.ether_spec.h_source,
4950 					rule->tuples_mask.src_mac);
4951 
4952 		ether_addr_copy(fs->h_u.ether_spec.h_dest,
4953 				rule->tuples.dst_mac);
4954 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
4955 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
4956 		else
4957 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
4958 					rule->tuples_mask.dst_mac);
4959 
4960 		fs->h_u.ether_spec.h_proto =
4961 				cpu_to_be16(rule->tuples.ether_proto);
4962 		fs->m_u.ether_spec.h_proto =
4963 				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
4964 				0 : cpu_to_be16(rule->tuples_mask.ether_proto);
4965 
4966 		break;
4967 	default:
4968 		return -EOPNOTSUPP;
4969 	}
4970 
4971 	if (fs->flow_type & FLOW_EXT) {
4972 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
4973 		fs->m_ext.vlan_tci =
4974 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
4975 				cpu_to_be16(VLAN_VID_MASK) :
4976 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
4977 	}
4978 
4979 	if (fs->flow_type & FLOW_MAC_EXT) {
4980 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
4981 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
4982 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
4983 		else
4984 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
4985 					rule->tuples_mask.dst_mac);
4986 	}
4987 
4988 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4989 		fs->ring_cookie = RX_CLS_FLOW_DISC;
4990 	} else {
4991 		u64 vf_id;
4992 
4993 		fs->ring_cookie = rule->queue_id;
4994 		vf_id = rule->vf_id;
4995 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
4996 		fs->ring_cookie |= vf_id;
4997 	}
4998 
4999 	return 0;
5000 }
5001 
5002 static int hclge_get_all_rules(struct hnae3_handle *handle,
5003 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
5004 {
5005 	struct hclge_vport *vport = hclge_get_vport(handle);
5006 	struct hclge_dev *hdev = vport->back;
5007 	struct hclge_fd_rule *rule;
5008 	struct hlist_node *node2;
5009 	int cnt = 0;
5010 
5011 	if (!hnae3_dev_fd_supported(hdev))
5012 		return -EOPNOTSUPP;
5013 
5014 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5015 
5016 	hlist_for_each_entry_safe(rule, node2,
5017 				  &hdev->fd_rule_list, rule_node) {
5018 		if (cnt == cmd->rule_cnt)
5019 			return -EMSGSIZE;
5020 
5021 		rule_locs[cnt] = rule->location;
5022 		cnt++;
5023 	}
5024 
5025 	cmd->rule_cnt = cnt;
5026 
5027 	return 0;
5028 }
5029 
5030 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5031 {
5032 	struct hclge_vport *vport = hclge_get_vport(handle);
5033 	struct hclge_dev *hdev = vport->back;
5034 
5035 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5036 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5037 }
5038 
5039 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5040 {
5041 	struct hclge_vport *vport = hclge_get_vport(handle);
5042 	struct hclge_dev *hdev = vport->back;
5043 
5044 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5045 }
5046 
5047 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5048 {
5049 	struct hclge_vport *vport = hclge_get_vport(handle);
5050 	struct hclge_dev *hdev = vport->back;
5051 
5052 	return hdev->reset_count;
5053 }
5054 
5055 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5056 {
5057 	struct hclge_vport *vport = hclge_get_vport(handle);
5058 	struct hclge_dev *hdev = vport->back;
5059 
5060 	hdev->fd_cfg.fd_en = enable;
5061 	if (!enable)
5062 		hclge_del_all_fd_entries(handle, false);
5063 	else
5064 		hclge_restore_fd_entries(handle);
5065 }
5066 
5067 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5068 {
5069 	struct hclge_desc desc;
5070 	struct hclge_config_mac_mode_cmd *req =
5071 		(struct hclge_config_mac_mode_cmd *)desc.data;
5072 	u32 loop_en = 0;
5073 	int ret;
5074 
5075 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5076 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5077 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5078 	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5079 	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5080 	hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5081 	hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5082 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5083 	hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5084 	hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5085 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5086 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5087 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5088 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5089 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5090 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5091 
5092 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5093 	if (ret)
5094 		dev_err(&hdev->pdev->dev,
5095 			"mac enable fail, ret =%d.\n", ret);
5096 }
5097 
5098 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5099 {
5100 	struct hclge_config_mac_mode_cmd *req;
5101 	struct hclge_desc desc;
5102 	u32 loop_en;
5103 	int ret;
5104 
5105 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5106 	/* 1 Read out the MAC mode config at first */
5107 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5108 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5109 	if (ret) {
5110 		dev_err(&hdev->pdev->dev,
5111 			"mac loopback get fail, ret =%d.\n", ret);
5112 		return ret;
5113 	}
5114 
5115 	/* 2 Then setup the loopback flag */
5116 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5117 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5118 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5119 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5120 
5121 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5122 
5123 	/* 3 Config mac work mode with loopback flag
5124 	 * and its original configure parameters
5125 	 */
5126 	hclge_cmd_reuse_desc(&desc, false);
5127 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5128 	if (ret)
5129 		dev_err(&hdev->pdev->dev,
5130 			"mac loopback set fail, ret =%d.\n", ret);
5131 	return ret;
5132 }
5133 
5134 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5135 				     enum hnae3_loop loop_mode)
5136 {
5137 #define HCLGE_SERDES_RETRY_MS	10
5138 #define HCLGE_SERDES_RETRY_NUM	100
5139 	struct hclge_serdes_lb_cmd *req;
5140 	struct hclge_desc desc;
5141 	int ret, i = 0;
5142 	u8 loop_mode_b;
5143 
5144 	req = (struct hclge_serdes_lb_cmd *)desc.data;
5145 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5146 
5147 	switch (loop_mode) {
5148 	case HNAE3_LOOP_SERIAL_SERDES:
5149 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5150 		break;
5151 	case HNAE3_LOOP_PARALLEL_SERDES:
5152 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5153 		break;
5154 	default:
5155 		dev_err(&hdev->pdev->dev,
5156 			"unsupported serdes loopback mode %d\n", loop_mode);
5157 		return -ENOTSUPP;
5158 	}
5159 
5160 	if (en) {
5161 		req->enable = loop_mode_b;
5162 		req->mask = loop_mode_b;
5163 	} else {
5164 		req->mask = loop_mode_b;
5165 	}
5166 
5167 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5168 	if (ret) {
5169 		dev_err(&hdev->pdev->dev,
5170 			"serdes loopback set fail, ret = %d\n", ret);
5171 		return ret;
5172 	}
5173 
5174 	do {
5175 		msleep(HCLGE_SERDES_RETRY_MS);
5176 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5177 					   true);
5178 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5179 		if (ret) {
5180 			dev_err(&hdev->pdev->dev,
5181 				"serdes loopback get, ret = %d\n", ret);
5182 			return ret;
5183 		}
5184 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
5185 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5186 
5187 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5188 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5189 		return -EBUSY;
5190 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5191 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5192 		return -EIO;
5193 	}
5194 
5195 	hclge_cfg_mac_mode(hdev, en);
5196 	return 0;
5197 }
5198 
5199 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5200 			    int stream_id, bool enable)
5201 {
5202 	struct hclge_desc desc;
5203 	struct hclge_cfg_com_tqp_queue_cmd *req =
5204 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5205 	int ret;
5206 
5207 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5208 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5209 	req->stream_id = cpu_to_le16(stream_id);
5210 	req->enable |= enable << HCLGE_TQP_ENABLE_B;
5211 
5212 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5213 	if (ret)
5214 		dev_err(&hdev->pdev->dev,
5215 			"Tqp enable fail, status =%d.\n", ret);
5216 	return ret;
5217 }
5218 
5219 static int hclge_set_loopback(struct hnae3_handle *handle,
5220 			      enum hnae3_loop loop_mode, bool en)
5221 {
5222 	struct hclge_vport *vport = hclge_get_vport(handle);
5223 	struct hclge_dev *hdev = vport->back;
5224 	int i, ret;
5225 
5226 	switch (loop_mode) {
5227 	case HNAE3_LOOP_APP:
5228 		ret = hclge_set_app_loopback(hdev, en);
5229 		break;
5230 	case HNAE3_LOOP_SERIAL_SERDES:
5231 	case HNAE3_LOOP_PARALLEL_SERDES:
5232 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5233 		break;
5234 	default:
5235 		ret = -ENOTSUPP;
5236 		dev_err(&hdev->pdev->dev,
5237 			"loop_mode %d is not supported\n", loop_mode);
5238 		break;
5239 	}
5240 
5241 	for (i = 0; i < vport->alloc_tqps; i++) {
5242 		ret = hclge_tqp_enable(hdev, i, 0, en);
5243 		if (ret)
5244 			return ret;
5245 	}
5246 
5247 	return 0;
5248 }
5249 
5250 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5251 {
5252 	struct hclge_vport *vport = hclge_get_vport(handle);
5253 	struct hnae3_queue *queue;
5254 	struct hclge_tqp *tqp;
5255 	int i;
5256 
5257 	for (i = 0; i < vport->alloc_tqps; i++) {
5258 		queue = handle->kinfo.tqp[i];
5259 		tqp = container_of(queue, struct hclge_tqp, q);
5260 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5261 	}
5262 }
5263 
5264 static int hclge_ae_start(struct hnae3_handle *handle)
5265 {
5266 	struct hclge_vport *vport = hclge_get_vport(handle);
5267 	struct hclge_dev *hdev = vport->back;
5268 
5269 	/* mac enable */
5270 	hclge_cfg_mac_mode(hdev, true);
5271 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5272 	mod_timer(&hdev->service_timer, jiffies + HZ);
5273 	hdev->hw.mac.link = 0;
5274 
5275 	/* reset tqp stats */
5276 	hclge_reset_tqp_stats(handle);
5277 
5278 	hclge_mac_start_phy(hdev);
5279 
5280 	return 0;
5281 }
5282 
5283 static void hclge_ae_stop(struct hnae3_handle *handle)
5284 {
5285 	struct hclge_vport *vport = hclge_get_vport(handle);
5286 	struct hclge_dev *hdev = vport->back;
5287 
5288 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
5289 
5290 	del_timer_sync(&hdev->service_timer);
5291 	cancel_work_sync(&hdev->service_task);
5292 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5293 
5294 	/* If it is not PF reset, the firmware will disable the MAC,
5295 	 * so it only need to stop phy here.
5296 	 */
5297 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5298 	    hdev->reset_type != HNAE3_FUNC_RESET) {
5299 		hclge_mac_stop_phy(hdev);
5300 		return;
5301 	}
5302 
5303 	/* Mac disable */
5304 	hclge_cfg_mac_mode(hdev, false);
5305 
5306 	hclge_mac_stop_phy(hdev);
5307 
5308 	/* reset tqp stats */
5309 	hclge_reset_tqp_stats(handle);
5310 	del_timer_sync(&hdev->service_timer);
5311 	cancel_work_sync(&hdev->service_task);
5312 	hclge_update_link_status(hdev);
5313 }
5314 
5315 int hclge_vport_start(struct hclge_vport *vport)
5316 {
5317 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5318 	vport->last_active_jiffies = jiffies;
5319 	return 0;
5320 }
5321 
5322 void hclge_vport_stop(struct hclge_vport *vport)
5323 {
5324 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5325 }
5326 
5327 static int hclge_client_start(struct hnae3_handle *handle)
5328 {
5329 	struct hclge_vport *vport = hclge_get_vport(handle);
5330 
5331 	return hclge_vport_start(vport);
5332 }
5333 
5334 static void hclge_client_stop(struct hnae3_handle *handle)
5335 {
5336 	struct hclge_vport *vport = hclge_get_vport(handle);
5337 
5338 	hclge_vport_stop(vport);
5339 }
5340 
5341 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5342 					 u16 cmdq_resp, u8  resp_code,
5343 					 enum hclge_mac_vlan_tbl_opcode op)
5344 {
5345 	struct hclge_dev *hdev = vport->back;
5346 	int return_status = -EIO;
5347 
5348 	if (cmdq_resp) {
5349 		dev_err(&hdev->pdev->dev,
5350 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5351 			cmdq_resp);
5352 		return -EIO;
5353 	}
5354 
5355 	if (op == HCLGE_MAC_VLAN_ADD) {
5356 		if ((!resp_code) || (resp_code == 1)) {
5357 			return_status = 0;
5358 		} else if (resp_code == 2) {
5359 			return_status = -ENOSPC;
5360 			dev_err(&hdev->pdev->dev,
5361 				"add mac addr failed for uc_overflow.\n");
5362 		} else if (resp_code == 3) {
5363 			return_status = -ENOSPC;
5364 			dev_err(&hdev->pdev->dev,
5365 				"add mac addr failed for mc_overflow.\n");
5366 		} else {
5367 			dev_err(&hdev->pdev->dev,
5368 				"add mac addr failed for undefined, code=%d.\n",
5369 				resp_code);
5370 		}
5371 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
5372 		if (!resp_code) {
5373 			return_status = 0;
5374 		} else if (resp_code == 1) {
5375 			return_status = -ENOENT;
5376 			dev_dbg(&hdev->pdev->dev,
5377 				"remove mac addr failed for miss.\n");
5378 		} else {
5379 			dev_err(&hdev->pdev->dev,
5380 				"remove mac addr failed for undefined, code=%d.\n",
5381 				resp_code);
5382 		}
5383 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
5384 		if (!resp_code) {
5385 			return_status = 0;
5386 		} else if (resp_code == 1) {
5387 			return_status = -ENOENT;
5388 			dev_dbg(&hdev->pdev->dev,
5389 				"lookup mac addr failed for miss.\n");
5390 		} else {
5391 			dev_err(&hdev->pdev->dev,
5392 				"lookup mac addr failed for undefined, code=%d.\n",
5393 				resp_code);
5394 		}
5395 	} else {
5396 		return_status = -EINVAL;
5397 		dev_err(&hdev->pdev->dev,
5398 			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5399 			op);
5400 	}
5401 
5402 	return return_status;
5403 }
5404 
5405 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5406 {
5407 	int word_num;
5408 	int bit_num;
5409 
5410 	if (vfid > 255 || vfid < 0)
5411 		return -EIO;
5412 
5413 	if (vfid >= 0 && vfid <= 191) {
5414 		word_num = vfid / 32;
5415 		bit_num  = vfid % 32;
5416 		if (clr)
5417 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5418 		else
5419 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5420 	} else {
5421 		word_num = (vfid - 192) / 32;
5422 		bit_num  = vfid % 32;
5423 		if (clr)
5424 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5425 		else
5426 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5427 	}
5428 
5429 	return 0;
5430 }
5431 
5432 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5433 {
5434 #define HCLGE_DESC_NUMBER 3
5435 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5436 	int i, j;
5437 
5438 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5439 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5440 			if (desc[i].data[j])
5441 				return false;
5442 
5443 	return true;
5444 }
5445 
5446 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5447 				   const u8 *addr)
5448 {
5449 	const unsigned char *mac_addr = addr;
5450 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
5451 		       (mac_addr[0]) | (mac_addr[1] << 8);
5452 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
5453 
5454 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5455 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5456 }
5457 
5458 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5459 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
5460 {
5461 	struct hclge_dev *hdev = vport->back;
5462 	struct hclge_desc desc;
5463 	u8 resp_code;
5464 	u16 retval;
5465 	int ret;
5466 
5467 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5468 
5469 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5470 
5471 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5472 	if (ret) {
5473 		dev_err(&hdev->pdev->dev,
5474 			"del mac addr failed for cmd_send, ret =%d.\n",
5475 			ret);
5476 		return ret;
5477 	}
5478 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5479 	retval = le16_to_cpu(desc.retval);
5480 
5481 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5482 					     HCLGE_MAC_VLAN_REMOVE);
5483 }
5484 
5485 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5486 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
5487 				     struct hclge_desc *desc,
5488 				     bool is_mc)
5489 {
5490 	struct hclge_dev *hdev = vport->back;
5491 	u8 resp_code;
5492 	u16 retval;
5493 	int ret;
5494 
5495 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5496 	if (is_mc) {
5497 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5498 		memcpy(desc[0].data,
5499 		       req,
5500 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5501 		hclge_cmd_setup_basic_desc(&desc[1],
5502 					   HCLGE_OPC_MAC_VLAN_ADD,
5503 					   true);
5504 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5505 		hclge_cmd_setup_basic_desc(&desc[2],
5506 					   HCLGE_OPC_MAC_VLAN_ADD,
5507 					   true);
5508 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
5509 	} else {
5510 		memcpy(desc[0].data,
5511 		       req,
5512 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5513 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
5514 	}
5515 	if (ret) {
5516 		dev_err(&hdev->pdev->dev,
5517 			"lookup mac addr failed for cmd_send, ret =%d.\n",
5518 			ret);
5519 		return ret;
5520 	}
5521 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5522 	retval = le16_to_cpu(desc[0].retval);
5523 
5524 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5525 					     HCLGE_MAC_VLAN_LKUP);
5526 }
5527 
5528 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5529 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
5530 				  struct hclge_desc *mc_desc)
5531 {
5532 	struct hclge_dev *hdev = vport->back;
5533 	int cfg_status;
5534 	u8 resp_code;
5535 	u16 retval;
5536 	int ret;
5537 
5538 	if (!mc_desc) {
5539 		struct hclge_desc desc;
5540 
5541 		hclge_cmd_setup_basic_desc(&desc,
5542 					   HCLGE_OPC_MAC_VLAN_ADD,
5543 					   false);
5544 		memcpy(desc.data, req,
5545 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5546 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5547 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5548 		retval = le16_to_cpu(desc.retval);
5549 
5550 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5551 							   resp_code,
5552 							   HCLGE_MAC_VLAN_ADD);
5553 	} else {
5554 		hclge_cmd_reuse_desc(&mc_desc[0], false);
5555 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5556 		hclge_cmd_reuse_desc(&mc_desc[1], false);
5557 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5558 		hclge_cmd_reuse_desc(&mc_desc[2], false);
5559 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5560 		memcpy(mc_desc[0].data, req,
5561 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5562 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5563 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5564 		retval = le16_to_cpu(mc_desc[0].retval);
5565 
5566 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5567 							   resp_code,
5568 							   HCLGE_MAC_VLAN_ADD);
5569 	}
5570 
5571 	if (ret) {
5572 		dev_err(&hdev->pdev->dev,
5573 			"add mac addr failed for cmd_send, ret =%d.\n",
5574 			ret);
5575 		return ret;
5576 	}
5577 
5578 	return cfg_status;
5579 }
5580 
5581 static int hclge_init_umv_space(struct hclge_dev *hdev)
5582 {
5583 	u16 allocated_size = 0;
5584 	int ret;
5585 
5586 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5587 				  true);
5588 	if (ret)
5589 		return ret;
5590 
5591 	if (allocated_size < hdev->wanted_umv_size)
5592 		dev_warn(&hdev->pdev->dev,
5593 			 "Alloc umv space failed, want %d, get %d\n",
5594 			 hdev->wanted_umv_size, allocated_size);
5595 
5596 	mutex_init(&hdev->umv_mutex);
5597 	hdev->max_umv_size = allocated_size;
5598 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5599 	hdev->share_umv_size = hdev->priv_umv_size +
5600 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
5601 
5602 	return 0;
5603 }
5604 
5605 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5606 {
5607 	int ret;
5608 
5609 	if (hdev->max_umv_size > 0) {
5610 		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5611 					  false);
5612 		if (ret)
5613 			return ret;
5614 		hdev->max_umv_size = 0;
5615 	}
5616 	mutex_destroy(&hdev->umv_mutex);
5617 
5618 	return 0;
5619 }
5620 
5621 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5622 			       u16 *allocated_size, bool is_alloc)
5623 {
5624 	struct hclge_umv_spc_alc_cmd *req;
5625 	struct hclge_desc desc;
5626 	int ret;
5627 
5628 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5629 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5630 	hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5631 	req->space_size = cpu_to_le32(space_size);
5632 
5633 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5634 	if (ret) {
5635 		dev_err(&hdev->pdev->dev,
5636 			"%s umv space failed for cmd_send, ret =%d\n",
5637 			is_alloc ? "allocate" : "free", ret);
5638 		return ret;
5639 	}
5640 
5641 	if (is_alloc && allocated_size)
5642 		*allocated_size = le32_to_cpu(desc.data[1]);
5643 
5644 	return 0;
5645 }
5646 
5647 static void hclge_reset_umv_space(struct hclge_dev *hdev)
5648 {
5649 	struct hclge_vport *vport;
5650 	int i;
5651 
5652 	for (i = 0; i < hdev->num_alloc_vport; i++) {
5653 		vport = &hdev->vport[i];
5654 		vport->used_umv_num = 0;
5655 	}
5656 
5657 	mutex_lock(&hdev->umv_mutex);
5658 	hdev->share_umv_size = hdev->priv_umv_size +
5659 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
5660 	mutex_unlock(&hdev->umv_mutex);
5661 }
5662 
5663 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5664 {
5665 	struct hclge_dev *hdev = vport->back;
5666 	bool is_full;
5667 
5668 	mutex_lock(&hdev->umv_mutex);
5669 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5670 		   hdev->share_umv_size == 0);
5671 	mutex_unlock(&hdev->umv_mutex);
5672 
5673 	return is_full;
5674 }
5675 
5676 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5677 {
5678 	struct hclge_dev *hdev = vport->back;
5679 
5680 	mutex_lock(&hdev->umv_mutex);
5681 	if (is_free) {
5682 		if (vport->used_umv_num > hdev->priv_umv_size)
5683 			hdev->share_umv_size++;
5684 		vport->used_umv_num--;
5685 	} else {
5686 		if (vport->used_umv_num >= hdev->priv_umv_size)
5687 			hdev->share_umv_size--;
5688 		vport->used_umv_num++;
5689 	}
5690 	mutex_unlock(&hdev->umv_mutex);
5691 }
5692 
5693 static int hclge_add_uc_addr(struct hnae3_handle *handle,
5694 			     const unsigned char *addr)
5695 {
5696 	struct hclge_vport *vport = hclge_get_vport(handle);
5697 
5698 	return hclge_add_uc_addr_common(vport, addr);
5699 }
5700 
5701 int hclge_add_uc_addr_common(struct hclge_vport *vport,
5702 			     const unsigned char *addr)
5703 {
5704 	struct hclge_dev *hdev = vport->back;
5705 	struct hclge_mac_vlan_tbl_entry_cmd req;
5706 	struct hclge_desc desc;
5707 	u16 egress_port = 0;
5708 	int ret;
5709 
5710 	/* mac addr check */
5711 	if (is_zero_ether_addr(addr) ||
5712 	    is_broadcast_ether_addr(addr) ||
5713 	    is_multicast_ether_addr(addr)) {
5714 		dev_err(&hdev->pdev->dev,
5715 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5716 			 addr,
5717 			 is_zero_ether_addr(addr),
5718 			 is_broadcast_ether_addr(addr),
5719 			 is_multicast_ether_addr(addr));
5720 		return -EINVAL;
5721 	}
5722 
5723 	memset(&req, 0, sizeof(req));
5724 	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5725 
5726 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5727 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
5728 
5729 	req.egress_port = cpu_to_le16(egress_port);
5730 
5731 	hclge_prepare_mac_addr(&req, addr);
5732 
5733 	/* Lookup the mac address in the mac_vlan table, and add
5734 	 * it if the entry is inexistent. Repeated unicast entry
5735 	 * is not allowed in the mac vlan table.
5736 	 */
5737 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
5738 	if (ret == -ENOENT) {
5739 		if (!hclge_is_umv_space_full(vport)) {
5740 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5741 			if (!ret)
5742 				hclge_update_umv_space(vport, false);
5743 			return ret;
5744 		}
5745 
5746 		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5747 			hdev->priv_umv_size);
5748 
5749 		return -ENOSPC;
5750 	}
5751 
5752 	/* check if we just hit the duplicate */
5753 	if (!ret)
5754 		ret = -EINVAL;
5755 
5756 	dev_err(&hdev->pdev->dev,
5757 		"PF failed to add unicast entry(%pM) in the MAC table\n",
5758 		addr);
5759 
5760 	return ret;
5761 }
5762 
5763 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
5764 			    const unsigned char *addr)
5765 {
5766 	struct hclge_vport *vport = hclge_get_vport(handle);
5767 
5768 	return hclge_rm_uc_addr_common(vport, addr);
5769 }
5770 
5771 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
5772 			    const unsigned char *addr)
5773 {
5774 	struct hclge_dev *hdev = vport->back;
5775 	struct hclge_mac_vlan_tbl_entry_cmd req;
5776 	int ret;
5777 
5778 	/* mac addr check */
5779 	if (is_zero_ether_addr(addr) ||
5780 	    is_broadcast_ether_addr(addr) ||
5781 	    is_multicast_ether_addr(addr)) {
5782 		dev_dbg(&hdev->pdev->dev,
5783 			"Remove mac err! invalid mac:%pM.\n",
5784 			 addr);
5785 		return -EINVAL;
5786 	}
5787 
5788 	memset(&req, 0, sizeof(req));
5789 	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5790 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5791 	hclge_prepare_mac_addr(&req, addr);
5792 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
5793 	if (!ret)
5794 		hclge_update_umv_space(vport, true);
5795 
5796 	return ret;
5797 }
5798 
5799 static int hclge_add_mc_addr(struct hnae3_handle *handle,
5800 			     const unsigned char *addr)
5801 {
5802 	struct hclge_vport *vport = hclge_get_vport(handle);
5803 
5804 	return hclge_add_mc_addr_common(vport, addr);
5805 }
5806 
5807 int hclge_add_mc_addr_common(struct hclge_vport *vport,
5808 			     const unsigned char *addr)
5809 {
5810 	struct hclge_dev *hdev = vport->back;
5811 	struct hclge_mac_vlan_tbl_entry_cmd req;
5812 	struct hclge_desc desc[3];
5813 	int status;
5814 
5815 	/* mac addr check */
5816 	if (!is_multicast_ether_addr(addr)) {
5817 		dev_err(&hdev->pdev->dev,
5818 			"Add mc mac err! invalid mac:%pM.\n",
5819 			 addr);
5820 		return -EINVAL;
5821 	}
5822 	memset(&req, 0, sizeof(req));
5823 	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5824 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5825 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5826 	hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5827 	hclge_prepare_mac_addr(&req, addr);
5828 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5829 	if (!status) {
5830 		/* This mac addr exist, update VFID for it */
5831 		hclge_update_desc_vfid(desc, vport->vport_id, false);
5832 		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5833 	} else {
5834 		/* This mac addr do not exist, add new entry for it */
5835 		memset(desc[0].data, 0, sizeof(desc[0].data));
5836 		memset(desc[1].data, 0, sizeof(desc[0].data));
5837 		memset(desc[2].data, 0, sizeof(desc[0].data));
5838 		hclge_update_desc_vfid(desc, vport->vport_id, false);
5839 		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5840 	}
5841 
5842 	if (status == -ENOSPC)
5843 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
5844 
5845 	return status;
5846 }
5847 
5848 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
5849 			    const unsigned char *addr)
5850 {
5851 	struct hclge_vport *vport = hclge_get_vport(handle);
5852 
5853 	return hclge_rm_mc_addr_common(vport, addr);
5854 }
5855 
5856 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
5857 			    const unsigned char *addr)
5858 {
5859 	struct hclge_dev *hdev = vport->back;
5860 	struct hclge_mac_vlan_tbl_entry_cmd req;
5861 	enum hclge_cmd_status status;
5862 	struct hclge_desc desc[3];
5863 
5864 	/* mac addr check */
5865 	if (!is_multicast_ether_addr(addr)) {
5866 		dev_dbg(&hdev->pdev->dev,
5867 			"Remove mc mac err! invalid mac:%pM.\n",
5868 			 addr);
5869 		return -EINVAL;
5870 	}
5871 
5872 	memset(&req, 0, sizeof(req));
5873 	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5874 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5875 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5876 	hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5877 	hclge_prepare_mac_addr(&req, addr);
5878 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5879 	if (!status) {
5880 		/* This mac addr exist, remove this handle's VFID for it */
5881 		hclge_update_desc_vfid(desc, vport->vport_id, true);
5882 
5883 		if (hclge_is_all_function_id_zero(desc))
5884 			/* All the vfid is zero, so need to delete this entry */
5885 			status = hclge_remove_mac_vlan_tbl(vport, &req);
5886 		else
5887 			/* Not all the vfid is zero, update the vfid */
5888 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5889 
5890 	} else {
5891 		/* Maybe this mac address is in mta table, but it cannot be
5892 		 * deleted here because an entry of mta represents an address
5893 		 * range rather than a specific address. the delete action to
5894 		 * all entries will take effect in update_mta_status called by
5895 		 * hns3_nic_set_rx_mode.
5896 		 */
5897 		status = 0;
5898 	}
5899 
5900 	return status;
5901 }
5902 
5903 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
5904 					      u16 cmdq_resp, u8 resp_code)
5905 {
5906 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
5907 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
5908 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
5909 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
5910 
5911 	int return_status;
5912 
5913 	if (cmdq_resp) {
5914 		dev_err(&hdev->pdev->dev,
5915 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
5916 			cmdq_resp);
5917 		return -EIO;
5918 	}
5919 
5920 	switch (resp_code) {
5921 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
5922 	case HCLGE_ETHERTYPE_ALREADY_ADD:
5923 		return_status = 0;
5924 		break;
5925 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
5926 		dev_err(&hdev->pdev->dev,
5927 			"add mac ethertype failed for manager table overflow.\n");
5928 		return_status = -EIO;
5929 		break;
5930 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
5931 		dev_err(&hdev->pdev->dev,
5932 			"add mac ethertype failed for key conflict.\n");
5933 		return_status = -EIO;
5934 		break;
5935 	default:
5936 		dev_err(&hdev->pdev->dev,
5937 			"add mac ethertype failed for undefined, code=%d.\n",
5938 			resp_code);
5939 		return_status = -EIO;
5940 	}
5941 
5942 	return return_status;
5943 }
5944 
5945 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
5946 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
5947 {
5948 	struct hclge_desc desc;
5949 	u8 resp_code;
5950 	u16 retval;
5951 	int ret;
5952 
5953 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
5954 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
5955 
5956 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5957 	if (ret) {
5958 		dev_err(&hdev->pdev->dev,
5959 			"add mac ethertype failed for cmd_send, ret =%d.\n",
5960 			ret);
5961 		return ret;
5962 	}
5963 
5964 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5965 	retval = le16_to_cpu(desc.retval);
5966 
5967 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
5968 }
5969 
5970 static int init_mgr_tbl(struct hclge_dev *hdev)
5971 {
5972 	int ret;
5973 	int i;
5974 
5975 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
5976 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
5977 		if (ret) {
5978 			dev_err(&hdev->pdev->dev,
5979 				"add mac ethertype failed, ret =%d.\n",
5980 				ret);
5981 			return ret;
5982 		}
5983 	}
5984 
5985 	return 0;
5986 }
5987 
5988 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
5989 {
5990 	struct hclge_vport *vport = hclge_get_vport(handle);
5991 	struct hclge_dev *hdev = vport->back;
5992 
5993 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
5994 }
5995 
5996 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
5997 			      bool is_first)
5998 {
5999 	const unsigned char *new_addr = (const unsigned char *)p;
6000 	struct hclge_vport *vport = hclge_get_vport(handle);
6001 	struct hclge_dev *hdev = vport->back;
6002 	int ret;
6003 
6004 	/* mac addr check */
6005 	if (is_zero_ether_addr(new_addr) ||
6006 	    is_broadcast_ether_addr(new_addr) ||
6007 	    is_multicast_ether_addr(new_addr)) {
6008 		dev_err(&hdev->pdev->dev,
6009 			"Change uc mac err! invalid mac:%p.\n",
6010 			 new_addr);
6011 		return -EINVAL;
6012 	}
6013 
6014 	if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6015 		dev_warn(&hdev->pdev->dev,
6016 			 "remove old uc mac address fail.\n");
6017 
6018 	ret = hclge_add_uc_addr(handle, new_addr);
6019 	if (ret) {
6020 		dev_err(&hdev->pdev->dev,
6021 			"add uc mac address fail, ret =%d.\n",
6022 			ret);
6023 
6024 		if (!is_first &&
6025 		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6026 			dev_err(&hdev->pdev->dev,
6027 				"restore uc mac address fail.\n");
6028 
6029 		return -EIO;
6030 	}
6031 
6032 	ret = hclge_pause_addr_cfg(hdev, new_addr);
6033 	if (ret) {
6034 		dev_err(&hdev->pdev->dev,
6035 			"configure mac pause address fail, ret =%d.\n",
6036 			ret);
6037 		return -EIO;
6038 	}
6039 
6040 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6041 
6042 	return 0;
6043 }
6044 
6045 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6046 			  int cmd)
6047 {
6048 	struct hclge_vport *vport = hclge_get_vport(handle);
6049 	struct hclge_dev *hdev = vport->back;
6050 
6051 	if (!hdev->hw.mac.phydev)
6052 		return -EOPNOTSUPP;
6053 
6054 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6055 }
6056 
6057 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6058 				      u8 fe_type, bool filter_en)
6059 {
6060 	struct hclge_vlan_filter_ctrl_cmd *req;
6061 	struct hclge_desc desc;
6062 	int ret;
6063 
6064 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6065 
6066 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6067 	req->vlan_type = vlan_type;
6068 	req->vlan_fe = filter_en ? fe_type : 0;
6069 
6070 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6071 	if (ret)
6072 		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6073 			ret);
6074 
6075 	return ret;
6076 }
6077 
6078 #define HCLGE_FILTER_TYPE_VF		0
6079 #define HCLGE_FILTER_TYPE_PORT		1
6080 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
6081 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
6082 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
6083 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
6084 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
6085 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
6086 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
6087 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
6088 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
6089 
6090 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6091 {
6092 	struct hclge_vport *vport = hclge_get_vport(handle);
6093 	struct hclge_dev *hdev = vport->back;
6094 
6095 	if (hdev->pdev->revision >= 0x21) {
6096 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6097 					   HCLGE_FILTER_FE_EGRESS, enable);
6098 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6099 					   HCLGE_FILTER_FE_INGRESS, enable);
6100 	} else {
6101 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6102 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable);
6103 	}
6104 	if (enable)
6105 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
6106 	else
6107 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
6108 }
6109 
6110 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6111 				    bool is_kill, u16 vlan, u8 qos,
6112 				    __be16 proto)
6113 {
6114 #define HCLGE_MAX_VF_BYTES  16
6115 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
6116 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
6117 	struct hclge_desc desc[2];
6118 	u8 vf_byte_val;
6119 	u8 vf_byte_off;
6120 	int ret;
6121 
6122 	hclge_cmd_setup_basic_desc(&desc[0],
6123 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6124 	hclge_cmd_setup_basic_desc(&desc[1],
6125 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6126 
6127 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6128 
6129 	vf_byte_off = vfid / 8;
6130 	vf_byte_val = 1 << (vfid % 8);
6131 
6132 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6133 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6134 
6135 	req0->vlan_id  = cpu_to_le16(vlan);
6136 	req0->vlan_cfg = is_kill;
6137 
6138 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6139 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6140 	else
6141 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6142 
6143 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
6144 	if (ret) {
6145 		dev_err(&hdev->pdev->dev,
6146 			"Send vf vlan command fail, ret =%d.\n",
6147 			ret);
6148 		return ret;
6149 	}
6150 
6151 	if (!is_kill) {
6152 #define HCLGE_VF_VLAN_NO_ENTRY	2
6153 		if (!req0->resp_code || req0->resp_code == 1)
6154 			return 0;
6155 
6156 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6157 			dev_warn(&hdev->pdev->dev,
6158 				 "vf vlan table is full, vf vlan filter is disabled\n");
6159 			return 0;
6160 		}
6161 
6162 		dev_err(&hdev->pdev->dev,
6163 			"Add vf vlan filter fail, ret =%d.\n",
6164 			req0->resp_code);
6165 	} else {
6166 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
6167 		if (!req0->resp_code)
6168 			return 0;
6169 
6170 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6171 			dev_warn(&hdev->pdev->dev,
6172 				 "vlan %d filter is not in vf vlan table\n",
6173 				 vlan);
6174 			return 0;
6175 		}
6176 
6177 		dev_err(&hdev->pdev->dev,
6178 			"Kill vf vlan filter fail, ret =%d.\n",
6179 			req0->resp_code);
6180 	}
6181 
6182 	return -EIO;
6183 }
6184 
6185 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6186 				      u16 vlan_id, bool is_kill)
6187 {
6188 	struct hclge_vlan_filter_pf_cfg_cmd *req;
6189 	struct hclge_desc desc;
6190 	u8 vlan_offset_byte_val;
6191 	u8 vlan_offset_byte;
6192 	u8 vlan_offset_160;
6193 	int ret;
6194 
6195 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6196 
6197 	vlan_offset_160 = vlan_id / 160;
6198 	vlan_offset_byte = (vlan_id % 160) / 8;
6199 	vlan_offset_byte_val = 1 << (vlan_id % 8);
6200 
6201 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6202 	req->vlan_offset = vlan_offset_160;
6203 	req->vlan_cfg = is_kill;
6204 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6205 
6206 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6207 	if (ret)
6208 		dev_err(&hdev->pdev->dev,
6209 			"port vlan command, send fail, ret =%d.\n", ret);
6210 	return ret;
6211 }
6212 
6213 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6214 				    u16 vport_id, u16 vlan_id, u8 qos,
6215 				    bool is_kill)
6216 {
6217 	u16 vport_idx, vport_num = 0;
6218 	int ret;
6219 
6220 	if (is_kill && !vlan_id)
6221 		return 0;
6222 
6223 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6224 				       0, proto);
6225 	if (ret) {
6226 		dev_err(&hdev->pdev->dev,
6227 			"Set %d vport vlan filter config fail, ret =%d.\n",
6228 			vport_id, ret);
6229 		return ret;
6230 	}
6231 
6232 	/* vlan 0 may be added twice when 8021q module is enabled */
6233 	if (!is_kill && !vlan_id &&
6234 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
6235 		return 0;
6236 
6237 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6238 		dev_err(&hdev->pdev->dev,
6239 			"Add port vlan failed, vport %d is already in vlan %d\n",
6240 			vport_id, vlan_id);
6241 		return -EINVAL;
6242 	}
6243 
6244 	if (is_kill &&
6245 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6246 		dev_err(&hdev->pdev->dev,
6247 			"Delete port vlan failed, vport %d is not in vlan %d\n",
6248 			vport_id, vlan_id);
6249 		return -EINVAL;
6250 	}
6251 
6252 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6253 		vport_num++;
6254 
6255 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6256 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6257 						 is_kill);
6258 
6259 	return ret;
6260 }
6261 
6262 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
6263 			  u16 vlan_id, bool is_kill)
6264 {
6265 	struct hclge_vport *vport = hclge_get_vport(handle);
6266 	struct hclge_dev *hdev = vport->back;
6267 
6268 	return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
6269 					0, is_kill);
6270 }
6271 
6272 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
6273 				    u16 vlan, u8 qos, __be16 proto)
6274 {
6275 	struct hclge_vport *vport = hclge_get_vport(handle);
6276 	struct hclge_dev *hdev = vport->back;
6277 
6278 	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
6279 		return -EINVAL;
6280 	if (proto != htons(ETH_P_8021Q))
6281 		return -EPROTONOSUPPORT;
6282 
6283 	return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
6284 }
6285 
6286 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6287 {
6288 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6289 	struct hclge_vport_vtag_tx_cfg_cmd *req;
6290 	struct hclge_dev *hdev = vport->back;
6291 	struct hclge_desc desc;
6292 	int status;
6293 
6294 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6295 
6296 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6297 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6298 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6299 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6300 		      vcfg->accept_tag1 ? 1 : 0);
6301 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6302 		      vcfg->accept_untag1 ? 1 : 0);
6303 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6304 		      vcfg->accept_tag2 ? 1 : 0);
6305 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6306 		      vcfg->accept_untag2 ? 1 : 0);
6307 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6308 		      vcfg->insert_tag1_en ? 1 : 0);
6309 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6310 		      vcfg->insert_tag2_en ? 1 : 0);
6311 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6312 
6313 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6314 	req->vf_bitmap[req->vf_offset] =
6315 		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6316 
6317 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6318 	if (status)
6319 		dev_err(&hdev->pdev->dev,
6320 			"Send port txvlan cfg command fail, ret =%d\n",
6321 			status);
6322 
6323 	return status;
6324 }
6325 
6326 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6327 {
6328 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6329 	struct hclge_vport_vtag_rx_cfg_cmd *req;
6330 	struct hclge_dev *hdev = vport->back;
6331 	struct hclge_desc desc;
6332 	int status;
6333 
6334 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6335 
6336 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6337 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6338 		      vcfg->strip_tag1_en ? 1 : 0);
6339 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6340 		      vcfg->strip_tag2_en ? 1 : 0);
6341 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6342 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
6343 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6344 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
6345 
6346 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6347 	req->vf_bitmap[req->vf_offset] =
6348 		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6349 
6350 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6351 	if (status)
6352 		dev_err(&hdev->pdev->dev,
6353 			"Send port rxvlan cfg command fail, ret =%d\n",
6354 			status);
6355 
6356 	return status;
6357 }
6358 
6359 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6360 {
6361 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6362 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6363 	struct hclge_desc desc;
6364 	int status;
6365 
6366 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6367 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6368 	rx_req->ot_fst_vlan_type =
6369 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6370 	rx_req->ot_sec_vlan_type =
6371 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6372 	rx_req->in_fst_vlan_type =
6373 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6374 	rx_req->in_sec_vlan_type =
6375 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6376 
6377 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6378 	if (status) {
6379 		dev_err(&hdev->pdev->dev,
6380 			"Send rxvlan protocol type command fail, ret =%d\n",
6381 			status);
6382 		return status;
6383 	}
6384 
6385 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6386 
6387 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
6388 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6389 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6390 
6391 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6392 	if (status)
6393 		dev_err(&hdev->pdev->dev,
6394 			"Send txvlan protocol type command fail, ret =%d\n",
6395 			status);
6396 
6397 	return status;
6398 }
6399 
6400 static int hclge_init_vlan_config(struct hclge_dev *hdev)
6401 {
6402 #define HCLGE_DEF_VLAN_TYPE		0x8100
6403 
6404 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6405 	struct hclge_vport *vport;
6406 	int ret;
6407 	int i;
6408 
6409 	if (hdev->pdev->revision >= 0x21) {
6410 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6411 						 HCLGE_FILTER_FE_EGRESS, true);
6412 		if (ret)
6413 			return ret;
6414 
6415 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6416 						 HCLGE_FILTER_FE_INGRESS, true);
6417 		if (ret)
6418 			return ret;
6419 	} else {
6420 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6421 						 HCLGE_FILTER_FE_EGRESS_V1_B,
6422 						 true);
6423 		if (ret)
6424 			return ret;
6425 	}
6426 
6427 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
6428 
6429 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6430 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6431 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6432 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6433 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
6434 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
6435 
6436 	ret = hclge_set_vlan_protocol_type(hdev);
6437 	if (ret)
6438 		return ret;
6439 
6440 	for (i = 0; i < hdev->num_alloc_vport; i++) {
6441 		vport = &hdev->vport[i];
6442 		vport->txvlan_cfg.accept_tag1 = true;
6443 		vport->txvlan_cfg.accept_untag1 = true;
6444 
6445 		/* accept_tag2 and accept_untag2 are not supported on
6446 		 * pdev revision(0x20), new revision support them. The
6447 		 * value of this two fields will not return error when driver
6448 		 * send command to fireware in revision(0x20).
6449 		 * This two fields can not configured by user.
6450 		 */
6451 		vport->txvlan_cfg.accept_tag2 = true;
6452 		vport->txvlan_cfg.accept_untag2 = true;
6453 
6454 		vport->txvlan_cfg.insert_tag1_en = false;
6455 		vport->txvlan_cfg.insert_tag2_en = false;
6456 		vport->txvlan_cfg.default_tag1 = 0;
6457 		vport->txvlan_cfg.default_tag2 = 0;
6458 
6459 		ret = hclge_set_vlan_tx_offload_cfg(vport);
6460 		if (ret)
6461 			return ret;
6462 
6463 		vport->rxvlan_cfg.strip_tag1_en = false;
6464 		vport->rxvlan_cfg.strip_tag2_en = true;
6465 		vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6466 		vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6467 
6468 		ret = hclge_set_vlan_rx_offload_cfg(vport);
6469 		if (ret)
6470 			return ret;
6471 	}
6472 
6473 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
6474 }
6475 
6476 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
6477 {
6478 	struct hclge_vport *vport = hclge_get_vport(handle);
6479 
6480 	vport->rxvlan_cfg.strip_tag1_en = false;
6481 	vport->rxvlan_cfg.strip_tag2_en = enable;
6482 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6483 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6484 
6485 	return hclge_set_vlan_rx_offload_cfg(vport);
6486 }
6487 
6488 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
6489 {
6490 	struct hclge_config_max_frm_size_cmd *req;
6491 	struct hclge_desc desc;
6492 
6493 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
6494 
6495 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
6496 	req->max_frm_size = cpu_to_le16(new_mps);
6497 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
6498 
6499 	return hclge_cmd_send(&hdev->hw, &desc, 1);
6500 }
6501 
6502 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
6503 {
6504 	struct hclge_vport *vport = hclge_get_vport(handle);
6505 
6506 	return hclge_set_vport_mtu(vport, new_mtu);
6507 }
6508 
6509 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
6510 {
6511 	struct hclge_dev *hdev = vport->back;
6512 	int i, max_frm_size, ret = 0;
6513 
6514 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
6515 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
6516 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
6517 		return -EINVAL;
6518 
6519 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
6520 	mutex_lock(&hdev->vport_lock);
6521 	/* VF's mps must fit within hdev->mps */
6522 	if (vport->vport_id && max_frm_size > hdev->mps) {
6523 		mutex_unlock(&hdev->vport_lock);
6524 		return -EINVAL;
6525 	} else if (vport->vport_id) {
6526 		vport->mps = max_frm_size;
6527 		mutex_unlock(&hdev->vport_lock);
6528 		return 0;
6529 	}
6530 
6531 	/* PF's mps must be greater then VF's mps */
6532 	for (i = 1; i < hdev->num_alloc_vport; i++)
6533 		if (max_frm_size < hdev->vport[i].mps) {
6534 			mutex_unlock(&hdev->vport_lock);
6535 			return -EINVAL;
6536 		}
6537 
6538 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
6539 
6540 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
6541 	if (ret) {
6542 		dev_err(&hdev->pdev->dev,
6543 			"Change mtu fail, ret =%d\n", ret);
6544 		goto out;
6545 	}
6546 
6547 	hdev->mps = max_frm_size;
6548 	vport->mps = max_frm_size;
6549 
6550 	ret = hclge_buffer_alloc(hdev);
6551 	if (ret)
6552 		dev_err(&hdev->pdev->dev,
6553 			"Allocate buffer fail, ret =%d\n", ret);
6554 
6555 out:
6556 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
6557 	mutex_unlock(&hdev->vport_lock);
6558 	return ret;
6559 }
6560 
6561 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
6562 				    bool enable)
6563 {
6564 	struct hclge_reset_tqp_queue_cmd *req;
6565 	struct hclge_desc desc;
6566 	int ret;
6567 
6568 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
6569 
6570 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6571 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6572 	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
6573 
6574 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6575 	if (ret) {
6576 		dev_err(&hdev->pdev->dev,
6577 			"Send tqp reset cmd error, status =%d\n", ret);
6578 		return ret;
6579 	}
6580 
6581 	return 0;
6582 }
6583 
6584 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
6585 {
6586 	struct hclge_reset_tqp_queue_cmd *req;
6587 	struct hclge_desc desc;
6588 	int ret;
6589 
6590 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
6591 
6592 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6593 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6594 
6595 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6596 	if (ret) {
6597 		dev_err(&hdev->pdev->dev,
6598 			"Get reset status error, status =%d\n", ret);
6599 		return ret;
6600 	}
6601 
6602 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
6603 }
6604 
6605 static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
6606 					  u16 queue_id)
6607 {
6608 	struct hnae3_queue *queue;
6609 	struct hclge_tqp *tqp;
6610 
6611 	queue = handle->kinfo.tqp[queue_id];
6612 	tqp = container_of(queue, struct hclge_tqp, q);
6613 
6614 	return tqp->index;
6615 }
6616 
6617 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
6618 {
6619 	struct hclge_vport *vport = hclge_get_vport(handle);
6620 	struct hclge_dev *hdev = vport->back;
6621 	int reset_try_times = 0;
6622 	int reset_status;
6623 	u16 queue_gid;
6624 	int ret = 0;
6625 
6626 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
6627 
6628 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
6629 	if (ret) {
6630 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
6631 		return ret;
6632 	}
6633 
6634 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
6635 	if (ret) {
6636 		dev_err(&hdev->pdev->dev,
6637 			"Send reset tqp cmd fail, ret = %d\n", ret);
6638 		return ret;
6639 	}
6640 
6641 	reset_try_times = 0;
6642 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
6643 		/* Wait for tqp hw reset */
6644 		msleep(20);
6645 		reset_status = hclge_get_reset_status(hdev, queue_gid);
6646 		if (reset_status)
6647 			break;
6648 	}
6649 
6650 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
6651 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
6652 		return ret;
6653 	}
6654 
6655 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
6656 	if (ret)
6657 		dev_err(&hdev->pdev->dev,
6658 			"Deassert the soft reset fail, ret = %d\n", ret);
6659 
6660 	return ret;
6661 }
6662 
6663 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
6664 {
6665 	struct hclge_dev *hdev = vport->back;
6666 	int reset_try_times = 0;
6667 	int reset_status;
6668 	u16 queue_gid;
6669 	int ret;
6670 
6671 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
6672 
6673 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
6674 	if (ret) {
6675 		dev_warn(&hdev->pdev->dev,
6676 			 "Send reset tqp cmd fail, ret = %d\n", ret);
6677 		return;
6678 	}
6679 
6680 	reset_try_times = 0;
6681 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
6682 		/* Wait for tqp hw reset */
6683 		msleep(20);
6684 		reset_status = hclge_get_reset_status(hdev, queue_gid);
6685 		if (reset_status)
6686 			break;
6687 	}
6688 
6689 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
6690 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
6691 		return;
6692 	}
6693 
6694 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
6695 	if (ret)
6696 		dev_warn(&hdev->pdev->dev,
6697 			 "Deassert the soft reset fail, ret = %d\n", ret);
6698 }
6699 
6700 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
6701 {
6702 	struct hclge_vport *vport = hclge_get_vport(handle);
6703 	struct hclge_dev *hdev = vport->back;
6704 
6705 	return hdev->fw_version;
6706 }
6707 
6708 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
6709 {
6710 	struct phy_device *phydev = hdev->hw.mac.phydev;
6711 
6712 	if (!phydev)
6713 		return;
6714 
6715 	phy_set_asym_pause(phydev, rx_en, tx_en);
6716 }
6717 
6718 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
6719 {
6720 	int ret;
6721 
6722 	if (rx_en && tx_en)
6723 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
6724 	else if (rx_en && !tx_en)
6725 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
6726 	else if (!rx_en && tx_en)
6727 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
6728 	else
6729 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
6730 
6731 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
6732 		return 0;
6733 
6734 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
6735 	if (ret) {
6736 		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
6737 			ret);
6738 		return ret;
6739 	}
6740 
6741 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
6742 
6743 	return 0;
6744 }
6745 
6746 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
6747 {
6748 	struct phy_device *phydev = hdev->hw.mac.phydev;
6749 	u16 remote_advertising = 0;
6750 	u16 local_advertising = 0;
6751 	u32 rx_pause, tx_pause;
6752 	u8 flowctl;
6753 
6754 	if (!phydev->link || !phydev->autoneg)
6755 		return 0;
6756 
6757 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
6758 
6759 	if (phydev->pause)
6760 		remote_advertising = LPA_PAUSE_CAP;
6761 
6762 	if (phydev->asym_pause)
6763 		remote_advertising |= LPA_PAUSE_ASYM;
6764 
6765 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
6766 					   remote_advertising);
6767 	tx_pause = flowctl & FLOW_CTRL_TX;
6768 	rx_pause = flowctl & FLOW_CTRL_RX;
6769 
6770 	if (phydev->duplex == HCLGE_MAC_HALF) {
6771 		tx_pause = 0;
6772 		rx_pause = 0;
6773 	}
6774 
6775 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
6776 }
6777 
6778 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
6779 				 u32 *rx_en, u32 *tx_en)
6780 {
6781 	struct hclge_vport *vport = hclge_get_vport(handle);
6782 	struct hclge_dev *hdev = vport->back;
6783 
6784 	*auto_neg = hclge_get_autoneg(handle);
6785 
6786 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
6787 		*rx_en = 0;
6788 		*tx_en = 0;
6789 		return;
6790 	}
6791 
6792 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
6793 		*rx_en = 1;
6794 		*tx_en = 0;
6795 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
6796 		*tx_en = 1;
6797 		*rx_en = 0;
6798 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
6799 		*rx_en = 1;
6800 		*tx_en = 1;
6801 	} else {
6802 		*rx_en = 0;
6803 		*tx_en = 0;
6804 	}
6805 }
6806 
6807 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
6808 				u32 rx_en, u32 tx_en)
6809 {
6810 	struct hclge_vport *vport = hclge_get_vport(handle);
6811 	struct hclge_dev *hdev = vport->back;
6812 	struct phy_device *phydev = hdev->hw.mac.phydev;
6813 	u32 fc_autoneg;
6814 
6815 	fc_autoneg = hclge_get_autoneg(handle);
6816 	if (auto_neg != fc_autoneg) {
6817 		dev_info(&hdev->pdev->dev,
6818 			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
6819 		return -EOPNOTSUPP;
6820 	}
6821 
6822 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
6823 		dev_info(&hdev->pdev->dev,
6824 			 "Priority flow control enabled. Cannot set link flow control.\n");
6825 		return -EOPNOTSUPP;
6826 	}
6827 
6828 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
6829 
6830 	if (!fc_autoneg)
6831 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
6832 
6833 	/* Only support flow control negotiation for netdev with
6834 	 * phy attached for now.
6835 	 */
6836 	if (!phydev)
6837 		return -EOPNOTSUPP;
6838 
6839 	return phy_start_aneg(phydev);
6840 }
6841 
6842 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
6843 					  u8 *auto_neg, u32 *speed, u8 *duplex)
6844 {
6845 	struct hclge_vport *vport = hclge_get_vport(handle);
6846 	struct hclge_dev *hdev = vport->back;
6847 
6848 	if (speed)
6849 		*speed = hdev->hw.mac.speed;
6850 	if (duplex)
6851 		*duplex = hdev->hw.mac.duplex;
6852 	if (auto_neg)
6853 		*auto_neg = hdev->hw.mac.autoneg;
6854 }
6855 
6856 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
6857 {
6858 	struct hclge_vport *vport = hclge_get_vport(handle);
6859 	struct hclge_dev *hdev = vport->back;
6860 
6861 	if (media_type)
6862 		*media_type = hdev->hw.mac.media_type;
6863 }
6864 
6865 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
6866 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
6867 {
6868 	struct hclge_vport *vport = hclge_get_vport(handle);
6869 	struct hclge_dev *hdev = vport->back;
6870 	struct phy_device *phydev = hdev->hw.mac.phydev;
6871 	int mdix_ctrl, mdix, retval, is_resolved;
6872 
6873 	if (!phydev) {
6874 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
6875 		*tp_mdix = ETH_TP_MDI_INVALID;
6876 		return;
6877 	}
6878 
6879 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
6880 
6881 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
6882 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
6883 				    HCLGE_PHY_MDIX_CTRL_S);
6884 
6885 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
6886 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
6887 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
6888 
6889 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
6890 
6891 	switch (mdix_ctrl) {
6892 	case 0x0:
6893 		*tp_mdix_ctrl = ETH_TP_MDI;
6894 		break;
6895 	case 0x1:
6896 		*tp_mdix_ctrl = ETH_TP_MDI_X;
6897 		break;
6898 	case 0x3:
6899 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
6900 		break;
6901 	default:
6902 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
6903 		break;
6904 	}
6905 
6906 	if (!is_resolved)
6907 		*tp_mdix = ETH_TP_MDI_INVALID;
6908 	else if (mdix)
6909 		*tp_mdix = ETH_TP_MDI_X;
6910 	else
6911 		*tp_mdix = ETH_TP_MDI;
6912 }
6913 
6914 static int hclge_init_instance_hw(struct hclge_dev *hdev)
6915 {
6916 	return hclge_mac_connect_phy(hdev);
6917 }
6918 
6919 static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
6920 {
6921 	hclge_mac_disconnect_phy(hdev);
6922 }
6923 
6924 static int hclge_init_client_instance(struct hnae3_client *client,
6925 				      struct hnae3_ae_dev *ae_dev)
6926 {
6927 	struct hclge_dev *hdev = ae_dev->priv;
6928 	struct hclge_vport *vport;
6929 	int i, ret;
6930 
6931 	for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
6932 		vport = &hdev->vport[i];
6933 
6934 		switch (client->type) {
6935 		case HNAE3_CLIENT_KNIC:
6936 
6937 			hdev->nic_client = client;
6938 			vport->nic.client = client;
6939 			ret = client->ops->init_instance(&vport->nic);
6940 			if (ret)
6941 				goto clear_nic;
6942 
6943 			ret = hclge_init_instance_hw(hdev);
6944 			if (ret) {
6945 			        client->ops->uninit_instance(&vport->nic,
6946 			                                     0);
6947 				goto clear_nic;
6948 			}
6949 
6950 			hnae3_set_client_init_flag(client, ae_dev, 1);
6951 
6952 			if (hdev->roce_client &&
6953 			    hnae3_dev_roce_supported(hdev)) {
6954 				struct hnae3_client *rc = hdev->roce_client;
6955 
6956 				ret = hclge_init_roce_base_info(vport);
6957 				if (ret)
6958 					goto clear_roce;
6959 
6960 				ret = rc->ops->init_instance(&vport->roce);
6961 				if (ret)
6962 					goto clear_roce;
6963 
6964 				hnae3_set_client_init_flag(hdev->roce_client,
6965 							   ae_dev, 1);
6966 			}
6967 
6968 			break;
6969 		case HNAE3_CLIENT_UNIC:
6970 			hdev->nic_client = client;
6971 			vport->nic.client = client;
6972 
6973 			ret = client->ops->init_instance(&vport->nic);
6974 			if (ret)
6975 				goto clear_nic;
6976 
6977 			hnae3_set_client_init_flag(client, ae_dev, 1);
6978 
6979 			break;
6980 		case HNAE3_CLIENT_ROCE:
6981 			if (hnae3_dev_roce_supported(hdev)) {
6982 				hdev->roce_client = client;
6983 				vport->roce.client = client;
6984 			}
6985 
6986 			if (hdev->roce_client && hdev->nic_client) {
6987 				ret = hclge_init_roce_base_info(vport);
6988 				if (ret)
6989 					goto clear_roce;
6990 
6991 				ret = client->ops->init_instance(&vport->roce);
6992 				if (ret)
6993 					goto clear_roce;
6994 
6995 				hnae3_set_client_init_flag(client, ae_dev, 1);
6996 			}
6997 
6998 			break;
6999 		default:
7000 			return -EINVAL;
7001 		}
7002 	}
7003 
7004 	return 0;
7005 
7006 clear_nic:
7007 	hdev->nic_client = NULL;
7008 	vport->nic.client = NULL;
7009 	return ret;
7010 clear_roce:
7011 	hdev->roce_client = NULL;
7012 	vport->roce.client = NULL;
7013 	return ret;
7014 }
7015 
7016 static void hclge_uninit_client_instance(struct hnae3_client *client,
7017 					 struct hnae3_ae_dev *ae_dev)
7018 {
7019 	struct hclge_dev *hdev = ae_dev->priv;
7020 	struct hclge_vport *vport;
7021 	int i;
7022 
7023 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7024 		vport = &hdev->vport[i];
7025 		if (hdev->roce_client) {
7026 			hdev->roce_client->ops->uninit_instance(&vport->roce,
7027 								0);
7028 			hdev->roce_client = NULL;
7029 			vport->roce.client = NULL;
7030 		}
7031 		if (client->type == HNAE3_CLIENT_ROCE)
7032 			return;
7033 		if (hdev->nic_client && client->ops->uninit_instance) {
7034 			hclge_uninit_instance_hw(hdev);
7035 			client->ops->uninit_instance(&vport->nic, 0);
7036 			hdev->nic_client = NULL;
7037 			vport->nic.client = NULL;
7038 		}
7039 	}
7040 }
7041 
7042 static int hclge_pci_init(struct hclge_dev *hdev)
7043 {
7044 	struct pci_dev *pdev = hdev->pdev;
7045 	struct hclge_hw *hw;
7046 	int ret;
7047 
7048 	ret = pci_enable_device(pdev);
7049 	if (ret) {
7050 		dev_err(&pdev->dev, "failed to enable PCI device\n");
7051 		return ret;
7052 	}
7053 
7054 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7055 	if (ret) {
7056 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7057 		if (ret) {
7058 			dev_err(&pdev->dev,
7059 				"can't set consistent PCI DMA");
7060 			goto err_disable_device;
7061 		}
7062 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
7063 	}
7064 
7065 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
7066 	if (ret) {
7067 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
7068 		goto err_disable_device;
7069 	}
7070 
7071 	pci_set_master(pdev);
7072 	hw = &hdev->hw;
7073 	hw->io_base = pcim_iomap(pdev, 2, 0);
7074 	if (!hw->io_base) {
7075 		dev_err(&pdev->dev, "Can't map configuration register space\n");
7076 		ret = -ENOMEM;
7077 		goto err_clr_master;
7078 	}
7079 
7080 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
7081 
7082 	return 0;
7083 err_clr_master:
7084 	pci_clear_master(pdev);
7085 	pci_release_regions(pdev);
7086 err_disable_device:
7087 	pci_disable_device(pdev);
7088 
7089 	return ret;
7090 }
7091 
7092 static void hclge_pci_uninit(struct hclge_dev *hdev)
7093 {
7094 	struct pci_dev *pdev = hdev->pdev;
7095 
7096 	pcim_iounmap(pdev, hdev->hw.io_base);
7097 	pci_free_irq_vectors(pdev);
7098 	pci_clear_master(pdev);
7099 	pci_release_mem_regions(pdev);
7100 	pci_disable_device(pdev);
7101 }
7102 
7103 static void hclge_state_init(struct hclge_dev *hdev)
7104 {
7105 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
7106 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7107 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
7108 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7109 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
7110 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
7111 }
7112 
7113 static void hclge_state_uninit(struct hclge_dev *hdev)
7114 {
7115 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7116 
7117 	if (hdev->service_timer.function)
7118 		del_timer_sync(&hdev->service_timer);
7119 	if (hdev->reset_timer.function)
7120 		del_timer_sync(&hdev->reset_timer);
7121 	if (hdev->service_task.func)
7122 		cancel_work_sync(&hdev->service_task);
7123 	if (hdev->rst_service_task.func)
7124 		cancel_work_sync(&hdev->rst_service_task);
7125 	if (hdev->mbx_service_task.func)
7126 		cancel_work_sync(&hdev->mbx_service_task);
7127 }
7128 
7129 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
7130 {
7131 #define HCLGE_FLR_WAIT_MS	100
7132 #define HCLGE_FLR_WAIT_CNT	50
7133 	struct hclge_dev *hdev = ae_dev->priv;
7134 	int cnt = 0;
7135 
7136 	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
7137 	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7138 	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
7139 	hclge_reset_event(hdev->pdev, NULL);
7140 
7141 	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
7142 	       cnt++ < HCLGE_FLR_WAIT_CNT)
7143 		msleep(HCLGE_FLR_WAIT_MS);
7144 
7145 	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
7146 		dev_err(&hdev->pdev->dev,
7147 			"flr wait down timeout: %d\n", cnt);
7148 }
7149 
7150 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
7151 {
7152 	struct hclge_dev *hdev = ae_dev->priv;
7153 
7154 	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7155 }
7156 
7157 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
7158 {
7159 	struct pci_dev *pdev = ae_dev->pdev;
7160 	struct hclge_dev *hdev;
7161 	int ret;
7162 
7163 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
7164 	if (!hdev) {
7165 		ret = -ENOMEM;
7166 		goto out;
7167 	}
7168 
7169 	hdev->pdev = pdev;
7170 	hdev->ae_dev = ae_dev;
7171 	hdev->reset_type = HNAE3_NONE_RESET;
7172 	hdev->reset_level = HNAE3_FUNC_RESET;
7173 	ae_dev->priv = hdev;
7174 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7175 
7176 	mutex_init(&hdev->vport_lock);
7177 
7178 	ret = hclge_pci_init(hdev);
7179 	if (ret) {
7180 		dev_err(&pdev->dev, "PCI init failed\n");
7181 		goto out;
7182 	}
7183 
7184 	/* Firmware command queue initialize */
7185 	ret = hclge_cmd_queue_init(hdev);
7186 	if (ret) {
7187 		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
7188 		goto err_pci_uninit;
7189 	}
7190 
7191 	/* Firmware command initialize */
7192 	ret = hclge_cmd_init(hdev);
7193 	if (ret)
7194 		goto err_cmd_uninit;
7195 
7196 	ret = hclge_get_cap(hdev);
7197 	if (ret) {
7198 		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
7199 			ret);
7200 		goto err_cmd_uninit;
7201 	}
7202 
7203 	ret = hclge_configure(hdev);
7204 	if (ret) {
7205 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
7206 		goto err_cmd_uninit;
7207 	}
7208 
7209 	ret = hclge_init_msi(hdev);
7210 	if (ret) {
7211 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
7212 		goto err_cmd_uninit;
7213 	}
7214 
7215 	ret = hclge_misc_irq_init(hdev);
7216 	if (ret) {
7217 		dev_err(&pdev->dev,
7218 			"Misc IRQ(vector0) init error, ret = %d.\n",
7219 			ret);
7220 		goto err_msi_uninit;
7221 	}
7222 
7223 	ret = hclge_alloc_tqps(hdev);
7224 	if (ret) {
7225 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
7226 		goto err_msi_irq_uninit;
7227 	}
7228 
7229 	ret = hclge_alloc_vport(hdev);
7230 	if (ret) {
7231 		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
7232 		goto err_msi_irq_uninit;
7233 	}
7234 
7235 	ret = hclge_map_tqp(hdev);
7236 	if (ret) {
7237 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7238 		goto err_msi_irq_uninit;
7239 	}
7240 
7241 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
7242 		ret = hclge_mac_mdio_config(hdev);
7243 		if (ret) {
7244 			dev_err(&hdev->pdev->dev,
7245 				"mdio config fail ret=%d\n", ret);
7246 			goto err_msi_irq_uninit;
7247 		}
7248 	}
7249 
7250 	ret = hclge_init_umv_space(hdev);
7251 	if (ret) {
7252 		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
7253 		goto err_msi_irq_uninit;
7254 	}
7255 
7256 	ret = hclge_mac_init(hdev);
7257 	if (ret) {
7258 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7259 		goto err_mdiobus_unreg;
7260 	}
7261 
7262 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7263 	if (ret) {
7264 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7265 		goto err_mdiobus_unreg;
7266 	}
7267 
7268 	ret = hclge_config_gro(hdev, true);
7269 	if (ret)
7270 		goto err_mdiobus_unreg;
7271 
7272 	ret = hclge_init_vlan_config(hdev);
7273 	if (ret) {
7274 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7275 		goto err_mdiobus_unreg;
7276 	}
7277 
7278 	ret = hclge_tm_schd_init(hdev);
7279 	if (ret) {
7280 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
7281 		goto err_mdiobus_unreg;
7282 	}
7283 
7284 	hclge_rss_init_cfg(hdev);
7285 	ret = hclge_rss_init_hw(hdev);
7286 	if (ret) {
7287 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7288 		goto err_mdiobus_unreg;
7289 	}
7290 
7291 	ret = init_mgr_tbl(hdev);
7292 	if (ret) {
7293 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
7294 		goto err_mdiobus_unreg;
7295 	}
7296 
7297 	ret = hclge_init_fd_config(hdev);
7298 	if (ret) {
7299 		dev_err(&pdev->dev,
7300 			"fd table init fail, ret=%d\n", ret);
7301 		goto err_mdiobus_unreg;
7302 	}
7303 
7304 	ret = hclge_hw_error_set_state(hdev, true);
7305 	if (ret) {
7306 		dev_err(&pdev->dev,
7307 			"fail(%d) to enable hw error interrupts\n", ret);
7308 		goto err_mdiobus_unreg;
7309 	}
7310 
7311 	hclge_dcb_ops_set(hdev);
7312 
7313 	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
7314 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
7315 	INIT_WORK(&hdev->service_task, hclge_service_task);
7316 	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
7317 	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
7318 
7319 	hclge_clear_all_event_cause(hdev);
7320 
7321 	/* Enable MISC vector(vector0) */
7322 	hclge_enable_vector(&hdev->misc_vector, true);
7323 
7324 	hclge_state_init(hdev);
7325 	hdev->last_reset_time = jiffies;
7326 
7327 	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
7328 	return 0;
7329 
7330 err_mdiobus_unreg:
7331 	if (hdev->hw.mac.phydev)
7332 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
7333 err_msi_irq_uninit:
7334 	hclge_misc_irq_uninit(hdev);
7335 err_msi_uninit:
7336 	pci_free_irq_vectors(pdev);
7337 err_cmd_uninit:
7338 	hclge_destroy_cmd_queue(&hdev->hw);
7339 err_pci_uninit:
7340 	pcim_iounmap(pdev, hdev->hw.io_base);
7341 	pci_clear_master(pdev);
7342 	pci_release_regions(pdev);
7343 	pci_disable_device(pdev);
7344 out:
7345 	return ret;
7346 }
7347 
7348 static void hclge_stats_clear(struct hclge_dev *hdev)
7349 {
7350 	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
7351 }
7352 
7353 static void hclge_reset_vport_state(struct hclge_dev *hdev)
7354 {
7355 	struct hclge_vport *vport = hdev->vport;
7356 	int i;
7357 
7358 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7359 		hclge_vport_start(vport);
7360 		vport++;
7361 	}
7362 }
7363 
7364 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
7365 {
7366 	struct hclge_dev *hdev = ae_dev->priv;
7367 	struct pci_dev *pdev = ae_dev->pdev;
7368 	int ret;
7369 
7370 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7371 
7372 	hclge_stats_clear(hdev);
7373 	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
7374 
7375 	ret = hclge_cmd_init(hdev);
7376 	if (ret) {
7377 		dev_err(&pdev->dev, "Cmd queue init failed\n");
7378 		return ret;
7379 	}
7380 
7381 	ret = hclge_get_cap(hdev);
7382 	if (ret) {
7383 		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
7384 			ret);
7385 		return ret;
7386 	}
7387 
7388 	ret = hclge_configure(hdev);
7389 	if (ret) {
7390 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
7391 		return ret;
7392 	}
7393 
7394 	ret = hclge_map_tqp(hdev);
7395 	if (ret) {
7396 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7397 		return ret;
7398 	}
7399 
7400 	hclge_reset_umv_space(hdev);
7401 
7402 	ret = hclge_mac_init(hdev);
7403 	if (ret) {
7404 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7405 		return ret;
7406 	}
7407 
7408 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7409 	if (ret) {
7410 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7411 		return ret;
7412 	}
7413 
7414 	ret = hclge_config_gro(hdev, true);
7415 	if (ret)
7416 		return ret;
7417 
7418 	ret = hclge_init_vlan_config(hdev);
7419 	if (ret) {
7420 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7421 		return ret;
7422 	}
7423 
7424 	ret = hclge_tm_init_hw(hdev);
7425 	if (ret) {
7426 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
7427 		return ret;
7428 	}
7429 
7430 	ret = hclge_rss_init_hw(hdev);
7431 	if (ret) {
7432 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7433 		return ret;
7434 	}
7435 
7436 	ret = hclge_init_fd_config(hdev);
7437 	if (ret) {
7438 		dev_err(&pdev->dev,
7439 			"fd table init fail, ret=%d\n", ret);
7440 		return ret;
7441 	}
7442 
7443 	/* Re-enable the hw error interrupts because
7444 	 * the interrupts get disabled on core/global reset.
7445 	 */
7446 	ret = hclge_hw_error_set_state(hdev, true);
7447 	if (ret) {
7448 		dev_err(&pdev->dev,
7449 			"fail(%d) to re-enable HNS hw error interrupts\n", ret);
7450 		return ret;
7451 	}
7452 
7453 	hclge_reset_vport_state(hdev);
7454 
7455 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
7456 		 HCLGE_DRIVER_NAME);
7457 
7458 	return 0;
7459 }
7460 
7461 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
7462 {
7463 	struct hclge_dev *hdev = ae_dev->priv;
7464 	struct hclge_mac *mac = &hdev->hw.mac;
7465 
7466 	hclge_state_uninit(hdev);
7467 
7468 	if (mac->phydev)
7469 		mdiobus_unregister(mac->mdio_bus);
7470 
7471 	hclge_uninit_umv_space(hdev);
7472 
7473 	/* Disable MISC vector(vector0) */
7474 	hclge_enable_vector(&hdev->misc_vector, false);
7475 	synchronize_irq(hdev->misc_vector.vector_irq);
7476 
7477 	hclge_hw_error_set_state(hdev, false);
7478 	hclge_destroy_cmd_queue(&hdev->hw);
7479 	hclge_misc_irq_uninit(hdev);
7480 	hclge_pci_uninit(hdev);
7481 	mutex_destroy(&hdev->vport_lock);
7482 	ae_dev->priv = NULL;
7483 }
7484 
7485 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
7486 {
7487 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
7488 	struct hclge_vport *vport = hclge_get_vport(handle);
7489 	struct hclge_dev *hdev = vport->back;
7490 
7491 	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
7492 }
7493 
7494 static void hclge_get_channels(struct hnae3_handle *handle,
7495 			       struct ethtool_channels *ch)
7496 {
7497 	struct hclge_vport *vport = hclge_get_vport(handle);
7498 
7499 	ch->max_combined = hclge_get_max_channels(handle);
7500 	ch->other_count = 1;
7501 	ch->max_other = 1;
7502 	ch->combined_count = vport->alloc_tqps;
7503 }
7504 
7505 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
7506 					u16 *alloc_tqps, u16 *max_rss_size)
7507 {
7508 	struct hclge_vport *vport = hclge_get_vport(handle);
7509 	struct hclge_dev *hdev = vport->back;
7510 
7511 	*alloc_tqps = vport->alloc_tqps;
7512 	*max_rss_size = hdev->rss_size_max;
7513 }
7514 
7515 static void hclge_release_tqp(struct hclge_vport *vport)
7516 {
7517 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
7518 	struct hclge_dev *hdev = vport->back;
7519 	int i;
7520 
7521 	for (i = 0; i < kinfo->num_tqps; i++) {
7522 		struct hclge_tqp *tqp =
7523 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
7524 
7525 		tqp->q.handle = NULL;
7526 		tqp->q.tqp_index = 0;
7527 		tqp->alloced = false;
7528 	}
7529 
7530 	devm_kfree(&hdev->pdev->dev, kinfo->tqp);
7531 	kinfo->tqp = NULL;
7532 }
7533 
7534 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
7535 {
7536 	struct hclge_vport *vport = hclge_get_vport(handle);
7537 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
7538 	struct hclge_dev *hdev = vport->back;
7539 	int cur_rss_size = kinfo->rss_size;
7540 	int cur_tqps = kinfo->num_tqps;
7541 	u16 tc_offset[HCLGE_MAX_TC_NUM];
7542 	u16 tc_valid[HCLGE_MAX_TC_NUM];
7543 	u16 tc_size[HCLGE_MAX_TC_NUM];
7544 	u16 roundup_size;
7545 	u32 *rss_indir;
7546 	int ret, i;
7547 
7548 	/* Free old tqps, and reallocate with new tqp number when nic setup */
7549 	hclge_release_tqp(vport);
7550 
7551 	ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc);
7552 	if (ret) {
7553 		dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
7554 		return ret;
7555 	}
7556 
7557 	ret = hclge_map_tqp_to_vport(hdev, vport);
7558 	if (ret) {
7559 		dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
7560 		return ret;
7561 	}
7562 
7563 	ret = hclge_tm_schd_init(hdev);
7564 	if (ret) {
7565 		dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
7566 		return ret;
7567 	}
7568 
7569 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
7570 	roundup_size = ilog2(roundup_size);
7571 	/* Set the RSS TC mode according to the new RSS size */
7572 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
7573 		tc_valid[i] = 0;
7574 
7575 		if (!(hdev->hw_tc_map & BIT(i)))
7576 			continue;
7577 
7578 		tc_valid[i] = 1;
7579 		tc_size[i] = roundup_size;
7580 		tc_offset[i] = kinfo->rss_size * i;
7581 	}
7582 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
7583 	if (ret)
7584 		return ret;
7585 
7586 	/* Reinitializes the rss indirect table according to the new RSS size */
7587 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
7588 	if (!rss_indir)
7589 		return -ENOMEM;
7590 
7591 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
7592 		rss_indir[i] = i % kinfo->rss_size;
7593 
7594 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
7595 	if (ret)
7596 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
7597 			ret);
7598 
7599 	kfree(rss_indir);
7600 
7601 	if (!ret)
7602 		dev_info(&hdev->pdev->dev,
7603 			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
7604 			 cur_rss_size, kinfo->rss_size,
7605 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
7606 
7607 	return ret;
7608 }
7609 
7610 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
7611 			      u32 *regs_num_64_bit)
7612 {
7613 	struct hclge_desc desc;
7614 	u32 total_num;
7615 	int ret;
7616 
7617 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
7618 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7619 	if (ret) {
7620 		dev_err(&hdev->pdev->dev,
7621 			"Query register number cmd failed, ret = %d.\n", ret);
7622 		return ret;
7623 	}
7624 
7625 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
7626 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
7627 
7628 	total_num = *regs_num_32_bit + *regs_num_64_bit;
7629 	if (!total_num)
7630 		return -EINVAL;
7631 
7632 	return 0;
7633 }
7634 
7635 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
7636 				 void *data)
7637 {
7638 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
7639 
7640 	struct hclge_desc *desc;
7641 	u32 *reg_val = data;
7642 	__le32 *desc_data;
7643 	int cmd_num;
7644 	int i, k, n;
7645 	int ret;
7646 
7647 	if (regs_num == 0)
7648 		return 0;
7649 
7650 	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
7651 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
7652 	if (!desc)
7653 		return -ENOMEM;
7654 
7655 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
7656 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
7657 	if (ret) {
7658 		dev_err(&hdev->pdev->dev,
7659 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
7660 		kfree(desc);
7661 		return ret;
7662 	}
7663 
7664 	for (i = 0; i < cmd_num; i++) {
7665 		if (i == 0) {
7666 			desc_data = (__le32 *)(&desc[i].data[0]);
7667 			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
7668 		} else {
7669 			desc_data = (__le32 *)(&desc[i]);
7670 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
7671 		}
7672 		for (k = 0; k < n; k++) {
7673 			*reg_val++ = le32_to_cpu(*desc_data++);
7674 
7675 			regs_num--;
7676 			if (!regs_num)
7677 				break;
7678 		}
7679 	}
7680 
7681 	kfree(desc);
7682 	return 0;
7683 }
7684 
7685 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
7686 				 void *data)
7687 {
7688 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
7689 
7690 	struct hclge_desc *desc;
7691 	u64 *reg_val = data;
7692 	__le64 *desc_data;
7693 	int cmd_num;
7694 	int i, k, n;
7695 	int ret;
7696 
7697 	if (regs_num == 0)
7698 		return 0;
7699 
7700 	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
7701 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
7702 	if (!desc)
7703 		return -ENOMEM;
7704 
7705 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
7706 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
7707 	if (ret) {
7708 		dev_err(&hdev->pdev->dev,
7709 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
7710 		kfree(desc);
7711 		return ret;
7712 	}
7713 
7714 	for (i = 0; i < cmd_num; i++) {
7715 		if (i == 0) {
7716 			desc_data = (__le64 *)(&desc[i].data[0]);
7717 			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
7718 		} else {
7719 			desc_data = (__le64 *)(&desc[i]);
7720 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
7721 		}
7722 		for (k = 0; k < n; k++) {
7723 			*reg_val++ = le64_to_cpu(*desc_data++);
7724 
7725 			regs_num--;
7726 			if (!regs_num)
7727 				break;
7728 		}
7729 	}
7730 
7731 	kfree(desc);
7732 	return 0;
7733 }
7734 
7735 #define MAX_SEPARATE_NUM	4
7736 #define SEPARATOR_VALUE		0xFFFFFFFF
7737 #define REG_NUM_PER_LINE	4
7738 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
7739 
7740 static int hclge_get_regs_len(struct hnae3_handle *handle)
7741 {
7742 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
7743 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
7744 	struct hclge_vport *vport = hclge_get_vport(handle);
7745 	struct hclge_dev *hdev = vport->back;
7746 	u32 regs_num_32_bit, regs_num_64_bit;
7747 	int ret;
7748 
7749 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
7750 	if (ret) {
7751 		dev_err(&hdev->pdev->dev,
7752 			"Get register number failed, ret = %d.\n", ret);
7753 		return -EOPNOTSUPP;
7754 	}
7755 
7756 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
7757 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
7758 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
7759 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
7760 
7761 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
7762 		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
7763 		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
7764 }
7765 
7766 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
7767 			   void *data)
7768 {
7769 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
7770 	struct hclge_vport *vport = hclge_get_vport(handle);
7771 	struct hclge_dev *hdev = vport->back;
7772 	u32 regs_num_32_bit, regs_num_64_bit;
7773 	int i, j, reg_um, separator_num;
7774 	u32 *reg = data;
7775 	int ret;
7776 
7777 	*version = hdev->fw_version;
7778 
7779 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
7780 	if (ret) {
7781 		dev_err(&hdev->pdev->dev,
7782 			"Get register number failed, ret = %d.\n", ret);
7783 		return;
7784 	}
7785 
7786 	/* fetching per-PF registers valus from PF PCIe register space */
7787 	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
7788 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
7789 	for (i = 0; i < reg_um; i++)
7790 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
7791 	for (i = 0; i < separator_num; i++)
7792 		*reg++ = SEPARATOR_VALUE;
7793 
7794 	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
7795 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
7796 	for (i = 0; i < reg_um; i++)
7797 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
7798 	for (i = 0; i < separator_num; i++)
7799 		*reg++ = SEPARATOR_VALUE;
7800 
7801 	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
7802 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
7803 	for (j = 0; j < kinfo->num_tqps; j++) {
7804 		for (i = 0; i < reg_um; i++)
7805 			*reg++ = hclge_read_dev(&hdev->hw,
7806 						ring_reg_addr_list[i] +
7807 						0x200 * j);
7808 		for (i = 0; i < separator_num; i++)
7809 			*reg++ = SEPARATOR_VALUE;
7810 	}
7811 
7812 	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
7813 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
7814 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
7815 		for (i = 0; i < reg_um; i++)
7816 			*reg++ = hclge_read_dev(&hdev->hw,
7817 						tqp_intr_reg_addr_list[i] +
7818 						4 * j);
7819 		for (i = 0; i < separator_num; i++)
7820 			*reg++ = SEPARATOR_VALUE;
7821 	}
7822 
7823 	/* fetching PF common registers values from firmware */
7824 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
7825 	if (ret) {
7826 		dev_err(&hdev->pdev->dev,
7827 			"Get 32 bit register failed, ret = %d.\n", ret);
7828 		return;
7829 	}
7830 
7831 	reg += regs_num_32_bit;
7832 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
7833 	if (ret)
7834 		dev_err(&hdev->pdev->dev,
7835 			"Get 64 bit register failed, ret = %d.\n", ret);
7836 }
7837 
7838 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
7839 {
7840 	struct hclge_set_led_state_cmd *req;
7841 	struct hclge_desc desc;
7842 	int ret;
7843 
7844 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
7845 
7846 	req = (struct hclge_set_led_state_cmd *)desc.data;
7847 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
7848 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
7849 
7850 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7851 	if (ret)
7852 		dev_err(&hdev->pdev->dev,
7853 			"Send set led state cmd error, ret =%d\n", ret);
7854 
7855 	return ret;
7856 }
7857 
7858 enum hclge_led_status {
7859 	HCLGE_LED_OFF,
7860 	HCLGE_LED_ON,
7861 	HCLGE_LED_NO_CHANGE = 0xFF,
7862 };
7863 
7864 static int hclge_set_led_id(struct hnae3_handle *handle,
7865 			    enum ethtool_phys_id_state status)
7866 {
7867 	struct hclge_vport *vport = hclge_get_vport(handle);
7868 	struct hclge_dev *hdev = vport->back;
7869 
7870 	switch (status) {
7871 	case ETHTOOL_ID_ACTIVE:
7872 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
7873 	case ETHTOOL_ID_INACTIVE:
7874 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
7875 	default:
7876 		return -EINVAL;
7877 	}
7878 }
7879 
7880 static void hclge_get_link_mode(struct hnae3_handle *handle,
7881 				unsigned long *supported,
7882 				unsigned long *advertising)
7883 {
7884 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
7885 	struct hclge_vport *vport = hclge_get_vport(handle);
7886 	struct hclge_dev *hdev = vport->back;
7887 	unsigned int idx = 0;
7888 
7889 	for (; idx < size; idx++) {
7890 		supported[idx] = hdev->hw.mac.supported[idx];
7891 		advertising[idx] = hdev->hw.mac.advertising[idx];
7892 	}
7893 }
7894 
7895 static int hclge_gro_en(struct hnae3_handle *handle, int enable)
7896 {
7897 	struct hclge_vport *vport = hclge_get_vport(handle);
7898 	struct hclge_dev *hdev = vport->back;
7899 
7900 	return hclge_config_gro(hdev, enable);
7901 }
7902 
7903 static const struct hnae3_ae_ops hclge_ops = {
7904 	.init_ae_dev = hclge_init_ae_dev,
7905 	.uninit_ae_dev = hclge_uninit_ae_dev,
7906 	.flr_prepare = hclge_flr_prepare,
7907 	.flr_done = hclge_flr_done,
7908 	.init_client_instance = hclge_init_client_instance,
7909 	.uninit_client_instance = hclge_uninit_client_instance,
7910 	.map_ring_to_vector = hclge_map_ring_to_vector,
7911 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
7912 	.get_vector = hclge_get_vector,
7913 	.put_vector = hclge_put_vector,
7914 	.set_promisc_mode = hclge_set_promisc_mode,
7915 	.set_loopback = hclge_set_loopback,
7916 	.start = hclge_ae_start,
7917 	.stop = hclge_ae_stop,
7918 	.client_start = hclge_client_start,
7919 	.client_stop = hclge_client_stop,
7920 	.get_status = hclge_get_status,
7921 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
7922 	.update_speed_duplex_h = hclge_update_speed_duplex_h,
7923 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
7924 	.get_media_type = hclge_get_media_type,
7925 	.get_rss_key_size = hclge_get_rss_key_size,
7926 	.get_rss_indir_size = hclge_get_rss_indir_size,
7927 	.get_rss = hclge_get_rss,
7928 	.set_rss = hclge_set_rss,
7929 	.set_rss_tuple = hclge_set_rss_tuple,
7930 	.get_rss_tuple = hclge_get_rss_tuple,
7931 	.get_tc_size = hclge_get_tc_size,
7932 	.get_mac_addr = hclge_get_mac_addr,
7933 	.set_mac_addr = hclge_set_mac_addr,
7934 	.do_ioctl = hclge_do_ioctl,
7935 	.add_uc_addr = hclge_add_uc_addr,
7936 	.rm_uc_addr = hclge_rm_uc_addr,
7937 	.add_mc_addr = hclge_add_mc_addr,
7938 	.rm_mc_addr = hclge_rm_mc_addr,
7939 	.set_autoneg = hclge_set_autoneg,
7940 	.get_autoneg = hclge_get_autoneg,
7941 	.get_pauseparam = hclge_get_pauseparam,
7942 	.set_pauseparam = hclge_set_pauseparam,
7943 	.set_mtu = hclge_set_mtu,
7944 	.reset_queue = hclge_reset_tqp,
7945 	.get_stats = hclge_get_stats,
7946 	.update_stats = hclge_update_stats,
7947 	.get_strings = hclge_get_strings,
7948 	.get_sset_count = hclge_get_sset_count,
7949 	.get_fw_version = hclge_get_fw_version,
7950 	.get_mdix_mode = hclge_get_mdix_mode,
7951 	.enable_vlan_filter = hclge_enable_vlan_filter,
7952 	.set_vlan_filter = hclge_set_vlan_filter,
7953 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
7954 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
7955 	.reset_event = hclge_reset_event,
7956 	.set_default_reset_request = hclge_set_def_reset_request,
7957 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
7958 	.set_channels = hclge_set_channels,
7959 	.get_channels = hclge_get_channels,
7960 	.get_regs_len = hclge_get_regs_len,
7961 	.get_regs = hclge_get_regs,
7962 	.set_led_id = hclge_set_led_id,
7963 	.get_link_mode = hclge_get_link_mode,
7964 	.add_fd_entry = hclge_add_fd_entry,
7965 	.del_fd_entry = hclge_del_fd_entry,
7966 	.del_all_fd_entries = hclge_del_all_fd_entries,
7967 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
7968 	.get_fd_rule_info = hclge_get_fd_rule_info,
7969 	.get_fd_all_rules = hclge_get_all_rules,
7970 	.restore_fd_rules = hclge_restore_fd_entries,
7971 	.enable_fd = hclge_enable_fd,
7972 	.dbg_run_cmd = hclge_dbg_run_cmd,
7973 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
7974 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
7975 	.ae_dev_resetting = hclge_ae_dev_resetting,
7976 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
7977 	.set_gro_en = hclge_gro_en,
7978 };
7979 
7980 static struct hnae3_ae_algo ae_algo = {
7981 	.ops = &hclge_ops,
7982 	.pdev_id_table = ae_algo_pci_tbl,
7983 };
7984 
7985 static int hclge_init(void)
7986 {
7987 	pr_info("%s is initializing\n", HCLGE_NAME);
7988 
7989 	hnae3_register_ae_algo(&ae_algo);
7990 
7991 	return 0;
7992 }
7993 
7994 static void hclge_exit(void)
7995 {
7996 	hnae3_unregister_ae_algo(&ae_algo);
7997 }
7998 module_init(hclge_init);
7999 module_exit(hclge_exit);
8000 
8001 MODULE_LICENSE("GPL");
8002 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
8003 MODULE_DESCRIPTION("HCLGE Driver");
8004 MODULE_VERSION(HCLGE_MOD_VERSION);
8005