xref: /linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c (revision ba95c7452439756d4f6dceb5a188b7c31dbbe5b6)
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25 
26 #define HCLGE_NAME			"hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
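/* usage sketch: HCLGE_STATS_READ(&hdev->hw_stats.mac_stats,
 *	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num))
 * reads one 64-bit counter out of the MAC stats structure by byte offset.
 */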
29 
30 #define HCLGE_BUF_SIZE_UNIT	256U
31 #define HCLGE_BUF_MUL_BY	2
32 #define HCLGE_BUF_DIV_BY	2
33 #define NEED_RESERVE_TC_NUM	2
34 #define BUF_MAX_PERCENT		100
35 #define BUF_RESERVE_PERCENT	90
36 
37 #define HCLGE_RESET_MAX_FAIL_CNT	5
38 
39 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
40 static int hclge_init_vlan_config(struct hclge_dev *hdev);
41 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
42 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
43 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
44 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
45 			       u16 *allocated_size, bool is_alloc);
46 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
47 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
48 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
49 						   unsigned long *addr);
50 
51 static struct hnae3_ae_algo ae_algo;
52 
53 static const struct pci_device_id ae_algo_pci_tbl[] = {
54 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
55 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
56 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
57 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
58 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
59 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
60 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
61 	/* required last entry */
62 	{0, }
63 };
64 
65 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
66 
67 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
68 					 HCLGE_CMDQ_TX_ADDR_H_REG,
69 					 HCLGE_CMDQ_TX_DEPTH_REG,
70 					 HCLGE_CMDQ_TX_TAIL_REG,
71 					 HCLGE_CMDQ_TX_HEAD_REG,
72 					 HCLGE_CMDQ_RX_ADDR_L_REG,
73 					 HCLGE_CMDQ_RX_ADDR_H_REG,
74 					 HCLGE_CMDQ_RX_DEPTH_REG,
75 					 HCLGE_CMDQ_RX_TAIL_REG,
76 					 HCLGE_CMDQ_RX_HEAD_REG,
77 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
78 					 HCLGE_CMDQ_INTR_STS_REG,
79 					 HCLGE_CMDQ_INTR_EN_REG,
80 					 HCLGE_CMDQ_INTR_GEN_REG};
81 
82 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
83 					   HCLGE_VECTOR0_OTER_EN_REG,
84 					   HCLGE_MISC_RESET_STS_REG,
85 					   HCLGE_MISC_VECTOR_INT_STS,
86 					   HCLGE_GLOBAL_RESET_REG,
87 					   HCLGE_FUN_RST_ING,
88 					   HCLGE_GRO_EN_REG};
89 
90 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
91 					 HCLGE_RING_RX_ADDR_H_REG,
92 					 HCLGE_RING_RX_BD_NUM_REG,
93 					 HCLGE_RING_RX_BD_LENGTH_REG,
94 					 HCLGE_RING_RX_MERGE_EN_REG,
95 					 HCLGE_RING_RX_TAIL_REG,
96 					 HCLGE_RING_RX_HEAD_REG,
97 					 HCLGE_RING_RX_FBD_NUM_REG,
98 					 HCLGE_RING_RX_OFFSET_REG,
99 					 HCLGE_RING_RX_FBD_OFFSET_REG,
100 					 HCLGE_RING_RX_STASH_REG,
101 					 HCLGE_RING_RX_BD_ERR_REG,
102 					 HCLGE_RING_TX_ADDR_L_REG,
103 					 HCLGE_RING_TX_ADDR_H_REG,
104 					 HCLGE_RING_TX_BD_NUM_REG,
105 					 HCLGE_RING_TX_PRIORITY_REG,
106 					 HCLGE_RING_TX_TC_REG,
107 					 HCLGE_RING_TX_MERGE_EN_REG,
108 					 HCLGE_RING_TX_TAIL_REG,
109 					 HCLGE_RING_TX_HEAD_REG,
110 					 HCLGE_RING_TX_FBD_NUM_REG,
111 					 HCLGE_RING_TX_OFFSET_REG,
112 					 HCLGE_RING_TX_EBD_NUM_REG,
113 					 HCLGE_RING_TX_EBD_OFFSET_REG,
114 					 HCLGE_RING_TX_BD_ERR_REG,
115 					 HCLGE_RING_EN_REG};
116 
117 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
118 					     HCLGE_TQP_INTR_GL0_REG,
119 					     HCLGE_TQP_INTR_GL1_REG,
120 					     HCLGE_TQP_INTR_GL2_REG,
121 					     HCLGE_TQP_INTR_RL_REG};
122 
123 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
124 	"App    Loopback test",
125 	"Serdes serial Loopback test",
126 	"Serdes parallel Loopback test",
127 	"Phy    Loopback test"
128 };
129 
130 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
131 	{"mac_tx_mac_pause_num",
132 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
133 	{"mac_rx_mac_pause_num",
134 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
135 	{"mac_tx_control_pkt_num",
136 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
137 	{"mac_rx_control_pkt_num",
138 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
139 	{"mac_tx_pfc_pkt_num",
140 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
141 	{"mac_tx_pfc_pri0_pkt_num",
142 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
143 	{"mac_tx_pfc_pri1_pkt_num",
144 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
145 	{"mac_tx_pfc_pri2_pkt_num",
146 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
147 	{"mac_tx_pfc_pri3_pkt_num",
148 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
149 	{"mac_tx_pfc_pri4_pkt_num",
150 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
151 	{"mac_tx_pfc_pri5_pkt_num",
152 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
153 	{"mac_tx_pfc_pri6_pkt_num",
154 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
155 	{"mac_tx_pfc_pri7_pkt_num",
156 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
157 	{"mac_rx_pfc_pkt_num",
158 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
159 	{"mac_rx_pfc_pri0_pkt_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
161 	{"mac_rx_pfc_pri1_pkt_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
163 	{"mac_rx_pfc_pri2_pkt_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
165 	{"mac_rx_pfc_pri3_pkt_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
167 	{"mac_rx_pfc_pri4_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
169 	{"mac_rx_pfc_pri5_pkt_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
171 	{"mac_rx_pfc_pri6_pkt_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
173 	{"mac_rx_pfc_pri7_pkt_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
175 	{"mac_tx_total_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
177 	{"mac_tx_total_oct_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
179 	{"mac_tx_good_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
181 	{"mac_tx_bad_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
183 	{"mac_tx_good_oct_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
185 	{"mac_tx_bad_oct_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
187 	{"mac_tx_uni_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
189 	{"mac_tx_multi_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
191 	{"mac_tx_broad_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
193 	{"mac_tx_undersize_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
195 	{"mac_tx_oversize_pkt_num",
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
197 	{"mac_tx_64_oct_pkt_num",
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
199 	{"mac_tx_65_127_oct_pkt_num",
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
201 	{"mac_tx_128_255_oct_pkt_num",
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
203 	{"mac_tx_256_511_oct_pkt_num",
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
205 	{"mac_tx_512_1023_oct_pkt_num",
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
207 	{"mac_tx_1024_1518_oct_pkt_num",
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
209 	{"mac_tx_1519_2047_oct_pkt_num",
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
211 	{"mac_tx_2048_4095_oct_pkt_num",
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
213 	{"mac_tx_4096_8191_oct_pkt_num",
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
215 	{"mac_tx_8192_9216_oct_pkt_num",
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
217 	{"mac_tx_9217_12287_oct_pkt_num",
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
219 	{"mac_tx_12288_16383_oct_pkt_num",
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
221 	{"mac_tx_1519_max_good_pkt_num",
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
223 	{"mac_tx_1519_max_bad_pkt_num",
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
225 	{"mac_rx_total_pkt_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
227 	{"mac_rx_total_oct_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
229 	{"mac_rx_good_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
231 	{"mac_rx_bad_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
233 	{"mac_rx_good_oct_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
235 	{"mac_rx_bad_oct_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
237 	{"mac_rx_uni_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
239 	{"mac_rx_multi_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
241 	{"mac_rx_broad_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
243 	{"mac_rx_undersize_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
245 	{"mac_rx_oversize_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
247 	{"mac_rx_64_oct_pkt_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
249 	{"mac_rx_65_127_oct_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
251 	{"mac_rx_128_255_oct_pkt_num",
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
253 	{"mac_rx_256_511_oct_pkt_num",
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
255 	{"mac_rx_512_1023_oct_pkt_num",
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
257 	{"mac_rx_1024_1518_oct_pkt_num",
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
259 	{"mac_rx_1519_2047_oct_pkt_num",
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
261 	{"mac_rx_2048_4095_oct_pkt_num",
262 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
263 	{"mac_rx_4096_8191_oct_pkt_num",
264 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
265 	{"mac_rx_8192_9216_oct_pkt_num",
266 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
267 	{"mac_rx_9217_12287_oct_pkt_num",
268 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
269 	{"mac_rx_12288_16383_oct_pkt_num",
270 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
271 	{"mac_rx_1519_max_good_pkt_num",
272 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
273 	{"mac_rx_1519_max_bad_pkt_num",
274 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
275 
276 	{"mac_tx_fragment_pkt_num",
277 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
278 	{"mac_tx_undermin_pkt_num",
279 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
280 	{"mac_tx_jabber_pkt_num",
281 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
282 	{"mac_tx_err_all_pkt_num",
283 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
284 	{"mac_tx_from_app_good_pkt_num",
285 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
286 	{"mac_tx_from_app_bad_pkt_num",
287 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
288 	{"mac_rx_fragment_pkt_num",
289 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
290 	{"mac_rx_undermin_pkt_num",
291 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
292 	{"mac_rx_jabber_pkt_num",
293 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
294 	{"mac_rx_fcs_err_pkt_num",
295 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
296 	{"mac_rx_send_app_good_pkt_num",
297 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
298 	{"mac_rx_send_app_bad_pkt_num",
299 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
300 };
301 
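/* static MAC manager table entry: the address below decodes to the LLDP
 * multicast MAC 01:80:c2:00:00:0e, paired with ethertype ETH_P_LLDP
 */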
302 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
303 	{
304 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
305 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
306 		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
307 		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
308 		.i_port_bitmap = 0x1,
309 	},
310 };
311 
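/* default RSS hash key; the byte sequence appears to be the well-known
 * Microsoft Toeplitz sample key that many NIC drivers use as a default
 */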
312 static const u8 hclge_hash_key[] = {
313 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
314 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
315 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
316 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
317 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
318 };
319 
320 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
321 {
322 #define HCLGE_MAC_CMD_NUM 21
323 
324 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
325 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
326 	__le64 *desc_data;
327 	int i, k, n;
328 	int ret;
329 
330 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
331 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
332 	if (ret) {
333 		dev_err(&hdev->pdev->dev,
334 			"Get MAC pkt stats fail, status = %d.\n", ret);
335 
336 		return ret;
337 	}
338 
339 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
340 		/* for special opcode 0032, only the first desc has the header */
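		/* hence the first desc contributes HCLGE_RD_FIRST_STATS_NUM
		 * counters read from its data area, while each later desc is
		 * read from its very start (header words included) and
		 * contributes HCLGE_RD_OTHER_STATS_NUM counters
		 */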
341 		if (unlikely(i == 0)) {
342 			desc_data = (__le64 *)(&desc[i].data[0]);
343 			n = HCLGE_RD_FIRST_STATS_NUM;
344 		} else {
345 			desc_data = (__le64 *)(&desc[i]);
346 			n = HCLGE_RD_OTHER_STATS_NUM;
347 		}
348 
349 		for (k = 0; k < n; k++) {
350 			*data += le64_to_cpu(*desc_data);
351 			data++;
352 			desc_data++;
353 		}
354 	}
355 
356 	return 0;
357 }
358 
359 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
360 {
361 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
362 	struct hclge_desc *desc;
363 	__le64 *desc_data;
364 	u16 i, k, n;
365 	int ret;
366 
367 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
368 	if (!desc)
369 		return -ENOMEM;
370 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
371 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
372 	if (ret) {
373 		kfree(desc);
374 		return ret;
375 	}
376 
377 	for (i = 0; i < desc_num; i++) {
378 		/* for special opcode 0034, only the first desc has the header */
379 		if (i == 0) {
380 			desc_data = (__le64 *)(&desc[i].data[0]);
381 			n = HCLGE_RD_FIRST_STATS_NUM;
382 		} else {
383 			desc_data = (__le64 *)(&desc[i]);
384 			n = HCLGE_RD_OTHER_STATS_NUM;
385 		}
386 
387 		for (k = 0; k < n; k++) {
388 			*data += le64_to_cpu(*desc_data);
389 			data++;
390 			desc_data++;
391 		}
392 	}
393 
394 	kfree(desc);
395 
396 	return 0;
397 }
398 
399 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
400 {
401 	struct hclge_desc desc;
402 	__le32 *desc_data;
403 	u32 reg_num;
404 	int ret;
405 
406 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
407 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
408 	if (ret)
409 		return ret;
410 
411 	desc_data = (__le32 *)(&desc.data[0]);
412 	reg_num = le32_to_cpu(*desc_data);
413 
414 	*desc_num = 1 + ((reg_num - 3) >> 2) +
415 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
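	/* i.e. desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4); e.g. a reg_num
	 * of 85 yields 1 + 21 = 22 descriptors
	 */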
416 
417 	return 0;
418 }
419 
420 static int hclge_mac_update_stats(struct hclge_dev *hdev)
421 {
422 	u32 desc_num;
423 	int ret;
424 
425 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
426 
427 	/* The firmware supports the new statistics acquisition method */
428 	if (!ret)
429 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
430 	else if (ret == -EOPNOTSUPP)
431 		ret = hclge_mac_update_stats_defective(hdev);
432 	else
433 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
434 
435 	return ret;
436 }
437 
438 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
439 {
440 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
441 	struct hclge_vport *vport = hclge_get_vport(handle);
442 	struct hclge_dev *hdev = vport->back;
443 	struct hnae3_queue *queue;
444 	struct hclge_desc desc[1];
445 	struct hclge_tqp *tqp;
446 	int ret, i;
447 
448 	for (i = 0; i < kinfo->num_tqps; i++) {
449 		queue = handle->kinfo.tqp[i];
450 		tqp = container_of(queue, struct hclge_tqp, q);
451 		/* command : HCLGE_OPC_QUERY_RX_STATUS */
452 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
453 					   true);
454 
455 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
456 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
457 		if (ret) {
458 			dev_err(&hdev->pdev->dev,
459 				"Query tqp stat fail, status = %d, queue = %d\n",
460 				ret, i);
461 			return ret;
462 		}
463 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
464 			le32_to_cpu(desc[0].data[1]);
465 	}
466 
467 	for (i = 0; i < kinfo->num_tqps; i++) {
468 		queue = handle->kinfo.tqp[i];
469 		tqp = container_of(queue, struct hclge_tqp, q);
470 		/* command : HCLGE_OPC_QUERY_TX_STATUS */
471 		hclge_cmd_setup_basic_desc(&desc[0],
472 					   HCLGE_OPC_QUERY_TX_STATUS,
473 					   true);
474 
475 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
476 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
477 		if (ret) {
478 			dev_err(&hdev->pdev->dev,
479 				"Query tqp stat fail, status = %d, queue = %d\n",
480 				ret, i);
481 			return ret;
482 		}
483 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
484 			le32_to_cpu(desc[0].data[1]);
485 	}
486 
487 	return 0;
488 }
489 
490 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
491 {
492 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
493 	struct hclge_tqp *tqp;
494 	u64 *buff = data;
495 	int i;
496 
497 	for (i = 0; i < kinfo->num_tqps; i++) {
498 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
499 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
500 	}
501 
502 	for (i = 0; i < kinfo->num_tqps; i++) {
503 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
504 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
505 	}
506 
507 	return buff;
508 }
509 
510 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
511 {
512 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
513 
514 	/* each tqp has a TX queue and a RX queue */
515 	return kinfo->num_tqps * (2);
516 }
517 
518 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
519 {
520 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
521 	u8 *buff = data;
522 	int i = 0;
523 
524 	for (i = 0; i < kinfo->num_tqps; i++) {
525 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
526 			struct hclge_tqp, q);
527 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
528 			 tqp->index);
529 		buff = buff + ETH_GSTRING_LEN;
530 	}
531 
532 	for (i = 0; i < kinfo->num_tqps; i++) {
533 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
534 			struct hclge_tqp, q);
535 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
536 			 tqp->index);
537 		buff = buff + ETH_GSTRING_LEN;
538 	}
539 
540 	return buff;
541 }
542 
543 static u64 *hclge_comm_get_stats(const void *comm_stats,
544 				 const struct hclge_comm_stats_str strs[],
545 				 int size, u64 *data)
546 {
547 	u64 *buf = data;
548 	u32 i;
549 
550 	for (i = 0; i < size; i++)
551 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
552 
553 	return buf + size;
554 }
555 
556 static u8 *hclge_comm_get_strings(u32 stringset,
557 				  const struct hclge_comm_stats_str strs[],
558 				  int size, u8 *data)
559 {
560 	char *buff = (char *)data;
561 	u32 i;
562 
563 	if (stringset != ETH_SS_STATS)
564 		return buff;
565 
566 	for (i = 0; i < size; i++) {
567 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
568 		buff = buff + ETH_GSTRING_LEN;
569 	}
570 
571 	return (u8 *)buff;
572 }
573 
574 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
575 {
576 	struct hnae3_handle *handle;
577 	int status;
578 
579 	handle = &hdev->vport[0].nic;
580 	if (handle->client) {
581 		status = hclge_tqps_update_stats(handle);
582 		if (status) {
583 			dev_err(&hdev->pdev->dev,
584 				"Update TQPS stats fail, status = %d.\n",
585 				status);
586 		}
587 	}
588 
589 	status = hclge_mac_update_stats(hdev);
590 	if (status)
591 		dev_err(&hdev->pdev->dev,
592 			"Update MAC stats fail, status = %d.\n", status);
593 }
594 
595 static void hclge_update_stats(struct hnae3_handle *handle,
596 			       struct net_device_stats *net_stats)
597 {
598 	struct hclge_vport *vport = hclge_get_vport(handle);
599 	struct hclge_dev *hdev = vport->back;
600 	int status;
601 
602 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
603 		return;
604 
605 	status = hclge_mac_update_stats(hdev);
606 	if (status)
607 		dev_err(&hdev->pdev->dev,
608 			"Update MAC stats fail, status = %d.\n",
609 			status);
610 
611 	status = hclge_tqps_update_stats(handle);
612 	if (status)
613 		dev_err(&hdev->pdev->dev,
614 			"Update TQPS stats fail, status = %d.\n",
615 			status);
616 
617 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
618 }
619 
620 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
621 {
622 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
623 		HNAE3_SUPPORT_PHY_LOOPBACK |\
624 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
625 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
626 
627 	struct hclge_vport *vport = hclge_get_vport(handle);
628 	struct hclge_dev *hdev = vport->back;
629 	int count = 0;
630 
631 	/* Loopback test support rules:
632 	 * mac: only GE mode is supported
633 	 * serdes: all mac modes are supported, including GE/XGE/LGE/CGE
634 	 * phy: only supported when a phy device exists on the board
635 	 */
636 	if (stringset == ETH_SS_TEST) {
637 		/* clear the loopback bit flags first */
638 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
639 		if (hdev->pdev->revision >= 0x21 ||
640 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
641 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
642 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
643 			count += 1;
644 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
645 		}
646 
647 		count += 2;
648 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
649 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
650 	} else if (stringset == ETH_SS_STATS) {
651 		count = ARRAY_SIZE(g_mac_stats_string) +
652 			hclge_tqps_get_sset_count(handle, stringset);
653 	}
654 
655 	return count;
656 }
657 
658 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
659 			      u8 *data)
660 {
661 	u8 *p = (u8 *)data;
662 	int size;
663 
664 	if (stringset == ETH_SS_STATS) {
665 		size = ARRAY_SIZE(g_mac_stats_string);
666 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
667 					   size, p);
668 		p = hclge_tqps_get_strings(handle, p);
669 	} else if (stringset == ETH_SS_TEST) {
670 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
671 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
672 			       ETH_GSTRING_LEN);
673 			p += ETH_GSTRING_LEN;
674 		}
675 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
676 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
677 			       ETH_GSTRING_LEN);
678 			p += ETH_GSTRING_LEN;
679 		}
680 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
681 			memcpy(p,
682 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
683 			       ETH_GSTRING_LEN);
684 			p += ETH_GSTRING_LEN;
685 		}
686 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
687 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
688 			       ETH_GSTRING_LEN);
689 			p += ETH_GSTRING_LEN;
690 		}
691 	}
692 }
693 
694 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
695 {
696 	struct hclge_vport *vport = hclge_get_vport(handle);
697 	struct hclge_dev *hdev = vport->back;
698 	u64 *p;
699 
700 	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
701 				 ARRAY_SIZE(g_mac_stats_string), data);
702 	p = hclge_tqps_get_stats(handle, p);
703 }
704 
705 static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
706 				     u64 *rx_cnt)
707 {
708 	struct hclge_vport *vport = hclge_get_vport(handle);
709 	struct hclge_dev *hdev = vport->back;
710 
711 	*tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
712 	*rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
713 }
714 
715 static int hclge_parse_func_status(struct hclge_dev *hdev,
716 				   struct hclge_func_status_cmd *status)
717 {
718 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
719 		return -EINVAL;
720 
721 	/* Record whether the pf is the main pf */
722 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
723 		hdev->flag |= HCLGE_FLAG_MAIN;
724 	else
725 		hdev->flag &= ~HCLGE_FLAG_MAIN;
726 
727 	return 0;
728 }
729 
730 static int hclge_query_function_status(struct hclge_dev *hdev)
731 {
732 #define HCLGE_QUERY_MAX_CNT	5
733 
734 	struct hclge_func_status_cmd *req;
735 	struct hclge_desc desc;
736 	int timeout = 0;
737 	int ret;
738 
739 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
740 	req = (struct hclge_func_status_cmd *)desc.data;
741 
742 	do {
743 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
744 		if (ret) {
745 			dev_err(&hdev->pdev->dev,
746 				"query function status failed %d.\n", ret);
747 			return ret;
748 		}
749 
750 		/* Check if pf reset is done */
751 		if (req->pf_state)
752 			break;
753 		usleep_range(1000, 2000);
754 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
755 
756 	ret = hclge_parse_func_status(hdev, req);
757 
758 	return ret;
759 }
760 
761 static int hclge_query_pf_resource(struct hclge_dev *hdev)
762 {
763 	struct hclge_pf_res_cmd *req;
764 	struct hclge_desc desc;
765 	int ret;
766 
767 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
768 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
769 	if (ret) {
770 		dev_err(&hdev->pdev->dev,
771 			"query pf resource failed %d.\n", ret);
772 		return ret;
773 	}
774 
775 	req = (struct hclge_pf_res_cmd *)desc.data;
776 	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
777 	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
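	/* the firmware reports buffer sizes in units of
	 * (1 << HCLGE_BUF_UNIT_S) bytes; the tx/dv buffer sizes below are
	 * additionally rounded up to HCLGE_BUF_SIZE_UNIT bytes
	 */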
778 
779 	if (req->tx_buf_size)
780 		hdev->tx_buf_size =
781 			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
782 	else
783 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
784 
785 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
786 
787 	if (req->dv_buf_size)
788 		hdev->dv_buf_size =
789 			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
790 	else
791 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
792 
793 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
794 
795 	if (hnae3_dev_roce_supported(hdev)) {
796 		hdev->roce_base_msix_offset =
797 		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
798 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
799 		hdev->num_roce_msi =
800 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
801 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
802 
803 		/* PF should have NIC vectors and RoCE vectors;
804 		 * NIC vectors are queued before RoCE vectors.
805 		 */
806 		hdev->num_msi = hdev->num_roce_msi +
807 				hdev->roce_base_msix_offset;
808 	} else {
809 		hdev->num_msi =
810 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
811 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
812 	}
813 
814 	return 0;
815 }
816 
817 static int hclge_parse_speed(int speed_cmd, int *speed)
818 {
819 	switch (speed_cmd) {
820 	case 6:
821 		*speed = HCLGE_MAC_SPEED_10M;
822 		break;
823 	case 7:
824 		*speed = HCLGE_MAC_SPEED_100M;
825 		break;
826 	case 0:
827 		*speed = HCLGE_MAC_SPEED_1G;
828 		break;
829 	case 1:
830 		*speed = HCLGE_MAC_SPEED_10G;
831 		break;
832 	case 2:
833 		*speed = HCLGE_MAC_SPEED_25G;
834 		break;
835 	case 3:
836 		*speed = HCLGE_MAC_SPEED_40G;
837 		break;
838 	case 4:
839 		*speed = HCLGE_MAC_SPEED_50G;
840 		break;
841 	case 5:
842 		*speed = HCLGE_MAC_SPEED_100G;
843 		break;
844 	default:
845 		return -EINVAL;
846 	}
847 
848 	return 0;
849 }
850 
851 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
852 {
853 	struct hclge_vport *vport = hclge_get_vport(handle);
854 	struct hclge_dev *hdev = vport->back;
855 	u32 speed_ability = hdev->hw.mac.speed_ability;
856 	u32 speed_bit = 0;
857 
858 	switch (speed) {
859 	case HCLGE_MAC_SPEED_10M:
860 		speed_bit = HCLGE_SUPPORT_10M_BIT;
861 		break;
862 	case HCLGE_MAC_SPEED_100M:
863 		speed_bit = HCLGE_SUPPORT_100M_BIT;
864 		break;
865 	case HCLGE_MAC_SPEED_1G:
866 		speed_bit = HCLGE_SUPPORT_1G_BIT;
867 		break;
868 	case HCLGE_MAC_SPEED_10G:
869 		speed_bit = HCLGE_SUPPORT_10G_BIT;
870 		break;
871 	case HCLGE_MAC_SPEED_25G:
872 		speed_bit = HCLGE_SUPPORT_25G_BIT;
873 		break;
874 	case HCLGE_MAC_SPEED_40G:
875 		speed_bit = HCLGE_SUPPORT_40G_BIT;
876 		break;
877 	case HCLGE_MAC_SPEED_50G:
878 		speed_bit = HCLGE_SUPPORT_50G_BIT;
879 		break;
880 	case HCLGE_MAC_SPEED_100G:
881 		speed_bit = HCLGE_SUPPORT_100G_BIT;
882 		break;
883 	default:
884 		return -EINVAL;
885 	}
886 
887 	if (speed_bit & speed_ability)
888 		return 0;
889 
890 	return -EINVAL;
891 }
892 
893 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
894 {
895 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
896 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
897 				 mac->supported);
898 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
899 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
900 				 mac->supported);
901 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
902 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
903 				 mac->supported);
904 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
905 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
906 				 mac->supported);
907 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
908 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
909 				 mac->supported);
910 }
911 
912 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
913 {
914 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
915 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
916 				 mac->supported);
917 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
918 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
919 				 mac->supported);
920 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
921 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
922 				 mac->supported);
923 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
924 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
925 				 mac->supported);
926 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
927 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
928 				 mac->supported);
929 }
930 
931 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
932 {
933 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
934 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
935 				 mac->supported);
936 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
937 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
938 				 mac->supported);
939 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
940 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
941 				 mac->supported);
942 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
943 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
944 				 mac->supported);
945 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
946 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
947 				 mac->supported);
948 }
949 
950 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
951 {
952 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
953 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
954 				 mac->supported);
955 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
956 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
957 				 mac->supported);
958 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
959 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
960 				 mac->supported);
961 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
962 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
963 				 mac->supported);
964 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
965 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
966 				 mac->supported);
967 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
968 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
969 				 mac->supported);
970 }
971 
972 static void hclge_convert_setting_fec(struct hclge_mac *mac)
973 {
974 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
975 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
976 
977 	switch (mac->speed) {
978 	case HCLGE_MAC_SPEED_10G:
979 	case HCLGE_MAC_SPEED_40G:
980 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
981 				 mac->supported);
982 		mac->fec_ability =
983 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
984 		break;
985 	case HCLGE_MAC_SPEED_25G:
986 	case HCLGE_MAC_SPEED_50G:
987 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
988 				 mac->supported);
989 		mac->fec_ability =
990 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
991 			BIT(HNAE3_FEC_AUTO);
992 		break;
993 	case HCLGE_MAC_SPEED_100G:
994 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
995 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
996 		break;
997 	default:
998 		mac->fec_ability = 0;
999 		break;
1000 	}
1001 }
1002 
1003 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1004 					u8 speed_ability)
1005 {
1006 	struct hclge_mac *mac = &hdev->hw.mac;
1007 
1008 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1009 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1010 				 mac->supported);
1011 
1012 	hclge_convert_setting_sr(mac, speed_ability);
1013 	hclge_convert_setting_lr(mac, speed_ability);
1014 	hclge_convert_setting_cr(mac, speed_ability);
1015 	if (hdev->pdev->revision >= 0x21)
1016 		hclge_convert_setting_fec(mac);
1017 
1018 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1019 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1020 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1021 }
1022 
1023 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1024 					    u8 speed_ability)
1025 {
1026 	struct hclge_mac *mac = &hdev->hw.mac;
1027 
1028 	hclge_convert_setting_kr(mac, speed_ability);
1029 	if (hdev->pdev->revision >= 0x21)
1030 		hclge_convert_setting_fec(mac);
1031 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1032 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1033 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1034 }
1035 
1036 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1037 					 u8 speed_ability)
1038 {
1039 	unsigned long *supported = hdev->hw.mac.supported;
1040 
1041 	/* default to supporting all speeds for a GE port */
1042 	if (!speed_ability)
1043 		speed_ability = HCLGE_SUPPORT_GE;
1044 
1045 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1046 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1047 				 supported);
1048 
1049 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1050 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1051 				 supported);
1052 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1053 				 supported);
1054 	}
1055 
1056 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1057 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1058 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1059 	}
1060 
1061 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1062 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1063 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1064 	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1065 }
1066 
1067 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1068 {
1069 	u8 media_type = hdev->hw.mac.media_type;
1070 
1071 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1072 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1073 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1074 		hclge_parse_copper_link_mode(hdev, speed_ability);
1075 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1076 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1077 }

1078 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1079 {
1080 	struct hclge_cfg_param_cmd *req;
1081 	u64 mac_addr_tmp_high;
1082 	u64 mac_addr_tmp;
1083 	unsigned int i;
1084 
1085 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1086 
1087 	/* get the configuration */
1088 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1089 					      HCLGE_CFG_VMDQ_M,
1090 					      HCLGE_CFG_VMDQ_S);
1091 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1092 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1093 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1094 					    HCLGE_CFG_TQP_DESC_N_M,
1095 					    HCLGE_CFG_TQP_DESC_N_S);
1096 
1097 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1098 					HCLGE_CFG_PHY_ADDR_M,
1099 					HCLGE_CFG_PHY_ADDR_S);
1100 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1101 					  HCLGE_CFG_MEDIA_TP_M,
1102 					  HCLGE_CFG_MEDIA_TP_S);
1103 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1104 					  HCLGE_CFG_RX_BUF_LEN_M,
1105 					  HCLGE_CFG_RX_BUF_LEN_S);
1106 	/* get mac_address */
1107 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1108 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1109 					    HCLGE_CFG_MAC_ADDR_H_M,
1110 					    HCLGE_CFG_MAC_ADDR_H_S);
1111 
1112 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
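	/* the low 32 bits of the 48-bit MAC address come from param[2] and
	 * the upper 16 bits from param[3]; the two-step shift above is
	 * simply mac_addr_tmp_high << 32
	 */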
1113 
1114 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1115 					     HCLGE_CFG_DEFAULT_SPEED_M,
1116 					     HCLGE_CFG_DEFAULT_SPEED_S);
1117 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1118 					    HCLGE_CFG_RSS_SIZE_M,
1119 					    HCLGE_CFG_RSS_SIZE_S);
1120 
1121 	for (i = 0; i < ETH_ALEN; i++)
1122 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1123 
1124 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1125 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1126 
1127 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1128 					     HCLGE_CFG_SPEED_ABILITY_M,
1129 					     HCLGE_CFG_SPEED_ABILITY_S);
1130 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1131 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1132 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1133 	if (!cfg->umv_space)
1134 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1135 }
1136 
1137 /* hclge_get_cfg: query the static parameters from flash
1138  * @hdev: pointer to struct hclge_dev
1139  * @hcfg: the config structure to be filled
1140  */
1141 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1142 {
1143 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1144 	struct hclge_cfg_param_cmd *req;
1145 	unsigned int i;
1146 	int ret;
1147 
1148 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1149 		u32 offset = 0;
1150 
1151 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1152 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1153 					   true);
1154 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1155 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1156 		/* Len should be in units of 4 bytes when sent to hardware */
1157 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1158 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1159 		req->offset = cpu_to_le32(offset);
1160 	}
1161 
1162 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1163 	if (ret) {
1164 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1165 		return ret;
1166 	}
1167 
1168 	hclge_parse_cfg(hcfg, desc);
1169 
1170 	return 0;
1171 }
1172 
1173 static int hclge_get_cap(struct hclge_dev *hdev)
1174 {
1175 	int ret;
1176 
1177 	ret = hclge_query_function_status(hdev);
1178 	if (ret) {
1179 		dev_err(&hdev->pdev->dev,
1180 			"query function status error %d.\n", ret);
1181 		return ret;
1182 	}
1183 
1184 	/* get pf resource */
1185 	ret = hclge_query_pf_resource(hdev);
1186 	if (ret)
1187 		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1188 
1189 	return ret;
1190 }
1191 
1192 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1193 {
1194 #define HCLGE_MIN_TX_DESC	64
1195 #define HCLGE_MIN_RX_DESC	64
1196 
1197 	if (!is_kdump_kernel())
1198 		return;
1199 
1200 	dev_info(&hdev->pdev->dev,
1201 		 "Running kdump kernel. Using minimal resources\n");
1202 
1203 	/* the minimum number of queue pairs equals the number of vports */
1204 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
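	/* i.e. one queue pair per VMDq vport, one per requested VF, plus one
	 * for the PF's own vport, matching the vport count used in
	 * hclge_alloc_vport()
	 */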
1205 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1206 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1207 }
1208 
1209 static int hclge_configure(struct hclge_dev *hdev)
1210 {
1211 	struct hclge_cfg cfg;
1212 	unsigned int i;
1213 	int ret;
1214 
1215 	ret = hclge_get_cfg(hdev, &cfg);
1216 	if (ret) {
1217 		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1218 		return ret;
1219 	}
1220 
1221 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1222 	hdev->base_tqp_pid = 0;
1223 	hdev->rss_size_max = cfg.rss_size_max;
1224 	hdev->rx_buf_len = cfg.rx_buf_len;
1225 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1226 	hdev->hw.mac.media_type = cfg.media_type;
1227 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1228 	hdev->num_tx_desc = cfg.tqp_desc_num;
1229 	hdev->num_rx_desc = cfg.tqp_desc_num;
1230 	hdev->tm_info.num_pg = 1;
1231 	hdev->tc_max = cfg.tc_num;
1232 	hdev->tm_info.hw_pfc_map = 0;
1233 	hdev->wanted_umv_size = cfg.umv_space;
1234 
1235 	if (hnae3_dev_fd_supported(hdev)) {
1236 		hdev->fd_en = true;
1237 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1238 	}
1239 
1240 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1241 	if (ret) {
1242 		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1243 		return ret;
1244 	}
1245 
1246 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1247 
1248 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1249 	    (hdev->tc_max < 1)) {
1250 		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1251 			 hdev->tc_max);
1252 		hdev->tc_max = 1;
1253 	}
1254 
1255 	/* Dev does not support DCB */
1256 	if (!hnae3_dev_dcb_supported(hdev)) {
1257 		hdev->tc_max = 1;
1258 		hdev->pfc_max = 0;
1259 	} else {
1260 		hdev->pfc_max = hdev->tc_max;
1261 	}
1262 
1263 	hdev->tm_info.num_tc = 1;
1264 
1265 	/* Currently non-contiguous tc is not supported */
1266 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1267 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1268 
1269 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1270 
1271 	hclge_init_kdump_kernel_config(hdev);
1272 
1273 	return ret;
1274 }
1275 
1276 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1277 			    unsigned int tso_mss_max)
1278 {
1279 	struct hclge_cfg_tso_status_cmd *req;
1280 	struct hclge_desc desc;
1281 	u16 tso_mss;
1282 
1283 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1284 
1285 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1286 
1287 	tso_mss = 0;
1288 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1289 			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1290 	req->tso_mss_min = cpu_to_le16(tso_mss);
1291 
1292 	tso_mss = 0;
1293 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1294 			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1295 	req->tso_mss_max = cpu_to_le16(tso_mss);
1296 
1297 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1298 }
1299 
1300 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1301 {
1302 	struct hclge_cfg_gro_status_cmd *req;
1303 	struct hclge_desc desc;
1304 	int ret;
1305 
1306 	if (!hnae3_dev_gro_supported(hdev))
1307 		return 0;
1308 
1309 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1310 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1311 
1312 	req->gro_en = cpu_to_le16(en ? 1 : 0);
1313 
1314 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1315 	if (ret)
1316 		dev_err(&hdev->pdev->dev,
1317 			"GRO hardware config cmd failed, ret = %d\n", ret);
1318 
1319 	return ret;
1320 }
1321 
1322 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1323 {
1324 	struct hclge_tqp *tqp;
1325 	int i;
1326 
1327 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1328 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1329 	if (!hdev->htqp)
1330 		return -ENOMEM;
1331 
1332 	tqp = hdev->htqp;
1333 
1334 	for (i = 0; i < hdev->num_tqps; i++) {
1335 		tqp->dev = &hdev->pdev->dev;
1336 		tqp->index = i;
1337 
1338 		tqp->q.ae_algo = &ae_algo;
1339 		tqp->q.buf_size = hdev->rx_buf_len;
1340 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1341 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1342 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1343 			i * HCLGE_TQP_REG_SIZE;
1344 
1345 		tqp++;
1346 	}
1347 
1348 	return 0;
1349 }
1350 
1351 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1352 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1353 {
1354 	struct hclge_tqp_map_cmd *req;
1355 	struct hclge_desc desc;
1356 	int ret;
1357 
1358 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1359 
1360 	req = (struct hclge_tqp_map_cmd *)desc.data;
1361 	req->tqp_id = cpu_to_le16(tqp_pid);
1362 	req->tqp_vf = func_id;
1363 	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1364 			1 << HCLGE_TQP_MAP_EN_B;
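	/* the TYPE bit is set only when mapping for a VF (!is_pf); the EN bit
	 * is always set, presumably marking the mapping as enabled
	 */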
1365 	req->tqp_vid = cpu_to_le16(tqp_vid);
1366 
1367 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1368 	if (ret)
1369 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1370 
1371 	return ret;
1372 }
1373 
1374 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1375 {
1376 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1377 	struct hclge_dev *hdev = vport->back;
1378 	int i, alloced;
1379 
1380 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1381 	     alloced < num_tqps; i++) {
1382 		if (!hdev->htqp[i].alloced) {
1383 			hdev->htqp[i].q.handle = &vport->nic;
1384 			hdev->htqp[i].q.tqp_index = alloced;
1385 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1386 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1387 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1388 			hdev->htqp[i].alloced = true;
1389 			alloced++;
1390 		}
1391 	}
1392 	vport->alloc_tqps = alloced;
1393 	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1394 				vport->alloc_tqps / hdev->tm_info.num_tc);
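	/* e.g. 8 allocated tqps spread over 4 TCs gives an rss_size of 2,
	 * further capped by hdev->rss_size_max
	 */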
1395 
1396 	return 0;
1397 }
1398 
1399 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1400 			    u16 num_tx_desc, u16 num_rx_desc)
1401 
1402 {
1403 	struct hnae3_handle *nic = &vport->nic;
1404 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1405 	struct hclge_dev *hdev = vport->back;
1406 	int ret;
1407 
1408 	kinfo->num_tx_desc = num_tx_desc;
1409 	kinfo->num_rx_desc = num_rx_desc;
1410 
1411 	kinfo->rx_buf_len = hdev->rx_buf_len;
1412 
1413 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1414 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1415 	if (!kinfo->tqp)
1416 		return -ENOMEM;
1417 
1418 	ret = hclge_assign_tqp(vport, num_tqps);
1419 	if (ret)
1420 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1421 
1422 	return ret;
1423 }
1424 
1425 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1426 				  struct hclge_vport *vport)
1427 {
1428 	struct hnae3_handle *nic = &vport->nic;
1429 	struct hnae3_knic_private_info *kinfo;
1430 	u16 i;
1431 
1432 	kinfo = &nic->kinfo;
1433 	for (i = 0; i < vport->alloc_tqps; i++) {
1434 		struct hclge_tqp *q =
1435 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1436 		bool is_pf;
1437 		int ret;
1438 
1439 		is_pf = !(vport->vport_id);
1440 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1441 					     i, is_pf);
1442 		if (ret)
1443 			return ret;
1444 	}
1445 
1446 	return 0;
1447 }
1448 
1449 static int hclge_map_tqp(struct hclge_dev *hdev)
1450 {
1451 	struct hclge_vport *vport = hdev->vport;
1452 	u16 i, num_vport;
1453 
1454 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1455 	for (i = 0; i < num_vport; i++) {
1456 		int ret;
1457 
1458 		ret = hclge_map_tqp_to_vport(hdev, vport);
1459 		if (ret)
1460 			return ret;
1461 
1462 		vport++;
1463 	}
1464 
1465 	return 0;
1466 }
1467 
1468 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1469 {
1470 	struct hnae3_handle *nic = &vport->nic;
1471 	struct hclge_dev *hdev = vport->back;
1472 	int ret;
1473 
1474 	nic->pdev = hdev->pdev;
1475 	nic->ae_algo = &ae_algo;
1476 	nic->numa_node_mask = hdev->numa_node_mask;
1477 
1478 	ret = hclge_knic_setup(vport, num_tqps,
1479 			       hdev->num_tx_desc, hdev->num_rx_desc);
1480 	if (ret)
1481 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1482 
1483 	return ret;
1484 }
1485 
1486 static int hclge_alloc_vport(struct hclge_dev *hdev)
1487 {
1488 	struct pci_dev *pdev = hdev->pdev;
1489 	struct hclge_vport *vport;
1490 	u32 tqp_main_vport;
1491 	u32 tqp_per_vport;
1492 	int num_vport, i;
1493 	int ret;
1494 
1495 	/* We need to alloc a vport for the main NIC of the PF */
1496 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1497 
1498 	if (hdev->num_tqps < num_vport) {
1499 		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)\n",
1500 			hdev->num_tqps, num_vport);
1501 		return -EINVAL;
1502 	}
1503 
1504 	/* Alloc the same number of TQPs for every vport */
1505 	tqp_per_vport = hdev->num_tqps / num_vport;
1506 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
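	/* e.g. 16 tqps shared by 5 vports gives 3 tqps per vport, with the
	 * main vport (vport 0) taking the remainder: 3 + 1 = 4
	 */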
1507 
1508 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1509 			     GFP_KERNEL);
1510 	if (!vport)
1511 		return -ENOMEM;
1512 
1513 	hdev->vport = vport;
1514 	hdev->num_alloc_vport = num_vport;
1515 
1516 	if (IS_ENABLED(CONFIG_PCI_IOV))
1517 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1518 
1519 	for (i = 0; i < num_vport; i++) {
1520 		vport->back = hdev;
1521 		vport->vport_id = i;
1522 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1523 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1524 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1525 		INIT_LIST_HEAD(&vport->vlan_list);
1526 		INIT_LIST_HEAD(&vport->uc_mac_list);
1527 		INIT_LIST_HEAD(&vport->mc_mac_list);
1528 
1529 		if (i == 0)
1530 			ret = hclge_vport_setup(vport, tqp_main_vport);
1531 		else
1532 			ret = hclge_vport_setup(vport, tqp_per_vport);
1533 		if (ret) {
1534 			dev_err(&pdev->dev,
1535 				"vport setup failed for vport %d, %d\n",
1536 				i, ret);
1537 			return ret;
1538 		}
1539 
1540 		vport++;
1541 	}
1542 
1543 	return 0;
1544 }
1545 
1546 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1547 				    struct hclge_pkt_buf_alloc *buf_alloc)
1548 {
1549 /* TX buffer size is in units of 128 bytes */
1550 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1551 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1552 	struct hclge_tx_buff_alloc_cmd *req;
1553 	struct hclge_desc desc;
1554 	int ret;
1555 	u8 i;
1556 
1557 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1558 
1559 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1560 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1561 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1562 
1563 		req->tx_pkt_buff[i] =
1564 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1565 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1566 	}
1567 
1568 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1569 	if (ret)
1570 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1571 			ret);
1572 
1573 	return ret;
1574 }
1575 
1576 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1577 				 struct hclge_pkt_buf_alloc *buf_alloc)
1578 {
1579 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1580 
1581 	if (ret)
1582 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1583 
1584 	return ret;
1585 }
1586 
1587 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1588 {
1589 	unsigned int i;
1590 	u32 cnt = 0;
1591 
1592 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1593 		if (hdev->hw_tc_map & BIT(i))
1594 			cnt++;
1595 	return cnt;
1596 }
1597 
1598 /* Get the number of pfc-enabled TCs that have a private buffer */
1599 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1600 				  struct hclge_pkt_buf_alloc *buf_alloc)
1601 {
1602 	struct hclge_priv_buf *priv;
1603 	unsigned int i;
1604 	int cnt = 0;
1605 
1606 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1607 		priv = &buf_alloc->priv_buf[i];
1608 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1609 		    priv->enable)
1610 			cnt++;
1611 	}
1612 
1613 	return cnt;
1614 }
1615 
1616 /* Get the number of pfc-disabled TCs that have a private buffer */
1617 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1618 				     struct hclge_pkt_buf_alloc *buf_alloc)
1619 {
1620 	struct hclge_priv_buf *priv;
1621 	unsigned int i;
1622 	int cnt = 0;
1623 
1624 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1625 		priv = &buf_alloc->priv_buf[i];
1626 		if (hdev->hw_tc_map & BIT(i) &&
1627 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1628 		    priv->enable)
1629 			cnt++;
1630 	}
1631 
1632 	return cnt;
1633 }
1634 
1635 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1636 {
1637 	struct hclge_priv_buf *priv;
1638 	u32 rx_priv = 0;
1639 	int i;
1640 
1641 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1642 		priv = &buf_alloc->priv_buf[i];
1643 		if (priv->enable)
1644 			rx_priv += priv->buf_size;
1645 	}
1646 	return rx_priv;
1647 }
1648 
1649 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1650 {
1651 	u32 i, total_tx_size = 0;
1652 
1653 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1654 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1655 
1656 	return total_tx_size;
1657 }
1658 
1659 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1660 				struct hclge_pkt_buf_alloc *buf_alloc,
1661 				u32 rx_all)
1662 {
1663 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1664 	u32 tc_num = hclge_get_tc_num(hdev);
1665 	u32 shared_buf, aligned_mps;
1666 	u32 rx_priv;
1667 	int i;
1668 
1669 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
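	/* e.g. an mps of 1518 bytes rounds up to 1536 (6 * 256) here */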
1670 
1671 	if (hnae3_dev_dcb_supported(hdev))
1672 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1673 					hdev->dv_buf_size;
1674 	else
1675 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1676 					+ hdev->dv_buf_size;
1677 
1678 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1679 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1680 			     HCLGE_BUF_SIZE_UNIT);
1681 
1682 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1683 	if (rx_all < rx_priv + shared_std)
1684 		return false;
1685 
1686 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1687 	buf_alloc->s_buf.buf_size = shared_buf;
1688 	if (hnae3_dev_dcb_supported(hdev)) {
1689 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1690 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1691 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1692 				  HCLGE_BUF_SIZE_UNIT);
1693 	} else {
1694 		buf_alloc->s_buf.self.high = aligned_mps +
1695 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1696 		buf_alloc->s_buf.self.low = aligned_mps;
1697 	}
1698 
1699 	if (hnae3_dev_dcb_supported(hdev)) {
1700 		hi_thrd = shared_buf - hdev->dv_buf_size;
1701 
1702 		if (tc_num <= NEED_RESERVE_TC_NUM)
1703 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1704 					/ BUF_MAX_PERCENT;
1705 
1706 		if (tc_num)
1707 			hi_thrd = hi_thrd / tc_num;
1708 
1709 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1710 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1711 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1712 	} else {
1713 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1714 		lo_thrd = aligned_mps;
1715 	}
1716 
1717 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1718 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1719 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1720 	}
1721 
1722 	return true;
1723 }
1724 
1725 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1726 				struct hclge_pkt_buf_alloc *buf_alloc)
1727 {
1728 	u32 i, total_size;
1729 
1730 	total_size = hdev->pkt_buf_size;
1731 
1732 	/* alloc tx buffer for all enabled TCs */
1733 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1734 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1735 
1736 		if (hdev->hw_tc_map & BIT(i)) {
1737 			if (total_size < hdev->tx_buf_size)
1738 				return -ENOMEM;
1739 
1740 			priv->tx_buf_size = hdev->tx_buf_size;
1741 		} else {
1742 			priv->tx_buf_size = 0;
1743 		}
1744 
1745 		total_size -= priv->tx_buf_size;
1746 	}
1747 
1748 	return 0;
1749 }
1750 
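/* Set up the private rx buffer and waterlines for every enabled TC:
 * PFC-enabled TCs get a non-zero low waterline, the others only a high
 * waterline. @max selects the larger or the smaller waterline scheme,
 * and the resulting layout is validated with hclge_is_rx_buf_ok().
 */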
1751 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1752 				  struct hclge_pkt_buf_alloc *buf_alloc)
1753 {
1754 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1755 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1756 	unsigned int i;
1757 
1758 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1759 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1760 
1761 		priv->enable = 0;
1762 		priv->wl.low = 0;
1763 		priv->wl.high = 0;
1764 		priv->buf_size = 0;
1765 
1766 		if (!(hdev->hw_tc_map & BIT(i)))
1767 			continue;
1768 
1769 		priv->enable = 1;
1770 
1771 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1772 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1773 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
1774 						HCLGE_BUF_SIZE_UNIT);
1775 		} else {
1776 			priv->wl.low = 0;
1777 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1778 					aligned_mps;
1779 		}
1780 
1781 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1782 	}
1783 
1784 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1785 }
1786 
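/* Release the private buffers of TCs that do not have PFC enabled,
 * starting from the highest-numbered TC, until the remaining rx buffer
 * passes hclge_is_rx_buf_ok() or no such TC is left.
 */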
1787 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1788 					  struct hclge_pkt_buf_alloc *buf_alloc)
1789 {
1790 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1791 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1792 	int i;
1793 
1794 	/* let the last TC be cleared first */
1795 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1796 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1797 		unsigned int mask = BIT((unsigned int)i);
1798 
1799 		if (hdev->hw_tc_map & mask &&
1800 		    !(hdev->tm_info.hw_pfc_map & mask)) {
1801 			/* Clear the private buffer of a TC without PFC */
1802 			priv->wl.low = 0;
1803 			priv->wl.high = 0;
1804 			priv->buf_size = 0;
1805 			priv->enable = 0;
1806 			no_pfc_priv_num--;
1807 		}
1808 
1809 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1810 		    no_pfc_priv_num == 0)
1811 			break;
1812 	}
1813 
1814 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1815 }
1816 
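/* Same strategy as above, but for the PFC-enabled TCs: drop their
 * private buffers from the last TC downwards until the layout fits.
 */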
1817 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1818 					struct hclge_pkt_buf_alloc *buf_alloc)
1819 {
1820 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1821 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1822 	int i;
1823 
1824 	/* let the last TC be cleared first */
1825 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1826 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1827 		unsigned int mask = BIT((unsigned int)i);
1828 
1829 		if (hdev->hw_tc_map & mask &&
1830 		    hdev->tm_info.hw_pfc_map & mask) {
1831 			/* Reduce the number of PFC TCs with private buffer */
1832 			priv->wl.low = 0;
1833 			priv->enable = 0;
1834 			priv->wl.high = 0;
1835 			priv->buf_size = 0;
1836 			pfc_priv_num--;
1837 		}
1838 
1839 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1840 		    pfc_priv_num == 0)
1841 			break;
1842 	}
1843 
1844 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1845 }
1846 
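/* Try to split the rx buffer evenly into per-TC private buffers and use
 * no shared buffer at all. This only works when each TC's share is at
 * least the minimum (dv_buf_size + COMPENSATE_BUFFER plus
 * COMPENSATE_HALF_MPS_NUM half-mps units); otherwise the caller falls
 * back to the shared-buffer schemes.
 */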
1847 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
1848 				      struct hclge_pkt_buf_alloc *buf_alloc)
1849 {
1850 #define COMPENSATE_BUFFER	0x3C00
1851 #define COMPENSATE_HALF_MPS_NUM	5
1852 #define PRIV_WL_GAP		0x1800
1853 
1854 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1855 	u32 tc_num = hclge_get_tc_num(hdev);
1856 	u32 half_mps = hdev->mps >> 1;
1857 	u32 min_rx_priv;
1858 	unsigned int i;
1859 
1860 	if (tc_num)
1861 		rx_priv = rx_priv / tc_num;
1862 
1863 	if (tc_num <= NEED_RESERVE_TC_NUM)
1864 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
1865 
1866 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
1867 			COMPENSATE_HALF_MPS_NUM * half_mps;
1868 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
1869 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
1870 
1871 	if (rx_priv < min_rx_priv)
1872 		return false;
1873 
1874 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1875 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1876 
1877 		priv->enable = 0;
1878 		priv->wl.low = 0;
1879 		priv->wl.high = 0;
1880 		priv->buf_size = 0;
1881 
1882 		if (!(hdev->hw_tc_map & BIT(i)))
1883 			continue;
1884 
1885 		priv->enable = 1;
1886 		priv->buf_size = rx_priv;
1887 		priv->wl.high = rx_priv - hdev->dv_buf_size;
1888 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
1889 	}
1890 
1891 	buf_alloc->s_buf.buf_size = 0;
1892 
1893 	return true;
1894 }
1895 
1896 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1897  * @hdev: pointer to struct hclge_dev
1898  * @buf_alloc: pointer to buffer calculation data
1899  * @return: 0: calculation successful, negative: fail
1900  */
1901 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1902 				struct hclge_pkt_buf_alloc *buf_alloc)
1903 {
1904 	/* When DCB is not supported, rx private buffer is not allocated. */
1905 	if (!hnae3_dev_dcb_supported(hdev)) {
1906 		u32 rx_all = hdev->pkt_buf_size;
1907 
1908 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1909 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1910 			return -ENOMEM;
1911 
1912 		return 0;
1913 	}
1914 
1915 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
1916 		return 0;
1917 
1918 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1919 		return 0;
1920 
1921 	/* try to decrease the buffer size */
1922 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1923 		return 0;
1924 
1925 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1926 		return 0;
1927 
1928 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1929 		return 0;
1930 
1931 	return -ENOMEM;
1932 }
1933 
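/* Program the calculated per-TC private buffer sizes and the shared
 * buffer size into hardware. Sizes are converted to hardware buffer
 * units (>> HCLGE_BUF_UNIT_S) and the enable bit is set for each entry.
 */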
1934 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1935 				   struct hclge_pkt_buf_alloc *buf_alloc)
1936 {
1937 	struct hclge_rx_priv_buff_cmd *req;
1938 	struct hclge_desc desc;
1939 	int ret;
1940 	int i;
1941 
1942 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1943 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1944 
1945 	/* Alloc private buffer for all TCs */
1946 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1947 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1948 
1949 		req->buf_num[i] =
1950 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1951 		req->buf_num[i] |=
1952 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1953 	}
1954 
1955 	req->shared_buf =
1956 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1957 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
1958 
1959 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1960 	if (ret)
1961 		dev_err(&hdev->pdev->dev,
1962 			"rx private buffer alloc cmd failed %d\n", ret);
1963 
1964 	return ret;
1965 }
1966 
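/* Write the per-TC private buffer waterlines to hardware. The TCs are
 * split across two command descriptors (HCLGE_TC_NUM_ONE_DESC per
 * descriptor), which are chained with the NEXT flag and sent together.
 */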
1967 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1968 				   struct hclge_pkt_buf_alloc *buf_alloc)
1969 {
1970 	struct hclge_rx_priv_wl_buf *req;
1971 	struct hclge_priv_buf *priv;
1972 	struct hclge_desc desc[2];
1973 	int i, j;
1974 	int ret;
1975 
1976 	for (i = 0; i < 2; i++) {
1977 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1978 					   false);
1979 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1980 
1981 		/* The first descriptor sets the NEXT bit to 1 */
1982 		if (i == 0)
1983 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1984 		else
1985 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1986 
1987 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1988 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1989 
1990 			priv = &buf_alloc->priv_buf[idx];
1991 			req->tc_wl[j].high =
1992 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1993 			req->tc_wl[j].high |=
1994 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1995 			req->tc_wl[j].low =
1996 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1997 			req->tc_wl[j].low |=
1998 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1999 		}
2000 	}
2001 
2002 	/* Send 2 descriptors at one time */
2003 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2004 	if (ret)
2005 		dev_err(&hdev->pdev->dev,
2006 			"rx private waterline config cmd failed %d\n",
2007 			ret);
2008 	return ret;
2009 }
2010 
2011 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2012 				    struct hclge_pkt_buf_alloc *buf_alloc)
2013 {
2014 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2015 	struct hclge_rx_com_thrd *req;
2016 	struct hclge_desc desc[2];
2017 	struct hclge_tc_thrd *tc;
2018 	int i, j;
2019 	int ret;
2020 
2021 	for (i = 0; i < 2; i++) {
2022 		hclge_cmd_setup_basic_desc(&desc[i],
2023 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2024 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2025 
2026 		/* The first descriptor sets the NEXT bit to 1 */
2027 		if (i == 0)
2028 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2029 		else
2030 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2031 
2032 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2033 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2034 
2035 			req->com_thrd[j].high =
2036 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2037 			req->com_thrd[j].high |=
2038 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2039 			req->com_thrd[j].low =
2040 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2041 			req->com_thrd[j].low |=
2042 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2043 		}
2044 	}
2045 
2046 	/* Send 2 descriptors at one time */
2047 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2048 	if (ret)
2049 		dev_err(&hdev->pdev->dev,
2050 			"common threshold config cmd failed %d\n", ret);
2051 	return ret;
2052 }
2053 
2054 static int hclge_common_wl_config(struct hclge_dev *hdev,
2055 				  struct hclge_pkt_buf_alloc *buf_alloc)
2056 {
2057 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2058 	struct hclge_rx_com_wl *req;
2059 	struct hclge_desc desc;
2060 	int ret;
2061 
2062 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2063 
2064 	req = (struct hclge_rx_com_wl *)desc.data;
2065 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2066 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2067 
2068 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2069 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2070 
2071 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2072 	if (ret)
2073 		dev_err(&hdev->pdev->dev,
2074 			"common waterline config cmd failed %d\n", ret);
2075 
2076 	return ret;
2077 }
2078 
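/* Calculate and program the whole packet buffer layout: tx buffer
 * calculation and allocation, rx private buffer calculation and
 * allocation, then (on DCB-capable devices) the private waterlines and
 * common thresholds, and finally the common waterline for the shared
 * buffer.
 */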
2079 int hclge_buffer_alloc(struct hclge_dev *hdev)
2080 {
2081 	struct hclge_pkt_buf_alloc *pkt_buf;
2082 	int ret;
2083 
2084 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2085 	if (!pkt_buf)
2086 		return -ENOMEM;
2087 
2088 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2089 	if (ret) {
2090 		dev_err(&hdev->pdev->dev,
2091 			"could not calc tx buffer size for all TCs %d\n", ret);
2092 		goto out;
2093 	}
2094 
2095 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2096 	if (ret) {
2097 		dev_err(&hdev->pdev->dev,
2098 			"could not alloc tx buffers %d\n", ret);
2099 		goto out;
2100 	}
2101 
2102 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2103 	if (ret) {
2104 		dev_err(&hdev->pdev->dev,
2105 			"could not calc rx priv buffer size for all TCs %d\n",
2106 			ret);
2107 		goto out;
2108 	}
2109 
2110 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2111 	if (ret) {
2112 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2113 			ret);
2114 		goto out;
2115 	}
2116 
2117 	if (hnae3_dev_dcb_supported(hdev)) {
2118 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2119 		if (ret) {
2120 			dev_err(&hdev->pdev->dev,
2121 				"could not configure rx private waterline %d\n",
2122 				ret);
2123 			goto out;
2124 		}
2125 
2126 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2127 		if (ret) {
2128 			dev_err(&hdev->pdev->dev,
2129 				"could not configure common threshold %d\n",
2130 				ret);
2131 			goto out;
2132 		}
2133 	}
2134 
2135 	ret = hclge_common_wl_config(hdev, pkt_buf);
2136 	if (ret)
2137 		dev_err(&hdev->pdev->dev,
2138 			"could not configure common waterline %d\n", ret);
2139 
2140 out:
2141 	kfree(pkt_buf);
2142 	return ret;
2143 }
2144 
2145 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2146 {
2147 	struct hnae3_handle *roce = &vport->roce;
2148 	struct hnae3_handle *nic = &vport->nic;
2149 
2150 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2151 
2152 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2153 	    vport->back->num_msi_left == 0)
2154 		return -EINVAL;
2155 
2156 	roce->rinfo.base_vector = vport->back->roce_base_vector;
2157 
2158 	roce->rinfo.netdev = nic->kinfo.netdev;
2159 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
2160 
2161 	roce->pdev = nic->pdev;
2162 	roce->ae_algo = nic->ae_algo;
2163 	roce->numa_node_mask = nic->numa_node_mask;
2164 
2165 	return 0;
2166 }
2167 
2168 static int hclge_init_msi(struct hclge_dev *hdev)
2169 {
2170 	struct pci_dev *pdev = hdev->pdev;
2171 	int vectors;
2172 	int i;
2173 
2174 	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2175 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2176 	if (vectors < 0) {
2177 		dev_err(&pdev->dev,
2178 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2179 			vectors);
2180 		return vectors;
2181 	}
2182 	if (vectors < hdev->num_msi)
2183 		dev_warn(&hdev->pdev->dev,
2184 			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2185 			 hdev->num_msi, vectors);
2186 
2187 	hdev->num_msi = vectors;
2188 	hdev->num_msi_left = vectors;
2189 	hdev->base_msi_vector = pdev->irq;
2190 	hdev->roce_base_vector = hdev->base_msi_vector +
2191 				hdev->roce_base_msix_offset;
2192 
2193 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2194 					   sizeof(u16), GFP_KERNEL);
2195 	if (!hdev->vector_status) {
2196 		pci_free_irq_vectors(pdev);
2197 		return -ENOMEM;
2198 	}
2199 
2200 	for (i = 0; i < hdev->num_msi; i++)
2201 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2202 
2203 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2204 					sizeof(int), GFP_KERNEL);
2205 	if (!hdev->vector_irq) {
2206 		pci_free_irq_vectors(pdev);
2207 		return -ENOMEM;
2208 	}
2209 
2210 	return 0;
2211 }
2212 
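/* Only 10M and 100M links may run half duplex; any other speed is
 * forced to full duplex.
 */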
2213 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2214 {
2215 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2216 		duplex = HCLGE_MAC_FULL;
2217 
2218 	return duplex;
2219 }
2220 
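/* Hardware speed field encoding used in the switch below:
 * 1G = 0, 10G = 1, 25G = 2, 40G = 3, 50G = 4, 100G = 5, 10M = 6, 100M = 7
 */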
2221 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2222 				      u8 duplex)
2223 {
2224 	struct hclge_config_mac_speed_dup_cmd *req;
2225 	struct hclge_desc desc;
2226 	int ret;
2227 
2228 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2229 
2230 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2231 
2232 	if (duplex)
2233 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2234 
2235 	switch (speed) {
2236 	case HCLGE_MAC_SPEED_10M:
2237 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2238 				HCLGE_CFG_SPEED_S, 6);
2239 		break;
2240 	case HCLGE_MAC_SPEED_100M:
2241 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2242 				HCLGE_CFG_SPEED_S, 7);
2243 		break;
2244 	case HCLGE_MAC_SPEED_1G:
2245 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2246 				HCLGE_CFG_SPEED_S, 0);
2247 		break;
2248 	case HCLGE_MAC_SPEED_10G:
2249 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2250 				HCLGE_CFG_SPEED_S, 1);
2251 		break;
2252 	case HCLGE_MAC_SPEED_25G:
2253 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2254 				HCLGE_CFG_SPEED_S, 2);
2255 		break;
2256 	case HCLGE_MAC_SPEED_40G:
2257 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2258 				HCLGE_CFG_SPEED_S, 3);
2259 		break;
2260 	case HCLGE_MAC_SPEED_50G:
2261 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2262 				HCLGE_CFG_SPEED_S, 4);
2263 		break;
2264 	case HCLGE_MAC_SPEED_100G:
2265 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2266 				HCLGE_CFG_SPEED_S, 5);
2267 		break;
2268 	default:
2269 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2270 		return -EINVAL;
2271 	}
2272 
2273 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2274 		      1);
2275 
2276 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2277 	if (ret) {
2278 		dev_err(&hdev->pdev->dev,
2279 			"mac speed/duplex config cmd failed %d.\n", ret);
2280 		return ret;
2281 	}
2282 
2283 	return 0;
2284 }
2285 
2286 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2287 {
2288 	int ret;
2289 
2290 	duplex = hclge_check_speed_dup(duplex, speed);
2291 	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2292 		return 0;
2293 
2294 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2295 	if (ret)
2296 		return ret;
2297 
2298 	hdev->hw.mac.speed = speed;
2299 	hdev->hw.mac.duplex = duplex;
2300 
2301 	return 0;
2302 }
2303 
2304 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2305 				     u8 duplex)
2306 {
2307 	struct hclge_vport *vport = hclge_get_vport(handle);
2308 	struct hclge_dev *hdev = vport->back;
2309 
2310 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2311 }
2312 
2313 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2314 {
2315 	struct hclge_config_auto_neg_cmd *req;
2316 	struct hclge_desc desc;
2317 	u32 flag = 0;
2318 	int ret;
2319 
2320 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2321 
2322 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2323 	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2324 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2325 
2326 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2327 	if (ret)
2328 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2329 			ret);
2330 
2331 	return ret;
2332 }
2333 
2334 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2335 {
2336 	struct hclge_vport *vport = hclge_get_vport(handle);
2337 	struct hclge_dev *hdev = vport->back;
2338 
2339 	if (!hdev->hw.mac.support_autoneg) {
2340 		if (enable) {
2341 			dev_err(&hdev->pdev->dev,
2342 				"autoneg is not supported by current port\n");
2343 			return -EOPNOTSUPP;
2344 		} else {
2345 			return 0;
2346 		}
2347 	}
2348 
2349 	return hclge_set_autoneg_en(hdev, enable);
2350 }
2351 
2352 static int hclge_get_autoneg(struct hnae3_handle *handle)
2353 {
2354 	struct hclge_vport *vport = hclge_get_vport(handle);
2355 	struct hclge_dev *hdev = vport->back;
2356 	struct phy_device *phydev = hdev->hw.mac.phydev;
2357 
2358 	if (phydev)
2359 		return phydev->autoneg;
2360 
2361 	return hdev->hw.mac.autoneg;
2362 }
2363 
2364 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2365 {
2366 	struct hclge_vport *vport = hclge_get_vport(handle);
2367 	struct hclge_dev *hdev = vport->back;
2368 	int ret;
2369 
2370 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2371 
2372 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2373 	if (ret)
2374 		return ret;
2375 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2376 }
2377 
2378 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2379 {
2380 	struct hclge_vport *vport = hclge_get_vport(handle);
2381 	struct hclge_dev *hdev = vport->back;
2382 
2383 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2384 		return hclge_set_autoneg_en(hdev, !halt);
2385 
2386 	return 0;
2387 }
2388 
2389 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2390 {
2391 	struct hclge_config_fec_cmd *req;
2392 	struct hclge_desc desc;
2393 	int ret;
2394 
2395 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2396 
2397 	req = (struct hclge_config_fec_cmd *)desc.data;
2398 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2399 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2400 	if (fec_mode & BIT(HNAE3_FEC_RS))
2401 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2402 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2403 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2404 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2405 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2406 
2407 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2408 	if (ret)
2409 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2410 
2411 	return ret;
2412 }
2413 
2414 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2415 {
2416 	struct hclge_vport *vport = hclge_get_vport(handle);
2417 	struct hclge_dev *hdev = vport->back;
2418 	struct hclge_mac *mac = &hdev->hw.mac;
2419 	int ret;
2420 
2421 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2422 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2423 		return -EINVAL;
2424 	}
2425 
2426 	ret = hclge_set_fec_hw(hdev, fec_mode);
2427 	if (ret)
2428 		return ret;
2429 
2430 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2431 	return 0;
2432 }
2433 
2434 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2435 			  u8 *fec_mode)
2436 {
2437 	struct hclge_vport *vport = hclge_get_vport(handle);
2438 	struct hclge_dev *hdev = vport->back;
2439 	struct hclge_mac *mac = &hdev->hw.mac;
2440 
2441 	if (fec_ability)
2442 		*fec_ability = mac->fec_ability;
2443 	if (fec_mode)
2444 		*fec_mode = mac->fec_mode;
2445 }
2446 
2447 static int hclge_mac_init(struct hclge_dev *hdev)
2448 {
2449 	struct hclge_mac *mac = &hdev->hw.mac;
2450 	int ret;
2451 
2452 	hdev->support_sfp_query = true;
2453 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2454 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2455 					 hdev->hw.mac.duplex);
2456 	if (ret) {
2457 		dev_err(&hdev->pdev->dev,
2458 			"Config mac speed dup fail ret=%d\n", ret);
2459 		return ret;
2460 	}
2461 
2462 	if (hdev->hw.mac.support_autoneg) {
2463 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2464 		if (ret) {
2465 			dev_err(&hdev->pdev->dev,
2466 				"Config mac autoneg fail ret=%d\n", ret);
2467 			return ret;
2468 		}
2469 	}
2470 
2471 	mac->link = 0;
2472 
2473 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2474 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2475 		if (ret) {
2476 			dev_err(&hdev->pdev->dev,
2477 				"Fec mode init fail, ret = %d\n", ret);
2478 			return ret;
2479 		}
2480 	}
2481 
2482 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2483 	if (ret) {
2484 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2485 		return ret;
2486 	}
2487 
2488 	ret = hclge_buffer_alloc(hdev);
2489 	if (ret)
2490 		dev_err(&hdev->pdev->dev,
2491 			"allocate buffer fail, ret=%d\n", ret);
2492 
2493 	return ret;
2494 }
2495 
2496 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2497 {
2498 	if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2499 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2500 		schedule_work(&hdev->mbx_service_task);
2501 }
2502 
2503 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2504 {
2505 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2506 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2507 		schedule_work(&hdev->rst_service_task);
2508 }
2509 
2510 static void hclge_task_schedule(struct hclge_dev *hdev)
2511 {
2512 	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2513 	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2514 	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2515 		(void)schedule_work(&hdev->service_task);
2516 }
2517 
2518 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2519 {
2520 	struct hclge_link_status_cmd *req;
2521 	struct hclge_desc desc;
2522 	int link_status;
2523 	int ret;
2524 
2525 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2526 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2527 	if (ret) {
2528 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2529 			ret);
2530 		return ret;
2531 	}
2532 
2533 	req = (struct hclge_link_status_cmd *)desc.data;
2534 	link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2535 
2536 	return !!link_status;
2537 }
2538 
2539 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2540 {
2541 	unsigned int mac_state;
2542 	int link_stat;
2543 
2544 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2545 		return 0;
2546 
2547 	mac_state = hclge_get_mac_link_status(hdev);
2548 
2549 	if (hdev->hw.mac.phydev) {
2550 		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2551 			link_stat = mac_state &
2552 				hdev->hw.mac.phydev->link;
2553 		else
2554 			link_stat = 0;
2555 
2556 	} else {
2557 		link_stat = mac_state;
2558 	}
2559 
2560 	return !!link_stat;
2561 }
2562 
2563 static void hclge_update_link_status(struct hclge_dev *hdev)
2564 {
2565 	struct hnae3_client *rclient = hdev->roce_client;
2566 	struct hnae3_client *client = hdev->nic_client;
2567 	struct hnae3_handle *rhandle;
2568 	struct hnae3_handle *handle;
2569 	int state;
2570 	int i;
2571 
2572 	if (!client)
2573 		return;
2574 	state = hclge_get_mac_phy_link(hdev);
2575 	if (state != hdev->hw.mac.link) {
2576 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2577 			handle = &hdev->vport[i].nic;
2578 			client->ops->link_status_change(handle, state);
2579 			hclge_config_mac_tnl_int(hdev, state);
2580 			rhandle = &hdev->vport[i].roce;
2581 			if (rclient && rclient->ops->link_status_change)
2582 				rclient->ops->link_status_change(rhandle,
2583 								 state);
2584 		}
2585 		hdev->hw.mac.link = state;
2586 	}
2587 }
2588 
2589 static void hclge_update_port_capability(struct hclge_mac *mac)
2590 {
2591 	/* update fec ability by speed */
2592 	hclge_convert_setting_fec(mac);
2593 
2594 	/* firmware cannot identify the backplane type; the media type
2595 	 * read from the configuration helps to determine it
2596 	 */
2597 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2598 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2599 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2600 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2601 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2602 
2603 	if (mac->support_autoneg == true) {
2604 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2605 		linkmode_copy(mac->advertising, mac->supported);
2606 	} else {
2607 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2608 				   mac->supported);
2609 		linkmode_zero(mac->advertising);
2610 	}
2611 }
2612 
2613 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2614 {
2615 	struct hclge_sfp_info_cmd *resp;
2616 	struct hclge_desc desc;
2617 	int ret;
2618 
2619 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2620 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2621 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2622 	if (ret == -EOPNOTSUPP) {
2623 		dev_warn(&hdev->pdev->dev,
2624 			 "IMP does not support getting SFP speed %d\n", ret);
2625 		return ret;
2626 	} else if (ret) {
2627 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2628 		return ret;
2629 	}
2630 
2631 	*speed = le32_to_cpu(resp->speed);
2632 
2633 	return 0;
2634 }
2635 
2636 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2637 {
2638 	struct hclge_sfp_info_cmd *resp;
2639 	struct hclge_desc desc;
2640 	int ret;
2641 
2642 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2643 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2644 
2645 	resp->query_type = QUERY_ACTIVE_SPEED;
2646 
2647 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2648 	if (ret == -EOPNOTSUPP) {
2649 		dev_warn(&hdev->pdev->dev,
2650 			 "IMP does not support getting SFP info %d\n", ret);
2651 		return ret;
2652 	} else if (ret) {
2653 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2654 		return ret;
2655 	}
2656 
2657 	mac->speed = le32_to_cpu(resp->speed);
2658 	/* if resp->speed_ability is 0, the firmware is an old version,
2659 	 * so do not update these params
2660 	 */
2661 	if (resp->speed_ability) {
2662 		mac->module_type = le32_to_cpu(resp->module_type);
2663 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
2664 		mac->autoneg = resp->autoneg;
2665 		mac->support_autoneg = resp->autoneg_ability;
2666 		if (!resp->active_fec)
2667 			mac->fec_mode = 0;
2668 		else
2669 			mac->fec_mode = BIT(resp->active_fec);
2670 	} else {
2671 		mac->speed_type = QUERY_SFP_SPEED;
2672 	}
2673 
2674 	return 0;
2675 }
2676 
2677 static int hclge_update_port_info(struct hclge_dev *hdev)
2678 {
2679 	struct hclge_mac *mac = &hdev->hw.mac;
2680 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
2681 	int ret;
2682 
2683 	/* get the port info from SFP cmd if not copper port */
2684 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2685 		return 0;
2686 
2687 	/* if IMP does not support getting SFP/qSFP info, return directly */
2688 	if (!hdev->support_sfp_query)
2689 		return 0;
2690 
2691 	if (hdev->pdev->revision >= 0x21)
2692 		ret = hclge_get_sfp_info(hdev, mac);
2693 	else
2694 		ret = hclge_get_sfp_speed(hdev, &speed);
2695 
2696 	if (ret == -EOPNOTSUPP) {
2697 		hdev->support_sfp_query = false;
2698 		return ret;
2699 	} else if (ret) {
2700 		return ret;
2701 	}
2702 
2703 	if (hdev->pdev->revision >= 0x21) {
2704 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2705 			hclge_update_port_capability(mac);
2706 			return 0;
2707 		}
2708 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2709 					       HCLGE_MAC_FULL);
2710 	} else {
2711 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2712 			return 0; /* do nothing if no SFP */
2713 
2714 		/* must configure full duplex for SFP */
2715 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2716 	}
2717 }
2718 
2719 static int hclge_get_status(struct hnae3_handle *handle)
2720 {
2721 	struct hclge_vport *vport = hclge_get_vport(handle);
2722 	struct hclge_dev *hdev = vport->back;
2723 
2724 	hclge_update_link_status(hdev);
2725 
2726 	return hdev->hw.mac.link;
2727 }
2728 
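/* Periodic service timer: re-arms itself every second (HZ) and kicks the
 * service task; stats_timer and fd_arfs_expire_timer simply count these
 * one-second ticks.
 */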
2729 static void hclge_service_timer(struct timer_list *t)
2730 {
2731 	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2732 
2733 	mod_timer(&hdev->service_timer, jiffies + HZ);
2734 	hdev->hw_stats.stats_timer++;
2735 	hdev->fd_arfs_expire_timer++;
2736 	hclge_task_schedule(hdev);
2737 }
2738 
2739 static void hclge_service_complete(struct hclge_dev *hdev)
2740 {
2741 	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2742 
2743 	/* Flush memory before next watchdog */
2744 	smp_mb__before_atomic();
2745 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2746 }
2747 
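/* Decode which vector0 event fired, in priority order: IMP reset, global
 * reset, MSI-X hardware error, mailbox (CMDQ RX), then other. *clearval
 * is set to the value the caller should use to acknowledge the event.
 */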
2748 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2749 {
2750 	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2751 
2752 	/* fetch the events from their corresponding regs */
2753 	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2754 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2755 	msix_src_reg = hclge_read_dev(&hdev->hw,
2756 				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2757 
2758 	/* Assumption: if reset and mailbox events happen to be reported
2759 	 * together, only the reset event is processed in this pass and
2760 	 * the processing of the mailbox events is deferred. Since the RX
2761 	 * CMDQ event is not cleared this time, the H/W will raise another
2762 	 * interrupt just for the mailbox.
2763 	 */
2764 
2765 	/* check for vector0 reset event sources */
2766 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2767 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2768 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2769 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2770 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2771 		hdev->rst_stats.imp_rst_cnt++;
2772 		return HCLGE_VECTOR0_EVENT_RST;
2773 	}
2774 
2775 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2776 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2777 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2778 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2779 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2780 		hdev->rst_stats.global_rst_cnt++;
2781 		return HCLGE_VECTOR0_EVENT_RST;
2782 	}
2783 
2784 	/* check for vector0 msix event source */
2785 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2786 		dev_info(&hdev->pdev->dev, "received event 0x%x\n",
2787 			 msix_src_reg);
2788 		*clearval = msix_src_reg;
2789 		return HCLGE_VECTOR0_EVENT_ERR;
2790 	}
2791 
2792 	/* check for vector0 mailbox(=CMDQ RX) event source */
2793 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2794 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2795 		*clearval = cmdq_src_reg;
2796 		return HCLGE_VECTOR0_EVENT_MBX;
2797 	}
2798 
2799 	/* print other vector0 event source */
2800 	dev_info(&hdev->pdev->dev,
2801 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
2802 		 cmdq_src_reg, msix_src_reg);
2803 	*clearval = msix_src_reg;
2804 
2805 	return HCLGE_VECTOR0_EVENT_OTHER;
2806 }
2807 
2808 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2809 				    u32 regclr)
2810 {
2811 	switch (event_type) {
2812 	case HCLGE_VECTOR0_EVENT_RST:
2813 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2814 		break;
2815 	case HCLGE_VECTOR0_EVENT_MBX:
2816 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2817 		break;
2818 	default:
2819 		break;
2820 	}
2821 }
2822 
2823 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2824 {
2825 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2826 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2827 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2828 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2829 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2830 }
2831 
2832 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2833 {
2834 	writel(enable ? 1 : 0, vector->addr);
2835 }
2836 
2837 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2838 {
2839 	struct hclge_dev *hdev = data;
2840 	u32 clearval = 0;
2841 	u32 event_cause;
2842 
2843 	hclge_enable_vector(&hdev->misc_vector, false);
2844 	event_cause = hclge_check_event_cause(hdev, &clearval);
2845 
2846 	/* vector 0 interrupt is shared with reset and mailbox source events. */
2847 	switch (event_cause) {
2848 	case HCLGE_VECTOR0_EVENT_ERR:
2849 		/* we do not know what type of reset is required now. This could
2850 		 * only be decided after we fetch the type of errors which
2851 		 * caused this event. Therefore, we do the following for now:
2852 		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means the
2853 		 *    type of reset to be used is deferred.
2854 		 * 2. Schedule the reset service task.
2855 		 * 3. When the service task receives HNAE3_UNKNOWN_RESET, it
2856 		 *    will fetch the correct type of reset. This is done by
2857 		 *    first decoding the types of errors.
2858 		 */
2859 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2860 		/* fall through */
2861 	case HCLGE_VECTOR0_EVENT_RST:
2862 		hclge_reset_task_schedule(hdev);
2863 		break;
2864 	case HCLGE_VECTOR0_EVENT_MBX:
2865 		/* If we are here then either:
2866 		 * 1. we are not handling any mbx task and none is
2867 		 *    scheduled,
2868 		 *                        OR
2869 		 * 2. we are handling a mbx task but nothing more is
2870 		 *    scheduled.
2871 		 * In both cases, schedule the mbx task, as this interrupt
2872 		 * reports more mbx messages to be handled.
2873 		 */
2874 		hclge_mbx_task_schedule(hdev);
2875 		break;
2876 	default:
2877 		dev_warn(&hdev->pdev->dev,
2878 			 "received unknown or unhandled event of vector0\n");
2879 		break;
2880 	}
2881 
2882 	/* clear the source of the interrupt if it is not caused by reset */
2883 	if (!clearval ||
2884 	    event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2885 		hclge_clear_event_cause(hdev, event_cause, clearval);
2886 		hclge_enable_vector(&hdev->misc_vector, true);
2887 	}
2888 
2889 	return IRQ_HANDLED;
2890 }
2891 
2892 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2893 {
2894 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2895 		dev_warn(&hdev->pdev->dev,
2896 			 "vector(vector_id %d) has been freed.\n", vector_id);
2897 		return;
2898 	}
2899 
2900 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2901 	hdev->num_msi_left += 1;
2902 	hdev->num_msi_used -= 1;
2903 }
2904 
2905 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2906 {
2907 	struct hclge_misc_vector *vector = &hdev->misc_vector;
2908 
2909 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2910 
2911 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2912 	hdev->vector_status[0] = 0;
2913 
2914 	hdev->num_msi_left -= 1;
2915 	hdev->num_msi_used += 1;
2916 }
2917 
2918 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2919 {
2920 	int ret;
2921 
2922 	hclge_get_misc_vector(hdev);
2923 
2924 	/* this IRQ is explicitly freed in hclge_misc_irq_uninit() */
2925 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2926 			  0, "hclge_misc", hdev);
2927 	if (ret) {
2928 		hclge_free_vector(hdev, 0);
2929 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2930 			hdev->misc_vector.vector_irq);
2931 	}
2932 
2933 	return ret;
2934 }
2935 
2936 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2937 {
2938 	free_irq(hdev->misc_vector.vector_irq, hdev);
2939 	hclge_free_vector(hdev, 0);
2940 }
2941 
2942 int hclge_notify_client(struct hclge_dev *hdev,
2943 			enum hnae3_reset_notify_type type)
2944 {
2945 	struct hnae3_client *client = hdev->nic_client;
2946 	u16 i;
2947 
2948 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
2949 		return 0;
2950 
2951 	if (!client->ops->reset_notify)
2952 		return -EOPNOTSUPP;
2953 
2954 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2955 		struct hnae3_handle *handle = &hdev->vport[i].nic;
2956 		int ret;
2957 
2958 		ret = client->ops->reset_notify(handle, type);
2959 		if (ret) {
2960 			dev_err(&hdev->pdev->dev,
2961 				"notify nic client failed %d(%d)\n", type, ret);
2962 			return ret;
2963 		}
2964 	}
2965 
2966 	return 0;
2967 }
2968 
2969 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2970 				    enum hnae3_reset_notify_type type)
2971 {
2972 	struct hnae3_client *client = hdev->roce_client;
2973 	int ret = 0;
2974 	u16 i;
2975 
2976 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
2977 		return 0;
2978 
2979 	if (!client->ops->reset_notify)
2980 		return -EOPNOTSUPP;
2981 
2982 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2983 		struct hnae3_handle *handle = &hdev->vport[i].roce;
2984 
2985 		ret = client->ops->reset_notify(handle, type);
2986 		if (ret) {
2987 			dev_err(&hdev->pdev->dev,
2988 				"notify roce client failed %d(%d)",
2989 				type, ret);
2990 			return ret;
2991 		}
2992 	}
2993 
2994 	return ret;
2995 }
2996 
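/* Poll the hardware until the current reset type completes. FLR
 * completion is signalled through the HNAE3_FLR_DONE bit instead of a
 * register. With HCLGE_RESET_WAIT_CNT polls of HCLGE_RESET_WATI_MS each,
 * the wait gives up after roughly 20 seconds.
 */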
2997 static int hclge_reset_wait(struct hclge_dev *hdev)
2998 {
2999 #define HCLGE_RESET_WATI_MS	100
3000 #define HCLGE_RESET_WAIT_CNT	200
3001 	u32 val, reg, reg_bit;
3002 	u32 cnt = 0;
3003 
3004 	switch (hdev->reset_type) {
3005 	case HNAE3_IMP_RESET:
3006 		reg = HCLGE_GLOBAL_RESET_REG;
3007 		reg_bit = HCLGE_IMP_RESET_BIT;
3008 		break;
3009 	case HNAE3_GLOBAL_RESET:
3010 		reg = HCLGE_GLOBAL_RESET_REG;
3011 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3012 		break;
3013 	case HNAE3_FUNC_RESET:
3014 		reg = HCLGE_FUN_RST_ING;
3015 		reg_bit = HCLGE_FUN_RST_ING_B;
3016 		break;
3017 	case HNAE3_FLR_RESET:
3018 		break;
3019 	default:
3020 		dev_err(&hdev->pdev->dev,
3021 			"Wait for unsupported reset type: %d\n",
3022 			hdev->reset_type);
3023 		return -EINVAL;
3024 	}
3025 
3026 	if (hdev->reset_type == HNAE3_FLR_RESET) {
3027 		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3028 		       cnt++ < HCLGE_RESET_WAIT_CNT)
3029 			msleep(HCLGE_RESET_WATI_MS);
3030 
3031 		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3032 			dev_err(&hdev->pdev->dev,
3033 				"flr wait timeout: %d\n", cnt);
3034 			return -EBUSY;
3035 		}
3036 
3037 		return 0;
3038 	}
3039 
3040 	val = hclge_read_dev(&hdev->hw, reg);
3041 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3042 		msleep(HCLGE_RESET_WATI_MS);
3043 		val = hclge_read_dev(&hdev->hw, reg);
3044 		cnt++;
3045 	}
3046 
3047 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3048 		dev_warn(&hdev->pdev->dev,
3049 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3050 		return -EBUSY;
3051 	}
3052 
3053 	return 0;
3054 }
3055 
3056 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3057 {
3058 	struct hclge_vf_rst_cmd *req;
3059 	struct hclge_desc desc;
3060 
3061 	req = (struct hclge_vf_rst_cmd *)desc.data;
3062 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3063 	req->dest_vfid = func_id;
3064 
3065 	if (reset)
3066 		req->vf_rst = 0x1;
3067 
3068 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3069 }
3070 
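/* Walk all VF vports (they follow the vmdq vports) and set or clear
 * their FUNC_RST_ING state. When asserting reset, alive VFs are also
 * informed so that they can stop IO; this may fail if the VF driver is
 * not loaded, which is only a warning.
 */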
3071 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3072 {
3073 	int i;
3074 
3075 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3076 		struct hclge_vport *vport = &hdev->vport[i];
3077 		int ret;
3078 
3079 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3080 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3081 		if (ret) {
3082 			dev_err(&hdev->pdev->dev,
3083 				"set vf(%d) rst failed %d!\n",
3084 				vport->vport_id, ret);
3085 			return ret;
3086 		}
3087 
3088 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3089 			continue;
3090 
3091 		/* Inform VF to process the reset.
3092 		 * hclge_inform_reset_assert_to_vf may fail if VF
3093 		 * driver is not loaded.
3094 		 */
3095 		ret = hclge_inform_reset_assert_to_vf(vport);
3096 		if (ret)
3097 			dev_warn(&hdev->pdev->dev,
3098 				 "inform reset to vf(%d) failed %d!\n",
3099 				 vport->vport_id, ret);
3100 	}
3101 
3102 	return 0;
3103 }
3104 
3105 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3106 {
3107 	struct hclge_desc desc;
3108 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3109 	int ret;
3110 
3111 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3112 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3113 	req->fun_reset_vfid = func_id;
3114 
3115 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3116 	if (ret)
3117 		dev_err(&hdev->pdev->dev,
3118 			"send function reset cmd fail, status = %d\n", ret);
3119 
3120 	return ret;
3121 }
3122 
3123 static void hclge_do_reset(struct hclge_dev *hdev)
3124 {
3125 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3126 	struct pci_dev *pdev = hdev->pdev;
3127 	u32 val;
3128 
3129 	if (hclge_get_hw_reset_stat(handle)) {
3130 		dev_info(&pdev->dev, "Hardware reset not finished\n");
3131 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3132 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3133 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3134 		return;
3135 	}
3136 
3137 	switch (hdev->reset_type) {
3138 	case HNAE3_GLOBAL_RESET:
3139 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3140 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3141 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3142 		dev_info(&pdev->dev, "Global Reset requested\n");
3143 		break;
3144 	case HNAE3_FUNC_RESET:
3145 		dev_info(&pdev->dev, "PF Reset requested\n");
3146 		/* schedule again to check later */
3147 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3148 		hclge_reset_task_schedule(hdev);
3149 		break;
3150 	case HNAE3_FLR_RESET:
3151 		dev_info(&pdev->dev, "FLR requested\n");
3152 		/* schedule again to check later */
3153 		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3154 		hclge_reset_task_schedule(hdev);
3155 		break;
3156 	default:
3157 		dev_warn(&pdev->dev,
3158 			 "Unsupported reset type: %d\n", hdev->reset_type);
3159 		break;
3160 	}
3161 }
3162 
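/* Pick the highest-priority reset level pending in @addr and clear the
 * levels it supersedes. Priority order: IMP > GLOBAL > FUNC > FLR. An
 * UNKNOWN reset is first resolved into one of these by decoding the
 * MSI-X error state, and a level lower than the reset already in
 * progress is reported as HNAE3_NONE_RESET.
 */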
3163 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3164 						   unsigned long *addr)
3165 {
3166 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3167 	struct hclge_dev *hdev = ae_dev->priv;
3168 
3169 	/* first, resolve any unknown reset type to the known type(s) */
3170 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3171 		/* we intentionally ignore any errors from this function,
3172 		 * as we will end up in *some* reset request in any case
3173 		 */
3174 		hclge_handle_hw_msix_error(hdev, addr);
3175 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
3176 		/* We deferred the clearing of the error event which caused
3177 		 * the interrupt since it was not possible to do that in
3178 		 * interrupt context (and this is the reason we introduced
3179 		 * the new UNKNOWN reset type). Now that the errors have been
3180 		 * handled and cleared in hardware, we can safely enable
3181 		 * interrupts. This is an exception to the norm.
3182 		 */
3183 		hclge_enable_vector(&hdev->misc_vector, true);
3184 	}
3185 
3186 	/* return the highest priority reset level amongst all */
3187 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3188 		rst_level = HNAE3_IMP_RESET;
3189 		clear_bit(HNAE3_IMP_RESET, addr);
3190 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3191 		clear_bit(HNAE3_FUNC_RESET, addr);
3192 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3193 		rst_level = HNAE3_GLOBAL_RESET;
3194 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3195 		clear_bit(HNAE3_FUNC_RESET, addr);
3196 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3197 		rst_level = HNAE3_FUNC_RESET;
3198 		clear_bit(HNAE3_FUNC_RESET, addr);
3199 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3200 		rst_level = HNAE3_FLR_RESET;
3201 		clear_bit(HNAE3_FLR_RESET, addr);
3202 	}
3203 
3204 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3205 	    rst_level < hdev->reset_type)
3206 		return HNAE3_NONE_RESET;
3207 
3208 	return rst_level;
3209 }
3210 
3211 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3212 {
3213 	u32 clearval = 0;
3214 
3215 	switch (hdev->reset_type) {
3216 	case HNAE3_IMP_RESET:
3217 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3218 		break;
3219 	case HNAE3_GLOBAL_RESET:
3220 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3221 		break;
3222 	default:
3223 		break;
3224 	}
3225 
3226 	if (!clearval)
3227 		return;
3228 
3229 	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3230 	hclge_enable_vector(&hdev->misc_vector, true);
3231 }
3232 
3233 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3234 {
3235 	int ret = 0;
3236 
3237 	switch (hdev->reset_type) {
3238 	case HNAE3_FUNC_RESET:
3239 		/* fall through */
3240 	case HNAE3_FLR_RESET:
3241 		ret = hclge_set_all_vf_rst(hdev, true);
3242 		break;
3243 	default:
3244 		break;
3245 	}
3246 
3247 	return ret;
3248 }
3249 
3250 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3251 {
3252 #define HCLGE_RESET_SYNC_TIME 100
3253 
3254 	u32 reg_val;
3255 	int ret = 0;
3256 
3257 	switch (hdev->reset_type) {
3258 	case HNAE3_FUNC_RESET:
3259 		/* There is no mechanism for the PF to know if the VF has
3260 		 * stopped IO; for now, just wait 100 ms for the VF to stop IO
3261 		 */
3262 		msleep(HCLGE_RESET_SYNC_TIME);
3263 		ret = hclge_func_reset_cmd(hdev, 0);
3264 		if (ret) {
3265 			dev_err(&hdev->pdev->dev,
3266 				"asserting function reset fail %d!\n", ret);
3267 			return ret;
3268 		}
3269 
3270 		/* After performing PF reset, it is not necessary to do the
3271 		 * mailbox handling or send any command to firmware, because
3272 		 * any mailbox handling or command to firmware is only valid
3273 		 * after hclge_cmd_init is called.
3274 		 */
3275 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3276 		hdev->rst_stats.pf_rst_cnt++;
3277 		break;
3278 	case HNAE3_FLR_RESET:
3279 		/* There is no mechanism for the PF to know if the VF has
3280 		 * stopped IO; for now, just wait 100 ms for the VF to stop IO
3281 		 */
3282 		msleep(HCLGE_RESET_SYNC_TIME);
3283 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3284 		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3285 		hdev->rst_stats.flr_rst_cnt++;
3286 		break;
3287 	case HNAE3_IMP_RESET:
3288 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3289 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3290 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3291 		break;
3292 	default:
3293 		break;
3294 	}
3295 
3296 	/* inform hardware that preparatory work is done */
3297 	msleep(HCLGE_RESET_SYNC_TIME);
3298 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
3299 			HCLGE_NIC_CMQ_ENABLE);
3300 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3301 
3302 	return ret;
3303 }
3304 
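/* Decide how to proceed after a failed reset attempt: keep waiting if a
 * reset is still pending in hardware, back off if an IMP reset is about
 * to happen anyway, re-queue the same reset on a timeout, and otherwise
 * escalate to a global reset via the reset timer. After
 * MAX_RESET_FAIL_CNT attempts the reset is given up entirely.
 */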
3305 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3306 {
3307 #define MAX_RESET_FAIL_CNT 5
3308 
3309 	if (hdev->reset_pending) {
3310 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3311 			 hdev->reset_pending);
3312 		return true;
3313 	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3314 		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3315 		    BIT(HCLGE_IMP_RESET_BIT))) {
3316 		dev_info(&hdev->pdev->dev,
3317 			 "reset failed because IMP Reset is pending\n");
3318 		hclge_clear_reset_cause(hdev);
3319 		return false;
3320 	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3321 		hdev->reset_fail_cnt++;
3322 		if (is_timeout) {
3323 			set_bit(hdev->reset_type, &hdev->reset_pending);
3324 			dev_info(&hdev->pdev->dev,
3325 				 "re-schedule to wait for hw reset done\n");
3326 			return true;
3327 		}
3328 
3329 		dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3330 		hclge_clear_reset_cause(hdev);
3331 		set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3332 		mod_timer(&hdev->reset_timer,
3333 			  jiffies + HCLGE_RESET_INTERVAL);
3334 
3335 		return false;
3336 	}
3337 
3338 	hclge_clear_reset_cause(hdev);
3339 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3340 	return false;
3341 }
3342 
3343 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3344 {
3345 	int ret = 0;
3346 
3347 	switch (hdev->reset_type) {
3348 	case HNAE3_FUNC_RESET:
3349 		/* fall through */
3350 	case HNAE3_FLR_RESET:
3351 		ret = hclge_set_all_vf_rst(hdev, false);
3352 		break;
3353 	default:
3354 		break;
3355 	}
3356 
3357 	return ret;
3358 }
3359 
3360 static int hclge_reset_stack(struct hclge_dev *hdev)
3361 {
3362 	int ret;
3363 
3364 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3365 	if (ret)
3366 		return ret;
3367 
3368 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3369 	if (ret)
3370 		return ret;
3371 
3372 	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3373 	if (ret)
3374 		return ret;
3375 
3376 	return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3377 }
3378 
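/* Top-level reset handler. The sequence, mirroring the calls below, is:
 * notify RoCE client down -> prepare down (VF reset) -> notify NIC
 * client down -> prepare wait (assert reset / FLR) -> wait for hardware
 * -> RoCE uninit -> reset stack (NIC uninit, ae_dev reset, NIC init,
 * restore) -> clear reset cause -> prepare up -> RoCE init -> NIC up ->
 * RoCE up. Any failure is handed to hclge_reset_err_handle(), which
 * decides whether to re-schedule the reset task.
 */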
3379 static void hclge_reset(struct hclge_dev *hdev)
3380 {
3381 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3382 	bool is_timeout = false;
3383 	int ret;
3384 
3385 	/* Initialize ae_dev reset status as well, in case the enet layer
3386 	 * wants to know if the device is undergoing reset
3387 	 */
3388 	ae_dev->reset_type = hdev->reset_type;
3389 	hdev->rst_stats.reset_cnt++;
3390 	/* perform reset of the stack & ae device for a client */
3391 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3392 	if (ret)
3393 		goto err_reset;
3394 
3395 	ret = hclge_reset_prepare_down(hdev);
3396 	if (ret)
3397 		goto err_reset;
3398 
3399 	rtnl_lock();
3400 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3401 	if (ret)
3402 		goto err_reset_lock;
3403 
3404 	rtnl_unlock();
3405 
3406 	ret = hclge_reset_prepare_wait(hdev);
3407 	if (ret)
3408 		goto err_reset;
3409 
3410 	if (hclge_reset_wait(hdev)) {
3411 		is_timeout = true;
3412 		goto err_reset;
3413 	}
3414 
3415 	hdev->rst_stats.hw_reset_done_cnt++;
3416 
3417 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3418 	if (ret)
3419 		goto err_reset;
3420 
3421 	rtnl_lock();
3422 
3423 	ret = hclge_reset_stack(hdev);
3424 	if (ret)
3425 		goto err_reset_lock;
3426 
3427 	hclge_clear_reset_cause(hdev);
3428 
3429 	ret = hclge_reset_prepare_up(hdev);
3430 	if (ret)
3431 		goto err_reset_lock;
3432 
3433 	rtnl_unlock();
3434 
3435 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3436 	/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3437 	 * times
3438 	 */
3439 	if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3440 		goto err_reset;
3441 
3442 	rtnl_lock();
3443 
3444 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3445 	if (ret)
3446 		goto err_reset_lock;
3447 
3448 	rtnl_unlock();
3449 
3450 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3451 	if (ret)
3452 		goto err_reset;
3453 
3454 	hdev->last_reset_time = jiffies;
3455 	hdev->reset_fail_cnt = 0;
3456 	hdev->rst_stats.reset_done_cnt++;
3457 	ae_dev->reset_type = HNAE3_NONE_RESET;
3458 	del_timer(&hdev->reset_timer);
3459 
3460 	return;
3461 
3462 err_reset_lock:
3463 	rtnl_unlock();
3464 err_reset:
3465 	if (hclge_reset_err_handle(hdev, is_timeout))
3466 		hclge_reset_task_schedule(hdev);
3467 }
3468 
3469 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3470 {
3471 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3472 	struct hclge_dev *hdev = ae_dev->priv;
3473 
3474 	/* We might end up getting called broadly because of the 2 cases below:
3475 	 * 1. A recoverable error was conveyed through APEI and the only way
3476 	 *    to bring back normalcy is to reset.
3477 	 * 2. A new reset request from the stack due to timeout
3478 	 *
3479 	 * For the first case, the error event might not have an ae handle
3480 	 * available. Check if this is a new reset request and we are not here
3481 	 * just because the last reset attempt did not succeed and the watchdog
3482 	 * hit us again. We know this if the last reset request did not occur
3483 	 * very recently (watchdog timer = 5*HZ, so check after a sufficiently
3484 	 * large time, say 4*5*HZ). In case of a new request we reset the
3485 	 * "reset level" to PF reset. And if it is a repeat reset request of
3486 	 * the most recent one, we want to make sure we throttle it; therefore,
3487 	 * we will not allow it again before 3*HZ has passed.
3488 	 */
3489 	if (!handle)
3490 		handle = &hdev->vport[0].nic;
3491 
3492 	if (time_before(jiffies, (hdev->last_reset_time +
3493 				  HCLGE_RESET_INTERVAL)))
3494 		return;
3495 	else if (hdev->default_reset_request)
3496 		hdev->reset_level =
3497 			hclge_get_reset_level(ae_dev,
3498 					      &hdev->default_reset_request);
3499 	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3500 		hdev->reset_level = HNAE3_FUNC_RESET;
3501 
3502 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
3503 		 hdev->reset_level);
3504 
3505 	/* request reset & schedule reset task */
3506 	set_bit(hdev->reset_level, &hdev->reset_request);
3507 	hclge_reset_task_schedule(hdev);
3508 
3509 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3510 		hdev->reset_level++;
3511 }
3512 
3513 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3514 					enum hnae3_reset_type rst_type)
3515 {
3516 	struct hclge_dev *hdev = ae_dev->priv;
3517 
3518 	set_bit(rst_type, &hdev->default_reset_request);
3519 }
3520 
3521 static void hclge_reset_timer(struct timer_list *t)
3522 {
3523 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3524 
3525 	dev_info(&hdev->pdev->dev,
3526 		 "triggering reset in reset timer\n");
3527 	hclge_reset_event(hdev->pdev, NULL);
3528 }
3529 
3530 static void hclge_reset_subtask(struct hclge_dev *hdev)
3531 {
3532 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3533 
3534 	/* check if there is any ongoing reset in the hardware. This status can
3535 	 * be checked from reset_pending. If there is, we need to wait for the
3536 	 * hardware to complete the reset.
3537 	 *    a. If we are able to figure out in reasonable time that the
3538 	 *       hardware has fully reset, we can proceed with the driver and
3539 	 *       client reset.
3540 	 *    b. Else, we can come back later to check this status, so
3541 	 *       re-schedule now.
3542 	 */
3543 	hdev->last_reset_time = jiffies;
3544 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3545 	if (hdev->reset_type != HNAE3_NONE_RESET)
3546 		hclge_reset(hdev);
3547 
3548 	/* check if we got any *new* reset requests to be honored */
3549 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3550 	if (hdev->reset_type != HNAE3_NONE_RESET)
3551 		hclge_do_reset(hdev);
3552 
3553 	hdev->reset_type = HNAE3_NONE_RESET;
3554 }
3555 
3556 static void hclge_reset_service_task(struct work_struct *work)
3557 {
3558 	struct hclge_dev *hdev =
3559 		container_of(work, struct hclge_dev, rst_service_task);
3560 
3561 	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3562 		return;
3563 
3564 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3565 
3566 	hclge_reset_subtask(hdev);
3567 
3568 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3569 }
3570 
3571 static void hclge_mailbox_service_task(struct work_struct *work)
3572 {
3573 	struct hclge_dev *hdev =
3574 		container_of(work, struct hclge_dev, mbx_service_task);
3575 
3576 	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3577 		return;
3578 
3579 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3580 
3581 	hclge_mbx_handler(hdev);
3582 
3583 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3584 }
3585 
3586 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3587 {
3588 	int i;
3589 
3590 	/* start from vport 1, since vport 0 is the PF and is always alive */
3591 	for (i = 1; i < hdev->num_alloc_vport; i++) {
3592 		struct hclge_vport *vport = &hdev->vport[i];
3593 
3594 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3595 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3596 
3597 		/* If the VF is not alive, restore its MPS to the default value */
3598 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3599 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3600 	}
3601 }
3602 
3603 static void hclge_service_task(struct work_struct *work)
3604 {
3605 	struct hclge_dev *hdev =
3606 		container_of(work, struct hclge_dev, service_task);
3607 
3608 	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3609 		hclge_update_stats_for_all(hdev);
3610 		hdev->hw_stats.stats_timer = 0;
3611 	}
3612 
3613 	hclge_update_port_info(hdev);
3614 	hclge_update_link_status(hdev);
3615 	hclge_update_vport_alive(hdev);
3616 	hclge_sync_vlan_filter(hdev);
3617 	if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3618 		hclge_rfs_filter_expire(hdev);
3619 		hdev->fd_arfs_expire_timer = 0;
3620 	}
3621 	hclge_service_complete(hdev);
3622 }
3623 
3624 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3625 {
3626 	/* VF handle has no client */
3627 	if (!handle->client)
3628 		return container_of(handle, struct hclge_vport, nic);
3629 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
3630 		return container_of(handle, struct hclge_vport, roce);
3631 	else
3632 		return container_of(handle, struct hclge_vport, nic);
3633 }
3634 
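/* Added note: hclge_get_vector() scans the MSI-X entries starting from
 * index 1 (entry 0 is assumed to be reserved for the misc vector elsewhere
 * in this driver) and hands out up to vector_num free entries. For entry i
 * the per-vector doorbell address handed back to the client is
 *   io_base + HCLGE_VECTOR_REG_BASE
 *           + (i - 1) * HCLGE_VECTOR_REG_OFFSET
 *           + vport_id * HCLGE_VECTOR_VF_OFFSET
 */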
3635 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3636 			    struct hnae3_vector_info *vector_info)
3637 {
3638 	struct hclge_vport *vport = hclge_get_vport(handle);
3639 	struct hnae3_vector_info *vector = vector_info;
3640 	struct hclge_dev *hdev = vport->back;
3641 	int alloc = 0;
3642 	int i, j;
3643 
3644 	vector_num = min(hdev->num_msi_left, vector_num);
3645 
3646 	for (j = 0; j < vector_num; j++) {
3647 		for (i = 1; i < hdev->num_msi; i++) {
3648 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3649 				vector->vector = pci_irq_vector(hdev->pdev, i);
3650 				vector->io_addr = hdev->hw.io_base +
3651 					HCLGE_VECTOR_REG_BASE +
3652 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
3653 					vport->vport_id *
3654 					HCLGE_VECTOR_VF_OFFSET;
3655 				hdev->vector_status[i] = vport->vport_id;
3656 				hdev->vector_irq[i] = vector->vector;
3657 
3658 				vector++;
3659 				alloc++;
3660 
3661 				break;
3662 			}
3663 		}
3664 	}
3665 	hdev->num_msi_left -= alloc;
3666 	hdev->num_msi_used += alloc;
3667 
3668 	return alloc;
3669 }
3670 
3671 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3672 {
3673 	int i;
3674 
3675 	for (i = 0; i < hdev->num_msi; i++)
3676 		if (vector == hdev->vector_irq[i])
3677 			return i;
3678 
3679 	return -EINVAL;
3680 }
3681 
3682 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3683 {
3684 	struct hclge_vport *vport = hclge_get_vport(handle);
3685 	struct hclge_dev *hdev = vport->back;
3686 	int vector_id;
3687 
3688 	vector_id = hclge_get_vector_index(hdev, vector);
3689 	if (vector_id < 0) {
3690 		dev_err(&hdev->pdev->dev,
3691 			"Get vector index fail. vector_id =%d\n", vector_id);
3692 		return vector_id;
3693 	}
3694 
3695 	hclge_free_vector(hdev, vector_id);
3696 
3697 	return 0;
3698 }
3699 
3700 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3701 {
3702 	return HCLGE_RSS_KEY_SIZE;
3703 }
3704 
3705 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3706 {
3707 	return HCLGE_RSS_IND_TBL_SIZE;
3708 }
3709 
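/* Added note: the RSS hash key is programmed in chunks of
 * HCLGE_RSS_HASH_KEY_NUM bytes, one command descriptor per chunk; the hash
 * algorithm goes into the low bits of hash_config and the chunk index into
 * the offset bits. As an illustrative example only, a 40-byte key sent in
 * 16-byte chunks would need three descriptors carrying 16, 16 and 8 bytes.
 */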
3710 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3711 				  const u8 hfunc, const u8 *key)
3712 {
3713 	struct hclge_rss_config_cmd *req;
3714 	unsigned int key_offset = 0;
3715 	struct hclge_desc desc;
3716 	int key_counts;
3717 	int key_size;
3718 	int ret;
3719 
3720 	key_counts = HCLGE_RSS_KEY_SIZE;
3721 	req = (struct hclge_rss_config_cmd *)desc.data;
3722 
3723 	while (key_counts) {
3724 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3725 					   false);
3726 
3727 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3728 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3729 
3730 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
3731 		memcpy(req->hash_key,
3732 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3733 
3734 		key_counts -= key_size;
3735 		key_offset++;
3736 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3737 		if (ret) {
3738 			dev_err(&hdev->pdev->dev,
3739 				"Configure RSS config fail, status = %d\n",
3740 				ret);
3741 			return ret;
3742 		}
3743 	}
3744 	return 0;
3745 }
3746 
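/* Added note: the indirection table is programmed HCLGE_RSS_CFG_TBL_SIZE
 * entries per command descriptor; start_table_index tells the firmware
 * which slice each descriptor carries, so HCLGE_RSS_CFG_TBL_NUM commands
 * cover the whole HCLGE_RSS_IND_TBL_SIZE table.
 */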
3747 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3748 {
3749 	struct hclge_rss_indirection_table_cmd *req;
3750 	struct hclge_desc desc;
3751 	int i, j;
3752 	int ret;
3753 
3754 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3755 
3756 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3757 		hclge_cmd_setup_basic_desc
3758 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3759 
3760 		req->start_table_index =
3761 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3762 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3763 
3764 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3765 			req->rss_result[j] =
3766 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3767 
3768 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3769 		if (ret) {
3770 			dev_err(&hdev->pdev->dev,
3771 				"Configure rss indir table fail, status = %d\n",
3772 				ret);
3773 			return ret;
3774 		}
3775 	}
3776 	return 0;
3777 }
3778 
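/* Added note: each TC is described by one 16-bit mode word packing a valid
 * bit, a size field (the log2 queue number per TC, see hclge_rss_init_hw())
 * and a queue offset; all HCLGE_MAX_TC_NUM words are sent in one command.
 */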
3779 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3780 				 u16 *tc_size, u16 *tc_offset)
3781 {
3782 	struct hclge_rss_tc_mode_cmd *req;
3783 	struct hclge_desc desc;
3784 	int ret;
3785 	int i;
3786 
3787 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3788 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3789 
3790 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3791 		u16 mode = 0;
3792 
3793 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3794 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3795 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3796 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3797 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3798 
3799 		req->rss_tc_mode[i] = cpu_to_le16(mode);
3800 	}
3801 
3802 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3803 	if (ret)
3804 		dev_err(&hdev->pdev->dev,
3805 			"Configure rss tc mode fail, status = %d\n", ret);
3806 
3807 	return ret;
3808 }
3809 
3810 static void hclge_get_rss_type(struct hclge_vport *vport)
3811 {
3812 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
3813 	    vport->rss_tuple_sets.ipv4_udp_en ||
3814 	    vport->rss_tuple_sets.ipv4_sctp_en ||
3815 	    vport->rss_tuple_sets.ipv6_tcp_en ||
3816 	    vport->rss_tuple_sets.ipv6_udp_en ||
3817 	    vport->rss_tuple_sets.ipv6_sctp_en)
3818 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3819 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3820 		 vport->rss_tuple_sets.ipv6_fragment_en)
3821 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3822 	else
3823 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3824 }
3825 
3826 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3827 {
3828 	struct hclge_rss_input_tuple_cmd *req;
3829 	struct hclge_desc desc;
3830 	int ret;
3831 
3832 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3833 
3834 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3835 
3836 	/* Get the tuple config from the PF (vport 0) */
3837 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3838 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3839 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3840 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3841 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3842 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3843 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3844 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3845 	hclge_get_rss_type(&hdev->vport[0]);
3846 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3847 	if (ret)
3848 		dev_err(&hdev->pdev->dev,
3849 			"Configure rss input fail, status = %d\n", ret);
3850 	return ret;
3851 }
3852 
3853 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3854 			 u8 *key, u8 *hfunc)
3855 {
3856 	struct hclge_vport *vport = hclge_get_vport(handle);
3857 	int i;
3858 
3859 	/* Get hash algorithm */
3860 	if (hfunc) {
3861 		switch (vport->rss_algo) {
3862 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3863 			*hfunc = ETH_RSS_HASH_TOP;
3864 			break;
3865 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
3866 			*hfunc = ETH_RSS_HASH_XOR;
3867 			break;
3868 		default:
3869 			*hfunc = ETH_RSS_HASH_UNKNOWN;
3870 			break;
3871 		}
3872 	}
3873 
3874 	/* Get the RSS Key required by the user */
3875 	if (key)
3876 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3877 
3878 	/* Get indirect table */
3879 	if (indir)
3880 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3881 			indir[i] = vport->rss_indirection_tbl[i];
3882 
3883 	return 0;
3884 }
3885 
3886 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3887 			 const  u8 *key, const  u8 hfunc)
3888 {
3889 	struct hclge_vport *vport = hclge_get_vport(handle);
3890 	struct hclge_dev *hdev = vport->back;
3891 	u8 hash_algo;
3892 	int ret, i;
3893 
3894 	/* Set the RSS Hash Key if specified by the user */
3895 	if (key) {
3896 		switch (hfunc) {
3897 		case ETH_RSS_HASH_TOP:
3898 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3899 			break;
3900 		case ETH_RSS_HASH_XOR:
3901 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3902 			break;
3903 		case ETH_RSS_HASH_NO_CHANGE:
3904 			hash_algo = vport->rss_algo;
3905 			break;
3906 		default:
3907 			return -EINVAL;
3908 		}
3909 
3910 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3911 		if (ret)
3912 			return ret;
3913 
3914 		/* Update the shadow RSS key with the user-specified key */
3915 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3916 		vport->rss_algo = hash_algo;
3917 	}
3918 
3919 	/* Update the shadow RSS table with user specified qids */
3920 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3921 		vport->rss_indirection_tbl[i] = indir[i];
3922 
3923 	/* Update the hardware */
3924 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3925 }
3926 
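/* Added note: translates the ethtool RXH_* hash flags into the driver's
 * tuple bits (source/destination IP and L4 port). For SCTP flows
 * HCLGE_V_TAG_BIT is also set, presumably so that the verification tag
 * takes part in the hash.
 */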
3927 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3928 {
3929 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3930 
3931 	if (nfc->data & RXH_L4_B_2_3)
3932 		hash_sets |= HCLGE_D_PORT_BIT;
3933 	else
3934 		hash_sets &= ~HCLGE_D_PORT_BIT;
3935 
3936 	if (nfc->data & RXH_IP_SRC)
3937 		hash_sets |= HCLGE_S_IP_BIT;
3938 	else
3939 		hash_sets &= ~HCLGE_S_IP_BIT;
3940 
3941 	if (nfc->data & RXH_IP_DST)
3942 		hash_sets |= HCLGE_D_IP_BIT;
3943 	else
3944 		hash_sets &= ~HCLGE_D_IP_BIT;
3945 
3946 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3947 		hash_sets |= HCLGE_V_TAG_BIT;
3948 
3949 	return hash_sets;
3950 }
3951 
3952 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3953 			       struct ethtool_rxnfc *nfc)
3954 {
3955 	struct hclge_vport *vport = hclge_get_vport(handle);
3956 	struct hclge_dev *hdev = vport->back;
3957 	struct hclge_rss_input_tuple_cmd *req;
3958 	struct hclge_desc desc;
3959 	u8 tuple_sets;
3960 	int ret;
3961 
3962 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3963 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
3964 		return -EINVAL;
3965 
3966 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3967 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3968 
3969 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3970 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3971 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3972 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3973 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3974 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3975 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3976 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3977 
3978 	tuple_sets = hclge_get_rss_hash_bits(nfc);
3979 	switch (nfc->flow_type) {
3980 	case TCP_V4_FLOW:
3981 		req->ipv4_tcp_en = tuple_sets;
3982 		break;
3983 	case TCP_V6_FLOW:
3984 		req->ipv6_tcp_en = tuple_sets;
3985 		break;
3986 	case UDP_V4_FLOW:
3987 		req->ipv4_udp_en = tuple_sets;
3988 		break;
3989 	case UDP_V6_FLOW:
3990 		req->ipv6_udp_en = tuple_sets;
3991 		break;
3992 	case SCTP_V4_FLOW:
3993 		req->ipv4_sctp_en = tuple_sets;
3994 		break;
3995 	case SCTP_V6_FLOW:
3996 		if ((nfc->data & RXH_L4_B_0_1) ||
3997 		    (nfc->data & RXH_L4_B_2_3))
3998 			return -EINVAL;
3999 
4000 		req->ipv6_sctp_en = tuple_sets;
4001 		break;
4002 	case IPV4_FLOW:
4003 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4004 		break;
4005 	case IPV6_FLOW:
4006 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4007 		break;
4008 	default:
4009 		return -EINVAL;
4010 	}
4011 
4012 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4013 	if (ret) {
4014 		dev_err(&hdev->pdev->dev,
4015 			"Set rss tuple fail, status = %d\n", ret);
4016 		return ret;
4017 	}
4018 
4019 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4020 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4021 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4022 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4023 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4024 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4025 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4026 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4027 	hclge_get_rss_type(vport);
4028 	return 0;
4029 }
4030 
4031 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4032 			       struct ethtool_rxnfc *nfc)
4033 {
4034 	struct hclge_vport *vport = hclge_get_vport(handle);
4035 	u8 tuple_sets;
4036 
4037 	nfc->data = 0;
4038 
4039 	switch (nfc->flow_type) {
4040 	case TCP_V4_FLOW:
4041 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4042 		break;
4043 	case UDP_V4_FLOW:
4044 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4045 		break;
4046 	case TCP_V6_FLOW:
4047 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4048 		break;
4049 	case UDP_V6_FLOW:
4050 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4051 		break;
4052 	case SCTP_V4_FLOW:
4053 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4054 		break;
4055 	case SCTP_V6_FLOW:
4056 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4057 		break;
4058 	case IPV4_FLOW:
4059 	case IPV6_FLOW:
4060 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4061 		break;
4062 	default:
4063 		return -EINVAL;
4064 	}
4065 
4066 	if (!tuple_sets)
4067 		return 0;
4068 
4069 	if (tuple_sets & HCLGE_D_PORT_BIT)
4070 		nfc->data |= RXH_L4_B_2_3;
4071 	if (tuple_sets & HCLGE_S_PORT_BIT)
4072 		nfc->data |= RXH_L4_B_0_1;
4073 	if (tuple_sets & HCLGE_D_IP_BIT)
4074 		nfc->data |= RXH_IP_DST;
4075 	if (tuple_sets & HCLGE_S_IP_BIT)
4076 		nfc->data |= RXH_IP_SRC;
4077 
4078 	return 0;
4079 }
4080 
4081 static int hclge_get_tc_size(struct hnae3_handle *handle)
4082 {
4083 	struct hclge_vport *vport = hclge_get_vport(handle);
4084 	struct hclge_dev *hdev = vport->back;
4085 
4086 	return hdev->rss_size_max;
4087 }
4088 
4089 int hclge_rss_init_hw(struct hclge_dev *hdev)
4090 {
4091 	struct hclge_vport *vport = hdev->vport;
4092 	u8 *rss_indir = vport[0].rss_indirection_tbl;
4093 	u16 rss_size = vport[0].alloc_rss_size;
4094 	u8 *key = vport[0].rss_hash_key;
4095 	u8 hfunc = vport[0].rss_algo;
4096 	u16 tc_offset[HCLGE_MAX_TC_NUM];
4097 	u16 tc_valid[HCLGE_MAX_TC_NUM];
4098 	u16 tc_size[HCLGE_MAX_TC_NUM];
4099 	u16 roundup_size;
4100 	unsigned int i;
4101 	int ret;
4102 
4103 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4104 	if (ret)
4105 		return ret;
4106 
4107 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4108 	if (ret)
4109 		return ret;
4110 
4111 	ret = hclge_set_rss_input_tuple(hdev);
4112 	if (ret)
4113 		return ret;
4114 
4115 	/* Each TC has the same queue size, and the tc_size set to hardware is
4116 	 * the log2 of the roundup power of two of rss_size; the actual queue
4117 	 * size is limited by the indirection table.
4118 	 */
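	/* e.g. (illustrative): rss_size = 5 gives roundup_pow_of_two(5) = 8,
	 * so the tc_size written to hardware is ilog2(8) = 3, while tc_offset
	 * for TC i is 5 * i.
	 */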
4119 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4120 		dev_err(&hdev->pdev->dev,
4121 			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
4122 			rss_size);
4123 		return -EINVAL;
4124 	}
4125 
4126 	roundup_size = roundup_pow_of_two(rss_size);
4127 	roundup_size = ilog2(roundup_size);
4128 
4129 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4130 		tc_valid[i] = 0;
4131 
4132 		if (!(hdev->hw_tc_map & BIT(i)))
4133 			continue;
4134 
4135 		tc_valid[i] = 1;
4136 		tc_size[i] = roundup_size;
4137 		tc_offset[i] = rss_size * i;
4138 	}
4139 
4140 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4141 }
4142 
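/* Added note: the default indirection table round-robins over the
 * allocated RSS queues, i.e. entry i maps to queue (i % alloc_rss_size);
 * with alloc_rss_size = 4 it would read 0, 1, 2, 3, 0, 1, 2, 3, ...
 */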
4143 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4144 {
4145 	struct hclge_vport *vport = hdev->vport;
4146 	int i, j;
4147 
4148 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4149 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4150 			vport[j].rss_indirection_tbl[i] =
4151 				i % vport[j].alloc_rss_size;
4152 	}
4153 }
4154 
4155 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4156 {
4157 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4158 	struct hclge_vport *vport = hdev->vport;
4159 
4160 	if (hdev->pdev->revision >= 0x21)
4161 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4162 
4163 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4164 		vport[i].rss_tuple_sets.ipv4_tcp_en =
4165 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4166 		vport[i].rss_tuple_sets.ipv4_udp_en =
4167 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4168 		vport[i].rss_tuple_sets.ipv4_sctp_en =
4169 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4170 		vport[i].rss_tuple_sets.ipv4_fragment_en =
4171 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4172 		vport[i].rss_tuple_sets.ipv6_tcp_en =
4173 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4174 		vport[i].rss_tuple_sets.ipv6_udp_en =
4175 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4176 		vport[i].rss_tuple_sets.ipv6_sctp_en =
4177 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4178 		vport[i].rss_tuple_sets.ipv6_fragment_en =
4179 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4180 
4181 		vport[i].rss_algo = rss_algo;
4182 
4183 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
4184 		       HCLGE_RSS_KEY_SIZE);
4185 	}
4186 
4187 	hclge_rss_indir_init_cfg(hdev);
4188 }
4189 
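/* Added note: ring-to-vector mappings are batched, up to
 * HCLGE_VECTOR_ELEMENTS_PER_CMD entries per command descriptor; each entry
 * encodes the ring type, the TQP index and the GL index for that ring, and
 * a partially filled descriptor is flushed after the loop.
 */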
4190 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4191 				int vector_id, bool en,
4192 				struct hnae3_ring_chain_node *ring_chain)
4193 {
4194 	struct hclge_dev *hdev = vport->back;
4195 	struct hnae3_ring_chain_node *node;
4196 	struct hclge_desc desc;
4197 	struct hclge_ctrl_vector_chain_cmd *req
4198 		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4199 	enum hclge_cmd_status status;
4200 	enum hclge_opcode_type op;
4201 	u16 tqp_type_and_id;
4202 	int i;
4203 
4204 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4205 	hclge_cmd_setup_basic_desc(&desc, op, false);
4206 	req->int_vector_id = vector_id;
4207 
4208 	i = 0;
4209 	for (node = ring_chain; node; node = node->next) {
4210 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4211 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4212 				HCLGE_INT_TYPE_S,
4213 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4214 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4215 				HCLGE_TQP_ID_S, node->tqp_index);
4216 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4217 				HCLGE_INT_GL_IDX_S,
4218 				hnae3_get_field(node->int_gl_idx,
4219 						HNAE3_RING_GL_IDX_M,
4220 						HNAE3_RING_GL_IDX_S));
4221 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4222 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4223 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4224 			req->vfid = vport->vport_id;
4225 
4226 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
4227 			if (status) {
4228 				dev_err(&hdev->pdev->dev,
4229 					"Map TQP fail, status is %d.\n",
4230 					status);
4231 				return -EIO;
4232 			}
4233 			i = 0;
4234 
4235 			hclge_cmd_setup_basic_desc(&desc,
4236 						   op,
4237 						   false);
4238 			req->int_vector_id = vector_id;
4239 		}
4240 	}
4241 
4242 	if (i > 0) {
4243 		req->int_cause_num = i;
4244 		req->vfid = vport->vport_id;
4245 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
4246 		if (status) {
4247 			dev_err(&hdev->pdev->dev,
4248 				"Map TQP fail, status is %d.\n", status);
4249 			return -EIO;
4250 		}
4251 	}
4252 
4253 	return 0;
4254 }
4255 
4256 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4257 				    struct hnae3_ring_chain_node *ring_chain)
4258 {
4259 	struct hclge_vport *vport = hclge_get_vport(handle);
4260 	struct hclge_dev *hdev = vport->back;
4261 	int vector_id;
4262 
4263 	vector_id = hclge_get_vector_index(hdev, vector);
4264 	if (vector_id < 0) {
4265 		dev_err(&hdev->pdev->dev,
4266 			"Get vector index fail. vector_id =%d\n", vector_id);
4267 		return vector_id;
4268 	}
4269 
4270 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4271 }
4272 
4273 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4274 				       struct hnae3_ring_chain_node *ring_chain)
4275 {
4276 	struct hclge_vport *vport = hclge_get_vport(handle);
4277 	struct hclge_dev *hdev = vport->back;
4278 	int vector_id, ret;
4279 
4280 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4281 		return 0;
4282 
4283 	vector_id = hclge_get_vector_index(hdev, vector);
4284 	if (vector_id < 0) {
4285 		dev_err(&handle->pdev->dev,
4286 			"Get vector index fail. ret =%d\n", vector_id);
4287 		return vector_id;
4288 	}
4289 
4290 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4291 	if (ret)
4292 		dev_err(&handle->pdev->dev,
4293 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4294 			vector_id, ret);
4295 
4296 	return ret;
4297 }
4298 
4299 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4300 			       struct hclge_promisc_param *param)
4301 {
4302 	struct hclge_promisc_cfg_cmd *req;
4303 	struct hclge_desc desc;
4304 	int ret;
4305 
4306 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4307 
4308 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
4309 	req->vf_id = param->vf_id;
4310 
4311 	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4312 	 * pdev revision(0x20); newer revisions support them. Setting these
4313 	 * two fields does not cause the firmware on revision(0x20) to return
4314 	 * an error when the driver sends the command.
4315 	 */
4316 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4317 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4318 
4319 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4320 	if (ret)
4321 		dev_err(&hdev->pdev->dev,
4322 			"Set promisc mode fail, status is %d.\n", ret);
4323 
4324 	return ret;
4325 }
4326 
4327 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4328 			      bool en_mc, bool en_bc, int vport_id)
4329 {
4330 	if (!param)
4331 		return;
4332 
4333 	memset(param, 0, sizeof(struct hclge_promisc_param));
4334 	if (en_uc)
4335 		param->enable = HCLGE_PROMISC_EN_UC;
4336 	if (en_mc)
4337 		param->enable |= HCLGE_PROMISC_EN_MC;
4338 	if (en_bc)
4339 		param->enable |= HCLGE_PROMISC_EN_BC;
4340 	param->vf_id = vport_id;
4341 }
4342 
4343 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4344 				  bool en_mc_pmc)
4345 {
4346 	struct hclge_vport *vport = hclge_get_vport(handle);
4347 	struct hclge_dev *hdev = vport->back;
4348 	struct hclge_promisc_param param;
4349 	bool en_bc_pmc = true;
4350 
4351 	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4352 	 * is always bypassed. So broadcast promisc should be disabled until
4353 	 * the user enables promisc mode.
4354 	 */
4355 	if (handle->pdev->revision == 0x20)
4356 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4357 
4358 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4359 				 vport->vport_id);
4360 	return hclge_cmd_set_promisc_mode(hdev, &param);
4361 }
4362 
4363 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4364 {
4365 	struct hclge_get_fd_mode_cmd *req;
4366 	struct hclge_desc desc;
4367 	int ret;
4368 
4369 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4370 
4371 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
4372 
4373 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4374 	if (ret) {
4375 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4376 		return ret;
4377 	}
4378 
4379 	*fd_mode = req->mode;
4380 
4381 	return ret;
4382 }
4383 
4384 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4385 				   u32 *stage1_entry_num,
4386 				   u32 *stage2_entry_num,
4387 				   u16 *stage1_counter_num,
4388 				   u16 *stage2_counter_num)
4389 {
4390 	struct hclge_get_fd_allocation_cmd *req;
4391 	struct hclge_desc desc;
4392 	int ret;
4393 
4394 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4395 
4396 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4397 
4398 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4399 	if (ret) {
4400 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4401 			ret);
4402 		return ret;
4403 	}
4404 
4405 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4406 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4407 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4408 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4409 
4410 	return ret;
4411 }
4412 
4413 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4414 {
4415 	struct hclge_set_fd_key_config_cmd *req;
4416 	struct hclge_fd_key_cfg *stage;
4417 	struct hclge_desc desc;
4418 	int ret;
4419 
4420 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4421 
4422 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4423 	stage = &hdev->fd_cfg.key_cfg[stage_num];
4424 	req->stage = stage_num;
4425 	req->key_select = stage->key_sel;
4426 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4427 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4428 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4429 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4430 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4431 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4432 
4433 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4434 	if (ret)
4435 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4436 
4437 	return ret;
4438 }
4439 
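/* Added note: fd_mode selects the stage-1 TCAM geometry. The 2K-depth mode
 * keeps the full MAX_KEY_LENGTH (400-bit) key, which below also enables the
 * MAC tuples and ETHER_FLOW, while the 4K-depth mode halves the key width.
 */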
4440 static int hclge_init_fd_config(struct hclge_dev *hdev)
4441 {
4442 #define LOW_2_WORDS		0x03
4443 	struct hclge_fd_key_cfg *key_cfg;
4444 	int ret;
4445 
4446 	if (!hnae3_dev_fd_supported(hdev))
4447 		return 0;
4448 
4449 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4450 	if (ret)
4451 		return ret;
4452 
4453 	switch (hdev->fd_cfg.fd_mode) {
4454 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4455 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4456 		break;
4457 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4458 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4459 		break;
4460 	default:
4461 		dev_err(&hdev->pdev->dev,
4462 			"Unsupported flow director mode %d\n",
4463 			hdev->fd_cfg.fd_mode);
4464 		return -EOPNOTSUPP;
4465 	}
4466 
4467 	hdev->fd_cfg.proto_support =
4468 		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4469 		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4470 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4471 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4472 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4473 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4474 	key_cfg->outer_sipv6_word_en = 0;
4475 	key_cfg->outer_dipv6_word_en = 0;
4476 
4477 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4478 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4479 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4480 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4481 
4482 	/* If the max 400-bit key is used, we can also support ether-type tuples */
4483 	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4484 		hdev->fd_cfg.proto_support |= ETHER_FLOW;
4485 		key_cfg->tuple_active |=
4486 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4487 	}
4488 
4489 	/* roce_type is used to filter roce frames
4490 	 * dst_vport is used to specify the vport the rule applies to
4491 	 */
4492 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4493 
4494 	ret = hclge_get_fd_allocation(hdev,
4495 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4496 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4497 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4498 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4499 	if (ret)
4500 		return ret;
4501 
4502 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4503 }
4504 
4505 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4506 				int loc, u8 *key, bool is_add)
4507 {
4508 	struct hclge_fd_tcam_config_1_cmd *req1;
4509 	struct hclge_fd_tcam_config_2_cmd *req2;
4510 	struct hclge_fd_tcam_config_3_cmd *req3;
4511 	struct hclge_desc desc[3];
4512 	int ret;
4513 
4514 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4515 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4516 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4517 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4518 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4519 
4520 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4521 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4522 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4523 
4524 	req1->stage = stage;
4525 	req1->xy_sel = sel_x ? 1 : 0;
4526 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4527 	req1->index = cpu_to_le32(loc);
4528 	req1->entry_vld = sel_x ? is_add : 0;
4529 
4530 	if (key) {
4531 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4532 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4533 		       sizeof(req2->tcam_data));
4534 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4535 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4536 	}
4537 
4538 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
4539 	if (ret)
4540 		dev_err(&hdev->pdev->dev,
4541 			"config tcam key fail, ret=%d\n",
4542 			ret);
4543 
4544 	return ret;
4545 }
4546 
4547 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4548 			      struct hclge_fd_ad_data *action)
4549 {
4550 	struct hclge_fd_ad_config_cmd *req;
4551 	struct hclge_desc desc;
4552 	u64 ad_data = 0;
4553 	int ret;
4554 
4555 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4556 
4557 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
4558 	req->index = cpu_to_le32(loc);
4559 	req->stage = stage;
4560 
4561 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4562 		      action->write_rule_id_to_bd);
4563 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4564 			action->rule_id);
4565 	ad_data <<= 32;
4566 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4567 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4568 		      action->forward_to_direct_queue);
4569 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4570 			action->queue_id);
4571 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4572 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4573 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4574 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4575 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4576 			action->counter_id);
4577 
4578 	req->ad_data = cpu_to_le64(ad_data);
4579 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4580 	if (ret)
4581 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4582 
4583 	return ret;
4584 }
4585 
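/* Added note: calc_x()/calc_y() (helpers defined elsewhere in this driver)
 * derive the x/y pattern the TCAM expects from a value/mask pair, so that
 * bits cleared in the mask become don't-care in the lookup; this reading of
 * the encoding is an assumption based on how key_x/key_y are filled below.
 */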
4586 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4587 				   struct hclge_fd_rule *rule)
4588 {
4589 	u16 tmp_x_s, tmp_y_s;
4590 	u32 tmp_x_l, tmp_y_l;
4591 	int i;
4592 
4593 	if (rule->unused_tuple & tuple_bit)
4594 		return true;
4595 
4596 	switch (tuple_bit) {
4597 	case 0:
4598 		return false;
4599 	case BIT(INNER_DST_MAC):
4600 		for (i = 0; i < ETH_ALEN; i++) {
4601 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4602 			       rule->tuples_mask.dst_mac[i]);
4603 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4604 			       rule->tuples_mask.dst_mac[i]);
4605 		}
4606 
4607 		return true;
4608 	case BIT(INNER_SRC_MAC):
4609 		for (i = 0; i < ETH_ALEN; i++) {
4610 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4611 			       rule->tuples_mask.src_mac[i]);
4612 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4613 			       rule->tuples_mask.src_mac[i]);
4614 		}
4615 
4616 		return true;
4617 	case BIT(INNER_VLAN_TAG_FST):
4618 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4619 		       rule->tuples_mask.vlan_tag1);
4620 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4621 		       rule->tuples_mask.vlan_tag1);
4622 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4623 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4624 
4625 		return true;
4626 	case BIT(INNER_ETH_TYPE):
4627 		calc_x(tmp_x_s, rule->tuples.ether_proto,
4628 		       rule->tuples_mask.ether_proto);
4629 		calc_y(tmp_y_s, rule->tuples.ether_proto,
4630 		       rule->tuples_mask.ether_proto);
4631 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4632 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4633 
4634 		return true;
4635 	case BIT(INNER_IP_TOS):
4636 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4637 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4638 
4639 		return true;
4640 	case BIT(INNER_IP_PROTO):
4641 		calc_x(*key_x, rule->tuples.ip_proto,
4642 		       rule->tuples_mask.ip_proto);
4643 		calc_y(*key_y, rule->tuples.ip_proto,
4644 		       rule->tuples_mask.ip_proto);
4645 
4646 		return true;
4647 	case BIT(INNER_SRC_IP):
4648 		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
4649 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
4650 		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
4651 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
4652 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4653 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4654 
4655 		return true;
4656 	case BIT(INNER_DST_IP):
4657 		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
4658 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
4659 		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
4660 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
4661 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4662 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4663 
4664 		return true;
4665 	case BIT(INNER_SRC_PORT):
4666 		calc_x(tmp_x_s, rule->tuples.src_port,
4667 		       rule->tuples_mask.src_port);
4668 		calc_y(tmp_y_s, rule->tuples.src_port,
4669 		       rule->tuples_mask.src_port);
4670 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4671 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4672 
4673 		return true;
4674 	case BIT(INNER_DST_PORT):
4675 		calc_x(tmp_x_s, rule->tuples.dst_port,
4676 		       rule->tuples_mask.dst_port);
4677 		calc_y(tmp_y_s, rule->tuples.dst_port,
4678 		       rule->tuples_mask.dst_port);
4679 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4680 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4681 
4682 		return true;
4683 	default:
4684 		return false;
4685 	}
4686 }
4687 
4688 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4689 				 u8 vf_id, u8 network_port_id)
4690 {
4691 	u32 port_number = 0;
4692 
4693 	if (port_type == HOST_PORT) {
4694 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4695 				pf_id);
4696 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4697 				vf_id);
4698 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4699 	} else {
4700 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4701 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
4702 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4703 	}
4704 
4705 	return port_number;
4706 }
4707 
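/* Added note: the active meta data fields are packed upwards from bit 0 in
 * meta_data_key_info[] order, then the packed word is shifted left by
 * (32 - cur_pos) bits so the used bits land at the MSB end of the 32-bit
 * meta data region.
 */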
4708 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4709 				       __le32 *key_x, __le32 *key_y,
4710 				       struct hclge_fd_rule *rule)
4711 {
4712 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4713 	u8 cur_pos = 0, tuple_size, shift_bits;
4714 	unsigned int i;
4715 
4716 	for (i = 0; i < MAX_META_DATA; i++) {
4717 		tuple_size = meta_data_key_info[i].key_length;
4718 		tuple_bit = key_cfg->meta_data_active & BIT(i);
4719 
4720 		switch (tuple_bit) {
4721 		case BIT(ROCE_TYPE):
4722 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4723 			cur_pos += tuple_size;
4724 			break;
4725 		case BIT(DST_VPORT):
4726 			port_number = hclge_get_port_number(HOST_PORT, 0,
4727 							    rule->vf_id, 0);
4728 			hnae3_set_field(meta_data,
4729 					GENMASK(cur_pos + tuple_size - 1, cur_pos),
4730 					cur_pos, port_number);
4731 			cur_pos += tuple_size;
4732 			break;
4733 		default:
4734 			break;
4735 		}
4736 	}
4737 
4738 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4739 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4740 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
4741 
4742 	*key_x = cpu_to_le32(tmp_x << shift_bits);
4743 	*key_y = cpu_to_le32(tmp_y << shift_bits);
4744 }
4745 
4746 /* A complete key is made up of a meta data key and a tuple key.
4747  * The meta data key is stored in the MSB region and the tuple key in the
4748  * LSB region; unused bits are filled with 0.
4749  */
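/* Added layout sketch (illustrative sizes): the tuple bytes are packed from
 * offset 0 of key_x/key_y in tuple_key_info[] order, skipping inactive or
 * unused tuples, and the meta data x/y words are written at byte offset
 * max_key_length / 8 - MAX_META_DATA_LENGTH / 8 from the start of the key.
 */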
4750 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4751 			    struct hclge_fd_rule *rule)
4752 {
4753 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4754 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4755 	u8 *cur_key_x, *cur_key_y;
4756 	unsigned int i;
4757 	int ret, tuple_size;
4758 	u8 meta_data_region;
4759 
4760 	memset(key_x, 0, sizeof(key_x));
4761 	memset(key_y, 0, sizeof(key_y));
4762 	cur_key_x = key_x;
4763 	cur_key_y = key_y;
4764 
4765 	for (i = 0; i < MAX_TUPLE; i++) {
4766 		bool tuple_valid;
4767 		u32 check_tuple;
4768 
4769 		tuple_size = tuple_key_info[i].key_length / 8;
4770 		check_tuple = key_cfg->tuple_active & BIT(i);
4771 
4772 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4773 						     cur_key_y, rule);
4774 		if (tuple_valid) {
4775 			cur_key_x += tuple_size;
4776 			cur_key_y += tuple_size;
4777 		}
4778 	}
4779 
4780 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4781 			MAX_META_DATA_LENGTH / 8;
4782 
4783 	hclge_fd_convert_meta_data(key_cfg,
4784 				   (__le32 *)(key_x + meta_data_region),
4785 				   (__le32 *)(key_y + meta_data_region),
4786 				   rule);
4787 
4788 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4789 				   true);
4790 	if (ret) {
4791 		dev_err(&hdev->pdev->dev,
4792 			"fd key_y config fail, loc=%d, ret=%d\n",
4793 			rule->location, ret);
4794 		return ret;
4795 	}
4796 
4797 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4798 				   true);
4799 	if (ret)
4800 		dev_err(&hdev->pdev->dev,
4801 			"fd key_x config fail, loc=%d, ret=%d\n",
4802 			rule->location, ret);
4803 	return ret;
4804 }
4805 
4806 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4807 			       struct hclge_fd_rule *rule)
4808 {
4809 	struct hclge_fd_ad_data ad_data;
4810 
4811 	ad_data.ad_id = rule->location;
4812 
4813 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4814 		ad_data.drop_packet = true;
4815 		ad_data.forward_to_direct_queue = false;
4816 		ad_data.queue_id = 0;
4817 	} else {
4818 		ad_data.drop_packet = false;
4819 		ad_data.forward_to_direct_queue = true;
4820 		ad_data.queue_id = rule->queue_id;
4821 	}
4822 
4823 	ad_data.use_counter = false;
4824 	ad_data.counter_id = 0;
4825 
4826 	ad_data.use_next_stage = false;
4827 	ad_data.next_input_key = 0;
4828 
4829 	ad_data.write_rule_id_to_bd = true;
4830 	ad_data.rule_id = rule->location;
4831 
4832 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4833 }
4834 
4835 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4836 			       struct ethtool_rx_flow_spec *fs, u32 *unused)
4837 {
4838 	struct ethtool_tcpip4_spec *tcp_ip4_spec;
4839 	struct ethtool_usrip4_spec *usr_ip4_spec;
4840 	struct ethtool_tcpip6_spec *tcp_ip6_spec;
4841 	struct ethtool_usrip6_spec *usr_ip6_spec;
4842 	struct ethhdr *ether_spec;
4843 
4844 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4845 		return -EINVAL;
4846 
4847 	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4848 		return -EOPNOTSUPP;
4849 
4850 	if ((fs->flow_type & FLOW_EXT) &&
4851 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4852 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4853 		return -EOPNOTSUPP;
4854 	}
4855 
4856 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4857 	case SCTP_V4_FLOW:
4858 	case TCP_V4_FLOW:
4859 	case UDP_V4_FLOW:
4860 		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4861 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4862 
4863 		if (!tcp_ip4_spec->ip4src)
4864 			*unused |= BIT(INNER_SRC_IP);
4865 
4866 		if (!tcp_ip4_spec->ip4dst)
4867 			*unused |= BIT(INNER_DST_IP);
4868 
4869 		if (!tcp_ip4_spec->psrc)
4870 			*unused |= BIT(INNER_SRC_PORT);
4871 
4872 		if (!tcp_ip4_spec->pdst)
4873 			*unused |= BIT(INNER_DST_PORT);
4874 
4875 		if (!tcp_ip4_spec->tos)
4876 			*unused |= BIT(INNER_IP_TOS);
4877 
4878 		break;
4879 	case IP_USER_FLOW:
4880 		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4881 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4882 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4883 
4884 		if (!usr_ip4_spec->ip4src)
4885 			*unused |= BIT(INNER_SRC_IP);
4886 
4887 		if (!usr_ip4_spec->ip4dst)
4888 			*unused |= BIT(INNER_DST_IP);
4889 
4890 		if (!usr_ip4_spec->tos)
4891 			*unused |= BIT(INNER_IP_TOS);
4892 
4893 		if (!usr_ip4_spec->proto)
4894 			*unused |= BIT(INNER_IP_PROTO);
4895 
4896 		if (usr_ip4_spec->l4_4_bytes)
4897 			return -EOPNOTSUPP;
4898 
4899 		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4900 			return -EOPNOTSUPP;
4901 
4902 		break;
4903 	case SCTP_V6_FLOW:
4904 	case TCP_V6_FLOW:
4905 	case UDP_V6_FLOW:
4906 		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4907 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4908 			BIT(INNER_IP_TOS);
4909 
4910 		/* check whether the src/dst ip addresses are used */
4911 		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4912 		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4913 			*unused |= BIT(INNER_SRC_IP);
4914 
4915 		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4916 		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4917 			*unused |= BIT(INNER_DST_IP);
4918 
4919 		if (!tcp_ip6_spec->psrc)
4920 			*unused |= BIT(INNER_SRC_PORT);
4921 
4922 		if (!tcp_ip6_spec->pdst)
4923 			*unused |= BIT(INNER_DST_PORT);
4924 
4925 		if (tcp_ip6_spec->tclass)
4926 			return -EOPNOTSUPP;
4927 
4928 		break;
4929 	case IPV6_USER_FLOW:
4930 		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4931 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4932 			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4933 			BIT(INNER_DST_PORT);
4934 
4935 		/* check whether the src/dst ip addresses are used */
4936 		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4937 		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4938 			*unused |= BIT(INNER_SRC_IP);
4939 
4940 		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4941 		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4942 			*unused |= BIT(INNER_DST_IP);
4943 
4944 		if (!usr_ip6_spec->l4_proto)
4945 			*unused |= BIT(INNER_IP_PROTO);
4946 
4947 		if (usr_ip6_spec->tclass)
4948 			return -EOPNOTSUPP;
4949 
4950 		if (usr_ip6_spec->l4_4_bytes)
4951 			return -EOPNOTSUPP;
4952 
4953 		break;
4954 	case ETHER_FLOW:
4955 		ether_spec = &fs->h_u.ether_spec;
4956 		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4957 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4958 			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4959 
4960 		if (is_zero_ether_addr(ether_spec->h_source))
4961 			*unused |= BIT(INNER_SRC_MAC);
4962 
4963 		if (is_zero_ether_addr(ether_spec->h_dest))
4964 			*unused |= BIT(INNER_DST_MAC);
4965 
4966 		if (!ether_spec->h_proto)
4967 			*unused |= BIT(INNER_ETH_TYPE);
4968 
4969 		break;
4970 	default:
4971 		return -EOPNOTSUPP;
4972 	}
4973 
4974 	if ((fs->flow_type & FLOW_EXT)) {
4975 		if (fs->h_ext.vlan_etype)
4976 			return -EOPNOTSUPP;
4977 		if (!fs->h_ext.vlan_tci)
4978 			*unused |= BIT(INNER_VLAN_TAG_FST);
4979 
4980 		if (fs->m_ext.vlan_tci) {
4981 			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4982 				return -EINVAL;
4983 		}
4984 	} else {
4985 		*unused |= BIT(INNER_VLAN_TAG_FST);
4986 	}
4987 
4988 	if (fs->flow_type & FLOW_MAC_EXT) {
4989 		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4990 			return -EOPNOTSUPP;
4991 
4992 		if (is_zero_ether_addr(fs->h_ext.h_dest))
4993 			*unused |= BIT(INNER_DST_MAC);
4994 		else
4995 			*unused &= ~(BIT(INNER_DST_MAC));
4996 	}
4997 
4998 	return 0;
4999 }
5000 
5001 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5002 {
5003 	struct hclge_fd_rule *rule = NULL;
5004 	struct hlist_node *node2;
5005 
5006 	spin_lock_bh(&hdev->fd_rule_lock);
5007 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5008 		if (rule->location >= location)
5009 			break;
5010 	}
5011 
5012 	spin_unlock_bh(&hdev->fd_rule_lock);
5013 
5014 	return rule && rule->location == location;
5015 }
5016 
5017 /* the caller must hold fd_rule_lock */
5018 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5019 				     struct hclge_fd_rule *new_rule,
5020 				     u16 location,
5021 				     bool is_add)
5022 {
5023 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
5024 	struct hlist_node *node2;
5025 
5026 	if (is_add && !new_rule)
5027 		return -EINVAL;
5028 
5029 	hlist_for_each_entry_safe(rule, node2,
5030 				  &hdev->fd_rule_list, rule_node) {
5031 		if (rule->location >= location)
5032 			break;
5033 		parent = rule;
5034 	}
5035 
5036 	if (rule && rule->location == location) {
5037 		hlist_del(&rule->rule_node);
5038 		kfree(rule);
5039 		hdev->hclge_fd_rule_num--;
5040 
5041 		if (!is_add) {
5042 			if (!hdev->hclge_fd_rule_num)
5043 				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5044 			clear_bit(location, hdev->fd_bmap);
5045 
5046 			return 0;
5047 		}
5048 	} else if (!is_add) {
5049 		dev_err(&hdev->pdev->dev,
5050 			"delete fail, rule %d does not exist\n",
5051 			location);
5052 		return -EINVAL;
5053 	}
5054 
5055 	INIT_HLIST_NODE(&new_rule->rule_node);
5056 
5057 	if (parent)
5058 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5059 	else
5060 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5061 
5062 	set_bit(location, hdev->fd_bmap);
5063 	hdev->hclge_fd_rule_num++;
5064 	hdev->fd_active_type = new_rule->rule_type;
5065 
5066 	return 0;
5067 }
5068 
5069 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5070 			      struct ethtool_rx_flow_spec *fs,
5071 			      struct hclge_fd_rule *rule)
5072 {
5073 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5074 
5075 	switch (flow_type) {
5076 	case SCTP_V4_FLOW:
5077 	case TCP_V4_FLOW:
5078 	case UDP_V4_FLOW:
5079 		rule->tuples.src_ip[IPV4_INDEX] =
5080 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5081 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5082 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5083 
5084 		rule->tuples.dst_ip[IPV4_INDEX] =
5085 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5086 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5087 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5088 
5089 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5090 		rule->tuples_mask.src_port =
5091 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5092 
5093 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5094 		rule->tuples_mask.dst_port =
5095 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5096 
5097 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5098 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5099 
5100 		rule->tuples.ether_proto = ETH_P_IP;
5101 		rule->tuples_mask.ether_proto = 0xFFFF;
5102 
5103 		break;
5104 	case IP_USER_FLOW:
5105 		rule->tuples.src_ip[IPV4_INDEX] =
5106 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5107 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5108 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5109 
5110 		rule->tuples.dst_ip[IPV4_INDEX] =
5111 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5112 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5113 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5114 
5115 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5116 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5117 
5118 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5119 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5120 
5121 		rule->tuples.ether_proto = ETH_P_IP;
5122 		rule->tuples_mask.ether_proto = 0xFFFF;
5123 
5124 		break;
5125 	case SCTP_V6_FLOW:
5126 	case TCP_V6_FLOW:
5127 	case UDP_V6_FLOW:
5128 		be32_to_cpu_array(rule->tuples.src_ip,
5129 				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5130 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5131 				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5132 
5133 		be32_to_cpu_array(rule->tuples.dst_ip,
5134 				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5135 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5136 				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5137 
5138 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5139 		rule->tuples_mask.src_port =
5140 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5141 
5142 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5143 		rule->tuples_mask.dst_port =
5144 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5145 
5146 		rule->tuples.ether_proto = ETH_P_IPV6;
5147 		rule->tuples_mask.ether_proto = 0xFFFF;
5148 
5149 		break;
5150 	case IPV6_USER_FLOW:
5151 		be32_to_cpu_array(rule->tuples.src_ip,
5152 				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5153 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5154 				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5155 
5156 		be32_to_cpu_array(rule->tuples.dst_ip,
5157 				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5158 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5159 				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5160 
5161 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5162 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5163 
5164 		rule->tuples.ether_proto = ETH_P_IPV6;
5165 		rule->tuples_mask.ether_proto = 0xFFFF;
5166 
5167 		break;
5168 	case ETHER_FLOW:
5169 		ether_addr_copy(rule->tuples.src_mac,
5170 				fs->h_u.ether_spec.h_source);
5171 		ether_addr_copy(rule->tuples_mask.src_mac,
5172 				fs->m_u.ether_spec.h_source);
5173 
5174 		ether_addr_copy(rule->tuples.dst_mac,
5175 				fs->h_u.ether_spec.h_dest);
5176 		ether_addr_copy(rule->tuples_mask.dst_mac,
5177 				fs->m_u.ether_spec.h_dest);
5178 
5179 		rule->tuples.ether_proto =
5180 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
5181 		rule->tuples_mask.ether_proto =
5182 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
5183 
5184 		break;
5185 	default:
5186 		return -EOPNOTSUPP;
5187 	}
5188 
5189 	switch (flow_type) {
5190 	case SCTP_V4_FLOW:
5191 	case SCTP_V6_FLOW:
5192 		rule->tuples.ip_proto = IPPROTO_SCTP;
5193 		rule->tuples_mask.ip_proto = 0xFF;
5194 		break;
5195 	case TCP_V4_FLOW:
5196 	case TCP_V6_FLOW:
5197 		rule->tuples.ip_proto = IPPROTO_TCP;
5198 		rule->tuples_mask.ip_proto = 0xFF;
5199 		break;
5200 	case UDP_V4_FLOW:
5201 	case UDP_V6_FLOW:
5202 		rule->tuples.ip_proto = IPPROTO_UDP;
5203 		rule->tuples_mask.ip_proto = 0xFF;
5204 		break;
5205 	default:
5206 		break;
5207 	}
5208 
5209 	if ((fs->flow_type & FLOW_EXT)) {
5210 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5211 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5212 	}
5213 
5214 	if (fs->flow_type & FLOW_MAC_EXT) {
5215 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5216 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5217 	}
5218 
5219 	return 0;
5220 }
5221 
5222 /* the caller must hold fd_rule_lock */
5223 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5224 				struct hclge_fd_rule *rule)
5225 {
5226 	int ret;
5227 
5228 	if (!rule) {
5229 		dev_err(&hdev->pdev->dev,
5230 			"The flow director rule is NULL\n");
5231 		return -EINVAL;
5232 	}
5233 
5234 	/* it never fails here, so there is no need to check the return value */
5235 	hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5236 
5237 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5238 	if (ret)
5239 		goto clear_rule;
5240 
5241 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5242 	if (ret)
5243 		goto clear_rule;
5244 
5245 	return 0;
5246 
5247 clear_rule:
5248 	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5249 	return ret;
5250 }
5251 
5252 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5253 			      struct ethtool_rxnfc *cmd)
5254 {
5255 	struct hclge_vport *vport = hclge_get_vport(handle);
5256 	struct hclge_dev *hdev = vport->back;
5257 	u16 dst_vport_id = 0, q_index = 0;
5258 	struct ethtool_rx_flow_spec *fs;
5259 	struct hclge_fd_rule *rule;
5260 	u32 unused = 0;
5261 	u8 action;
5262 	int ret;
5263 
5264 	if (!hnae3_dev_fd_supported(hdev))
5265 		return -EOPNOTSUPP;
5266 
5267 	if (!hdev->fd_en) {
5268 		dev_warn(&hdev->pdev->dev,
5269 			 "Please enable flow director first\n");
5270 		return -EOPNOTSUPP;
5271 	}
5272 
5273 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5274 
5275 	ret = hclge_fd_check_spec(hdev, fs, &unused);
5276 	if (ret) {
5277 		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5278 		return ret;
5279 	}
5280 
5281 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5282 		action = HCLGE_FD_ACTION_DROP_PACKET;
5283 	} else {
5284 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5285 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5286 		u16 tqps;
5287 
5288 		if (vf > hdev->num_req_vfs) {
5289 			dev_err(&hdev->pdev->dev,
5290 				"Error: vf id (%d) > max vf num (%d)\n",
5291 				vf, hdev->num_req_vfs);
5292 			return -EINVAL;
5293 		}
5294 
5295 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5296 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5297 
5298 		if (ring >= tqps) {
5299 			dev_err(&hdev->pdev->dev,
5300 				"Error: queue id (%d) > max tqp num (%d)\n",
5301 				ring, tqps - 1);
5302 			return -EINVAL;
5303 		}
5304 
5305 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5306 		q_index = ring;
5307 	}
5308 
5309 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5310 	if (!rule)
5311 		return -ENOMEM;
5312 
5313 	ret = hclge_fd_get_tuple(hdev, fs, rule);
5314 	if (ret) {
5315 		kfree(rule);
5316 		return ret;
5317 	}
5318 
5319 	rule->flow_type = fs->flow_type;
5320 
5321 	rule->location = fs->location;
5322 	rule->unused_tuple = unused;
5323 	rule->vf_id = dst_vport_id;
5324 	rule->queue_id = q_index;
5325 	rule->action = action;
5326 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
5327 
5328 	/* to avoid rule conflicts, clear all aRFS rules when the user
5329 	 * configures a rule via ethtool
5330 	 */
5331 	hclge_clear_arfs_rules(handle);
5332 
5333 	spin_lock_bh(&hdev->fd_rule_lock);
5334 	ret = hclge_fd_config_rule(hdev, rule);
5335 
5336 	spin_unlock_bh(&hdev->fd_rule_lock);
5337 
5338 	return ret;
5339 }
5340 
5341 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5342 			      struct ethtool_rxnfc *cmd)
5343 {
5344 	struct hclge_vport *vport = hclge_get_vport(handle);
5345 	struct hclge_dev *hdev = vport->back;
5346 	struct ethtool_rx_flow_spec *fs;
5347 	int ret;
5348 
5349 	if (!hnae3_dev_fd_supported(hdev))
5350 		return -EOPNOTSUPP;
5351 
5352 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5353 
5354 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5355 		return -EINVAL;
5356 
5357 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
5358 		dev_err(&hdev->pdev->dev,
5359 			"Delete fail, rule %d is inexistent\n", fs->location);
5360 		return -ENOENT;
5361 	}
5362 
5363 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5364 				   NULL, false);
5365 	if (ret)
5366 		return ret;
5367 
5368 	spin_lock_bh(&hdev->fd_rule_lock);
5369 	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5370 
5371 	spin_unlock_bh(&hdev->fd_rule_lock);
5372 
5373 	return ret;
5374 }
5375 
5376 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5377 				     bool clear_list)
5378 {
5379 	struct hclge_vport *vport = hclge_get_vport(handle);
5380 	struct hclge_dev *hdev = vport->back;
5381 	struct hclge_fd_rule *rule;
5382 	struct hlist_node *node;
5383 	u16 location;
5384 
5385 	if (!hnae3_dev_fd_supported(hdev))
5386 		return;
5387 
5388 	spin_lock_bh(&hdev->fd_rule_lock);
5389 	for_each_set_bit(location, hdev->fd_bmap,
5390 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5391 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5392 				     NULL, false);
5393 
5394 	if (clear_list) {
5395 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5396 					  rule_node) {
5397 			hlist_del(&rule->rule_node);
5398 			kfree(rule);
5399 		}
5400 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5401 		hdev->hclge_fd_rule_num = 0;
5402 		bitmap_zero(hdev->fd_bmap,
5403 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5404 	}
5405 
5406 	spin_unlock_bh(&hdev->fd_rule_lock);
5407 }
5408 
5409 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5410 {
5411 	struct hclge_vport *vport = hclge_get_vport(handle);
5412 	struct hclge_dev *hdev = vport->back;
5413 	struct hclge_fd_rule *rule;
5414 	struct hlist_node *node;
5415 	int ret;
5416 
5417 	/* Return ok here, because reset error handling will check this
5418 	 * return value. If an error is returned here, the reset process
5419 	 * will fail.
5420 	 */
5421 	if (!hnae3_dev_fd_supported(hdev))
5422 		return 0;
5423 
5424 	/* if fd is disabled, the rules should not be restored during reset */
5425 	if (!hdev->fd_en)
5426 		return 0;
5427 
5428 	spin_lock_bh(&hdev->fd_rule_lock);
5429 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5430 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5431 		if (!ret)
5432 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5433 
5434 		if (ret) {
5435 			dev_warn(&hdev->pdev->dev,
5436 				 "Restore rule %d failed, remove it\n",
5437 				 rule->location);
5438 			clear_bit(rule->location, hdev->fd_bmap);
5439 			hlist_del(&rule->rule_node);
5440 			kfree(rule);
5441 			hdev->hclge_fd_rule_num--;
5442 		}
5443 	}
5444 
5445 	if (hdev->hclge_fd_rule_num)
5446 		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5447 
5448 	spin_unlock_bh(&hdev->fd_rule_lock);
5449 
5450 	return 0;
5451 }
5452 
5453 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5454 				 struct ethtool_rxnfc *cmd)
5455 {
5456 	struct hclge_vport *vport = hclge_get_vport(handle);
5457 	struct hclge_dev *hdev = vport->back;
5458 
5459 	if (!hnae3_dev_fd_supported(hdev))
5460 		return -EOPNOTSUPP;
5461 
5462 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
5463 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5464 
5465 	return 0;
5466 }
5467 
5468 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5469 				  struct ethtool_rxnfc *cmd)
5470 {
5471 	struct hclge_vport *vport = hclge_get_vport(handle);
5472 	struct hclge_fd_rule *rule = NULL;
5473 	struct hclge_dev *hdev = vport->back;
5474 	struct ethtool_rx_flow_spec *fs;
5475 	struct hlist_node *node2;
5476 
5477 	if (!hnae3_dev_fd_supported(hdev))
5478 		return -EOPNOTSUPP;
5479 
5480 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5481 
5482 	spin_lock_bh(&hdev->fd_rule_lock);
5483 
5484 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5485 		if (rule->location >= fs->location)
5486 			break;
5487 	}
5488 
5489 	if (!rule || fs->location != rule->location) {
5490 		spin_unlock_bh(&hdev->fd_rule_lock);
5491 
5492 		return -ENOENT;
5493 	}
5494 
5495 	fs->flow_type = rule->flow_type;
5496 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5497 	case SCTP_V4_FLOW:
5498 	case TCP_V4_FLOW:
5499 	case UDP_V4_FLOW:
5500 		fs->h_u.tcp_ip4_spec.ip4src =
5501 				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5502 		fs->m_u.tcp_ip4_spec.ip4src =
5503 			rule->unused_tuple & BIT(INNER_SRC_IP) ?
5504 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5505 
5506 		fs->h_u.tcp_ip4_spec.ip4dst =
5507 				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5508 		fs->m_u.tcp_ip4_spec.ip4dst =
5509 			rule->unused_tuple & BIT(INNER_DST_IP) ?
5510 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5511 
5512 		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5513 		fs->m_u.tcp_ip4_spec.psrc =
5514 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5515 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5516 
5517 		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5518 		fs->m_u.tcp_ip4_spec.pdst =
5519 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5520 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5521 
5522 		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5523 		fs->m_u.tcp_ip4_spec.tos =
5524 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5525 				0 : rule->tuples_mask.ip_tos;
5526 
5527 		break;
5528 	case IP_USER_FLOW:
5529 		fs->h_u.usr_ip4_spec.ip4src =
5530 				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5531 		fs->m_u.usr_ip4_spec.ip4src =
5532 			rule->unused_tuple & BIT(INNER_SRC_IP) ?
5533 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5534 
5535 		fs->h_u.usr_ip4_spec.ip4dst =
5536 				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5537 		fs->m_u.usr_ip4_spec.ip4dst =
5538 			rule->unused_tuple & BIT(INNER_DST_IP) ?
5539 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5540 
5541 		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5542 		fs->m_u.usr_ip4_spec.tos =
5543 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5544 				0 : rule->tuples_mask.ip_tos;
5545 
5546 		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5547 		fs->m_u.usr_ip4_spec.proto =
5548 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5549 				0 : rule->tuples_mask.ip_proto;
5550 
5551 		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5552 
5553 		break;
5554 	case SCTP_V6_FLOW:
5555 	case TCP_V6_FLOW:
5556 	case UDP_V6_FLOW:
5557 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5558 				  rule->tuples.src_ip, IPV6_SIZE);
5559 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5560 			memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5561 			       sizeof(int) * IPV6_SIZE);
5562 		else
5563 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5564 					  rule->tuples_mask.src_ip, IPV6_SIZE);
5565 
5566 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5567 				  rule->tuples.dst_ip, IPV6_SIZE);
5568 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5569 			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5570 			       sizeof(int) * IPV6_SIZE);
5571 		else
5572 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5573 					  rule->tuples_mask.dst_ip, IPV6_SIZE);
5574 
5575 		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5576 		fs->m_u.tcp_ip6_spec.psrc =
5577 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5578 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5579 
5580 		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5581 		fs->m_u.tcp_ip6_spec.pdst =
5582 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5583 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5584 
5585 		break;
5586 	case IPV6_USER_FLOW:
5587 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5588 				  rule->tuples.src_ip, IPV6_SIZE);
5589 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5590 			memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5591 			       sizeof(int) * IPV6_SIZE);
5592 		else
5593 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5594 					  rule->tuples_mask.src_ip, IPV6_SIZE);
5595 
5596 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5597 				  rule->tuples.dst_ip, IPV6_SIZE);
5598 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5599 			memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
5600 			       sizeof(int) * IPV6_SIZE);
5601 		else
5602 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5603 					  rule->tuples_mask.dst_ip, IPV6_SIZE);
5604 
5605 		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5606 		fs->m_u.usr_ip6_spec.l4_proto =
5607 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5608 				0 : rule->tuples_mask.ip_proto;
5609 
5610 		break;
5611 	case ETHER_FLOW:
5612 		ether_addr_copy(fs->h_u.ether_spec.h_source,
5613 				rule->tuples.src_mac);
5614 		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5615 			eth_zero_addr(fs->m_u.ether_spec.h_source);
5616 		else
5617 			ether_addr_copy(fs->m_u.ether_spec.h_source,
5618 					rule->tuples_mask.src_mac);
5619 
5620 		ether_addr_copy(fs->h_u.ether_spec.h_dest,
5621 				rule->tuples.dst_mac);
5622 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
5623 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
5624 		else
5625 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
5626 					rule->tuples_mask.dst_mac);
5627 
5628 		fs->h_u.ether_spec.h_proto =
5629 				cpu_to_be16(rule->tuples.ether_proto);
5630 		fs->m_u.ether_spec.h_proto =
5631 				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5632 				0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5633 
5634 		break;
5635 	default:
5636 		spin_unlock_bh(&hdev->fd_rule_lock);
5637 		return -EOPNOTSUPP;
5638 	}
5639 
5640 	if (fs->flow_type & FLOW_EXT) {
5641 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5642 		fs->m_ext.vlan_tci =
5643 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5644 				cpu_to_be16(VLAN_VID_MASK) :
5645 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
5646 	}
5647 
5648 	if (fs->flow_type & FLOW_MAC_EXT) {
5649 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5650 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
5651 			eth_zero_addr(fs->m_ext.h_dest);
5652 		else
5653 			ether_addr_copy(fs->m_ext.h_dest,
5654 					rule->tuples_mask.dst_mac);
5655 	}
5656 
5657 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5658 		fs->ring_cookie = RX_CLS_FLOW_DISC;
5659 	} else {
5660 		u64 vf_id;
5661 
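		/* the vf id lives in the upper bits of ring_cookie; assuming
		 * ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF is 32 (ethtool uapi), vf 2
		 * and queue 5 would be reported as ring_cookie 0x200000005
		 */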
5662 		fs->ring_cookie = rule->queue_id;
5663 		vf_id = rule->vf_id;
5664 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5665 		fs->ring_cookie |= vf_id;
5666 	}
5667 
5668 	spin_unlock_bh(&hdev->fd_rule_lock);
5669 
5670 	return 0;
5671 }
5672 
5673 static int hclge_get_all_rules(struct hnae3_handle *handle,
5674 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
5675 {
5676 	struct hclge_vport *vport = hclge_get_vport(handle);
5677 	struct hclge_dev *hdev = vport->back;
5678 	struct hclge_fd_rule *rule;
5679 	struct hlist_node *node2;
5680 	int cnt = 0;
5681 
5682 	if (!hnae3_dev_fd_supported(hdev))
5683 		return -EOPNOTSUPP;
5684 
5685 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5686 
5687 	spin_lock_bh(&hdev->fd_rule_lock);
5688 	hlist_for_each_entry_safe(rule, node2,
5689 				  &hdev->fd_rule_list, rule_node) {
5690 		if (cnt == cmd->rule_cnt) {
5691 			spin_unlock_bh(&hdev->fd_rule_lock);
5692 			return -EMSGSIZE;
5693 		}
5694 
5695 		rule_locs[cnt] = rule->location;
5696 		cnt++;
5697 	}
5698 
5699 	spin_unlock_bh(&hdev->fd_rule_lock);
5700 
5701 	cmd->rule_cnt = cnt;
5702 
5703 	return 0;
5704 }
5705 
5706 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5707 				     struct hclge_fd_rule_tuples *tuples)
5708 {
5709 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5710 	tuples->ip_proto = fkeys->basic.ip_proto;
5711 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5712 
5713 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
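		/* IPv4 addresses occupy only the last word of the 4-word ip
		 * arrays, matching the IPV4_INDEX convention used elsewhere
		 * in this file
		 */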
5714 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5715 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5716 	} else {
5717 		memcpy(tuples->src_ip,
5718 		       fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5719 		       sizeof(tuples->src_ip));
5720 		memcpy(tuples->dst_ip,
5721 		       fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5722 		       sizeof(tuples->dst_ip));
5723 	}
5724 }
5725 
5726 /* traverse all rules, check whether an existing rule has the same tuples */
5727 static struct hclge_fd_rule *
5728 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5729 			  const struct hclge_fd_rule_tuples *tuples)
5730 {
5731 	struct hclge_fd_rule *rule = NULL;
5732 	struct hlist_node *node;
5733 
5734 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5735 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5736 			return rule;
5737 	}
5738 
5739 	return NULL;
5740 }
5741 
5742 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5743 				     struct hclge_fd_rule *rule)
5744 {
5745 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5746 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5747 			     BIT(INNER_SRC_PORT);
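	/* with the tuples above masked out, an arfs rule matches only on
	 * ether proto, ip proto, src/dst ip and dst port
	 */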
5748 	rule->action = 0;
5749 	rule->vf_id = 0;
5750 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5751 	if (tuples->ether_proto == ETH_P_IP) {
5752 		if (tuples->ip_proto == IPPROTO_TCP)
5753 			rule->flow_type = TCP_V4_FLOW;
5754 		else
5755 			rule->flow_type = UDP_V4_FLOW;
5756 	} else {
5757 		if (tuples->ip_proto == IPPROTO_TCP)
5758 			rule->flow_type = TCP_V6_FLOW;
5759 		else
5760 			rule->flow_type = UDP_V6_FLOW;
5761 	}
5762 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5763 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
5764 }
5765 
5766 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5767 				      u16 flow_id, struct flow_keys *fkeys)
5768 {
5769 	struct hclge_vport *vport = hclge_get_vport(handle);
5770 	struct hclge_fd_rule_tuples new_tuples;
5771 	struct hclge_dev *hdev = vport->back;
5772 	struct hclge_fd_rule *rule;
5773 	u16 tmp_queue_id;
5774 	u16 bit_id;
5775 	int ret;
5776 
5777 	if (!hnae3_dev_fd_supported(hdev))
5778 		return -EOPNOTSUPP;
5779 
5780 	memset(&new_tuples, 0, sizeof(new_tuples));
5781 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5782 
5783 	spin_lock_bh(&hdev->fd_rule_lock);
5784 
5785 	/* when an fd rule has already been added by the user,
5786 	 * arfs should not work
5787 	 */
5788 	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5789 		spin_unlock_bh(&hdev->fd_rule_lock);
5790 
5791 		return -EOPNOTSUPP;
5792 	}
5793 
5794 	/* check whether a flow director filter already exists for this flow:
5795 	 * if not, create a new filter for it;
5796 	 * if a filter exists with a different queue id, modify the filter;
5797 	 * if a filter exists with the same queue id, do nothing
5798 	 */
5799 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5800 	if (!rule) {
5801 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5802 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5803 			spin_unlock_bh(&hdev->fd_rule_lock);
5804 
5805 			return -ENOSPC;
5806 		}
5807 
5808 		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5809 		if (!rule) {
5810 			spin_unlock_bh(&hdev->fd_rule_lock);
5811 
5812 			return -ENOMEM;
5813 		}
5814 
5815 		set_bit(bit_id, hdev->fd_bmap);
5816 		rule->location = bit_id;
5817 		rule->flow_id = flow_id;
5818 		rule->queue_id = queue_id;
5819 		hclge_fd_build_arfs_rule(&new_tuples, rule);
5820 		ret = hclge_fd_config_rule(hdev, rule);
5821 
5822 		spin_unlock_bh(&hdev->fd_rule_lock);
5823 
5824 		if (ret)
5825 			return ret;
5826 
5827 		return rule->location;
5828 	}
5829 
5830 	spin_unlock_bh(&hdev->fd_rule_lock);
5831 
5832 	if (rule->queue_id == queue_id)
5833 		return rule->location;
5834 
5835 	tmp_queue_id = rule->queue_id;
5836 	rule->queue_id = queue_id;
5837 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5838 	if (ret) {
5839 		rule->queue_id = tmp_queue_id;
5840 		return ret;
5841 	}
5842 
5843 	return rule->location;
5844 }
5845 
5846 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5847 {
5848 #ifdef CONFIG_RFS_ACCEL
5849 	struct hnae3_handle *handle = &hdev->vport[0].nic;
5850 	struct hclge_fd_rule *rule;
5851 	struct hlist_node *node;
5852 	HLIST_HEAD(del_list);
5853 
5854 	spin_lock_bh(&hdev->fd_rule_lock);
5855 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5856 		spin_unlock_bh(&hdev->fd_rule_lock);
5857 		return;
5858 	}
5859 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5860 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5861 					rule->flow_id, rule->location)) {
5862 			hlist_del_init(&rule->rule_node);
5863 			hlist_add_head(&rule->rule_node, &del_list);
5864 			hdev->hclge_fd_rule_num--;
5865 			clear_bit(rule->location, hdev->fd_bmap);
5866 		}
5867 	}
5868 	spin_unlock_bh(&hdev->fd_rule_lock);
5869 
5870 	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5871 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5872 				     rule->location, NULL, false);
5873 		kfree(rule);
5874 	}
5875 #endif
5876 }
5877 
5878 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5879 {
5880 #ifdef CONFIG_RFS_ACCEL
5881 	struct hclge_vport *vport = hclge_get_vport(handle);
5882 	struct hclge_dev *hdev = vport->back;
5883 
5884 	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5885 		hclge_del_all_fd_entries(handle, true);
5886 #endif
5887 }
5888 
5889 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5890 {
5891 	struct hclge_vport *vport = hclge_get_vport(handle);
5892 	struct hclge_dev *hdev = vport->back;
5893 
5894 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5895 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5896 }
5897 
5898 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5899 {
5900 	struct hclge_vport *vport = hclge_get_vport(handle);
5901 	struct hclge_dev *hdev = vport->back;
5902 
5903 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5904 }
5905 
5906 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5907 {
5908 	struct hclge_vport *vport = hclge_get_vport(handle);
5909 	struct hclge_dev *hdev = vport->back;
5910 
5911 	return hdev->rst_stats.hw_reset_done_cnt;
5912 }
5913 
5914 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5915 {
5916 	struct hclge_vport *vport = hclge_get_vport(handle);
5917 	struct hclge_dev *hdev = vport->back;
5918 	bool clear;
5919 
5920 	hdev->fd_en = enable;
5921 	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
5922 	if (!enable)
5923 		hclge_del_all_fd_entries(handle, clear);
5924 	else
5925 		hclge_restore_fd_entries(handle);
5926 }
5927 
5928 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5929 {
5930 	struct hclge_desc desc;
5931 	struct hclge_config_mac_mode_cmd *req =
5932 		(struct hclge_config_mac_mode_cmd *)desc.data;
5933 	u32 loop_en = 0;
5934 	int ret;
5935 
5936 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5937 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5938 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5939 	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5940 	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5941 	hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5942 	hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5943 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5944 	hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5945 	hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5946 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5947 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5948 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5949 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5950 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5951 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5952 
5953 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5954 	if (ret)
5955 		dev_err(&hdev->pdev->dev,
5956 			"mac enable fail, ret =%d.\n", ret);
5957 }
5958 
5959 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5960 {
5961 	struct hclge_config_mac_mode_cmd *req;
5962 	struct hclge_desc desc;
5963 	u32 loop_en;
5964 	int ret;
5965 
5966 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5967 	/* 1 Read out the MAC mode config at first */
5968 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5969 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5970 	if (ret) {
5971 		dev_err(&hdev->pdev->dev,
5972 			"mac loopback get fail, ret =%d.\n", ret);
5973 		return ret;
5974 	}
5975 
5976 	/* 2 Then setup the loopback flag */
5977 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5978 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5979 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5980 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5981 
5982 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5983 
5984 	/* 3 Config mac work mode with the loopback flag
5985 	 * and its original configuration parameters
5986 	 */
5987 	hclge_cmd_reuse_desc(&desc, false);
5988 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5989 	if (ret)
5990 		dev_err(&hdev->pdev->dev,
5991 			"mac loopback set fail, ret =%d.\n", ret);
5992 	return ret;
5993 }
5994 
5995 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5996 				     enum hnae3_loop loop_mode)
5997 {
5998 #define HCLGE_SERDES_RETRY_MS	10
5999 #define HCLGE_SERDES_RETRY_NUM	100
6000 
6001 #define HCLGE_MAC_LINK_STATUS_MS   10
6002 #define HCLGE_MAC_LINK_STATUS_NUM  100
6003 #define HCLGE_MAC_LINK_STATUS_DOWN 0
6004 #define HCLGE_MAC_LINK_STATUS_UP   1
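/* with the values above, each status poll loop below waits at most
 * 100 * 10 ms = 1 second before timing out
 */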
6005 
6006 	struct hclge_serdes_lb_cmd *req;
6007 	struct hclge_desc desc;
6008 	int mac_link_ret = 0;
6009 	int ret, i = 0;
6010 	u8 loop_mode_b;
6011 
6012 	req = (struct hclge_serdes_lb_cmd *)desc.data;
6013 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6014 
6015 	switch (loop_mode) {
6016 	case HNAE3_LOOP_SERIAL_SERDES:
6017 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6018 		break;
6019 	case HNAE3_LOOP_PARALLEL_SERDES:
6020 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6021 		break;
6022 	default:
6023 		dev_err(&hdev->pdev->dev,
6024 			"unsupported serdes loopback mode %d\n", loop_mode);
6025 		return -ENOTSUPP;
6026 	}
6027 
6028 	if (en) {
6029 		req->enable = loop_mode_b;
6030 		req->mask = loop_mode_b;
6031 		mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
6032 	} else {
6033 		req->mask = loop_mode_b;
6034 		mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
6035 	}
6036 
6037 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6038 	if (ret) {
6039 		dev_err(&hdev->pdev->dev,
6040 			"serdes loopback set fail, ret = %d\n", ret);
6041 		return ret;
6042 	}
6043 
6044 	do {
6045 		msleep(HCLGE_SERDES_RETRY_MS);
6046 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6047 					   true);
6048 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6049 		if (ret) {
6050 			dev_err(&hdev->pdev->dev,
6051 				"serdes loopback get, ret = %d\n", ret);
6052 			return ret;
6053 		}
6054 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
6055 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6056 
6057 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6058 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6059 		return -EBUSY;
6060 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6061 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6062 		return -EIO;
6063 	}
6064 
6065 	hclge_cfg_mac_mode(hdev, en);
6066 
6067 	i = 0;
6068 	do {
6069 		/* serdes internal loopback, independent of the network cable */
6070 		msleep(HCLGE_MAC_LINK_STATUS_MS);
6071 		ret = hclge_get_mac_link_status(hdev);
6072 		if (ret == mac_link_ret)
6073 			return 0;
6074 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6075 
6076 	dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
6077 
6078 	return -EBUSY;
6079 }
6080 
6081 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6082 			    int stream_id, bool enable)
6083 {
6084 	struct hclge_desc desc;
6085 	struct hclge_cfg_com_tqp_queue_cmd *req =
6086 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6087 	int ret;
6088 
6089 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6090 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6091 	req->stream_id = cpu_to_le16(stream_id);
6092 	if (enable)
6093 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6094 
6095 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6096 	if (ret)
6097 		dev_err(&hdev->pdev->dev,
6098 			"Tqp enable fail, status =%d.\n", ret);
6099 	return ret;
6100 }
6101 
6102 static int hclge_set_loopback(struct hnae3_handle *handle,
6103 			      enum hnae3_loop loop_mode, bool en)
6104 {
6105 	struct hclge_vport *vport = hclge_get_vport(handle);
6106 	struct hnae3_knic_private_info *kinfo;
6107 	struct hclge_dev *hdev = vport->back;
6108 	int i, ret;
6109 
6110 	switch (loop_mode) {
6111 	case HNAE3_LOOP_APP:
6112 		ret = hclge_set_app_loopback(hdev, en);
6113 		break;
6114 	case HNAE3_LOOP_SERIAL_SERDES:
6115 	case HNAE3_LOOP_PARALLEL_SERDES:
6116 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6117 		break;
6118 	default:
6119 		ret = -ENOTSUPP;
6120 		dev_err(&hdev->pdev->dev,
6121 			"loop_mode %d is not supported\n", loop_mode);
6122 		break;
6123 	}
6124 
6125 	if (ret)
6126 		return ret;
6127 
6128 	kinfo = &vport->nic.kinfo;
6129 	for (i = 0; i < kinfo->num_tqps; i++) {
6130 		ret = hclge_tqp_enable(hdev, i, 0, en);
6131 		if (ret)
6132 			return ret;
6133 	}
6134 
6135 	return 0;
6136 }
6137 
6138 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6139 {
6140 	struct hclge_vport *vport = hclge_get_vport(handle);
6141 	struct hnae3_knic_private_info *kinfo;
6142 	struct hnae3_queue *queue;
6143 	struct hclge_tqp *tqp;
6144 	int i;
6145 
6146 	kinfo = &vport->nic.kinfo;
6147 	for (i = 0; i < kinfo->num_tqps; i++) {
6148 		queue = handle->kinfo.tqp[i];
6149 		tqp = container_of(queue, struct hclge_tqp, q);
6150 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6151 	}
6152 }
6153 
6154 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6155 {
6156 	struct hclge_vport *vport = hclge_get_vport(handle);
6157 	struct hclge_dev *hdev = vport->back;
6158 
6159 	if (enable) {
6160 		mod_timer(&hdev->service_timer, jiffies + HZ);
6161 	} else {
6162 		del_timer_sync(&hdev->service_timer);
6163 		cancel_work_sync(&hdev->service_task);
6164 		clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6165 	}
6166 }
6167 
6168 static int hclge_ae_start(struct hnae3_handle *handle)
6169 {
6170 	struct hclge_vport *vport = hclge_get_vport(handle);
6171 	struct hclge_dev *hdev = vport->back;
6172 
6173 	/* mac enable */
6174 	hclge_cfg_mac_mode(hdev, true);
6175 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6176 	hdev->hw.mac.link = 0;
6177 
6178 	/* reset tqp stats */
6179 	hclge_reset_tqp_stats(handle);
6180 
6181 	hclge_mac_start_phy(hdev);
6182 
6183 	return 0;
6184 }
6185 
6186 static void hclge_ae_stop(struct hnae3_handle *handle)
6187 {
6188 	struct hclge_vport *vport = hclge_get_vport(handle);
6189 	struct hclge_dev *hdev = vport->back;
6190 	int i;
6191 
6192 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
6193 
6194 	hclge_clear_arfs_rules(handle);
6195 
6196 	/* If it is not a PF reset, the firmware will disable the MAC,
6197 	 * so it only needs to stop the phy here.
6198 	 */
6199 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6200 	    hdev->reset_type != HNAE3_FUNC_RESET) {
6201 		hclge_mac_stop_phy(hdev);
6202 		return;
6203 	}
6204 
6205 	for (i = 0; i < handle->kinfo.num_tqps; i++)
6206 		hclge_reset_tqp(handle, i);
6207 
6208 	/* Mac disable */
6209 	hclge_cfg_mac_mode(hdev, false);
6210 
6211 	hclge_mac_stop_phy(hdev);
6212 
6213 	/* reset tqp stats */
6214 	hclge_reset_tqp_stats(handle);
6215 	hclge_update_link_status(hdev);
6216 }
6217 
6218 int hclge_vport_start(struct hclge_vport *vport)
6219 {
6220 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6221 	vport->last_active_jiffies = jiffies;
6222 	return 0;
6223 }
6224 
6225 void hclge_vport_stop(struct hclge_vport *vport)
6226 {
6227 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6228 }
6229 
6230 static int hclge_client_start(struct hnae3_handle *handle)
6231 {
6232 	struct hclge_vport *vport = hclge_get_vport(handle);
6233 
6234 	return hclge_vport_start(vport);
6235 }
6236 
6237 static void hclge_client_stop(struct hnae3_handle *handle)
6238 {
6239 	struct hclge_vport *vport = hclge_get_vport(handle);
6240 
6241 	hclge_vport_stop(vport);
6242 }
6243 
6244 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6245 					 u16 cmdq_resp, u8  resp_code,
6246 					 enum hclge_mac_vlan_tbl_opcode op)
6247 {
6248 	struct hclge_dev *hdev = vport->back;
6249 	int return_status = -EIO;
6250 
6251 	if (cmdq_resp) {
6252 		dev_err(&hdev->pdev->dev,
6253 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6254 			cmdq_resp);
6255 		return -EIO;
6256 	}
6257 
6258 	if (op == HCLGE_MAC_VLAN_ADD) {
6259 		if ((!resp_code) || (resp_code == 1)) {
6260 			return_status = 0;
6261 		} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6262 			return_status = -ENOSPC;
6263 			dev_err(&hdev->pdev->dev,
6264 				"add mac addr failed for uc_overflow.\n");
6265 		} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6266 			return_status = -ENOSPC;
6267 			dev_err(&hdev->pdev->dev,
6268 				"add mac addr failed for mc_overflow.\n");
6269 		} else {
6270 			dev_err(&hdev->pdev->dev,
6271 				"add mac addr failed for undefined, code=%d.\n",
6272 				resp_code);
6273 		}
6274 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
6275 		if (!resp_code) {
6276 			return_status = 0;
6277 		} else if (resp_code == 1) {
6278 			return_status = -ENOENT;
6279 			dev_dbg(&hdev->pdev->dev,
6280 				"remove mac addr failed for miss.\n");
6281 		} else {
6282 			dev_err(&hdev->pdev->dev,
6283 				"remove mac addr failed for undefined, code=%d.\n",
6284 				resp_code);
6285 		}
6286 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
6287 		if (!resp_code) {
6288 			return_status = 0;
6289 		} else if (resp_code == 1) {
6290 			return_status = -ENOENT;
6291 			dev_dbg(&hdev->pdev->dev,
6292 				"lookup mac addr failed for miss.\n");
6293 		} else {
6294 			dev_err(&hdev->pdev->dev,
6295 				"lookup mac addr failed for undefined, code=%d.\n",
6296 				resp_code);
6297 		}
6298 	} else {
6299 		return_status = -EINVAL;
6300 		dev_err(&hdev->pdev->dev,
6301 			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6302 			op);
6303 	}
6304 
6305 	return return_status;
6306 }
6307 
6308 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6309 {
6310 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6311 
6312 	int word_num;
6313 	int bit_num;
6314 
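	/* vfids 0-191 are tracked in the six 32-bit words of desc[1].data,
	 * the remaining vfids in desc[2].data; e.g. vfid 200 maps to bit
	 * (200 % 32) = 8 of word (200 - 192) / 32 = 0 in desc[2]
	 */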
6315 	if (vfid > 255 || vfid < 0)
6316 		return -EIO;
6317 
6318 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6319 		word_num = vfid / 32;
6320 		bit_num  = vfid % 32;
6321 		if (clr)
6322 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6323 		else
6324 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6325 	} else {
6326 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6327 		bit_num  = vfid % 32;
6328 		if (clr)
6329 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6330 		else
6331 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6332 	}
6333 
6334 	return 0;
6335 }
6336 
6337 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6338 {
6339 #define HCLGE_DESC_NUMBER 3
6340 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6341 	int i, j;
6342 
6343 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6344 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6345 			if (desc[i].data[j])
6346 				return false;
6347 
6348 	return true;
6349 }
6350 
6351 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6352 				   const u8 *addr, bool is_mc)
6353 {
6354 	const unsigned char *mac_addr = addr;
6355 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6356 		       (mac_addr[0]) | (mac_addr[1] << 8);
6357 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
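	/* bytes 0-3 of the mac are packed little-endian into mac_addr_hi32
	 * and bytes 4-5 into mac_addr_lo16; e.g. 00:11:22:33:44:55 gives
	 * high_val = 0x33221100 and low_val = 0x5544
	 */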
6358 
6359 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6360 	if (is_mc) {
6361 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6362 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6363 	}
6364 
6365 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6366 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6367 }
6368 
6369 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6370 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
6371 {
6372 	struct hclge_dev *hdev = vport->back;
6373 	struct hclge_desc desc;
6374 	u8 resp_code;
6375 	u16 retval;
6376 	int ret;
6377 
6378 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6379 
6380 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6381 
6382 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6383 	if (ret) {
6384 		dev_err(&hdev->pdev->dev,
6385 			"del mac addr failed for cmd_send, ret =%d.\n",
6386 			ret);
6387 		return ret;
6388 	}
6389 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6390 	retval = le16_to_cpu(desc.retval);
6391 
6392 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6393 					     HCLGE_MAC_VLAN_REMOVE);
6394 }
6395 
6396 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6397 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
6398 				     struct hclge_desc *desc,
6399 				     bool is_mc)
6400 {
6401 	struct hclge_dev *hdev = vport->back;
6402 	u8 resp_code;
6403 	u16 retval;
6404 	int ret;
6405 
6406 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6407 	if (is_mc) {
6408 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6409 		memcpy(desc[0].data,
6410 		       req,
6411 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6412 		hclge_cmd_setup_basic_desc(&desc[1],
6413 					   HCLGE_OPC_MAC_VLAN_ADD,
6414 					   true);
6415 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6416 		hclge_cmd_setup_basic_desc(&desc[2],
6417 					   HCLGE_OPC_MAC_VLAN_ADD,
6418 					   true);
6419 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
6420 	} else {
6421 		memcpy(desc[0].data,
6422 		       req,
6423 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6424 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
6425 	}
6426 	if (ret) {
6427 		dev_err(&hdev->pdev->dev,
6428 			"lookup mac addr failed for cmd_send, ret =%d.\n",
6429 			ret);
6430 		return ret;
6431 	}
6432 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6433 	retval = le16_to_cpu(desc[0].retval);
6434 
6435 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6436 					     HCLGE_MAC_VLAN_LKUP);
6437 }
6438 
6439 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6440 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
6441 				  struct hclge_desc *mc_desc)
6442 {
6443 	struct hclge_dev *hdev = vport->back;
6444 	int cfg_status;
6445 	u8 resp_code;
6446 	u16 retval;
6447 	int ret;
6448 
6449 	if (!mc_desc) {
6450 		struct hclge_desc desc;
6451 
6452 		hclge_cmd_setup_basic_desc(&desc,
6453 					   HCLGE_OPC_MAC_VLAN_ADD,
6454 					   false);
6455 		memcpy(desc.data, req,
6456 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6457 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6458 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6459 		retval = le16_to_cpu(desc.retval);
6460 
6461 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6462 							   resp_code,
6463 							   HCLGE_MAC_VLAN_ADD);
6464 	} else {
6465 		hclge_cmd_reuse_desc(&mc_desc[0], false);
6466 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6467 		hclge_cmd_reuse_desc(&mc_desc[1], false);
6468 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6469 		hclge_cmd_reuse_desc(&mc_desc[2], false);
6470 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6471 		memcpy(mc_desc[0].data, req,
6472 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6473 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6474 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6475 		retval = le16_to_cpu(mc_desc[0].retval);
6476 
6477 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6478 							   resp_code,
6479 							   HCLGE_MAC_VLAN_ADD);
6480 	}
6481 
6482 	if (ret) {
6483 		dev_err(&hdev->pdev->dev,
6484 			"add mac addr failed for cmd_send, ret =%d.\n",
6485 			ret);
6486 		return ret;
6487 	}
6488 
6489 	return cfg_status;
6490 }
6491 
6492 static int hclge_init_umv_space(struct hclge_dev *hdev)
6493 {
6494 	u16 allocated_size = 0;
6495 	int ret;
6496 
6497 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6498 				  true);
6499 	if (ret)
6500 		return ret;
6501 
6502 	if (allocated_size < hdev->wanted_umv_size)
6503 		dev_warn(&hdev->pdev->dev,
6504 			 "Alloc umv space failed, want %d, get %d\n",
6505 			 hdev->wanted_umv_size, allocated_size);
6506 
6507 	mutex_init(&hdev->umv_mutex);
6508 	hdev->max_umv_size = allocated_size;
6509 	/* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
6510 	 * reserve some unicast mac vlan table entries to be shared by
6511 	 * the pf and its vfs.
6512 	 */
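	/* e.g. with max_umv_size = 256 and num_req_vfs = 6, the pf and each
	 * vf get priv_umv_size = 256 / 8 = 32 private entries, and the
	 * shared pool starts at 32 + (256 % 8) = 32 entries
	 */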
6513 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6514 	hdev->share_umv_size = hdev->priv_umv_size +
6515 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
6516 
6517 	return 0;
6518 }
6519 
6520 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6521 {
6522 	int ret;
6523 
6524 	if (hdev->max_umv_size > 0) {
6525 		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6526 					  false);
6527 		if (ret)
6528 			return ret;
6529 		hdev->max_umv_size = 0;
6530 	}
6531 	mutex_destroy(&hdev->umv_mutex);
6532 
6533 	return 0;
6534 }
6535 
6536 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6537 			       u16 *allocated_size, bool is_alloc)
6538 {
6539 	struct hclge_umv_spc_alc_cmd *req;
6540 	struct hclge_desc desc;
6541 	int ret;
6542 
6543 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6544 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6545 	if (!is_alloc)
6546 		hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
6547 
6548 	req->space_size = cpu_to_le32(space_size);
6549 
6550 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6551 	if (ret) {
6552 		dev_err(&hdev->pdev->dev,
6553 			"%s umv space failed for cmd_send, ret =%d\n",
6554 			is_alloc ? "allocate" : "free", ret);
6555 		return ret;
6556 	}
6557 
6558 	if (is_alloc && allocated_size)
6559 		*allocated_size = le32_to_cpu(desc.data[1]);
6560 
6561 	return 0;
6562 }
6563 
6564 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6565 {
6566 	struct hclge_vport *vport;
6567 	int i;
6568 
6569 	for (i = 0; i < hdev->num_alloc_vport; i++) {
6570 		vport = &hdev->vport[i];
6571 		vport->used_umv_num = 0;
6572 	}
6573 
6574 	mutex_lock(&hdev->umv_mutex);
6575 	hdev->share_umv_size = hdev->priv_umv_size +
6576 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
6577 	mutex_unlock(&hdev->umv_mutex);
6578 }
6579 
6580 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6581 {
6582 	struct hclge_dev *hdev = vport->back;
6583 	bool is_full;
6584 
6585 	mutex_lock(&hdev->umv_mutex);
6586 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6587 		   hdev->share_umv_size == 0);
6588 	mutex_unlock(&hdev->umv_mutex);
6589 
6590 	return is_full;
6591 }
6592 
6593 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6594 {
6595 	struct hclge_dev *hdev = vport->back;
6596 
6597 	mutex_lock(&hdev->umv_mutex);
6598 	if (is_free) {
6599 		if (vport->used_umv_num > hdev->priv_umv_size)
6600 			hdev->share_umv_size++;
6601 
6602 		if (vport->used_umv_num > 0)
6603 			vport->used_umv_num--;
6604 	} else {
6605 		if (vport->used_umv_num >= hdev->priv_umv_size &&
6606 		    hdev->share_umv_size > 0)
6607 			hdev->share_umv_size--;
6608 		vport->used_umv_num++;
6609 	}
6610 	mutex_unlock(&hdev->umv_mutex);
6611 }
6612 
6613 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6614 			     const unsigned char *addr)
6615 {
6616 	struct hclge_vport *vport = hclge_get_vport(handle);
6617 
6618 	return hclge_add_uc_addr_common(vport, addr);
6619 }
6620 
6621 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6622 			     const unsigned char *addr)
6623 {
6624 	struct hclge_dev *hdev = vport->back;
6625 	struct hclge_mac_vlan_tbl_entry_cmd req;
6626 	struct hclge_desc desc;
6627 	u16 egress_port = 0;
6628 	int ret;
6629 
6630 	/* mac addr check */
6631 	if (is_zero_ether_addr(addr) ||
6632 	    is_broadcast_ether_addr(addr) ||
6633 	    is_multicast_ether_addr(addr)) {
6634 		dev_err(&hdev->pdev->dev,
6635 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6636 			 addr, is_zero_ether_addr(addr),
6637 			 is_broadcast_ether_addr(addr),
6638 			 is_multicast_ether_addr(addr));
6639 		return -EINVAL;
6640 	}
6641 
6642 	memset(&req, 0, sizeof(req));
6643 
6644 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6645 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6646 
6647 	req.egress_port = cpu_to_le16(egress_port);
6648 
6649 	hclge_prepare_mac_addr(&req, addr, false);
6650 
6651 	/* Look up the mac address in the mac_vlan table, and add
6652 	 * it if the entry does not exist. Duplicate unicast entries
6653 	 * are not allowed in the mac vlan table.
6654 	 */
6655 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6656 	if (ret == -ENOENT) {
6657 		if (!hclge_is_umv_space_full(vport)) {
6658 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6659 			if (!ret)
6660 				hclge_update_umv_space(vport, false);
6661 			return ret;
6662 		}
6663 
6664 		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6665 			hdev->priv_umv_size);
6666 
6667 		return -ENOSPC;
6668 	}
6669 
6670 	/* check if we just hit the duplicate */
6671 	if (!ret) {
6672 		dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6673 			 vport->vport_id, addr);
6674 		return 0;
6675 	}
6676 
6677 	dev_err(&hdev->pdev->dev,
6678 		"PF failed to add unicast entry(%pM) in the MAC table\n",
6679 		addr);
6680 
6681 	return ret;
6682 }
6683 
6684 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6685 			    const unsigned char *addr)
6686 {
6687 	struct hclge_vport *vport = hclge_get_vport(handle);
6688 
6689 	return hclge_rm_uc_addr_common(vport, addr);
6690 }
6691 
6692 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6693 			    const unsigned char *addr)
6694 {
6695 	struct hclge_dev *hdev = vport->back;
6696 	struct hclge_mac_vlan_tbl_entry_cmd req;
6697 	int ret;
6698 
6699 	/* mac addr check */
6700 	if (is_zero_ether_addr(addr) ||
6701 	    is_broadcast_ether_addr(addr) ||
6702 	    is_multicast_ether_addr(addr)) {
6703 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
6704 			addr);
6705 		return -EINVAL;
6706 	}
6707 
6708 	memset(&req, 0, sizeof(req));
6709 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6710 	hclge_prepare_mac_addr(&req, addr, false);
6711 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
6712 	if (!ret)
6713 		hclge_update_umv_space(vport, true);
6714 
6715 	return ret;
6716 }
6717 
6718 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6719 			     const unsigned char *addr)
6720 {
6721 	struct hclge_vport *vport = hclge_get_vport(handle);
6722 
6723 	return hclge_add_mc_addr_common(vport, addr);
6724 }
6725 
6726 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6727 			     const unsigned char *addr)
6728 {
6729 	struct hclge_dev *hdev = vport->back;
6730 	struct hclge_mac_vlan_tbl_entry_cmd req;
6731 	struct hclge_desc desc[3];
6732 	int status;
6733 
6734 	/* mac addr check */
6735 	if (!is_multicast_ether_addr(addr)) {
6736 		dev_err(&hdev->pdev->dev,
6737 			"Add mc mac err! invalid mac:%pM.\n",
6738 			 addr);
6739 		return -EINVAL;
6740 	}
6741 	memset(&req, 0, sizeof(req));
6742 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6743 	hclge_prepare_mac_addr(&req, addr, true);
6744 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6745 	if (status) {
6746 		/* This mac addr does not exist, add a new entry for it */
6747 		memset(desc[0].data, 0, sizeof(desc[0].data));
6748 		memset(desc[1].data, 0, sizeof(desc[0].data));
6749 		memset(desc[2].data, 0, sizeof(desc[0].data));
6750 	}
6751 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
6752 	if (status)
6753 		return status;
6754 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6755 
6756 	if (status == -ENOSPC)
6757 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6758 
6759 	return status;
6760 }
6761 
6762 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6763 			    const unsigned char *addr)
6764 {
6765 	struct hclge_vport *vport = hclge_get_vport(handle);
6766 
6767 	return hclge_rm_mc_addr_common(vport, addr);
6768 }
6769 
6770 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6771 			    const unsigned char *addr)
6772 {
6773 	struct hclge_dev *hdev = vport->back;
6774 	struct hclge_mac_vlan_tbl_entry_cmd req;
6775 	enum hclge_cmd_status status;
6776 	struct hclge_desc desc[3];
6777 
6778 	/* mac addr check */
6779 	if (!is_multicast_ether_addr(addr)) {
6780 		dev_dbg(&hdev->pdev->dev,
6781 			"Remove mc mac err! invalid mac:%pM.\n",
6782 			 addr);
6783 		return -EINVAL;
6784 	}
6785 
6786 	memset(&req, 0, sizeof(req));
6787 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6788 	hclge_prepare_mac_addr(&req, addr, true);
6789 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6790 	if (!status) {
6791 		/* This mac addr exists, remove this handle's VFID for it */
6792 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
6793 		if (status)
6794 			return status;
6795 
6796 		if (hclge_is_all_function_id_zero(desc))
6797 			/* All the vfids are zero, so delete this entry */
6798 			status = hclge_remove_mac_vlan_tbl(vport, &req);
6799 		else
6800 			/* Not all the vfids are zero, so update the vfids */
6801 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6802 
6803 	} else {
6804 		/* This mac address may be in the mta table, but it cannot be
6805 		 * deleted here because an mta entry represents an address
6806 		 * range rather than a specific address. The delete action for
6807 		 * all entries will take effect in update_mta_status, called by
6808 		 * hns3_nic_set_rx_mode.
6809 		 */
6810 		status = 0;
6811 	}
6812 
6813 	return status;
6814 }
6815 
6816 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6817 			       enum HCLGE_MAC_ADDR_TYPE mac_type)
6818 {
6819 	struct hclge_vport_mac_addr_cfg *mac_cfg;
6820 	struct list_head *list;
6821 
6822 	if (!vport->vport_id)
6823 		return;
6824 
6825 	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6826 	if (!mac_cfg)
6827 		return;
6828 
6829 	mac_cfg->hd_tbl_status = true;
6830 	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6831 
6832 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6833 	       &vport->uc_mac_list : &vport->mc_mac_list;
6834 
6835 	list_add_tail(&mac_cfg->node, list);
6836 }
6837 
6838 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6839 			      bool is_write_tbl,
6840 			      enum HCLGE_MAC_ADDR_TYPE mac_type)
6841 {
6842 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6843 	struct list_head *list;
6844 	bool uc_flag, mc_flag;
6845 
6846 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6847 	       &vport->uc_mac_list : &vport->mc_mac_list;
6848 
6849 	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6850 	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6851 
6852 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6853 		if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
6854 			if (uc_flag && mac_cfg->hd_tbl_status)
6855 				hclge_rm_uc_addr_common(vport, mac_addr);
6856 
6857 			if (mc_flag && mac_cfg->hd_tbl_status)
6858 				hclge_rm_mc_addr_common(vport, mac_addr);
6859 
6860 			list_del(&mac_cfg->node);
6861 			kfree(mac_cfg);
6862 			break;
6863 		}
6864 	}
6865 }
6866 
6867 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6868 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
6869 {
6870 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6871 	struct list_head *list;
6872 
6873 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6874 	       &vport->uc_mac_list : &vport->mc_mac_list;
6875 
6876 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6877 		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6878 			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6879 
6880 		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6881 			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6882 
6883 		mac_cfg->hd_tbl_status = false;
6884 		if (is_del_list) {
6885 			list_del(&mac_cfg->node);
6886 			kfree(mac_cfg);
6887 		}
6888 	}
6889 }
6890 
6891 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6892 {
6893 	struct hclge_vport_mac_addr_cfg *mac, *tmp;
6894 	struct hclge_vport *vport;
6895 	int i;
6896 
6897 	mutex_lock(&hdev->vport_cfg_mutex);
6898 	for (i = 0; i < hdev->num_alloc_vport; i++) {
6899 		vport = &hdev->vport[i];
6900 		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6901 			list_del(&mac->node);
6902 			kfree(mac);
6903 		}
6904 
6905 		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6906 			list_del(&mac->node);
6907 			kfree(mac);
6908 		}
6909 	}
6910 	mutex_unlock(&hdev->vport_cfg_mutex);
6911 }
6912 
6913 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6914 					      u16 cmdq_resp, u8 resp_code)
6915 {
6916 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
6917 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
6918 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
6919 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
6920 
6921 	int return_status;
6922 
6923 	if (cmdq_resp) {
6924 		dev_err(&hdev->pdev->dev,
6925 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6926 			cmdq_resp);
6927 		return -EIO;
6928 	}
6929 
6930 	switch (resp_code) {
6931 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
6932 	case HCLGE_ETHERTYPE_ALREADY_ADD:
6933 		return_status = 0;
6934 		break;
6935 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6936 		dev_err(&hdev->pdev->dev,
6937 			"add mac ethertype failed for manager table overflow.\n");
6938 		return_status = -EIO;
6939 		break;
6940 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
6941 		dev_err(&hdev->pdev->dev,
6942 			"add mac ethertype failed for key conflict.\n");
6943 		return_status = -EIO;
6944 		break;
6945 	default:
6946 		dev_err(&hdev->pdev->dev,
6947 			"add mac ethertype failed for undefined, code=%d.\n",
6948 			resp_code);
6949 		return_status = -EIO;
6950 	}
6951 
6952 	return return_status;
6953 }
6954 
6955 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6956 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
6957 {
6958 	struct hclge_desc desc;
6959 	u8 resp_code;
6960 	u16 retval;
6961 	int ret;
6962 
6963 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6964 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6965 
6966 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6967 	if (ret) {
6968 		dev_err(&hdev->pdev->dev,
6969 			"add mac ethertype failed for cmd_send, ret =%d.\n",
6970 			ret);
6971 		return ret;
6972 	}
6973 
6974 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6975 	retval = le16_to_cpu(desc.retval);
6976 
6977 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6978 }
6979 
6980 static int init_mgr_tbl(struct hclge_dev *hdev)
6981 {
6982 	int ret;
6983 	int i;
6984 
6985 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6986 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6987 		if (ret) {
6988 			dev_err(&hdev->pdev->dev,
6989 				"add mac ethertype failed, ret =%d.\n",
6990 				ret);
6991 			return ret;
6992 		}
6993 	}
6994 
6995 	return 0;
6996 }
6997 
6998 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6999 {
7000 	struct hclge_vport *vport = hclge_get_vport(handle);
7001 	struct hclge_dev *hdev = vport->back;
7002 
7003 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
7004 }
7005 
7006 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7007 			      bool is_first)
7008 {
7009 	const unsigned char *new_addr = (const unsigned char *)p;
7010 	struct hclge_vport *vport = hclge_get_vport(handle);
7011 	struct hclge_dev *hdev = vport->back;
7012 	int ret;
7013 
7014 	/* mac addr check */
7015 	if (is_zero_ether_addr(new_addr) ||
7016 	    is_broadcast_ether_addr(new_addr) ||
7017 	    is_multicast_ether_addr(new_addr)) {
7018 		dev_err(&hdev->pdev->dev,
7019 			"Change uc mac err! invalid mac:%p.\n",
7020 			 new_addr);
7021 		return -EINVAL;
7022 	}
7023 
7024 	if ((!is_first || is_kdump_kernel()) &&
7025 	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7026 		dev_warn(&hdev->pdev->dev,
7027 			 "remove old uc mac address fail.\n");
7028 
7029 	ret = hclge_add_uc_addr(handle, new_addr);
7030 	if (ret) {
7031 		dev_err(&hdev->pdev->dev,
7032 			"add uc mac address fail, ret =%d.\n",
7033 			ret);
7034 
7035 		if (!is_first &&
7036 		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7037 			dev_err(&hdev->pdev->dev,
7038 				"restore uc mac address fail.\n");
7039 
7040 		return -EIO;
7041 	}
7042 
7043 	ret = hclge_pause_addr_cfg(hdev, new_addr);
7044 	if (ret) {
7045 		dev_err(&hdev->pdev->dev,
7046 			"configure mac pause address fail, ret =%d.\n",
7047 			ret);
7048 		return -EIO;
7049 	}
7050 
7051 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7052 
7053 	return 0;
7054 }
7055 
7056 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7057 			  int cmd)
7058 {
7059 	struct hclge_vport *vport = hclge_get_vport(handle);
7060 	struct hclge_dev *hdev = vport->back;
7061 
7062 	if (!hdev->hw.mac.phydev)
7063 		return -EOPNOTSUPP;
7064 
7065 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7066 }
7067 
7068 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7069 				      u8 fe_type, bool filter_en, u8 vf_id)
7070 {
7071 	struct hclge_vlan_filter_ctrl_cmd *req;
7072 	struct hclge_desc desc;
7073 	int ret;
7074 
7075 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7076 
7077 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7078 	req->vlan_type = vlan_type;
7079 	req->vlan_fe = filter_en ? fe_type : 0;
7080 	req->vf_id = vf_id;
7081 
7082 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7083 	if (ret)
7084 		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7085 			ret);
7086 
7087 	return ret;
7088 }
7089 
7090 #define HCLGE_FILTER_TYPE_VF		0
7091 #define HCLGE_FILTER_TYPE_PORT		1
7092 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
7093 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
7094 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
7095 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
7096 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
7097 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
7098 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
7099 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
7100 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
7101 
7102 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7103 {
7104 	struct hclge_vport *vport = hclge_get_vport(handle);
7105 	struct hclge_dev *hdev = vport->back;
7106 
7107 	if (hdev->pdev->revision >= 0x21) {
7108 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7109 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
7110 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7111 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
7112 	} else {
7113 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7114 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7115 					   0);
7116 	}
7117 	if (enable)
7118 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
7119 	else
7120 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7121 }
7122 
7123 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7124 				    bool is_kill, u16 vlan, u8 qos,
7125 				    __be16 proto)
7126 {
7127 #define HCLGE_MAX_VF_BYTES  16
7128 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
7129 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
7130 	struct hclge_desc desc[2];
7131 	u8 vf_byte_val;
7132 	u8 vf_byte_off;
7133 	int ret;
7134 
7135 	/* If the vf vlan table is full, the firmware disables the vf vlan
7136 	 * filter, so adding a new vlan id is neither possible nor necessary.
7137 	 */
7138 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7139 		return 0;
7140 
7141 	hclge_cmd_setup_basic_desc(&desc[0],
7142 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7143 	hclge_cmd_setup_basic_desc(&desc[1],
7144 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7145 
7146 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7147 
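	/* Each VF owns one bit in the bitmap; the bitmap spans the two
	 * descriptors, HCLGE_MAX_VF_BYTES bytes per descriptor.
	 */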
7148 	vf_byte_off = vfid / 8;
7149 	vf_byte_val = 1 << (vfid % 8);
7150 
7151 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7152 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7153 
7154 	req0->vlan_id  = cpu_to_le16(vlan);
7155 	req0->vlan_cfg = is_kill;
7156 
7157 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7158 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7159 	else
7160 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7161 
7162 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
7163 	if (ret) {
7164 		dev_err(&hdev->pdev->dev,
7165 			"Send vf vlan command fail, ret =%d.\n",
7166 			ret);
7167 		return ret;
7168 	}
7169 
7170 	if (!is_kill) {
7171 #define HCLGE_VF_VLAN_NO_ENTRY	2
7172 		if (!req0->resp_code || req0->resp_code == 1)
7173 			return 0;
7174 
7175 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7176 			set_bit(vfid, hdev->vf_vlan_full);
7177 			dev_warn(&hdev->pdev->dev,
7178 				 "vf vlan table is full, vf vlan filter is disabled\n");
7179 			return 0;
7180 		}
7181 
7182 		dev_err(&hdev->pdev->dev,
7183 			"Add vf vlan filter fail, ret =%d.\n",
7184 			req0->resp_code);
7185 	} else {
7186 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
7187 		if (!req0->resp_code)
7188 			return 0;
7189 
7190 		/* The vf vlan filter is disabled when the vf vlan table is
7191 		 * full, so a new vlan id will not be added to the table.
7192 		 * Just return 0 without a warning to avoid a flood of
7193 		 * verbose log messages on unload.
7194 		 */
7195 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7196 			return 0;
7197 
7198 		dev_err(&hdev->pdev->dev,
7199 			"Kill vf vlan filter fail, ret =%d.\n",
7200 			req0->resp_code);
7201 	}
7202 
7203 	return -EIO;
7204 }
7205 
7206 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7207 				      u16 vlan_id, bool is_kill)
7208 {
7209 	struct hclge_vlan_filter_pf_cfg_cmd *req;
7210 	struct hclge_desc desc;
7211 	u8 vlan_offset_byte_val;
7212 	u8 vlan_offset_byte;
7213 	u8 vlan_offset_160;
7214 	int ret;
7215 
7216 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7217 
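	/* The PF vlan table is programmed in blocks of 160 vlan ids:
	 * vlan_offset selects the block, the bitmap marks the byte and bit
	 * of the vlan id within that block.
	 */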
7218 	vlan_offset_160 = vlan_id / 160;
7219 	vlan_offset_byte = (vlan_id % 160) / 8;
7220 	vlan_offset_byte_val = 1 << (vlan_id % 8);
7221 
7222 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7223 	req->vlan_offset = vlan_offset_160;
7224 	req->vlan_cfg = is_kill;
7225 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7226 
7227 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7228 	if (ret)
7229 		dev_err(&hdev->pdev->dev,
7230 			"port vlan command, send fail, ret =%d.\n", ret);
7231 	return ret;
7232 }
7233 
7234 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7235 				    u16 vport_id, u16 vlan_id, u8 qos,
7236 				    bool is_kill)
7237 {
7238 	u16 vport_idx, vport_num = 0;
7239 	int ret;
7240 
7241 	if (is_kill && !vlan_id)
7242 		return 0;
7243 
7244 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7245 				       0, proto);
7246 	if (ret) {
7247 		dev_err(&hdev->pdev->dev,
7248 			"Set %d vport vlan filter config fail, ret =%d.\n",
7249 			vport_id, ret);
7250 		return ret;
7251 	}
7252 
7253 	/* vlan 0 may be added twice when 8021q module is enabled */
7254 	if (!is_kill && !vlan_id &&
7255 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
7256 		return 0;
7257 
7258 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7259 		dev_err(&hdev->pdev->dev,
7260 			"Add port vlan failed, vport %d is already in vlan %d\n",
7261 			vport_id, vlan_id);
7262 		return -EINVAL;
7263 	}
7264 
7265 	if (is_kill &&
7266 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7267 		dev_err(&hdev->pdev->dev,
7268 			"Delete port vlan failed, vport %d is not in vlan %d\n",
7269 			vport_id, vlan_id);
7270 		return -EINVAL;
7271 	}
7272 
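	/* Only update the port (global) vlan table when the first vport
	 * joins this vlan or the last vport leaves it.
	 */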
7273 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7274 		vport_num++;
7275 
7276 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7277 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7278 						 is_kill);
7279 
7280 	return ret;
7281 }
7282 
7283 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7284 {
7285 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7286 	struct hclge_vport_vtag_tx_cfg_cmd *req;
7287 	struct hclge_dev *hdev = vport->back;
7288 	struct hclge_desc desc;
7289 	int status;
7290 
7291 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7292 
7293 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7294 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7295 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7296 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7297 		      vcfg->accept_tag1 ? 1 : 0);
7298 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7299 		      vcfg->accept_untag1 ? 1 : 0);
7300 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7301 		      vcfg->accept_tag2 ? 1 : 0);
7302 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7303 		      vcfg->accept_untag2 ? 1 : 0);
7304 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7305 		      vcfg->insert_tag1_en ? 1 : 0);
7306 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7307 		      vcfg->insert_tag2_en ? 1 : 0);
7308 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7309 
7310 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7311 	req->vf_bitmap[req->vf_offset] =
7312 		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7313 
7314 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7315 	if (status)
7316 		dev_err(&hdev->pdev->dev,
7317 			"Send port txvlan cfg command fail, ret =%d\n",
7318 			status);
7319 
7320 	return status;
7321 }
7322 
7323 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7324 {
7325 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7326 	struct hclge_vport_vtag_rx_cfg_cmd *req;
7327 	struct hclge_dev *hdev = vport->back;
7328 	struct hclge_desc desc;
7329 	int status;
7330 
7331 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7332 
7333 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7334 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7335 		      vcfg->strip_tag1_en ? 1 : 0);
7336 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7337 		      vcfg->strip_tag2_en ? 1 : 0);
7338 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7339 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
7340 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7341 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
7342 
7343 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7344 	req->vf_bitmap[req->vf_offset] =
7345 		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7346 
7347 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7348 	if (status)
7349 		dev_err(&hdev->pdev->dev,
7350 			"Send port rxvlan cfg command fail, ret =%d\n",
7351 			status);
7352 
7353 	return status;
7354 }
7355 
7356 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7357 				  u16 port_base_vlan_state,
7358 				  u16 vlan_tag)
7359 {
7360 	int ret;
7361 
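	/* Without a port based vlan, tag1 from the stack is accepted as is;
	 * with one, the hardware inserts the port vlan tag as tag1 on tx.
	 */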
7362 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7363 		vport->txvlan_cfg.accept_tag1 = true;
7364 		vport->txvlan_cfg.insert_tag1_en = false;
7365 		vport->txvlan_cfg.default_tag1 = 0;
7366 	} else {
7367 		vport->txvlan_cfg.accept_tag1 = false;
7368 		vport->txvlan_cfg.insert_tag1_en = true;
7369 		vport->txvlan_cfg.default_tag1 = vlan_tag;
7370 	}
7371 
7372 	vport->txvlan_cfg.accept_untag1 = true;
7373 
7374 	/* accept_tag2 and accept_untag2 are not supported on
7375 	 * pdev revision 0x20; newer revisions support them, but
7376 	 * these two fields cannot be configured by the user.
7377 	 */
7378 	vport->txvlan_cfg.accept_tag2 = true;
7379 	vport->txvlan_cfg.accept_untag2 = true;
7380 	vport->txvlan_cfg.insert_tag2_en = false;
7381 	vport->txvlan_cfg.default_tag2 = 0;
7382 
7383 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7384 		vport->rxvlan_cfg.strip_tag1_en = false;
7385 		vport->rxvlan_cfg.strip_tag2_en =
7386 				vport->rxvlan_cfg.rx_vlan_offload_en;
7387 	} else {
7388 		vport->rxvlan_cfg.strip_tag1_en =
7389 				vport->rxvlan_cfg.rx_vlan_offload_en;
7390 		vport->rxvlan_cfg.strip_tag2_en = true;
7391 	}
7392 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7393 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7394 
7395 	ret = hclge_set_vlan_tx_offload_cfg(vport);
7396 	if (ret)
7397 		return ret;
7398 
7399 	return hclge_set_vlan_rx_offload_cfg(vport);
7400 }
7401 
7402 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7403 {
7404 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7405 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7406 	struct hclge_desc desc;
7407 	int status;
7408 
7409 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7410 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7411 	rx_req->ot_fst_vlan_type =
7412 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7413 	rx_req->ot_sec_vlan_type =
7414 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7415 	rx_req->in_fst_vlan_type =
7416 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7417 	rx_req->in_sec_vlan_type =
7418 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7419 
7420 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7421 	if (status) {
7422 		dev_err(&hdev->pdev->dev,
7423 			"Send rxvlan protocol type command fail, ret =%d\n",
7424 			status);
7425 		return status;
7426 	}
7427 
7428 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7429 
7430 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7431 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7432 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7433 
7434 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7435 	if (status)
7436 		dev_err(&hdev->pdev->dev,
7437 			"Send txvlan protocol type command fail, ret =%d\n",
7438 			status);
7439 
7440 	return status;
7441 }
7442 
7443 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7444 {
7445 #define HCLGE_DEF_VLAN_TYPE		0x8100
7446 
7447 	struct hnae3_handle *handle = &hdev->vport[0].nic;
7448 	struct hclge_vport *vport;
7449 	int ret;
7450 	int i;
7451 
7452 	if (hdev->pdev->revision >= 0x21) {
7453 		/* for revision 0x21, vf vlan filter is per function */
7454 		for (i = 0; i < hdev->num_alloc_vport; i++) {
7455 			vport = &hdev->vport[i];
7456 			ret = hclge_set_vlan_filter_ctrl(hdev,
7457 							 HCLGE_FILTER_TYPE_VF,
7458 							 HCLGE_FILTER_FE_EGRESS,
7459 							 true,
7460 							 vport->vport_id);
7461 			if (ret)
7462 				return ret;
7463 		}
7464 
7465 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7466 						 HCLGE_FILTER_FE_INGRESS, true,
7467 						 0);
7468 		if (ret)
7469 			return ret;
7470 	} else {
7471 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7472 						 HCLGE_FILTER_FE_EGRESS_V1_B,
7473 						 true, 0);
7474 		if (ret)
7475 			return ret;
7476 	}
7477 
7478 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
7479 
7480 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7481 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7482 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7483 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7484 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7485 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7486 
7487 	ret = hclge_set_vlan_protocol_type(hdev);
7488 	if (ret)
7489 		return ret;
7490 
7491 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7492 		u16 vlan_tag;
7493 
7494 		vport = &hdev->vport[i];
7495 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7496 
7497 		ret = hclge_vlan_offload_cfg(vport,
7498 					     vport->port_base_vlan_cfg.state,
7499 					     vlan_tag);
7500 		if (ret)
7501 			return ret;
7502 	}
7503 
7504 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7505 }
7506 
7507 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7508 				       bool writen_to_tbl)
7509 {
7510 	struct hclge_vport_vlan_cfg *vlan;
7511 
7512 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7513 	if (!vlan)
7514 		return;
7515 
7516 	vlan->hd_tbl_status = writen_to_tbl;
7517 	vlan->vlan_id = vlan_id;
7518 
7519 	list_add_tail(&vlan->node, &vport->vlan_list);
7520 }
7521 
7522 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7523 {
7524 	struct hclge_vport_vlan_cfg *vlan, *tmp;
7525 	struct hclge_dev *hdev = vport->back;
7526 	int ret;
7527 
7528 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7529 		if (!vlan->hd_tbl_status) {
7530 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7531 						       vport->vport_id,
7532 						       vlan->vlan_id, 0, false);
7533 			if (ret) {
7534 				dev_err(&hdev->pdev->dev,
7535 					"restore vport vlan list failed, ret=%d\n",
7536 					ret);
7537 				return ret;
7538 			}
7539 		}
7540 		vlan->hd_tbl_status = true;
7541 	}
7542 
7543 	return 0;
7544 }
7545 
7546 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7547 				      bool is_write_tbl)
7548 {
7549 	struct hclge_vport_vlan_cfg *vlan, *tmp;
7550 	struct hclge_dev *hdev = vport->back;
7551 
7552 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7553 		if (vlan->vlan_id == vlan_id) {
7554 			if (is_write_tbl && vlan->hd_tbl_status)
7555 				hclge_set_vlan_filter_hw(hdev,
7556 							 htons(ETH_P_8021Q),
7557 							 vport->vport_id,
7558 							 vlan_id, 0,
7559 							 true);
7560 
7561 			list_del(&vlan->node);
7562 			kfree(vlan);
7563 			break;
7564 		}
7565 	}
7566 }
7567 
7568 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7569 {
7570 	struct hclge_vport_vlan_cfg *vlan, *tmp;
7571 	struct hclge_dev *hdev = vport->back;
7572 
7573 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7574 		if (vlan->hd_tbl_status)
7575 			hclge_set_vlan_filter_hw(hdev,
7576 						 htons(ETH_P_8021Q),
7577 						 vport->vport_id,
7578 						 vlan->vlan_id, 0,
7579 						 true);
7580 
7581 		vlan->hd_tbl_status = false;
7582 		if (is_del_list) {
7583 			list_del(&vlan->node);
7584 			kfree(vlan);
7585 		}
7586 	}
7587 }
7588 
7589 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7590 {
7591 	struct hclge_vport_vlan_cfg *vlan, *tmp;
7592 	struct hclge_vport *vport;
7593 	int i;
7594 
7595 	mutex_lock(&hdev->vport_cfg_mutex);
7596 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7597 		vport = &hdev->vport[i];
7598 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7599 			list_del(&vlan->node);
7600 			kfree(vlan);
7601 		}
7602 	}
7603 	mutex_unlock(&hdev->vport_cfg_mutex);
7604 }
7605 
7606 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
7607 {
7608 	struct hclge_vport *vport = hclge_get_vport(handle);
7609 	struct hclge_vport_vlan_cfg *vlan, *tmp;
7610 	struct hclge_dev *hdev = vport->back;
7611 	u16 vlan_proto, qos;
7612 	u16 state, vlan_id;
7613 	int i;
7614 
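	/* Rewrite the hardware vlan filter from software state: the port
	 * based vlan entry when one is configured, otherwise every vlan in
	 * the vport's vlan list.
	 */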
7615 	mutex_lock(&hdev->vport_cfg_mutex);
7616 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7617 		vport = &hdev->vport[i];
7618 		vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
7619 		vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7620 		qos = vport->port_base_vlan_cfg.vlan_info.qos;
7621 		state = vport->port_base_vlan_cfg.state;
7622 
7623 		if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
7624 			hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
7625 						 vport->vport_id, vlan_id, qos,
7626 						 false);
7627 			continue;
7628 		}
7629 
7630 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7631 			if (vlan->hd_tbl_status)
7632 				hclge_set_vlan_filter_hw(hdev,
7633 							 htons(ETH_P_8021Q),
7634 							 vport->vport_id,
7635 							 vlan->vlan_id, 0,
7636 							 false);
7637 		}
7638 	}
7639 
7640 	mutex_unlock(&hdev->vport_cfg_mutex);
7641 }
7642 
7643 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7644 {
7645 	struct hclge_vport *vport = hclge_get_vport(handle);
7646 
7647 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7648 		vport->rxvlan_cfg.strip_tag1_en = false;
7649 		vport->rxvlan_cfg.strip_tag2_en = enable;
7650 	} else {
7651 		vport->rxvlan_cfg.strip_tag1_en = enable;
7652 		vport->rxvlan_cfg.strip_tag2_en = true;
7653 	}
7654 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7655 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7656 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7657 
7658 	return hclge_set_vlan_rx_offload_cfg(vport);
7659 }
7660 
7661 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7662 					    u16 port_base_vlan_state,
7663 					    struct hclge_vlan_info *new_info,
7664 					    struct hclge_vlan_info *old_info)
7665 {
7666 	struct hclge_dev *hdev = vport->back;
7667 	int ret;
7668 
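	/* Enabling port based vlan replaces the per-vlan entries with a
	 * single port vlan entry; disabling it removes that entry and
	 * restores the vport vlan list to the hardware.
	 */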
7669 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7670 		hclge_rm_vport_all_vlan_table(vport, false);
7671 		return hclge_set_vlan_filter_hw(hdev,
7672 						 htons(new_info->vlan_proto),
7673 						 vport->vport_id,
7674 						 new_info->vlan_tag,
7675 						 new_info->qos, false);
7676 	}
7677 
7678 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7679 				       vport->vport_id, old_info->vlan_tag,
7680 				       old_info->qos, true);
7681 	if (ret)
7682 		return ret;
7683 
7684 	return hclge_add_vport_all_vlan_table(vport);
7685 }
7686 
7687 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7688 				    struct hclge_vlan_info *vlan_info)
7689 {
7690 	struct hnae3_handle *nic = &vport->nic;
7691 	struct hclge_vlan_info *old_vlan_info;
7692 	struct hclge_dev *hdev = vport->back;
7693 	int ret;
7694 
7695 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7696 
7697 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7698 	if (ret)
7699 		return ret;
7700 
7701 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7702 		/* add new VLAN tag */
7703 		ret = hclge_set_vlan_filter_hw(hdev,
7704 					       htons(vlan_info->vlan_proto),
7705 					       vport->vport_id,
7706 					       vlan_info->vlan_tag,
7707 					       vlan_info->qos, false);
7708 		if (ret)
7709 			return ret;
7710 
7711 		/* remove old VLAN tag */
7712 		ret = hclge_set_vlan_filter_hw(hdev,
7713 					       htons(old_vlan_info->vlan_proto),
7714 					       vport->vport_id,
7715 					       old_vlan_info->vlan_tag,
7716 					       old_vlan_info->qos, true);
7717 		if (ret)
7718 			return ret;
7719 
7720 		goto update;
7721 	}
7722 
7723 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7724 					       old_vlan_info);
7725 	if (ret)
7726 		return ret;
7727 
7728 	/* update state only when disable/enable port based VLAN */
7729 	vport->port_base_vlan_cfg.state = state;
7730 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7731 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7732 	else
7733 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7734 
7735 update:
7736 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7737 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7738 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7739 
7740 	return 0;
7741 }
7742 
7743 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7744 					  enum hnae3_port_base_vlan_state state,
7745 					  u16 vlan)
7746 {
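	/* Map the current state and the requested vlan to an action:
	 * vlan 0 disables port based vlan, a different non-zero vlan
	 * modifies it, and the same vlan is a no-op.
	 */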
7747 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7748 		if (!vlan)
7749 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7750 		else
7751 			return HNAE3_PORT_BASE_VLAN_ENABLE;
7752 	} else {
7753 		if (!vlan)
7754 			return HNAE3_PORT_BASE_VLAN_DISABLE;
7755 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7756 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7757 		else
7758 			return HNAE3_PORT_BASE_VLAN_MODIFY;
7759 	}
7760 }
7761 
7762 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7763 				    u16 vlan, u8 qos, __be16 proto)
7764 {
7765 	struct hclge_vport *vport = hclge_get_vport(handle);
7766 	struct hclge_dev *hdev = vport->back;
7767 	struct hclge_vlan_info vlan_info;
7768 	u16 state;
7769 	int ret;
7770 
7771 	if (hdev->pdev->revision == 0x20)
7772 		return -EOPNOTSUPP;
7773 
7774 	/* qos is a 3 bits value, so can not be bigger than 7 */
7775 	if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7776 		return -EINVAL;
7777 	if (proto != htons(ETH_P_8021Q))
7778 		return -EPROTONOSUPPORT;
7779 
7780 	vport = &hdev->vport[vfid];
7781 	state = hclge_get_port_base_vlan_state(vport,
7782 					       vport->port_base_vlan_cfg.state,
7783 					       vlan);
7784 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7785 		return 0;
7786 
7787 	vlan_info.vlan_tag = vlan;
7788 	vlan_info.qos = qos;
7789 	vlan_info.vlan_proto = ntohs(proto);
7790 
7791 	/* update port based VLAN for PF */
7792 	if (!vfid) {
7793 		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7794 		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7795 		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7796 
7797 		return ret;
7798 	}
7799 
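	/* If the VF is not alive, just update its configuration; otherwise
	 * push the new port based vlan info to the VF through the mailbox.
	 */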
7800 	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7801 		return hclge_update_port_base_vlan_cfg(vport, state,
7802 						       &vlan_info);
7803 	} else {
7804 		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7805 							(u8)vfid, state,
7806 							vlan, qos,
7807 							ntohs(proto));
7808 		return ret;
7809 	}
7810 }
7811 
7812 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7813 			  u16 vlan_id, bool is_kill)
7814 {
7815 	struct hclge_vport *vport = hclge_get_vport(handle);
7816 	struct hclge_dev *hdev = vport->back;
7817 	bool writen_to_tbl = false;
7818 	int ret = 0;
7819 
7820 	/* While the device is resetting, the firmware cannot handle the
7821 	 * mailbox. Just record the vlan id and remove it after the
7822 	 * reset has finished.
7823 	 */
7824 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
7825 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
7826 		return -EBUSY;
7827 	}
7828 
7829 	/* When port based vlan is enabled, it is used as the vlan filter
7830 	 * entry. In that case the vlan filter table is not updated when the
7831 	 * user adds or removes a vlan; only the vport vlan list is updated.
7832 	 * The vlan ids in that list are written to the vlan filter table
7833 	 * once port based vlan is disabled.
7834 	 */
7835 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7836 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7837 					       vlan_id, 0, is_kill);
7838 		writen_to_tbl = true;
7839 	}
7840 
7841 	if (!ret) {
7842 		if (is_kill)
7843 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
7844 		else
7845 			hclge_add_vport_vlan_table(vport, vlan_id,
7846 						   writen_to_tbl);
7847 	} else if (is_kill) {
7848 		/* When removing the hw vlan filter fails, record the vlan id
7849 		 * and try to remove it from hw later, to stay consistent
7850 		 * with the stack.
7851 		 */
7852 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
7853 	}
7854 	return ret;
7855 }
7856 
7857 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
7858 {
7859 #define HCLGE_MAX_SYNC_COUNT	60
7860 
7861 	int i, ret, sync_cnt = 0;
7862 	u16 vlan_id;
7863 
7864 	/* retry the vlan deletions that failed earlier for every vport */
7865 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7866 		struct hclge_vport *vport = &hdev->vport[i];
7867 
7868 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
7869 					 VLAN_N_VID);
7870 		while (vlan_id != VLAN_N_VID) {
7871 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7872 						       vport->vport_id, vlan_id,
7873 						       0, true);
7874 			if (ret && ret != -EINVAL)
7875 				return;
7876 
7877 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
7878 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
7879 
7880 			sync_cnt++;
7881 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
7882 				return;
7883 
7884 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
7885 						 VLAN_N_VID);
7886 		}
7887 	}
7888 }
7889 
7890 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7891 {
7892 	struct hclge_config_max_frm_size_cmd *req;
7893 	struct hclge_desc desc;
7894 
7895 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7896 
7897 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7898 	req->max_frm_size = cpu_to_le16(new_mps);
7899 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7900 
7901 	return hclge_cmd_send(&hdev->hw, &desc, 1);
7902 }
7903 
7904 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7905 {
7906 	struct hclge_vport *vport = hclge_get_vport(handle);
7907 
7908 	return hclge_set_vport_mtu(vport, new_mtu);
7909 }
7910 
7911 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7912 {
7913 	struct hclge_dev *hdev = vport->back;
7914 	int i, max_frm_size, ret;
7915 
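	/* max frame size is the MTU plus the L2 header, FCS and room for
	 * two (possibly stacked) vlan tags
	 */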
7916 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7917 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7918 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
7919 		return -EINVAL;
7920 
7921 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7922 	mutex_lock(&hdev->vport_lock);
7923 	/* VF's mps must fit within hdev->mps */
7924 	if (vport->vport_id && max_frm_size > hdev->mps) {
7925 		mutex_unlock(&hdev->vport_lock);
7926 		return -EINVAL;
7927 	} else if (vport->vport_id) {
7928 		vport->mps = max_frm_size;
7929 		mutex_unlock(&hdev->vport_lock);
7930 		return 0;
7931 	}
7932 
7933 	/* the PF's mps must not be smaller than any VF's mps */
7934 	for (i = 1; i < hdev->num_alloc_vport; i++)
7935 		if (max_frm_size < hdev->vport[i].mps) {
7936 			mutex_unlock(&hdev->vport_lock);
7937 			return -EINVAL;
7938 		}
7939 
7940 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7941 
7942 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
7943 	if (ret) {
7944 		dev_err(&hdev->pdev->dev,
7945 			"Change mtu fail, ret =%d\n", ret);
7946 		goto out;
7947 	}
7948 
7949 	hdev->mps = max_frm_size;
7950 	vport->mps = max_frm_size;
7951 
7952 	ret = hclge_buffer_alloc(hdev);
7953 	if (ret)
7954 		dev_err(&hdev->pdev->dev,
7955 			"Allocate buffer fail, ret =%d\n", ret);
7956 
7957 out:
7958 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7959 	mutex_unlock(&hdev->vport_lock);
7960 	return ret;
7961 }
7962 
7963 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7964 				    bool enable)
7965 {
7966 	struct hclge_reset_tqp_queue_cmd *req;
7967 	struct hclge_desc desc;
7968 	int ret;
7969 
7970 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7971 
7972 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7973 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7974 	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7975 
7976 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7977 	if (ret) {
7978 		dev_err(&hdev->pdev->dev,
7979 			"Send tqp reset cmd error, status =%d\n", ret);
7980 		return ret;
7981 	}
7982 
7983 	return 0;
7984 }
7985 
7986 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7987 {
7988 	struct hclge_reset_tqp_queue_cmd *req;
7989 	struct hclge_desc desc;
7990 	int ret;
7991 
7992 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7993 
7994 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7995 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7996 
7997 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7998 	if (ret) {
7999 		dev_err(&hdev->pdev->dev,
8000 			"Get reset status error, status =%d\n", ret);
8001 		return ret;
8002 	}
8003 
8004 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8005 }
8006 
8007 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8008 {
8009 	struct hnae3_queue *queue;
8010 	struct hclge_tqp *tqp;
8011 
8012 	queue = handle->kinfo.tqp[queue_id];
8013 	tqp = container_of(queue, struct hclge_tqp, q);
8014 
8015 	return tqp->index;
8016 }
8017 
8018 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8019 {
8020 	struct hclge_vport *vport = hclge_get_vport(handle);
8021 	struct hclge_dev *hdev = vport->back;
8022 	int reset_try_times = 0;
8023 	int reset_status;
8024 	u16 queue_gid;
8025 	int ret;
8026 
8027 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8028 
8029 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8030 	if (ret) {
8031 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8032 		return ret;
8033 	}
8034 
8035 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8036 	if (ret) {
8037 		dev_err(&hdev->pdev->dev,
8038 			"Send reset tqp cmd fail, ret = %d\n", ret);
8039 		return ret;
8040 	}
8041 
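	/* Poll until the hardware reports the queue reset as done, up to
	 * HCLGE_TQP_RESET_TRY_TIMES polls of 20ms each.
	 */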
8042 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8043 		/* Wait for tqp hw reset */
8044 		msleep(20);
8045 		reset_status = hclge_get_reset_status(hdev, queue_gid);
8046 		if (reset_status)
8047 			break;
8048 	}
8049 
8050 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8051 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8052 		return ret;
8053 	}
8054 
8055 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8056 	if (ret)
8057 		dev_err(&hdev->pdev->dev,
8058 			"Deassert the soft reset fail, ret = %d\n", ret);
8059 
8060 	return ret;
8061 }
8062 
8063 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8064 {
8065 	struct hclge_dev *hdev = vport->back;
8066 	int reset_try_times = 0;
8067 	int reset_status;
8068 	u16 queue_gid;
8069 	int ret;
8070 
8071 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8072 
8073 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8074 	if (ret) {
8075 		dev_warn(&hdev->pdev->dev,
8076 			 "Send reset tqp cmd fail, ret = %d\n", ret);
8077 		return;
8078 	}
8079 
8080 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8081 		/* Wait for tqp hw reset */
8082 		msleep(20);
8083 		reset_status = hclge_get_reset_status(hdev, queue_gid);
8084 		if (reset_status)
8085 			break;
8086 	}
8087 
8088 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8089 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8090 		return;
8091 	}
8092 
8093 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8094 	if (ret)
8095 		dev_warn(&hdev->pdev->dev,
8096 			 "Deassert the soft reset fail, ret = %d\n", ret);
8097 }
8098 
8099 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8100 {
8101 	struct hclge_vport *vport = hclge_get_vport(handle);
8102 	struct hclge_dev *hdev = vport->back;
8103 
8104 	return hdev->fw_version;
8105 }
8106 
8107 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8108 {
8109 	struct phy_device *phydev = hdev->hw.mac.phydev;
8110 
8111 	if (!phydev)
8112 		return;
8113 
8114 	phy_set_asym_pause(phydev, rx_en, tx_en);
8115 }
8116 
8117 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8118 {
8119 	int ret;
8120 
8121 	if (rx_en && tx_en)
8122 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
8123 	else if (rx_en && !tx_en)
8124 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8125 	else if (!rx_en && tx_en)
8126 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8127 	else
8128 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
8129 
8130 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8131 		return 0;
8132 
8133 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8134 	if (ret) {
8135 		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
8136 			ret);
8137 		return ret;
8138 	}
8139 
8140 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8141 
8142 	return 0;
8143 }
8144 
8145 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8146 {
8147 	struct phy_device *phydev = hdev->hw.mac.phydev;
8148 	u16 remote_advertising = 0;
8149 	u16 local_advertising;
8150 	u32 rx_pause, tx_pause;
8151 	u8 flowctl;
8152 
8153 	if (!phydev->link || !phydev->autoneg)
8154 		return 0;
8155 
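	/* Resolve the tx/rx pause configuration from our advertisement and
	 * what the link partner advertises.
	 */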
8156 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8157 
8158 	if (phydev->pause)
8159 		remote_advertising = LPA_PAUSE_CAP;
8160 
8161 	if (phydev->asym_pause)
8162 		remote_advertising |= LPA_PAUSE_ASYM;
8163 
8164 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8165 					   remote_advertising);
8166 	tx_pause = flowctl & FLOW_CTRL_TX;
8167 	rx_pause = flowctl & FLOW_CTRL_RX;
8168 
8169 	if (phydev->duplex == HCLGE_MAC_HALF) {
8170 		tx_pause = 0;
8171 		rx_pause = 0;
8172 	}
8173 
8174 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8175 }
8176 
8177 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8178 				 u32 *rx_en, u32 *tx_en)
8179 {
8180 	struct hclge_vport *vport = hclge_get_vport(handle);
8181 	struct hclge_dev *hdev = vport->back;
8182 
8183 	*auto_neg = hclge_get_autoneg(handle);
8184 
8185 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8186 		*rx_en = 0;
8187 		*tx_en = 0;
8188 		return;
8189 	}
8190 
8191 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8192 		*rx_en = 1;
8193 		*tx_en = 0;
8194 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8195 		*tx_en = 1;
8196 		*rx_en = 0;
8197 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8198 		*rx_en = 1;
8199 		*tx_en = 1;
8200 	} else {
8201 		*rx_en = 0;
8202 		*tx_en = 0;
8203 	}
8204 }
8205 
8206 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8207 				u32 rx_en, u32 tx_en)
8208 {
8209 	struct hclge_vport *vport = hclge_get_vport(handle);
8210 	struct hclge_dev *hdev = vport->back;
8211 	struct phy_device *phydev = hdev->hw.mac.phydev;
8212 	u32 fc_autoneg;
8213 
8214 	fc_autoneg = hclge_get_autoneg(handle);
8215 	if (auto_neg != fc_autoneg) {
8216 		dev_info(&hdev->pdev->dev,
8217 			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8218 		return -EOPNOTSUPP;
8219 	}
8220 
8221 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8222 		dev_info(&hdev->pdev->dev,
8223 			 "Priority flow control enabled. Cannot set link flow control.\n");
8224 		return -EOPNOTSUPP;
8225 	}
8226 
8227 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8228 
8229 	if (!fc_autoneg)
8230 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8231 
8232 	if (phydev)
8233 		return phy_start_aneg(phydev);
8234 
8235 	if (hdev->pdev->revision == 0x20)
8236 		return -EOPNOTSUPP;
8237 
8238 	return hclge_restart_autoneg(handle);
8239 }
8240 
8241 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8242 					  u8 *auto_neg, u32 *speed, u8 *duplex)
8243 {
8244 	struct hclge_vport *vport = hclge_get_vport(handle);
8245 	struct hclge_dev *hdev = vport->back;
8246 
8247 	if (speed)
8248 		*speed = hdev->hw.mac.speed;
8249 	if (duplex)
8250 		*duplex = hdev->hw.mac.duplex;
8251 	if (auto_neg)
8252 		*auto_neg = hdev->hw.mac.autoneg;
8253 }
8254 
8255 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8256 				 u8 *module_type)
8257 {
8258 	struct hclge_vport *vport = hclge_get_vport(handle);
8259 	struct hclge_dev *hdev = vport->back;
8260 
8261 	if (media_type)
8262 		*media_type = hdev->hw.mac.media_type;
8263 
8264 	if (module_type)
8265 		*module_type = hdev->hw.mac.module_type;
8266 }
8267 
8268 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8269 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
8270 {
8271 	struct hclge_vport *vport = hclge_get_vport(handle);
8272 	struct hclge_dev *hdev = vport->back;
8273 	struct phy_device *phydev = hdev->hw.mac.phydev;
8274 	int mdix_ctrl, mdix, is_resolved;
8275 	unsigned int retval;
8276 
8277 	if (!phydev) {
8278 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8279 		*tp_mdix = ETH_TP_MDI_INVALID;
8280 		return;
8281 	}
8282 
8283 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8284 
8285 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8286 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8287 				    HCLGE_PHY_MDIX_CTRL_S);
8288 
8289 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8290 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8291 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8292 
8293 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8294 
8295 	switch (mdix_ctrl) {
8296 	case 0x0:
8297 		*tp_mdix_ctrl = ETH_TP_MDI;
8298 		break;
8299 	case 0x1:
8300 		*tp_mdix_ctrl = ETH_TP_MDI_X;
8301 		break;
8302 	case 0x3:
8303 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8304 		break;
8305 	default:
8306 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8307 		break;
8308 	}
8309 
8310 	if (!is_resolved)
8311 		*tp_mdix = ETH_TP_MDI_INVALID;
8312 	else if (mdix)
8313 		*tp_mdix = ETH_TP_MDI_X;
8314 	else
8315 		*tp_mdix = ETH_TP_MDI;
8316 }
8317 
8318 static void hclge_info_show(struct hclge_dev *hdev)
8319 {
8320 	struct device *dev = &hdev->pdev->dev;
8321 
8322 	dev_info(dev, "PF info begin:\n");
8323 
8324 	dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8325 	dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8326 	dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8327 	dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8328 	dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
8329 	dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8330 	dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8331 	dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8332 	dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8333 	dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8334 	dev_info(dev, "This is %s PF\n",
8335 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8336 	dev_info(dev, "DCB %s\n",
8337 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8338 	dev_info(dev, "MQPRIO %s\n",
8339 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8340 
8341 	dev_info(dev, "PF info end.\n");
8342 }
8343 
8344 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8345 					  struct hclge_vport *vport)
8346 {
8347 	struct hnae3_client *client = vport->nic.client;
8348 	struct hclge_dev *hdev = ae_dev->priv;
8349 	int rst_cnt;
8350 	int ret;
8351 
8352 	rst_cnt = hdev->rst_stats.reset_cnt;
8353 	ret = client->ops->init_instance(&vport->nic);
8354 	if (ret)
8355 		return ret;
8356 
8357 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8358 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8359 	    rst_cnt != hdev->rst_stats.reset_cnt) {
8360 		ret = -EBUSY;
8361 		goto init_nic_err;
8362 	}
8363 
8364 	/* Enable nic hw error interrupts */
8365 	ret = hclge_config_nic_hw_error(hdev, true);
8366 	if (ret) {
8367 		dev_err(&ae_dev->pdev->dev,
8368 			"fail(%d) to enable hw error interrupts\n", ret);
8369 		goto init_nic_err;
8370 	}
8371 
8372 	hnae3_set_client_init_flag(client, ae_dev, 1);
8373 
8374 	if (netif_msg_drv(&hdev->vport->nic))
8375 		hclge_info_show(hdev);
8376 
8377 	return ret;
8378 
8379 init_nic_err:
8380 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8381 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8382 		msleep(HCLGE_WAIT_RESET_DONE);
8383 
8384 	client->ops->uninit_instance(&vport->nic, 0);
8385 
8386 	return ret;
8387 }
8388 
8389 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8390 					   struct hclge_vport *vport)
8391 {
8392 	struct hnae3_client *client = vport->roce.client;
8393 	struct hclge_dev *hdev = ae_dev->priv;
8394 	int rst_cnt;
8395 	int ret;
8396 
8397 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8398 	    !hdev->nic_client)
8399 		return 0;
8400 
8401 	client = hdev->roce_client;
8402 	ret = hclge_init_roce_base_info(vport);
8403 	if (ret)
8404 		return ret;
8405 
8406 	rst_cnt = hdev->rst_stats.reset_cnt;
8407 	ret = client->ops->init_instance(&vport->roce);
8408 	if (ret)
8409 		return ret;
8410 
8411 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8412 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8413 	    rst_cnt != hdev->rst_stats.reset_cnt) {
8414 		ret = -EBUSY;
8415 		goto init_roce_err;
8416 	}
8417 
8418 	/* Enable roce ras interrupts */
8419 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
8420 	if (ret) {
8421 		dev_err(&ae_dev->pdev->dev,
8422 			"fail(%d) to enable roce ras interrupts\n", ret);
8423 		goto init_roce_err;
8424 	}
8425 
8426 	hnae3_set_client_init_flag(client, ae_dev, 1);
8427 
8428 	return 0;
8429 
8430 init_roce_err:
8431 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8432 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8433 		msleep(HCLGE_WAIT_RESET_DONE);
8434 
8435 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
8436 
8437 	return ret;
8438 }
8439 
8440 static int hclge_init_client_instance(struct hnae3_client *client,
8441 				      struct hnae3_ae_dev *ae_dev)
8442 {
8443 	struct hclge_dev *hdev = ae_dev->priv;
8444 	struct hclge_vport *vport;
8445 	int i, ret;
8446 
8447 	for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
8448 		vport = &hdev->vport[i];
8449 
8450 		switch (client->type) {
8451 		case HNAE3_CLIENT_KNIC:
8452 
8453 			hdev->nic_client = client;
8454 			vport->nic.client = client;
8455 			ret = hclge_init_nic_client_instance(ae_dev, vport);
8456 			if (ret)
8457 				goto clear_nic;
8458 
8459 			ret = hclge_init_roce_client_instance(ae_dev, vport);
8460 			if (ret)
8461 				goto clear_roce;
8462 
8463 			break;
8464 		case HNAE3_CLIENT_ROCE:
8465 			if (hnae3_dev_roce_supported(hdev)) {
8466 				hdev->roce_client = client;
8467 				vport->roce.client = client;
8468 			}
8469 
8470 			ret = hclge_init_roce_client_instance(ae_dev, vport);
8471 			if (ret)
8472 				goto clear_roce;
8473 
8474 			break;
8475 		default:
8476 			return -EINVAL;
8477 		}
8478 	}
8479 
8480 	return ret;
8481 
8482 clear_nic:
8483 	hdev->nic_client = NULL;
8484 	vport->nic.client = NULL;
8485 	return ret;
8486 clear_roce:
8487 	hdev->roce_client = NULL;
8488 	vport->roce.client = NULL;
8489 	return ret;
8490 }
8491 
8492 static void hclge_uninit_client_instance(struct hnae3_client *client,
8493 					 struct hnae3_ae_dev *ae_dev)
8494 {
8495 	struct hclge_dev *hdev = ae_dev->priv;
8496 	struct hclge_vport *vport;
8497 	int i;
8498 
8499 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8500 		vport = &hdev->vport[i];
8501 		if (hdev->roce_client) {
8502 			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8503 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8504 				msleep(HCLGE_WAIT_RESET_DONE);
8505 
8506 			hdev->roce_client->ops->uninit_instance(&vport->roce,
8507 								0);
8508 			hdev->roce_client = NULL;
8509 			vport->roce.client = NULL;
8510 		}
8511 		if (client->type == HNAE3_CLIENT_ROCE)
8512 			return;
8513 		if (hdev->nic_client && client->ops->uninit_instance) {
8514 			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8515 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8516 				msleep(HCLGE_WAIT_RESET_DONE);
8517 
8518 			client->ops->uninit_instance(&vport->nic, 0);
8519 			hdev->nic_client = NULL;
8520 			vport->nic.client = NULL;
8521 		}
8522 	}
8523 }
8524 
8525 static int hclge_pci_init(struct hclge_dev *hdev)
8526 {
8527 	struct pci_dev *pdev = hdev->pdev;
8528 	struct hclge_hw *hw;
8529 	int ret;
8530 
8531 	ret = pci_enable_device(pdev);
8532 	if (ret) {
8533 		dev_err(&pdev->dev, "failed to enable PCI device\n");
8534 		return ret;
8535 	}
8536 
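	/* prefer 64-bit DMA and fall back to a 32-bit mask if the platform
	 * cannot provide it
	 */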
8537 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8538 	if (ret) {
8539 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8540 		if (ret) {
8541 			dev_err(&pdev->dev,
8542 				"can't set consistent PCI DMA");
8543 			goto err_disable_device;
8544 		}
8545 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8546 	}
8547 
8548 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8549 	if (ret) {
8550 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8551 		goto err_disable_device;
8552 	}
8553 
8554 	pci_set_master(pdev);
8555 	hw = &hdev->hw;
8556 	hw->io_base = pcim_iomap(pdev, 2, 0);
8557 	if (!hw->io_base) {
8558 		dev_err(&pdev->dev, "Can't map configuration register space\n");
8559 		ret = -ENOMEM;
8560 		goto err_clr_master;
8561 	}
8562 
8563 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8564 
8565 	return 0;
8566 err_clr_master:
8567 	pci_clear_master(pdev);
8568 	pci_release_regions(pdev);
8569 err_disable_device:
8570 	pci_disable_device(pdev);
8571 
8572 	return ret;
8573 }
8574 
8575 static void hclge_pci_uninit(struct hclge_dev *hdev)
8576 {
8577 	struct pci_dev *pdev = hdev->pdev;
8578 
8579 	pcim_iounmap(pdev, hdev->hw.io_base);
8580 	pci_free_irq_vectors(pdev);
8581 	pci_clear_master(pdev);
8582 	pci_release_mem_regions(pdev);
8583 	pci_disable_device(pdev);
8584 }
8585 
8586 static void hclge_state_init(struct hclge_dev *hdev)
8587 {
8588 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8589 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
8590 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8591 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8592 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8593 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8594 }
8595 
8596 static void hclge_state_uninit(struct hclge_dev *hdev)
8597 {
8598 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
8599 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
8600 
8601 	if (hdev->service_timer.function)
8602 		del_timer_sync(&hdev->service_timer);
8603 	if (hdev->reset_timer.function)
8604 		del_timer_sync(&hdev->reset_timer);
8605 	if (hdev->service_task.func)
8606 		cancel_work_sync(&hdev->service_task);
8607 	if (hdev->rst_service_task.func)
8608 		cancel_work_sync(&hdev->rst_service_task);
8609 	if (hdev->mbx_service_task.func)
8610 		cancel_work_sync(&hdev->mbx_service_task);
8611 }
8612 
8613 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8614 {
8615 #define HCLGE_FLR_WAIT_MS	100
8616 #define HCLGE_FLR_WAIT_CNT	50
8617 	struct hclge_dev *hdev = ae_dev->priv;
8618 	int cnt = 0;
8619 
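	/* Trigger an FLR reset and wait up to
	 * HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS milliseconds for the stack
	 * to be brought down.
	 */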
8620 	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8621 	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8622 	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8623 	hclge_reset_event(hdev->pdev, NULL);
8624 
8625 	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8626 	       cnt++ < HCLGE_FLR_WAIT_CNT)
8627 		msleep(HCLGE_FLR_WAIT_MS);
8628 
8629 	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8630 		dev_err(&hdev->pdev->dev,
8631 			"flr wait down timeout: %d\n", cnt);
8632 }
8633 
8634 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8635 {
8636 	struct hclge_dev *hdev = ae_dev->priv;
8637 
8638 	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8639 }
8640 
8641 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
8642 {
8643 	u16 i;
8644 
8645 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8646 		struct hclge_vport *vport = &hdev->vport[i];
8647 		int ret;
8648 
8649 		/* Send cmd to clear the VF's FUNC_RST_ING */
8650 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
8651 		if (ret)
8652 			dev_warn(&hdev->pdev->dev,
8653 				 "clear vf(%d) rst failed %d!\n",
8654 				 vport->vport_id, ret);
8655 	}
8656 }
8657 
8658 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8659 {
8660 	struct pci_dev *pdev = ae_dev->pdev;
8661 	struct hclge_dev *hdev;
8662 	int ret;
8663 
8664 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8665 	if (!hdev) {
8666 		ret = -ENOMEM;
8667 		goto out;
8668 	}
8669 
8670 	hdev->pdev = pdev;
8671 	hdev->ae_dev = ae_dev;
8672 	hdev->reset_type = HNAE3_NONE_RESET;
8673 	hdev->reset_level = HNAE3_FUNC_RESET;
8674 	ae_dev->priv = hdev;
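	/* default max packet size: a standard Ethernet frame plus FCS and
	 * room for two vlan tags
	 */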
8675 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8676 
8677 	mutex_init(&hdev->vport_lock);
8678 	mutex_init(&hdev->vport_cfg_mutex);
8679 	spin_lock_init(&hdev->fd_rule_lock);
8680 
8681 	ret = hclge_pci_init(hdev);
8682 	if (ret) {
8683 		dev_err(&pdev->dev, "PCI init failed\n");
8684 		goto out;
8685 	}
8686 
8687 	/* Initialize the firmware command queue */
8688 	ret = hclge_cmd_queue_init(hdev);
8689 	if (ret) {
8690 		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8691 		goto err_pci_uninit;
8692 	}
8693 
8694 	/* Initialize the firmware command interface */
8695 	ret = hclge_cmd_init(hdev);
8696 	if (ret)
8697 		goto err_cmd_uninit;
8698 
8699 	ret = hclge_get_cap(hdev);
8700 	if (ret) {
8701 		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8702 			ret);
8703 		goto err_cmd_uninit;
8704 	}
8705 
8706 	ret = hclge_configure(hdev);
8707 	if (ret) {
8708 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8709 		goto err_cmd_uninit;
8710 	}
8711 
8712 	ret = hclge_init_msi(hdev);
8713 	if (ret) {
8714 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8715 		goto err_cmd_uninit;
8716 	}
8717 
8718 	ret = hclge_misc_irq_init(hdev);
8719 	if (ret) {
8720 		dev_err(&pdev->dev,
8721 			"Misc IRQ(vector0) init error, ret = %d.\n",
8722 			ret);
8723 		goto err_msi_uninit;
8724 	}
8725 
8726 	ret = hclge_alloc_tqps(hdev);
8727 	if (ret) {
8728 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8729 		goto err_msi_irq_uninit;
8730 	}
8731 
8732 	ret = hclge_alloc_vport(hdev);
8733 	if (ret) {
8734 		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8735 		goto err_msi_irq_uninit;
8736 	}
8737 
8738 	ret = hclge_map_tqp(hdev);
8739 	if (ret) {
8740 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8741 		goto err_msi_irq_uninit;
8742 	}
8743 
8744 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8745 		ret = hclge_mac_mdio_config(hdev);
8746 		if (ret) {
8747 			dev_err(&hdev->pdev->dev,
8748 				"mdio config fail ret=%d\n", ret);
8749 			goto err_msi_irq_uninit;
8750 		}
8751 	}
8752 
8753 	ret = hclge_init_umv_space(hdev);
8754 	if (ret) {
8755 		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8756 		goto err_mdiobus_unreg;
8757 	}
8758 
8759 	ret = hclge_mac_init(hdev);
8760 	if (ret) {
8761 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8762 		goto err_mdiobus_unreg;
8763 	}
8764 
8765 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8766 	if (ret) {
8767 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8768 		goto err_mdiobus_unreg;
8769 	}
8770 
8771 	ret = hclge_config_gro(hdev, true);
8772 	if (ret)
8773 		goto err_mdiobus_unreg;
8774 
8775 	ret = hclge_init_vlan_config(hdev);
8776 	if (ret) {
8777 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8778 		goto err_mdiobus_unreg;
8779 	}
8780 
8781 	ret = hclge_tm_schd_init(hdev);
8782 	if (ret) {
8783 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8784 		goto err_mdiobus_unreg;
8785 	}
8786 
8787 	hclge_rss_init_cfg(hdev);
8788 	ret = hclge_rss_init_hw(hdev);
8789 	if (ret) {
8790 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8791 		goto err_mdiobus_unreg;
8792 	}
8793 
8794 	ret = init_mgr_tbl(hdev);
8795 	if (ret) {
8796 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8797 		goto err_mdiobus_unreg;
8798 	}
8799 
8800 	ret = hclge_init_fd_config(hdev);
8801 	if (ret) {
8802 		dev_err(&pdev->dev,
8803 			"fd table init fail, ret=%d\n", ret);
8804 		goto err_mdiobus_unreg;
8805 	}
8806 
8807 	INIT_KFIFO(hdev->mac_tnl_log);
8808 
8809 	hclge_dcb_ops_set(hdev);
8810 
8811 	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8812 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8813 	INIT_WORK(&hdev->service_task, hclge_service_task);
8814 	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8815 	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8816 
8817 	hclge_clear_all_event_cause(hdev);
8818 	hclge_clear_resetting_state(hdev);
8819 
8820 	/* Log and clear the hw errors that have already occurred */
8821 	hclge_handle_all_hns_hw_errors(ae_dev);
8822 
8823 	/* Request a delayed reset for error recovery: an immediate global
8824 	 * reset on this PF could disturb pending initialization of other PFs.
8825 	 */
8826 	if (ae_dev->hw_err_reset_req) {
8827 		enum hnae3_reset_type reset_level;
8828 
8829 		reset_level = hclge_get_reset_level(ae_dev,
8830 						    &ae_dev->hw_err_reset_req);
8831 		hclge_set_def_reset_request(ae_dev, reset_level);
8832 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
8833 	}
8834 
8835 	/* Enable MISC vector(vector0) */
8836 	hclge_enable_vector(&hdev->misc_vector, true);
8837 
8838 	hclge_state_init(hdev);
8839 	hdev->last_reset_time = jiffies;
8840 
8841 	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8842 	return 0;
8843 
8844 err_mdiobus_unreg:
8845 	if (hdev->hw.mac.phydev)
8846 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
8847 err_msi_irq_uninit:
8848 	hclge_misc_irq_uninit(hdev);
8849 err_msi_uninit:
8850 	pci_free_irq_vectors(pdev);
8851 err_cmd_uninit:
8852 	hclge_cmd_uninit(hdev);
8853 err_pci_uninit:
8854 	pcim_iounmap(pdev, hdev->hw.io_base);
8855 	pci_clear_master(pdev);
8856 	pci_release_regions(pdev);
8857 	pci_disable_device(pdev);
8858 out:
8859 	return ret;
8860 }
8861 
8862 static void hclge_stats_clear(struct hclge_dev *hdev)
8863 {
8864 	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8865 }
8866 
8867 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8868 {
8869 	struct hclge_vport *vport = hdev->vport;
8870 	int i;
8871 
8872 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8873 		hclge_vport_stop(vport);
8874 		vport++;
8875 	}
8876 }
8877 
8878 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8879 {
8880 	struct hclge_dev *hdev = ae_dev->priv;
8881 	struct pci_dev *pdev = ae_dev->pdev;
8882 	int ret;
8883 
8884 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
8885 
8886 	hclge_stats_clear(hdev);
8887 	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8888 	memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
8889 
8890 	ret = hclge_cmd_init(hdev);
8891 	if (ret) {
8892 		dev_err(&pdev->dev, "Cmd queue init failed\n");
8893 		return ret;
8894 	}
8895 
8896 	ret = hclge_map_tqp(hdev);
8897 	if (ret) {
8898 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8899 		return ret;
8900 	}
8901 
8902 	hclge_reset_umv_space(hdev);
8903 
8904 	ret = hclge_mac_init(hdev);
8905 	if (ret) {
8906 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8907 		return ret;
8908 	}
8909 
8910 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8911 	if (ret) {
8912 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8913 		return ret;
8914 	}
8915 
8916 	ret = hclge_config_gro(hdev, true);
8917 	if (ret)
8918 		return ret;
8919 
8920 	ret = hclge_init_vlan_config(hdev);
8921 	if (ret) {
8922 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8923 		return ret;
8924 	}
8925 
8926 	ret = hclge_tm_init_hw(hdev, true);
8927 	if (ret) {
8928 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8929 		return ret;
8930 	}
8931 
8932 	ret = hclge_rss_init_hw(hdev);
8933 	if (ret) {
8934 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8935 		return ret;
8936 	}
8937 
8938 	ret = hclge_init_fd_config(hdev);
8939 	if (ret) {
8940 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
8941 		return ret;
8942 	}
8943 
8944 	/* Re-enable the hw error interrupts because
8945 	 * the interrupts get disabled on global reset.
8946 	 */
8947 	ret = hclge_config_nic_hw_error(hdev, true);
8948 	if (ret) {
8949 		dev_err(&pdev->dev,
8950 			"fail(%d) to re-enable NIC hw error interrupts\n",
8951 			ret);
8952 		return ret;
8953 	}
8954 
8955 	if (hdev->roce_client) {
8956 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
8957 		if (ret) {
8958 			dev_err(&pdev->dev,
8959 				"fail(%d) to re-enable roce ras interrupts\n",
8960 				ret);
8961 			return ret;
8962 		}
8963 	}
8964 
8965 	hclge_reset_vport_state(hdev);
8966 
8967 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8968 		 HCLGE_DRIVER_NAME);
8969 
8970 	return 0;
8971 }
8972 
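/* Tear-down path for the PF, roughly the reverse of initialization: stop the
 * state machine and service tasks, unregister the MDIO bus, mask the misc
 * vector and hardware error interrupts, then release the command queue, IRQ
 * vectors and PCI resources.
 */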
8973 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8974 {
8975 	struct hclge_dev *hdev = ae_dev->priv;
8976 	struct hclge_mac *mac = &hdev->hw.mac;
8977 
8978 	hclge_state_uninit(hdev);
8979 
8980 	if (mac->phydev)
8981 		mdiobus_unregister(mac->mdio_bus);
8982 
8983 	hclge_uninit_umv_space(hdev);
8984 
8985 	/* Disable MISC vector (vector0) */
8986 	hclge_enable_vector(&hdev->misc_vector, false);
8987 	synchronize_irq(hdev->misc_vector.vector_irq);
8988 
8989 	/* Disable all hw interrupts */
8990 	hclge_config_mac_tnl_int(hdev, false);
8991 	hclge_config_nic_hw_error(hdev, false);
8992 	hclge_config_rocee_ras_interrupt(hdev, false);
8993 
8994 	hclge_cmd_uninit(hdev);
8995 	hclge_misc_irq_uninit(hdev);
8996 	hclge_pci_uninit(hdev);
8997 	mutex_destroy(&hdev->vport_lock);
8998 	hclge_uninit_vport_mac_table(hdev);
8999 	hclge_uninit_vport_vlan_table(hdev);
9000 	mutex_destroy(&hdev->vport_cfg_mutex);
9001 	ae_dev->priv = NULL;
9002 }
9003 
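/* Upper bound for the number of combined channels a user may request:
 * limited both by the device RSS capability and by the TQPs allocated to
 * this vport divided across its TCs.
 */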
9004 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9005 {
9006 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9007 	struct hclge_vport *vport = hclge_get_vport(handle);
9008 	struct hclge_dev *hdev = vport->back;
9009 
9010 	return min_t(u32, hdev->rss_size_max,
9011 		     vport->alloc_tqps / kinfo->num_tc);
9012 }
9013 
9014 static void hclge_get_channels(struct hnae3_handle *handle,
9015 			       struct ethtool_channels *ch)
9016 {
9017 	ch->max_combined = hclge_get_max_channels(handle);
9018 	ch->other_count = 1;
9019 	ch->max_other = 1;
9020 	ch->combined_count = handle->kinfo.rss_size;
9021 }
9022 
9023 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9024 					u16 *alloc_tqps, u16 *max_rss_size)
9025 {
9026 	struct hclge_vport *vport = hclge_get_vport(handle);
9027 	struct hclge_dev *hdev = vport->back;
9028 
9029 	*alloc_tqps = vport->alloc_tqps;
9030 	*max_rss_size = hdev->rss_size_max;
9031 }
9032 
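/* ethtool -L handler: record the requested queue count, let the TM code
 * recompute the vport/TQP mapping, reprogram the per-TC RSS mode and,
 * unless the user has installed a custom indirection table, rebuild the
 * default round-robin indirection table for the new rss_size.
 */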
9033 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9034 			      bool rxfh_configured)
9035 {
9036 	struct hclge_vport *vport = hclge_get_vport(handle);
9037 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9038 	struct hclge_dev *hdev = vport->back;
9039 	int cur_rss_size = kinfo->rss_size;
9040 	int cur_tqps = kinfo->num_tqps;
9041 	u16 tc_offset[HCLGE_MAX_TC_NUM];
9042 	u16 tc_valid[HCLGE_MAX_TC_NUM];
9043 	u16 tc_size[HCLGE_MAX_TC_NUM];
9044 	u16 roundup_size;
9045 	u32 *rss_indir;
9046 	unsigned int i;
9047 	int ret;
9048 
9049 	kinfo->req_rss_size = new_tqps_num;
9050 
9051 	ret = hclge_tm_vport_map_update(hdev);
9052 	if (ret) {
9053 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
9054 		return ret;
9055 	}
9056 
9057 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
9058 	roundup_size = ilog2(roundup_size);
9059 	/* Set the RSS TC mode according to the new RSS size */
9060 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
9061 		tc_valid[i] = 0;
9062 
9063 		if (!(hdev->hw_tc_map & BIT(i)))
9064 			continue;
9065 
9066 		tc_valid[i] = 1;
9067 		tc_size[i] = roundup_size;
9068 		tc_offset[i] = kinfo->rss_size * i;
9069 	}
9070 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
9071 	if (ret)
9072 		return ret;
9073 
9074 	/* RSS indirection table has been configured by user */
9075 	if (rxfh_configured)
9076 		goto out;
9077 
9078 	/* Reinitialize the RSS indirection table according to the new RSS size */
9079 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
9080 	if (!rss_indir)
9081 		return -ENOMEM;
9082 
9083 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
9084 		rss_indir[i] = i % kinfo->rss_size;
9085 
9086 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
9087 	if (ret)
9088 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
9089 			ret);
9090 
9091 	kfree(rss_indir);
9092 
9093 out:
9094 	if (!ret)
9095 		dev_info(&hdev->pdev->dev,
9096 			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
9097 			 cur_rss_size, kinfo->rss_size,
9098 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
9099 
9100 	return ret;
9101 }
9102 
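/* Ask the firmware how many 32-bit and 64-bit registers a register dump
 * will return; used below to size and fill the ethtool register dump.
 */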
9103 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
9104 			      u32 *regs_num_64_bit)
9105 {
9106 	struct hclge_desc desc;
9107 	u32 total_num;
9108 	int ret;
9109 
9110 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
9111 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9112 	if (ret) {
9113 		dev_err(&hdev->pdev->dev,
9114 			"Query register number cmd failed, ret = %d.\n", ret);
9115 		return ret;
9116 	}
9117 
9118 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
9119 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
9120 
9121 	total_num = *regs_num_32_bit + *regs_num_64_bit;
9122 	if (!total_num)
9123 		return -EINVAL;
9124 
9125 	return 0;
9126 }
9127 
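/* Read @regs_num 32-bit register values from firmware into @data.  Each
 * descriptor carries HCLGE_32_BIT_REG_RTN_DATANUM 32-bit words, but in the
 * first descriptor HCLGE_32_BIT_DESC_NODATA_LEN of them are taken up by the
 * command header, so the first iteration copies fewer values; continuation
 * descriptors are consumed in full, header area included.
 */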
9128 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9129 				 void *data)
9130 {
9131 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
9132 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
9133 
9134 	struct hclge_desc *desc;
9135 	u32 *reg_val = data;
9136 	__le32 *desc_data;
9137 	int nodata_num;
9138 	int cmd_num;
9139 	int i, k, n;
9140 	int ret;
9141 
9142 	if (regs_num == 0)
9143 		return 0;
9144 
9145 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
9146 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
9147 			       HCLGE_32_BIT_REG_RTN_DATANUM);
9148 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9149 	if (!desc)
9150 		return -ENOMEM;
9151 
9152 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
9153 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9154 	if (ret) {
9155 		dev_err(&hdev->pdev->dev,
9156 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
9157 		kfree(desc);
9158 		return ret;
9159 	}
9160 
9161 	for (i = 0; i < cmd_num; i++) {
9162 		if (i == 0) {
9163 			desc_data = (__le32 *)(&desc[i].data[0]);
9164 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
9165 		} else {
9166 			desc_data = (__le32 *)(&desc[i]);
9167 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
9168 		}
9169 		for (k = 0; k < n; k++) {
9170 			*reg_val++ = le32_to_cpu(*desc_data++);
9171 
9172 			regs_num--;
9173 			if (!regs_num)
9174 				break;
9175 		}
9176 	}
9177 
9178 	kfree(desc);
9179 	return 0;
9180 }
9181 
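/* 64-bit counterpart of hclge_get_32_bit_regs(): same descriptor layout,
 * with HCLGE_64_BIT_REG_RTN_DATANUM 64-bit words per descriptor and one
 * header word (HCLGE_64_BIT_DESC_NODATA_LEN) reserved in the first one.
 */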
9182 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9183 				 void *data)
9184 {
9185 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
9186 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
9187 
9188 	struct hclge_desc *desc;
9189 	u64 *reg_val = data;
9190 	__le64 *desc_data;
9191 	int nodata_len;
9192 	int cmd_num;
9193 	int i, k, n;
9194 	int ret;
9195 
9196 	if (regs_num == 0)
9197 		return 0;
9198 
9199 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
9200 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
9201 			       HCLGE_64_BIT_REG_RTN_DATANUM);
9202 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9203 	if (!desc)
9204 		return -ENOMEM;
9205 
9206 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
9207 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9208 	if (ret) {
9209 		dev_err(&hdev->pdev->dev,
9210 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
9211 		kfree(desc);
9212 		return ret;
9213 	}
9214 
9215 	for (i = 0; i < cmd_num; i++) {
9216 		if (i == 0) {
9217 			desc_data = (__le64 *)(&desc[i].data[0]);
9218 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
9219 		} else {
9220 			desc_data = (__le64 *)(&desc[i]);
9221 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
9222 		}
9223 		for (k = 0; k < n; k++) {
9224 			*reg_val++ = le64_to_cpu(*desc_data++);
9225 
9226 			regs_num--;
9227 			if (!regs_num)
9228 				break;
9229 		}
9230 	}
9231 
9232 	kfree(desc);
9233 	return 0;
9234 }
9235 
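/* Layout of the ethtool register dump: registers are emitted in lines of
 * REG_NUM_PER_LINE 32-bit words, and each register group is padded out to a
 * line boundary with SEPARATOR_VALUE markers (a full separator line when the
 * group is already aligned), which is why hclge_get_regs_len() reserves one
 * extra line per group.
 */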
9236 #define MAX_SEPARATE_NUM	4
9237 #define SEPARATOR_VALUE		0xFFFFFFFF
9238 #define REG_NUM_PER_LINE	4
9239 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
9240 
9241 static int hclge_get_regs_len(struct hnae3_handle *handle)
9242 {
9243 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
9244 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9245 	struct hclge_vport *vport = hclge_get_vport(handle);
9246 	struct hclge_dev *hdev = vport->back;
9247 	u32 regs_num_32_bit, regs_num_64_bit;
9248 	int ret;
9249 
9250 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9251 	if (ret) {
9252 		dev_err(&hdev->pdev->dev,
9253 			"Get register number failed, ret = %d.\n", ret);
9254 		return -EOPNOTSUPP;
9255 	}
9256 
9257 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
9258 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
9259 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
9260 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
9261 
9262 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9263 		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
9264 		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
9265 }
9266 
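/* Fill the ethtool register dump: first the per-PF registers read directly
 * from the PCIe register space (command queue, common, per-ring and per-TQP
 * interrupt registers), then the 32-bit and 64-bit register blocks queried
 * from firmware.
 */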
9267 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9268 			   void *data)
9269 {
9270 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9271 	struct hclge_vport *vport = hclge_get_vport(handle);
9272 	struct hclge_dev *hdev = vport->back;
9273 	u32 regs_num_32_bit, regs_num_64_bit;
9274 	int i, j, reg_um, separator_num;
9275 	u32 *reg = data;
9276 	int ret;
9277 
9278 	*version = hdev->fw_version;
9279 
9280 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9281 	if (ret) {
9282 		dev_err(&hdev->pdev->dev,
9283 			"Get register number failed, ret = %d.\n", ret);
9284 		return;
9285 	}
9286 
9287 	/* fetching per-PF register values from the PF PCIe register space */
9288 	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
9289 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9290 	for (i = 0; i < reg_um; i++)
9291 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9292 	for (i = 0; i < separator_num; i++)
9293 		*reg++ = SEPARATOR_VALUE;
9294 
9295 	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
9296 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9297 	for (i = 0; i < reg_um; i++)
9298 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9299 	for (i = 0; i < separator_num; i++)
9300 		*reg++ = SEPARATOR_VALUE;
9301 
9302 	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
9303 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9304 	for (j = 0; j < kinfo->num_tqps; j++) {
9305 		for (i = 0; i < reg_um; i++)
9306 			*reg++ = hclge_read_dev(&hdev->hw,
9307 						ring_reg_addr_list[i] +
9308 						0x200 * j);
9309 		for (i = 0; i < separator_num; i++)
9310 			*reg++ = SEPARATOR_VALUE;
9311 	}
9312 
9313 	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9314 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9315 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
9316 		for (i = 0; i < reg_um; i++)
9317 			*reg++ = hclge_read_dev(&hdev->hw,
9318 						tqp_intr_reg_addr_list[i] +
9319 						4 * j);
9320 		for (i = 0; i < separator_num; i++)
9321 			*reg++ = SEPARATOR_VALUE;
9322 	}
9323 
9324 	/* fetching PF common register values from firmware */
9325 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9326 	if (ret) {
9327 		dev_err(&hdev->pdev->dev,
9328 			"Get 32 bit register failed, ret = %d.\n", ret);
9329 		return;
9330 	}
9331 
9332 	reg += regs_num_32_bit;
9333 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9334 	if (ret)
9335 		dev_err(&hdev->pdev->dev,
9336 			"Get 64 bit register failed, ret = %d.\n", ret);
9337 }
9338 
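/* Program the locate LED state via the LED_STATUS_CFG command; used by the
 * ethtool identify (set_led_id) handler below.
 */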
9339 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9340 {
9341 	struct hclge_set_led_state_cmd *req;
9342 	struct hclge_desc desc;
9343 	int ret;
9344 
9345 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9346 
9347 	req = (struct hclge_set_led_state_cmd *)desc.data;
9348 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9349 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9350 
9351 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9352 	if (ret)
9353 		dev_err(&hdev->pdev->dev,
9354 			"Send set led state cmd error, ret =%d\n", ret);
9355 
9356 	return ret;
9357 }
9358 
9359 enum hclge_led_status {
9360 	HCLGE_LED_OFF,
9361 	HCLGE_LED_ON,
9362 	HCLGE_LED_NO_CHANGE = 0xFF,
9363 };
9364 
9365 static int hclge_set_led_id(struct hnae3_handle *handle,
9366 			    enum ethtool_phys_id_state status)
9367 {
9368 	struct hclge_vport *vport = hclge_get_vport(handle);
9369 	struct hclge_dev *hdev = vport->back;
9370 
9371 	switch (status) {
9372 	case ETHTOOL_ID_ACTIVE:
9373 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
9374 	case ETHTOOL_ID_INACTIVE:
9375 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
9376 	default:
9377 		return -EINVAL;
9378 	}
9379 }
9380 
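/* Copy the cached supported/advertised link-mode bitmaps out to the caller
 * (ethtool link settings).
 */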
9381 static void hclge_get_link_mode(struct hnae3_handle *handle,
9382 				unsigned long *supported,
9383 				unsigned long *advertising)
9384 {
9385 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
9386 	struct hclge_vport *vport = hclge_get_vport(handle);
9387 	struct hclge_dev *hdev = vport->back;
9388 	unsigned int idx = 0;
9389 
9390 	for (; idx < size; idx++) {
9391 		supported[idx] = hdev->hw.mac.supported[idx];
9392 		advertising[idx] = hdev->hw.mac.advertising[idx];
9393 	}
9394 }
9395 
9396 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
9397 {
9398 	struct hclge_vport *vport = hclge_get_vport(handle);
9399 	struct hclge_dev *hdev = vport->back;
9400 
9401 	return hclge_config_gro(hdev, enable);
9402 }
9403 
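/* PF-side implementation of the hnae3 AE operations; registered with the
 * hnae3 framework via ae_algo in hclge_init() below.
 */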
9404 static const struct hnae3_ae_ops hclge_ops = {
9405 	.init_ae_dev = hclge_init_ae_dev,
9406 	.uninit_ae_dev = hclge_uninit_ae_dev,
9407 	.flr_prepare = hclge_flr_prepare,
9408 	.flr_done = hclge_flr_done,
9409 	.init_client_instance = hclge_init_client_instance,
9410 	.uninit_client_instance = hclge_uninit_client_instance,
9411 	.map_ring_to_vector = hclge_map_ring_to_vector,
9412 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
9413 	.get_vector = hclge_get_vector,
9414 	.put_vector = hclge_put_vector,
9415 	.set_promisc_mode = hclge_set_promisc_mode,
9416 	.set_loopback = hclge_set_loopback,
9417 	.start = hclge_ae_start,
9418 	.stop = hclge_ae_stop,
9419 	.client_start = hclge_client_start,
9420 	.client_stop = hclge_client_stop,
9421 	.get_status = hclge_get_status,
9422 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
9423 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
9424 	.get_media_type = hclge_get_media_type,
9425 	.check_port_speed = hclge_check_port_speed,
9426 	.get_fec = hclge_get_fec,
9427 	.set_fec = hclge_set_fec,
9428 	.get_rss_key_size = hclge_get_rss_key_size,
9429 	.get_rss_indir_size = hclge_get_rss_indir_size,
9430 	.get_rss = hclge_get_rss,
9431 	.set_rss = hclge_set_rss,
9432 	.set_rss_tuple = hclge_set_rss_tuple,
9433 	.get_rss_tuple = hclge_get_rss_tuple,
9434 	.get_tc_size = hclge_get_tc_size,
9435 	.get_mac_addr = hclge_get_mac_addr,
9436 	.set_mac_addr = hclge_set_mac_addr,
9437 	.do_ioctl = hclge_do_ioctl,
9438 	.add_uc_addr = hclge_add_uc_addr,
9439 	.rm_uc_addr = hclge_rm_uc_addr,
9440 	.add_mc_addr = hclge_add_mc_addr,
9441 	.rm_mc_addr = hclge_rm_mc_addr,
9442 	.set_autoneg = hclge_set_autoneg,
9443 	.get_autoneg = hclge_get_autoneg,
9444 	.restart_autoneg = hclge_restart_autoneg,
9445 	.halt_autoneg = hclge_halt_autoneg,
9446 	.get_pauseparam = hclge_get_pauseparam,
9447 	.set_pauseparam = hclge_set_pauseparam,
9448 	.set_mtu = hclge_set_mtu,
9449 	.reset_queue = hclge_reset_tqp,
9450 	.get_stats = hclge_get_stats,
9451 	.get_mac_pause_stats = hclge_get_mac_pause_stat,
9452 	.update_stats = hclge_update_stats,
9453 	.get_strings = hclge_get_strings,
9454 	.get_sset_count = hclge_get_sset_count,
9455 	.get_fw_version = hclge_get_fw_version,
9456 	.get_mdix_mode = hclge_get_mdix_mode,
9457 	.enable_vlan_filter = hclge_enable_vlan_filter,
9458 	.set_vlan_filter = hclge_set_vlan_filter,
9459 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
9460 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
9461 	.reset_event = hclge_reset_event,
9462 	.get_reset_level = hclge_get_reset_level,
9463 	.set_default_reset_request = hclge_set_def_reset_request,
9464 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
9465 	.set_channels = hclge_set_channels,
9466 	.get_channels = hclge_get_channels,
9467 	.get_regs_len = hclge_get_regs_len,
9468 	.get_regs = hclge_get_regs,
9469 	.set_led_id = hclge_set_led_id,
9470 	.get_link_mode = hclge_get_link_mode,
9471 	.add_fd_entry = hclge_add_fd_entry,
9472 	.del_fd_entry = hclge_del_fd_entry,
9473 	.del_all_fd_entries = hclge_del_all_fd_entries,
9474 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
9475 	.get_fd_rule_info = hclge_get_fd_rule_info,
9476 	.get_fd_all_rules = hclge_get_all_rules,
9477 	.restore_fd_rules = hclge_restore_fd_entries,
9478 	.enable_fd = hclge_enable_fd,
9479 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
9480 	.dbg_run_cmd = hclge_dbg_run_cmd,
9481 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
9482 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
9483 	.ae_dev_resetting = hclge_ae_dev_resetting,
9484 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
9485 	.set_gro_en = hclge_gro_en,
9486 	.get_global_queue_id = hclge_covert_handle_qid_global,
9487 	.set_timer_task = hclge_set_timer_task,
9488 	.mac_connect_phy = hclge_mac_connect_phy,
9489 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
9490 	.restore_vlan_table = hclge_restore_vlan_table,
9491 };
9492 
9493 static struct hnae3_ae_algo ae_algo = {
9494 	.ops = &hclge_ops,
9495 	.pdev_id_table = ae_algo_pci_tbl,
9496 };
9497 
9498 static int hclge_init(void)
9499 {
9500 	pr_info("%s is initializing\n", HCLGE_NAME);
9501 
9502 	hnae3_register_ae_algo(&ae_algo);
9503 
9504 	return 0;
9505 }
9506 
9507 static void hclge_exit(void)
9508 {
9509 	hnae3_unregister_ae_algo(&ae_algo);
9510 }
9511 module_init(hclge_init);
9512 module_exit(hclge_exit);
9513 
9514 MODULE_LICENSE("GPL");
9515 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
9516 MODULE_DESCRIPTION("HCLGE Driver");
9517 MODULE_VERSION(HCLGE_MOD_VERSION);
9518