xref: /linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c (revision 13b25489b6f8bd73ed65f07928f7c27a481f1820)
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/irq.h>
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/netdevice.h>
13 #include <linux/pci.h>
14 #include <linux/platform_device.h>
15 #include <linux/if_vlan.h>
16 #include <linux/crash_dump.h>
17 
18 #include <net/rtnetlink.h>
19 
20 #include "hclge_cmd.h"
21 #include "hclge_dcb.h"
22 #include "hclge_main.h"
23 #include "hclge_mbx.h"
24 #include "hclge_mdio.h"
25 #include "hclge_regs.h"
26 #include "hclge_tm.h"
27 #include "hclge_err.h"
28 #include "hnae3.h"
29 #include "hclge_devlink.h"
30 #include "hclge_comm_cmd.h"
31 
32 #include "hclge_trace.h"
33 
34 #define HCLGE_NAME			"hclge"
35 
36 #define HCLGE_BUF_SIZE_UNIT	256U
37 #define HCLGE_BUF_MUL_BY	2
38 #define HCLGE_BUF_DIV_BY	2
39 #define NEED_RESERVE_TC_NUM	2
40 #define BUF_MAX_PERCENT		100
41 #define BUF_RESERVE_PERCENT	90
42 
43 #define HCLGE_RESET_MAX_FAIL_CNT	5
44 #define HCLGE_RESET_SYNC_TIME		100
45 #define HCLGE_PF_RESET_SYNC_TIME	20
46 #define HCLGE_PF_RESET_SYNC_CNT		1500
47 
48 #define HCLGE_LINK_STATUS_MS	10
49 
50 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
51 static int hclge_init_vlan_config(struct hclge_dev *hdev);
52 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
53 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
54 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
55 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
56 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
57 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
58 						   unsigned long *addr);
59 static int hclge_set_default_loopback(struct hclge_dev *hdev);
60 
61 static void hclge_sync_mac_table(struct hclge_dev *hdev);
62 static void hclge_restore_hw_table(struct hclge_dev *hdev);
63 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
64 static void hclge_sync_fd_table(struct hclge_dev *hdev);
65 static void hclge_update_fec_stats(struct hclge_dev *hdev);
66 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
67 				      int wait_cnt);
68 static int hclge_update_port_info(struct hclge_dev *hdev);
69 
70 static struct hnae3_ae_algo ae_algo;
71 
72 static struct workqueue_struct *hclge_wq;
73 
74 static const struct pci_device_id ae_algo_pci_tbl[] = {
75 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
76 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
77 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
78 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
79 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
83 	/* required last entry */
84 	{0, }
85 };
86 
87 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
88 
89 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
90 	"External Loopback test",
91 	"App      Loopback test",
92 	"Serdes   serial Loopback test",
93 	"Serdes   parallel Loopback test",
94 	"Phy      Loopback test"
95 };
96 
97 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
98 	{"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
99 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
100 	{"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
101 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
102 	{"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
103 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)},
104 	{"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
105 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)},
106 	{"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
107 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
108 	{"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
109 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
110 	{"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
111 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
112 	{"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
113 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
114 	{"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
115 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
116 	{"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
117 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
118 	{"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
119 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
120 	{"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
121 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
122 	{"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
123 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
124 	{"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
125 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
126 	{"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
127 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
128 	{"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
129 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)},
130 	{"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
131 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)},
132 	{"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
133 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)},
134 	{"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
135 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)},
136 	{"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
137 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)},
138 	{"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
139 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)},
140 	{"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
141 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)},
142 	{"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
143 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)},
144 	{"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
145 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
146 	{"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
147 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
148 	{"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
149 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
150 	{"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
151 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
152 	{"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
153 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
154 	{"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
155 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
156 	{"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
157 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
158 	{"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
159 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
160 	{"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
161 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
162 	{"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
163 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)},
164 	{"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
165 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)},
166 	{"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
167 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)},
168 	{"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
169 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)},
170 	{"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
171 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)},
172 	{"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
173 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)},
174 	{"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
175 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)},
176 	{"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
177 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)},
178 	{"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
179 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
180 	{"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
181 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
182 	{"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
183 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
184 	{"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
185 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
186 	{"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
187 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
188 	{"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
189 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
190 	{"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
191 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
192 	{"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
193 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
194 	{"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
195 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
196 	{"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
197 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
198 	{"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
199 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
200 	{"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
201 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
202 	{"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
203 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
204 	{"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
205 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
206 	{"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
207 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
208 	{"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
209 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
210 	{"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
211 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
212 	{"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
213 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
214 	{"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
215 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
216 	{"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
217 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
218 	{"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
219 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
220 	{"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
221 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
222 	{"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
223 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
224 	{"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
225 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
226 	{"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
227 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
228 	{"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
229 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
230 	{"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
231 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
232 	{"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
233 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
234 	{"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
235 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
236 	{"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
237 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
238 	{"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
239 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
240 	{"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
241 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
242 	{"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
243 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
244 	{"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
245 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
246 	{"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
247 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
248 	{"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
249 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
250 	{"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
251 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
252 	{"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
253 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
254 	{"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
255 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
256 	{"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
257 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
258 	{"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
259 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
260 	{"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
261 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
262 	{"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
263 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
264 	{"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
265 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
266 	{"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
267 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
268 	{"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
269 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
270 	{"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
271 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
272 	{"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
273 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
274 	{"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
275 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
276 	{"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
277 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
278 
279 	{"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
280 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
281 	{"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
282 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
283 	{"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
284 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
285 	{"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
286 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
287 	{"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
288 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
289 	{"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
290 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
291 	{"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
292 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
293 	{"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
294 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
295 	{"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
296 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
297 	{"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
298 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
299 	{"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
300 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
301 	{"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
302 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
303 };
304 
305 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
306 	{
307 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
308 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
309 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
310 		.i_port_bitmap = 0x1,
311 	},
312 };
313 
314 static const struct key_info meta_data_key_info[] = {
315 	{ PACKET_TYPE_ID, 6 },
316 	{ IP_FRAGEMENT, 1 },
317 	{ ROCE_TYPE, 1 },
318 	{ NEXT_KEY, 5 },
319 	{ VLAN_NUMBER, 2 },
320 	{ SRC_VPORT, 12 },
321 	{ DST_VPORT, 12 },
322 	{ TUNNEL_PACKET, 1 },
323 };
324 
325 static const struct key_info tuple_key_info[] = {
326 	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
327 	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
328 	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
329 	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
330 	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
331 	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
332 	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
333 	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
334 	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
335 	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
336 	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
337 	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
338 	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
339 	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
340 	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
341 	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
342 	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
343 	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
344 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
345 	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
346 	  offsetof(struct hclge_fd_rule, tuples.src_mac),
347 	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
348 	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
349 	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
350 	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
351 	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
352 	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
353 	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
354 	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
355 	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
356 	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
357 	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
358 	{ INNER_IP_TOS, 8, KEY_OPT_U8,
359 	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
360 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
361 	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
362 	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
363 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
364 	{ INNER_SRC_IP, 32, KEY_OPT_IP,
365 	  offsetof(struct hclge_fd_rule, tuples.src_ip),
366 	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
367 	{ INNER_DST_IP, 32, KEY_OPT_IP,
368 	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
369 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
370 	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
371 	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
372 	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
373 	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
374 	  offsetof(struct hclge_fd_rule, tuples.src_port),
375 	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
376 	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
377 	  offsetof(struct hclge_fd_rule, tuples.dst_port),
378 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
379 	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
380 	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
381 	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
382 };
383 
384 /**
385  * hclge_cmd_send - send command to command queue
386  * @hw: pointer to the hw struct
387  * @desc: prefilled descriptor for describing the command
388  * @num: the number of descriptors to be sent
389  *
390  * This is the main send routine for the command queue; it sends the
391  * descriptors to the queue, cleans the queue, etc.
392  **/
393 int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
394 {
395 	return hclge_comm_cmd_send(&hw->hw, desc, num);
396 }
397 
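/* A minimal usage sketch for hclge_cmd_send(), mirroring the pattern used
 * elsewhere in this file (see e.g. hclge_mac_query_reg_num()): set up a
 * descriptor for the opcode, send it, then check the return value before
 * reading the response data.
 *
 *	struct hclge_desc desc;
 *	int ret;
 *
 *	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
 *	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 *	if (ret)
 *		return ret;
 */
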
398 static void hclge_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
399 				 int num, bool is_special)
400 {
401 	int i;
402 
403 	trace_hclge_pf_cmd_send(hw, desc, 0, num);
404 
405 	if (!is_special) {
406 		for (i = 1; i < num; i++)
407 			trace_hclge_pf_cmd_send(hw, &desc[i], i, num);
408 	} else {
409 		for (i = 1; i < num; i++)
410 			trace_hclge_pf_special_cmd_send(hw, (__le32 *)&desc[i],
411 							i, num);
412 	}
413 }
414 
415 static void hclge_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc,
416 				int num, bool is_special)
417 {
418 	int i;
419 
420 	if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
421 		return;
422 
423 	trace_hclge_pf_cmd_get(hw, desc, 0, num);
424 
425 	if (!is_special) {
426 		for (i = 1; i < num; i++)
427 			trace_hclge_pf_cmd_get(hw, &desc[i], i, num);
428 	} else {
429 		for (i = 1; i < num; i++)
430 			trace_hclge_pf_special_cmd_get(hw, (__le32 *)&desc[i],
431 						       i, num);
432 	}
433 }
434 
435 static const struct hclge_comm_cmq_ops hclge_cmq_ops = {
436 	.trace_cmd_send = hclge_trace_cmd_send,
437 	.trace_cmd_get = hclge_trace_cmd_get,
438 };
439 
440 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
441 {
442 #define HCLGE_MAC_CMD_NUM 21
443 
444 	u64 *data = (u64 *)(&hdev->mac_stats);
445 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
446 	__le64 *desc_data;
447 	u32 data_size;
448 	int ret;
449 	u32 i;
450 
451 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
452 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
453 	if (ret) {
454 		dev_err(&hdev->pdev->dev,
455 			"Get MAC pkt stats fail, status = %d.\n", ret);
456 
457 		return ret;
458 	}
459 
460 	/* The first desc has a 64-bit header, so the data size needs to be reduced by 1 */
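	/* e.g. with HCLGE_MAC_CMD_NUM (21) descriptors of 32 bytes each
	 * (assuming the usual 8-byte header plus 24 bytes of data per
	 * hclge_desc), this works out to 21 * 32 / 8 - 1 = 83 64-bit counters.
	 */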
461 	data_size = sizeof(desc) / (sizeof(u64)) - 1;
462 
463 	desc_data = (__le64 *)(&desc[0].data[0]);
464 	for (i = 0; i < data_size; i++) {
465 		/* data memory is continuous because only the first desc has a
466 		 * header in this command
467 		 */
468 		*data += le64_to_cpu(*desc_data);
469 		data++;
470 		desc_data++;
471 	}
472 
473 	return 0;
474 }
475 
476 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
477 {
478 #define HCLGE_REG_NUM_PER_DESC		4
479 
480 	u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
481 	u64 *data = (u64 *)(&hdev->mac_stats);
482 	struct hclge_desc *desc;
483 	__le64 *desc_data;
484 	u32 data_size;
485 	u32 desc_num;
486 	int ret;
487 	u32 i;
488 
489 	/* The first desc has a 64-bit header, so it needs to be accounted for */
490 	desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;
491 
492 	/* This may be called inside atomic sections,
493 	 * so GFP_ATOMIC is more suitable here
494 	 */
495 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
496 	if (!desc)
497 		return -ENOMEM;
498 
499 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
500 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
501 	if (ret) {
502 		kfree(desc);
503 		return ret;
504 	}
505 
506 	data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);
507 
508 	desc_data = (__le64 *)(&desc[0].data[0]);
509 	for (i = 0; i < data_size; i++) {
510 		/* data memory is continuous because only the first desc has a
511 		 * header in this command
512 		 */
513 		*data += le64_to_cpu(*desc_data);
514 		data++;
515 		desc_data++;
516 	}
517 
518 	kfree(desc);
519 
520 	return 0;
521 }
522 
523 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
524 {
525 	struct hclge_desc desc;
526 	int ret;
527 
528 	/* The driver needs the total number of registers, including both
529 	 * valid and reserved ones, but old firmware on V2 devices only
530 	 * returns the number of valid registers. To stay compatible with
531 	 * these devices, the driver uses a fixed value.
532 	 */
533 	if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
534 		*reg_num = HCLGE_MAC_STATS_MAX_NUM_V1;
535 		return 0;
536 	}
537 
538 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
539 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
540 	if (ret) {
541 		dev_err(&hdev->pdev->dev,
542 			"failed to query mac statistic reg number, ret = %d\n",
543 			ret);
544 		return ret;
545 	}
546 
547 	*reg_num = le32_to_cpu(desc.data[0]);
548 	if (*reg_num == 0) {
549 		dev_err(&hdev->pdev->dev,
550 			"mac statistic reg number is invalid!\n");
551 		return -ENODATA;
552 	}
553 
554 	return 0;
555 }
556 
557 int hclge_mac_update_stats(struct hclge_dev *hdev)
558 {
559 	/* The firmware supports the new statistics acquisition method */
560 	if (hdev->ae_dev->dev_specs.mac_stats_num)
561 		return hclge_mac_update_stats_complete(hdev);
562 	else
563 		return hclge_mac_update_stats_defective(hdev);
564 }
565 
566 static int hclge_comm_get_count(struct hclge_dev *hdev,
567 				const struct hclge_comm_stats_str strs[],
568 				u32 size)
569 {
570 	int count = 0;
571 	u32 i;
572 
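	/* Only count the entries whose required stats number does not exceed
	 * what the device exposes (dev_specs.mac_stats_num), so V2-only
	 * counters are skipped on older devices.
	 */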
573 	for (i = 0; i < size; i++)
574 		if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
575 			count++;
576 
577 	return count;
578 }
579 
580 static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
581 				 const struct hclge_comm_stats_str strs[],
582 				 int size, u64 *data)
583 {
584 	u64 *buf = data;
585 	u32 i;
586 
587 	for (i = 0; i < size; i++) {
588 		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
589 			continue;
590 
591 		*buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
592 		buf++;
593 	}
594 
595 	return buf;
596 }
597 
598 static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
599 				  const struct hclge_comm_stats_str strs[],
600 				  int size, u8 *data)
601 {
602 	char *buff = (char *)data;
603 	u32 i;
604 
605 	if (stringset != ETH_SS_STATS)
606 		return buff;
607 
608 	for (i = 0; i < size; i++) {
609 		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
610 			continue;
611 
612 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
613 		buff = buff + ETH_GSTRING_LEN;
614 	}
615 
616 	return (u8 *)buff;
617 }
618 
619 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
620 {
621 	struct hnae3_handle *handle;
622 	int status;
623 
624 	handle = &hdev->vport[0].nic;
625 	if (handle->client) {
626 		status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
627 		if (status) {
628 			dev_err(&hdev->pdev->dev,
629 				"Update TQPS stats fail, status = %d.\n",
630 				status);
631 		}
632 	}
633 
634 	hclge_update_fec_stats(hdev);
635 
636 	status = hclge_mac_update_stats(hdev);
637 	if (status)
638 		dev_err(&hdev->pdev->dev,
639 			"Update MAC stats fail, status = %d.\n", status);
640 }
641 
642 static void hclge_update_stats(struct hnae3_handle *handle)
643 {
644 	struct hclge_vport *vport = hclge_get_vport(handle);
645 	struct hclge_dev *hdev = vport->back;
646 	int status;
647 
648 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
649 		return;
650 
651 	status = hclge_mac_update_stats(hdev);
652 	if (status)
653 		dev_err(&hdev->pdev->dev,
654 			"Update MAC stats fail, status = %d.\n",
655 			status);
656 
657 	status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
658 	if (status)
659 		dev_err(&hdev->pdev->dev,
660 			"Update TQPS stats fail, status = %d.\n",
661 			status);
662 
663 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
664 }
665 
666 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
667 {
668 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
669 		HNAE3_SUPPORT_PHY_LOOPBACK | \
670 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
671 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \
672 		HNAE3_SUPPORT_EXTERNAL_LOOPBACK)
673 
674 	struct hclge_vport *vport = hclge_get_vport(handle);
675 	struct hclge_dev *hdev = vport->back;
676 	int count = 0;
677 
678 	/* Loopback test support rules:
679 	 * mac: only supported in GE mode
680 	 * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
681 	 * phy: only supported when a phy device exists on the board
682 	 */
683 	if (stringset == ETH_SS_TEST) {
684 		/* clear loopback bit flags at first */
685 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
686 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
687 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
688 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
689 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
690 			count += 1;
691 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
692 		}
693 
694 		if (hdev->ae_dev->dev_specs.hilink_version !=
695 		    HCLGE_HILINK_H60) {
696 			count += 1;
697 			handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
698 		}
699 
700 		count += 1;
701 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
702 		count += 1;
703 		handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK;
704 
705 		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
706 		     hdev->hw.mac.phydev->drv->set_loopback) ||
707 		    hnae3_dev_phy_imp_supported(hdev)) {
708 			count += 1;
709 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
710 		}
711 	} else if (stringset == ETH_SS_STATS) {
712 		count = hclge_comm_get_count(hdev, g_mac_stats_string,
713 					     ARRAY_SIZE(g_mac_stats_string)) +
714 			hclge_comm_tqps_get_sset_count(handle);
715 	}
716 
717 	return count;
718 }
719 
720 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
721 			      u8 *data)
722 {
723 	struct hclge_vport *vport = hclge_get_vport(handle);
724 	struct hclge_dev *hdev = vport->back;
725 	u8 *p = (char *)data;
726 	int size;
727 
728 	if (stringset == ETH_SS_STATS) {
729 		size = ARRAY_SIZE(g_mac_stats_string);
730 		p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
731 					   size, p);
732 		p = hclge_comm_tqps_get_strings(handle, p);
733 	} else if (stringset == ETH_SS_TEST) {
734 		if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) {
735 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL],
736 			       ETH_GSTRING_LEN);
737 			p += ETH_GSTRING_LEN;
738 		}
739 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
740 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
741 			       ETH_GSTRING_LEN);
742 			p += ETH_GSTRING_LEN;
743 		}
744 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
745 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
746 			       ETH_GSTRING_LEN);
747 			p += ETH_GSTRING_LEN;
748 		}
749 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
750 			memcpy(p,
751 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
752 			       ETH_GSTRING_LEN);
753 			p += ETH_GSTRING_LEN;
754 		}
755 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
756 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
757 			       ETH_GSTRING_LEN);
758 			p += ETH_GSTRING_LEN;
759 		}
760 	}
761 }
762 
763 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
764 {
765 	struct hclge_vport *vport = hclge_get_vport(handle);
766 	struct hclge_dev *hdev = vport->back;
767 	u64 *p;
768 
769 	p = hclge_comm_get_stats(hdev, g_mac_stats_string,
770 				 ARRAY_SIZE(g_mac_stats_string), data);
771 	p = hclge_comm_tqps_get_stats(handle, p);
772 }
773 
774 static void hclge_get_mac_stat(struct hnae3_handle *handle,
775 			       struct hns3_mac_stats *mac_stats)
776 {
777 	struct hclge_vport *vport = hclge_get_vport(handle);
778 	struct hclge_dev *hdev = vport->back;
779 
780 	hclge_update_stats(handle);
781 
782 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
783 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
784 }
785 
786 static int hclge_parse_func_status(struct hclge_dev *hdev,
787 				   struct hclge_func_status_cmd *status)
788 {
789 #define HCLGE_MAC_ID_MASK	0xF
790 
791 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
792 		return -EINVAL;
793 
794 	/* Record whether this pf is the main pf */
795 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
796 		hdev->flag |= HCLGE_FLAG_MAIN;
797 	else
798 		hdev->flag &= ~HCLGE_FLAG_MAIN;
799 
800 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
801 	return 0;
802 }
803 
804 static int hclge_query_function_status(struct hclge_dev *hdev)
805 {
806 #define HCLGE_QUERY_MAX_CNT	5
807 
808 	struct hclge_func_status_cmd *req;
809 	struct hclge_desc desc;
810 	int timeout = 0;
811 	int ret;
812 
813 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
814 	req = (struct hclge_func_status_cmd *)desc.data;
815 
816 	do {
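	/* Poll the function status up to HCLGE_QUERY_MAX_CNT times, sleeping
	 * 1-2 ms between tries, until the firmware reports a non-zero
	 * pf_state (i.e. pf reset has completed).
	 */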
817 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
818 		if (ret) {
819 			dev_err(&hdev->pdev->dev,
820 				"query function status failed %d.\n", ret);
821 			return ret;
822 		}
823 
824 		/* Check whether pf reset is done */
825 		if (req->pf_state)
826 			break;
827 		usleep_range(1000, 2000);
828 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
829 
830 	return hclge_parse_func_status(hdev, req);
831 }
832 
833 static int hclge_query_pf_resource(struct hclge_dev *hdev)
834 {
835 	struct hclge_pf_res_cmd *req;
836 	struct hclge_desc desc;
837 	int ret;
838 
839 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
840 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
841 	if (ret) {
842 		dev_err(&hdev->pdev->dev,
843 			"query pf resource failed %d.\n", ret);
844 		return ret;
845 	}
846 
847 	req = (struct hclge_pf_res_cmd *)desc.data;
848 	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
849 			 le16_to_cpu(req->ext_tqp_num);
850 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
851 
852 	if (req->tx_buf_size)
853 		hdev->tx_buf_size =
854 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
855 	else
856 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
857 
858 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
859 
860 	if (req->dv_buf_size)
861 		hdev->dv_buf_size =
862 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
863 	else
864 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
865 
866 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
867 
868 	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
869 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
870 		dev_err(&hdev->pdev->dev,
871 			"only %u msi resources available, not enough for pf(min:2).\n",
872 			hdev->num_nic_msi);
873 		return -EINVAL;
874 	}
875 
876 	if (hnae3_dev_roce_supported(hdev)) {
877 		hdev->num_roce_msi =
878 			le16_to_cpu(req->pf_intr_vector_number_roce);
879 
880 		/* The PF should have both NIC and RoCE vectors;
881 		 * NIC vectors are queued before RoCE vectors.
882 		 */
883 		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
884 	} else {
885 		hdev->num_msi = hdev->num_nic_msi;
886 	}
887 
888 	return 0;
889 }
890 
891 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
892 {
893 	switch (speed_cmd) {
894 	case HCLGE_FW_MAC_SPEED_10M:
895 		*speed = HCLGE_MAC_SPEED_10M;
896 		break;
897 	case HCLGE_FW_MAC_SPEED_100M:
898 		*speed = HCLGE_MAC_SPEED_100M;
899 		break;
900 	case HCLGE_FW_MAC_SPEED_1G:
901 		*speed = HCLGE_MAC_SPEED_1G;
902 		break;
903 	case HCLGE_FW_MAC_SPEED_10G:
904 		*speed = HCLGE_MAC_SPEED_10G;
905 		break;
906 	case HCLGE_FW_MAC_SPEED_25G:
907 		*speed = HCLGE_MAC_SPEED_25G;
908 		break;
909 	case HCLGE_FW_MAC_SPEED_40G:
910 		*speed = HCLGE_MAC_SPEED_40G;
911 		break;
912 	case HCLGE_FW_MAC_SPEED_50G:
913 		*speed = HCLGE_MAC_SPEED_50G;
914 		break;
915 	case HCLGE_FW_MAC_SPEED_100G:
916 		*speed = HCLGE_MAC_SPEED_100G;
917 		break;
918 	case HCLGE_FW_MAC_SPEED_200G:
919 		*speed = HCLGE_MAC_SPEED_200G;
920 		break;
921 	default:
922 		return -EINVAL;
923 	}
924 
925 	return 0;
926 }
927 
928 static const struct hclge_speed_bit_map speed_bit_map[] = {
929 	{HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
930 	{HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
931 	{HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
932 	{HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
933 	{HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
934 	{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
935 	{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BITS},
936 	{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BITS},
937 	{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BITS},
938 };
939 
940 static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
941 {
942 	u16 i;
943 
944 	for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
945 		if (speed == speed_bit_map[i].speed) {
946 			*speed_bit = speed_bit_map[i].speed_bit;
947 			return 0;
948 		}
949 	}
950 
951 	return -EINVAL;
952 }
953 
954 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
955 {
956 	struct hclge_vport *vport = hclge_get_vport(handle);
957 	struct hclge_dev *hdev = vport->back;
958 	u32 speed_ability = hdev->hw.mac.speed_ability;
959 	u32 speed_bit = 0;
960 	int ret;
961 
962 	ret = hclge_get_speed_bit(speed, &speed_bit);
963 	if (ret)
964 		return ret;
965 
966 	if (speed_bit & speed_ability)
967 		return 0;
968 
969 	return -EINVAL;
970 }
971 
972 static void hclge_update_fec_support(struct hclge_mac *mac)
973 {
974 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
975 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
976 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported);
977 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
978 
979 	if (mac->fec_ability & BIT(HNAE3_FEC_BASER))
980 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
981 				 mac->supported);
982 	if (mac->fec_ability & BIT(HNAE3_FEC_RS))
983 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
984 				 mac->supported);
985 	if (mac->fec_ability & BIT(HNAE3_FEC_LLRS))
986 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
987 				 mac->supported);
988 	if (mac->fec_ability & BIT(HNAE3_FEC_NONE))
989 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
990 				 mac->supported);
991 }
992 
993 static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[] = {
994 	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT},
995 	{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT},
996 	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT},
997 	{HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT},
998 	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT},
999 	{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT},
1000 	{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT},
1001 	{HCLGE_SUPPORT_200G_R4_EXT_BIT,
1002 	 ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
1003 	{HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
1004 };
1005 
1006 static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[] = {
1007 	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT},
1008 	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
1009 	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT},
1010 	{HCLGE_SUPPORT_100G_R4_BIT,
1011 	 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT},
1012 	{HCLGE_SUPPORT_100G_R2_BIT,
1013 	 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT},
1014 	{HCLGE_SUPPORT_200G_R4_EXT_BIT,
1015 	 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
1016 	{HCLGE_SUPPORT_200G_R4_BIT,
1017 	 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
1018 };
1019 
1020 static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[] = {
1021 	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT},
1022 	{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT},
1023 	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT},
1024 	{HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT},
1025 	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT},
1026 	{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT},
1027 	{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT},
1028 	{HCLGE_SUPPORT_200G_R4_EXT_BIT,
1029 	 ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
1030 	{HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
1031 };
1032 
1033 static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[] = {
1034 	{HCLGE_SUPPORT_1G_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT},
1035 	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
1036 	{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
1037 	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT},
1038 	{HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT},
1039 	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT},
1040 	{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
1041 	{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT},
1042 	{HCLGE_SUPPORT_200G_R4_EXT_BIT,
1043 	 ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
1044 	{HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
1045 };
1046 
1047 static void hclge_convert_setting_sr(u16 speed_ability,
1048 				     unsigned long *link_mode)
1049 {
1050 	int i;
1051 
1052 	for (i = 0; i < ARRAY_SIZE(hclge_sr_link_mode_bmap); i++) {
1053 		if (speed_ability & hclge_sr_link_mode_bmap[i].support_bit)
1054 			linkmode_set_bit(hclge_sr_link_mode_bmap[i].link_mode,
1055 					 link_mode);
1056 	}
1057 }
1058 
1059 static void hclge_convert_setting_lr(u16 speed_ability,
1060 				     unsigned long *link_mode)
1061 {
1062 	int i;
1063 
1064 	for (i = 0; i < ARRAY_SIZE(hclge_lr_link_mode_bmap); i++) {
1065 		if (speed_ability & hclge_lr_link_mode_bmap[i].support_bit)
1066 			linkmode_set_bit(hclge_lr_link_mode_bmap[i].link_mode,
1067 					 link_mode);
1068 	}
1069 }
1070 
1071 static void hclge_convert_setting_cr(u16 speed_ability,
1072 				     unsigned long *link_mode)
1073 {
1074 	int i;
1075 
1076 	for (i = 0; i < ARRAY_SIZE(hclge_cr_link_mode_bmap); i++) {
1077 		if (speed_ability & hclge_cr_link_mode_bmap[i].support_bit)
1078 			linkmode_set_bit(hclge_cr_link_mode_bmap[i].link_mode,
1079 					 link_mode);
1080 	}
1081 }
1082 
1083 static void hclge_convert_setting_kr(u16 speed_ability,
1084 				     unsigned long *link_mode)
1085 {
1086 	int i;
1087 
1088 	for (i = 0; i < ARRAY_SIZE(hclge_kr_link_mode_bmap); i++) {
1089 		if (speed_ability & hclge_kr_link_mode_bmap[i].support_bit)
1090 			linkmode_set_bit(hclge_kr_link_mode_bmap[i].link_mode,
1091 					 link_mode);
1092 	}
1093 }
1094 
1095 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1096 {
1097 	/* If the firmware has reported fec_ability, no need to derive it by speed */
1098 	if (mac->fec_ability)
1099 		goto out;
1100 
1101 	switch (mac->speed) {
1102 	case HCLGE_MAC_SPEED_10G:
1103 	case HCLGE_MAC_SPEED_40G:
1104 		mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) |
1105 				   BIT(HNAE3_FEC_NONE);
1106 		break;
1107 	case HCLGE_MAC_SPEED_25G:
1108 	case HCLGE_MAC_SPEED_50G:
1109 		mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1110 				   BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE);
1111 		break;
1112 	case HCLGE_MAC_SPEED_100G:
1113 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
1114 				   BIT(HNAE3_FEC_NONE);
1115 		break;
1116 	case HCLGE_MAC_SPEED_200G:
1117 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
1118 				   BIT(HNAE3_FEC_LLRS);
1119 		break;
1120 	default:
1121 		mac->fec_ability = 0;
1122 		break;
1123 	}
1124 
1125 out:
1126 	hclge_update_fec_support(mac);
1127 }
1128 
1129 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1130 					u16 speed_ability)
1131 {
1132 	struct hclge_mac *mac = &hdev->hw.mac;
1133 
1134 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1135 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1136 				 mac->supported);
1137 
1138 	hclge_convert_setting_sr(speed_ability, mac->supported);
1139 	hclge_convert_setting_lr(speed_ability, mac->supported);
1140 	hclge_convert_setting_cr(speed_ability, mac->supported);
1141 	if (hnae3_dev_fec_supported(hdev))
1142 		hclge_convert_setting_fec(mac);
1143 
1144 	if (hnae3_dev_pause_supported(hdev))
1145 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1146 
1147 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1148 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1149 }
1150 
1151 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1152 					    u16 speed_ability)
1153 {
1154 	struct hclge_mac *mac = &hdev->hw.mac;
1155 
1156 	hclge_convert_setting_kr(speed_ability, mac->supported);
1157 	if (hnae3_dev_fec_supported(hdev))
1158 		hclge_convert_setting_fec(mac);
1159 
1160 	if (hnae3_dev_pause_supported(hdev))
1161 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1162 
1163 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1164 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1165 }
1166 
1167 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1168 					 u16 speed_ability)
1169 {
1170 	unsigned long *supported = hdev->hw.mac.supported;
1171 
1172 	/* default to supporting all speeds for a GE port */
1173 	if (!speed_ability)
1174 		speed_ability = HCLGE_SUPPORT_GE;
1175 
1176 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1177 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1178 				 supported);
1179 
1180 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1181 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1182 				 supported);
1183 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1184 				 supported);
1185 	}
1186 
1187 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1188 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1189 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1190 	}
1191 
1192 	if (hnae3_dev_pause_supported(hdev)) {
1193 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1194 		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1195 	}
1196 
1197 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1198 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1199 }
1200 
1201 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1202 {
1203 	u8 media_type = hdev->hw.mac.media_type;
1204 
1205 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1206 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1207 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1208 		hclge_parse_copper_link_mode(hdev, speed_ability);
1209 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1210 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1211 }
1212 
1213 static u32 hclge_get_max_speed(u16 speed_ability)
1214 {
1215 	if (speed_ability & HCLGE_SUPPORT_200G_BITS)
1216 		return HCLGE_MAC_SPEED_200G;
1217 
1218 	if (speed_ability & HCLGE_SUPPORT_100G_BITS)
1219 		return HCLGE_MAC_SPEED_100G;
1220 
1221 	if (speed_ability & HCLGE_SUPPORT_50G_BITS)
1222 		return HCLGE_MAC_SPEED_50G;
1223 
1224 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1225 		return HCLGE_MAC_SPEED_40G;
1226 
1227 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1228 		return HCLGE_MAC_SPEED_25G;
1229 
1230 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1231 		return HCLGE_MAC_SPEED_10G;
1232 
1233 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1234 		return HCLGE_MAC_SPEED_1G;
1235 
1236 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1237 		return HCLGE_MAC_SPEED_100M;
1238 
1239 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1240 		return HCLGE_MAC_SPEED_10M;
1241 
1242 	return HCLGE_MAC_SPEED_1G;
1243 }
1244 
1245 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1246 {
1247 #define HCLGE_TX_SPARE_SIZE_UNIT		4096
1248 #define SPEED_ABILITY_EXT_SHIFT			8
1249 
1250 	struct hclge_cfg_param_cmd *req;
1251 	u64 mac_addr_tmp_high;
1252 	u16 speed_ability_ext;
1253 	u64 mac_addr_tmp;
1254 	unsigned int i;
1255 
1256 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1257 
1258 	/* get the configuration */
1259 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1260 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1261 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1262 					    HCLGE_CFG_TQP_DESC_N_M,
1263 					    HCLGE_CFG_TQP_DESC_N_S);
1264 
1265 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1266 					HCLGE_CFG_PHY_ADDR_M,
1267 					HCLGE_CFG_PHY_ADDR_S);
1268 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1269 					  HCLGE_CFG_MEDIA_TP_M,
1270 					  HCLGE_CFG_MEDIA_TP_S);
1271 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1272 					  HCLGE_CFG_RX_BUF_LEN_M,
1273 					  HCLGE_CFG_RX_BUF_LEN_S);
1274 	/* get mac_address */
1275 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1276 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1277 					    HCLGE_CFG_MAC_ADDR_H_M,
1278 					    HCLGE_CFG_MAC_ADDR_H_S);
1279 
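	/* param[2] carries the low 32 bits of the MAC address and param[3]
	 * carries the remaining high 16 bits; the (<< 31) << 1 below is
	 * equivalent to a 32-bit left shift.
	 */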
1280 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1281 
1282 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1283 					     HCLGE_CFG_DEFAULT_SPEED_M,
1284 					     HCLGE_CFG_DEFAULT_SPEED_S);
1285 	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1286 					       HCLGE_CFG_RSS_SIZE_M,
1287 					       HCLGE_CFG_RSS_SIZE_S);
1288 
1289 	for (i = 0; i < ETH_ALEN; i++)
1290 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1291 
1292 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1293 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1294 
1295 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1296 					     HCLGE_CFG_SPEED_ABILITY_M,
1297 					     HCLGE_CFG_SPEED_ABILITY_S);
1298 	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1299 					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
1300 					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
1301 	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1302 
1303 	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
1304 					       HCLGE_CFG_VLAN_FLTR_CAP_M,
1305 					       HCLGE_CFG_VLAN_FLTR_CAP_S);
1306 
1307 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1308 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1309 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1310 
1311 	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1312 					       HCLGE_CFG_PF_RSS_SIZE_M,
1313 					       HCLGE_CFG_PF_RSS_SIZE_S);
1314 
1315 	/* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power-of-2
1316 	 * exponent rather than the size itself, which is more flexible for
1317 	 * future changes and expansions (e.g. a field value of 7 means a max
1318 	 * rss size of 128). A PF field of 0 is not meaningful on its own; in
1319 	 * that case the PF uses the same max rss size as the VF, taken from
1320 	 * the HCLGE_CFG_RSS_SIZE_S field.
1321 	 */
1322 	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1323 			       1U << cfg->pf_rss_size_max :
1324 			       cfg->vf_rss_size_max;
1325 
1326 	/* The unit of the tx spare buffer size queried from configuration
1327 	 * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
1328 	 * needed here.
1329 	 */
1330 	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
1331 						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
1332 						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
1333 	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
1334 }
1335 
1336 /* hclge_get_cfg: query the static parameters from flash
1337  * @hdev: pointer to struct hclge_dev
1338  * @hcfg: the config structure to be filled
1339  */
1340 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1341 {
1342 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1343 	struct hclge_cfg_param_cmd *req;
1344 	unsigned int i;
1345 	int ret;
1346 
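	/* Build one read request per descriptor: each descriptor asks for
	 * HCLGE_CFG_RD_LEN_BYTES of the configuration, with the byte offset
	 * and the read length (in 4-byte units) packed into req->offset.
	 */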
1347 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1348 		u32 offset = 0;
1349 
1350 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1351 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1352 					   true);
1353 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1354 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1355 		/* The length is sent to hardware in units of 4 bytes */
1356 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1357 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1358 		req->offset = cpu_to_le32(offset);
1359 	}
1360 
1361 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1362 	if (ret) {
1363 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1364 		return ret;
1365 	}
1366 
1367 	hclge_parse_cfg(hcfg, desc);
1368 
1369 	return 0;
1370 }
1371 
1372 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1373 {
1374 #define HCLGE_MAX_NON_TSO_BD_NUM			8U
1375 
1376 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1377 
1378 	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1379 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1380 	ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
1381 	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1382 	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1383 	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1384 	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1385 	ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1386 	ae_dev->dev_specs.tnl_num = 0;
1387 }
1388 
1389 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1390 				  struct hclge_desc *desc)
1391 {
1392 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1393 	struct hclge_dev_specs_0_cmd *req0;
1394 	struct hclge_dev_specs_1_cmd *req1;
1395 
1396 	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1397 	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1398 
1399 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1400 	ae_dev->dev_specs.rss_ind_tbl_size =
1401 		le16_to_cpu(req0->rss_ind_tbl_size);
1402 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1403 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1404 	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1405 	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1406 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1407 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1408 	ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
1409 	ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
1410 	ae_dev->dev_specs.tnl_num = req1->tnl_num;
1411 	ae_dev->dev_specs.hilink_version = req1->hilink_version;
1412 }
1413 
1414 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1415 {
1416 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1417 
1418 	if (!dev_specs->max_non_tso_bd_num)
1419 		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1420 	if (!dev_specs->rss_ind_tbl_size)
1421 		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1422 	if (!dev_specs->rss_key_size)
1423 		dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
1424 	if (!dev_specs->max_tm_rate)
1425 		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1426 	if (!dev_specs->max_qset_num)
1427 		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1428 	if (!dev_specs->max_int_gl)
1429 		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1430 	if (!dev_specs->max_frm_size)
1431 		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1432 	if (!dev_specs->umv_size)
1433 		dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1434 }
1435 
1436 static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
1437 {
1438 	u32 reg_num = 0;
1439 	int ret;
1440 
1441 	ret = hclge_mac_query_reg_num(hdev, &reg_num);
1442 	if (ret && ret != -EOPNOTSUPP)
1443 		return ret;
1444 
1445 	hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
1446 	return 0;
1447 }
1448 
1449 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1450 {
1451 	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1452 	int ret;
1453 	int i;
1454 
1455 	ret = hclge_query_mac_stats_num(hdev);
1456 	if (ret)
1457 		return ret;
1458 
1459 	/* set default specifications as devices lower than version V3 do not
1460 	 * support querying specifications from firmware.
1461 	 */
1462 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1463 		hclge_set_default_dev_specs(hdev);
1464 		return 0;
1465 	}
1466 
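	/* Chain the descriptors: every descriptor except the last one sets
	 * the NEXT flag to indicate that more descriptors follow in this
	 * query.
	 */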
1467 	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1468 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1469 					   true);
1470 		desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1471 	}
1472 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1473 
1474 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1475 	if (ret)
1476 		return ret;
1477 
1478 	hclge_parse_dev_specs(hdev, desc);
1479 	hclge_check_dev_specs(hdev);
1480 
1481 	return 0;
1482 }
1483 
1484 static int hclge_get_cap(struct hclge_dev *hdev)
1485 {
1486 	int ret;
1487 
1488 	ret = hclge_query_function_status(hdev);
1489 	if (ret) {
1490 		dev_err(&hdev->pdev->dev,
1491 			"query function status error %d.\n", ret);
1492 		return ret;
1493 	}
1494 
1495 	/* get pf resource */
1496 	return hclge_query_pf_resource(hdev);
1497 }
1498 
1499 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1500 {
1501 #define HCLGE_MIN_TX_DESC	64
1502 #define HCLGE_MIN_RX_DESC	64
1503 
1504 	if (!is_kdump_kernel())
1505 		return;
1506 
1507 	dev_info(&hdev->pdev->dev,
1508 		 "Running kdump kernel. Using minimal resources\n");
1509 
1510 	/* the minimum number of queue pairs equals the number of vports */
1511 	hdev->num_tqps = hdev->num_req_vfs + 1;
1512 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1513 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1514 }
1515 
1516 static void hclge_init_tc_config(struct hclge_dev *hdev)
1517 {
1518 	unsigned int i;
1519 
1520 	if (hdev->tc_max > HNAE3_MAX_TC ||
1521 	    hdev->tc_max < 1) {
1522 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1523 			 hdev->tc_max);
1524 		hdev->tc_max = 1;
1525 	}
1526 
1527 	/* Dev does not support DCB */
1528 	if (!hnae3_dev_dcb_supported(hdev)) {
1529 		hdev->tc_max = 1;
1530 		hdev->pfc_max = 0;
1531 	} else {
1532 		hdev->pfc_max = hdev->tc_max;
1533 	}
1534 
1535 	hdev->tm_info.num_tc = 1;
1536 
1537 	/* Non-contiguous TCs are currently not supported */
1538 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1539 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1540 
1541 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1542 }
1543 
1544 static int hclge_configure(struct hclge_dev *hdev)
1545 {
1546 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1547 	struct hclge_cfg cfg;
1548 	int ret;
1549 
1550 	ret = hclge_get_cfg(hdev, &cfg);
1551 	if (ret)
1552 		return ret;
1553 
1554 	hdev->base_tqp_pid = 0;
1555 	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1556 	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1557 	hdev->rx_buf_len = cfg.rx_buf_len;
1558 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1559 	hdev->hw.mac.media_type = cfg.media_type;
1560 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1561 	hdev->num_tx_desc = cfg.tqp_desc_num;
1562 	hdev->num_rx_desc = cfg.tqp_desc_num;
1563 	hdev->tm_info.num_pg = 1;
1564 	hdev->tc_max = cfg.tc_num;
1565 	hdev->tm_info.hw_pfc_map = 0;
1566 	if (cfg.umv_space)
1567 		hdev->wanted_umv_size = cfg.umv_space;
1568 	else
1569 		hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
1570 	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
1571 	hdev->gro_en = true;
1572 	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
1573 		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1574 
1575 	if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
1576 		hdev->fd_en = true;
1577 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1578 	}
1579 
1580 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1581 	if (ret) {
1582 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1583 			cfg.default_speed, ret);
1584 		return ret;
1585 	}
1586 	hdev->hw.mac.req_speed = hdev->hw.mac.speed;
1587 	hdev->hw.mac.req_autoneg = AUTONEG_ENABLE;
1588 	hdev->hw.mac.req_duplex = DUPLEX_FULL;
1589 
1590 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1591 
1592 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1593 
1594 	hclge_init_tc_config(hdev);
1595 	hclge_init_kdump_kernel_config(hdev);
1596 
1597 	return ret;
1598 }
1599 
1600 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1601 			    u16 tso_mss_max)
1602 {
1603 	struct hclge_cfg_tso_status_cmd *req;
1604 	struct hclge_desc desc;
1605 
1606 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1607 
1608 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1609 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1610 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1611 
1612 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1613 }
1614 
1615 static int hclge_config_gro(struct hclge_dev *hdev)
1616 {
1617 	struct hclge_cfg_gro_status_cmd *req;
1618 	struct hclge_desc desc;
1619 	int ret;
1620 
1621 	if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
1622 		return 0;
1623 
1624 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1625 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1626 
1627 	req->gro_en = hdev->gro_en ? 1 : 0;
1628 
1629 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1630 	if (ret)
1631 		dev_err(&hdev->pdev->dev,
1632 			"GRO hardware config cmd failed, ret = %d\n", ret);
1633 
1634 	return ret;
1635 }
1636 
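/* Allocate the per-TQP bookkeeping array and precompute each queue's
 * register base; queues beyond HCLGE_TQP_MAX_SIZE_DEV_V2 live in an
 * extended register region.
 */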
1637 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1638 {
1639 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1640 	struct hclge_comm_tqp *tqp;
1641 	int i;
1642 
1643 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1644 				  sizeof(struct hclge_comm_tqp), GFP_KERNEL);
1645 	if (!hdev->htqp)
1646 		return -ENOMEM;
1647 
1648 	tqp = hdev->htqp;
1649 
1650 	for (i = 0; i < hdev->num_tqps; i++) {
1651 		tqp->dev = &hdev->pdev->dev;
1652 		tqp->index = i;
1653 
1654 		tqp->q.ae_algo = &ae_algo;
1655 		tqp->q.buf_size = hdev->rx_buf_len;
1656 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1657 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1658 
1659 		/* need an extended offset to configure queues >=
1660 		 * HCLGE_TQP_MAX_SIZE_DEV_V2
1661 		 */
1662 		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1663 			tqp->q.io_base = hdev->hw.hw.io_base +
1664 					 HCLGE_TQP_REG_OFFSET +
1665 					 i * HCLGE_TQP_REG_SIZE;
1666 		else
1667 			tqp->q.io_base = hdev->hw.hw.io_base +
1668 					 HCLGE_TQP_REG_OFFSET +
1669 					 HCLGE_TQP_EXT_REG_OFFSET +
1670 					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1671 					 HCLGE_TQP_REG_SIZE;
1672 		/* when the device supports tx push and has device memory,
1673 		 * the queue can use either push mode or doorbell mode on
1674 		 * the device memory.
1675 		 */
1676 		 */
1677 		if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
1678 			tqp->q.mem_base = hdev->hw.hw.mem_base +
1679 					  HCLGE_TQP_MEM_OFFSET(hdev, i);
1680 
1681 		tqp++;
1682 	}
1683 
1684 	return 0;
1685 }
1686 
1687 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1688 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1689 {
1690 	struct hclge_tqp_map_cmd *req;
1691 	struct hclge_desc desc;
1692 	int ret;
1693 
1694 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1695 
1696 	req = (struct hclge_tqp_map_cmd *)desc.data;
1697 	req->tqp_id = cpu_to_le16(tqp_pid);
1698 	req->tqp_vf = func_id;
1699 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1700 	if (!is_pf)
1701 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1702 	req->tqp_vid = cpu_to_le16(tqp_vid);
1703 
1704 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1705 	if (ret)
1706 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1707 
1708 	return ret;
1709 }
1710 
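/* Hand out up to num_tqps unused TQPs to this vport, then derive the
 * per-TC rss_size from what was actually allocated, bounded by
 * pf_rss_size_max and by the available NIC MSI vectors.
 */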
1711 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1712 {
1713 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1714 	struct hclge_dev *hdev = vport->back;
1715 	int i, alloced;
1716 
1717 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1718 	     alloced < num_tqps; i++) {
1719 		if (!hdev->htqp[i].alloced) {
1720 			hdev->htqp[i].q.handle = &vport->nic;
1721 			hdev->htqp[i].q.tqp_index = alloced;
1722 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1723 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1724 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1725 			hdev->htqp[i].alloced = true;
1726 			alloced++;
1727 		}
1728 	}
1729 	vport->alloc_tqps = alloced;
1730 	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1731 				vport->alloc_tqps / hdev->tm_info.num_tc);
1732 
1733 	/* ensure a one-to-one mapping between irq and queue by default */
1734 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1735 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1736 
1737 	return 0;
1738 }
1739 
1740 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1741 			    u16 num_tx_desc, u16 num_rx_desc)
1742 
1743 {
1744 	struct hnae3_handle *nic = &vport->nic;
1745 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1746 	struct hclge_dev *hdev = vport->back;
1747 	int ret;
1748 
1749 	kinfo->num_tx_desc = num_tx_desc;
1750 	kinfo->num_rx_desc = num_rx_desc;
1751 
1752 	kinfo->rx_buf_len = hdev->rx_buf_len;
1753 	kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
1754 
1755 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1756 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1757 	if (!kinfo->tqp)
1758 		return -ENOMEM;
1759 
1760 	ret = hclge_assign_tqp(vport, num_tqps);
1761 	if (ret)
1762 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1763 
1764 	return ret;
1765 }
1766 
1767 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1768 				  struct hclge_vport *vport)
1769 {
1770 	struct hnae3_handle *nic = &vport->nic;
1771 	struct hnae3_knic_private_info *kinfo;
1772 	u16 i;
1773 
1774 	kinfo = &nic->kinfo;
1775 	for (i = 0; i < vport->alloc_tqps; i++) {
1776 		struct hclge_comm_tqp *q =
1777 			container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
1778 		bool is_pf;
1779 		int ret;
1780 
1781 		is_pf = !(vport->vport_id);
1782 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1783 					     i, is_pf);
1784 		if (ret)
1785 			return ret;
1786 	}
1787 
1788 	return 0;
1789 }
1790 
1791 static int hclge_map_tqp(struct hclge_dev *hdev)
1792 {
1793 	struct hclge_vport *vport = hdev->vport;
1794 	u16 i, num_vport;
1795 
1796 	num_vport = hdev->num_req_vfs + 1;
1797 	for (i = 0; i < num_vport; i++) {
1798 		int ret;
1799 
1800 		ret = hclge_map_tqp_to_vport(hdev, vport);
1801 		if (ret)
1802 			return ret;
1803 
1804 		vport++;
1805 	}
1806 
1807 	return 0;
1808 }
1809 
1810 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1811 {
1812 	struct hnae3_handle *nic = &vport->nic;
1813 	struct hclge_dev *hdev = vport->back;
1814 	int ret;
1815 
1816 	nic->pdev = hdev->pdev;
1817 	nic->ae_algo = &ae_algo;
1818 	bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
1819 		    MAX_NUMNODES);
1820 	nic->kinfo.io_base = hdev->hw.hw.io_base;
1821 
1822 	ret = hclge_knic_setup(vport, num_tqps,
1823 			       hdev->num_tx_desc, hdev->num_rx_desc);
1824 	if (ret)
1825 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1826 
1827 	return ret;
1828 }
1829 
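/* Create one vport per requested VF plus one for the PF itself; TQPs are
 * split evenly and the PF vport (vport 0) also takes the remainder.
 */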
1830 static int hclge_alloc_vport(struct hclge_dev *hdev)
1831 {
1832 	struct pci_dev *pdev = hdev->pdev;
1833 	struct hclge_vport *vport;
1834 	u32 tqp_main_vport;
1835 	u32 tqp_per_vport;
1836 	int num_vport, i;
1837 	int ret;
1838 
1839 	/* We need to alloc a vport for the main NIC of the PF */
1840 	num_vport = hdev->num_req_vfs + 1;
1841 
1842 	if (hdev->num_tqps < num_vport) {
1843 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1844 			hdev->num_tqps, num_vport);
1845 		return -EINVAL;
1846 	}
1847 
1848 	/* Alloc the same number of TQPs for every vport */
1849 	tqp_per_vport = hdev->num_tqps / num_vport;
1850 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1851 
1852 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1853 			     GFP_KERNEL);
1854 	if (!vport)
1855 		return -ENOMEM;
1856 
1857 	hdev->vport = vport;
1858 	hdev->num_alloc_vport = num_vport;
1859 
1860 	if (IS_ENABLED(CONFIG_PCI_IOV))
1861 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1862 
1863 	for (i = 0; i < num_vport; i++) {
1864 		vport->back = hdev;
1865 		vport->vport_id = i;
1866 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1867 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1868 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1869 		vport->port_base_vlan_cfg.tbl_sta = true;
1870 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1871 		vport->req_vlan_fltr_en = true;
1872 		INIT_LIST_HEAD(&vport->vlan_list);
1873 		INIT_LIST_HEAD(&vport->uc_mac_list);
1874 		INIT_LIST_HEAD(&vport->mc_mac_list);
1875 		spin_lock_init(&vport->mac_list_lock);
1876 
1877 		if (i == 0)
1878 			ret = hclge_vport_setup(vport, tqp_main_vport);
1879 		else
1880 			ret = hclge_vport_setup(vport, tqp_per_vport);
1881 		if (ret) {
1882 			dev_err(&pdev->dev,
1883 				"vport setup failed for vport %d, %d\n",
1884 				i, ret);
1885 			return ret;
1886 		}
1887 
1888 		vport++;
1889 	}
1890 
1891 	return 0;
1892 }
1893 
1894 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1895 				    struct hclge_pkt_buf_alloc *buf_alloc)
1896 {
1897 /* TX buffer size is in units of 128 bytes */
1898 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1899 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1900 	struct hclge_tx_buff_alloc_cmd *req;
1901 	struct hclge_desc desc;
1902 	int ret;
1903 	u8 i;
1904 
1905 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1906 
1907 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1908 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1909 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1910 
1911 		req->tx_pkt_buff[i] =
1912 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1913 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1914 	}
1915 
1916 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1917 	if (ret)
1918 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1919 			ret);
1920 
1921 	return ret;
1922 }
1923 
1924 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1925 				 struct hclge_pkt_buf_alloc *buf_alloc)
1926 {
1927 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1928 
1929 	if (ret)
1930 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1931 
1932 	return ret;
1933 }
1934 
1935 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1936 {
1937 	unsigned int i;
1938 	u32 cnt = 0;
1939 
1940 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1941 		if (hdev->hw_tc_map & BIT(i))
1942 			cnt++;
1943 	return cnt;
1944 }
1945 
1946 /* Get the number of PFC-enabled TCs that have a private buffer */
1947 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1948 				  struct hclge_pkt_buf_alloc *buf_alloc)
1949 {
1950 	struct hclge_priv_buf *priv;
1951 	unsigned int i;
1952 	int cnt = 0;
1953 
1954 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1955 		priv = &buf_alloc->priv_buf[i];
1956 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1957 		    priv->enable)
1958 			cnt++;
1959 	}
1960 
1961 	return cnt;
1962 }
1963 
1964 /* Get the number of PFC-disabled TCs that have a private buffer */
1965 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1966 				     struct hclge_pkt_buf_alloc *buf_alloc)
1967 {
1968 	struct hclge_priv_buf *priv;
1969 	unsigned int i;
1970 	int cnt = 0;
1971 
1972 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1973 		priv = &buf_alloc->priv_buf[i];
1974 		if (hdev->hw_tc_map & BIT(i) &&
1975 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1976 		    priv->enable)
1977 			cnt++;
1978 	}
1979 
1980 	return cnt;
1981 }
1982 
1983 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1984 {
1985 	struct hclge_priv_buf *priv;
1986 	u32 rx_priv = 0;
1987 	int i;
1988 
1989 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1990 		priv = &buf_alloc->priv_buf[i];
1991 		if (priv->enable)
1992 			rx_priv += priv->buf_size;
1993 	}
1994 	return rx_priv;
1995 }
1996 
1997 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1998 {
1999 	u32 i, total_tx_size = 0;
2000 
2001 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
2002 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
2003 
2004 	return total_tx_size;
2005 }
2006 
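/* Check whether rx_all (the RX packet buffer left after TX allocation) can
 * hold both the per-TC private buffers and the shared buffer; if so, fill
 * in the shared buffer size, its waterlines and the per-TC thresholds.
 */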
2007 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
2008 				struct hclge_pkt_buf_alloc *buf_alloc,
2009 				u32 rx_all)
2010 {
2011 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2012 	u32 tc_num = hclge_get_tc_num(hdev);
2013 	u32 shared_buf, aligned_mps;
2014 	u32 rx_priv;
2015 	int i;
2016 
2017 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2018 
2019 	if (hnae3_dev_dcb_supported(hdev))
2020 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2021 					hdev->dv_buf_size;
2022 	else
2023 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2024 					+ hdev->dv_buf_size;
2025 
2026 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2027 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2028 			     HCLGE_BUF_SIZE_UNIT);
2029 
2030 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2031 	if (rx_all < rx_priv + shared_std)
2032 		return false;
2033 
2034 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2035 	buf_alloc->s_buf.buf_size = shared_buf;
2036 	if (hnae3_dev_dcb_supported(hdev)) {
2037 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2038 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2039 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2040 				  HCLGE_BUF_SIZE_UNIT);
2041 	} else {
2042 		buf_alloc->s_buf.self.high = aligned_mps +
2043 						HCLGE_NON_DCB_ADDITIONAL_BUF;
2044 		buf_alloc->s_buf.self.low = aligned_mps;
2045 	}
2046 
2047 	if (hnae3_dev_dcb_supported(hdev)) {
2048 		hi_thrd = shared_buf - hdev->dv_buf_size;
2049 
2050 		if (tc_num <= NEED_RESERVE_TC_NUM)
2051 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2052 					/ BUF_MAX_PERCENT;
2053 
2054 		if (tc_num)
2055 			hi_thrd = hi_thrd / tc_num;
2056 
2057 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2058 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2059 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2060 	} else {
2061 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2062 		lo_thrd = aligned_mps;
2063 	}
2064 
2065 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2066 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2067 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2068 	}
2069 
2070 	return true;
2071 }
2072 
2073 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2074 				struct hclge_pkt_buf_alloc *buf_alloc)
2075 {
2076 	u32 i, total_size;
2077 
2078 	total_size = hdev->pkt_buf_size;
2079 
2080 	/* alloc a tx buffer for every enabled TC */
2081 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2082 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2083 
2084 		if (hdev->hw_tc_map & BIT(i)) {
2085 			if (total_size < hdev->tx_buf_size)
2086 				return -ENOMEM;
2087 
2088 			priv->tx_buf_size = hdev->tx_buf_size;
2089 		} else {
2090 			priv->tx_buf_size = 0;
2091 		}
2092 
2093 		total_size -= priv->tx_buf_size;
2094 	}
2095 
2096 	return 0;
2097 }
2098 
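/* Compute private RX buffer sizes and waterlines for every enabled TC
 * ('max' selects the larger waterline profile), then verify the remaining
 * space still fits the shared buffer.
 */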
2099 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2100 				  struct hclge_pkt_buf_alloc *buf_alloc)
2101 {
2102 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2103 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2104 	unsigned int i;
2105 
2106 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2107 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2108 
2109 		priv->enable = 0;
2110 		priv->wl.low = 0;
2111 		priv->wl.high = 0;
2112 		priv->buf_size = 0;
2113 
2114 		if (!(hdev->hw_tc_map & BIT(i)))
2115 			continue;
2116 
2117 		priv->enable = 1;
2118 
2119 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2120 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2121 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
2122 						HCLGE_BUF_SIZE_UNIT);
2123 		} else {
2124 			priv->wl.low = 0;
2125 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2126 					aligned_mps;
2127 		}
2128 
2129 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2130 	}
2131 
2132 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2133 }
2134 
2135 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2136 					  struct hclge_pkt_buf_alloc *buf_alloc)
2137 {
2138 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2139 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2140 	int i;
2141 
2142 	/* clear TCs from the last one to the first */
2143 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2144 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2145 		unsigned int mask = BIT((unsigned int)i);
2146 
2147 		if (hdev->hw_tc_map & mask &&
2148 		    !(hdev->tm_info.hw_pfc_map & mask)) {
2149 			/* Clear the private buffer of this non-PFC TC */
2150 			priv->wl.low = 0;
2151 			priv->wl.high = 0;
2152 			priv->buf_size = 0;
2153 			priv->enable = 0;
2154 			no_pfc_priv_num--;
2155 		}
2156 
2157 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2158 		    no_pfc_priv_num == 0)
2159 			break;
2160 	}
2161 
2162 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2163 }
2164 
2165 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2166 					struct hclge_pkt_buf_alloc *buf_alloc)
2167 {
2168 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2169 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2170 	int i;
2171 
2172 	/* clear TCs from the last one to the first */
2173 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2174 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2175 		unsigned int mask = BIT((unsigned int)i);
2176 
2177 		if (hdev->hw_tc_map & mask &&
2178 		    hdev->tm_info.hw_pfc_map & mask) {
2179 			/* Reduce the number of PFC TCs with a private buffer */
2180 			priv->wl.low = 0;
2181 			priv->enable = 0;
2182 			priv->wl.high = 0;
2183 			priv->buf_size = 0;
2184 			pfc_priv_num--;
2185 		}
2186 
2187 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2188 		    pfc_priv_num == 0)
2189 			break;
2190 	}
2191 
2192 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2193 }
2194 
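/* Try to spend the whole RX packet buffer on per-TC private buffers with no
 * shared buffer; only succeeds when each enabled TC gets at least the
 * minimum private size.
 */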
2195 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2196 				      struct hclge_pkt_buf_alloc *buf_alloc)
2197 {
2198 #define COMPENSATE_BUFFER	0x3C00
2199 #define COMPENSATE_HALF_MPS_NUM	5
2200 #define PRIV_WL_GAP		0x1800
2201 
2202 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2203 	u32 tc_num = hclge_get_tc_num(hdev);
2204 	u32 half_mps = hdev->mps >> 1;
2205 	u32 min_rx_priv;
2206 	unsigned int i;
2207 
2208 	if (tc_num)
2209 		rx_priv = rx_priv / tc_num;
2210 
2211 	if (tc_num <= NEED_RESERVE_TC_NUM)
2212 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2213 
2214 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2215 			COMPENSATE_HALF_MPS_NUM * half_mps;
2216 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2217 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2218 	if (rx_priv < min_rx_priv)
2219 		return false;
2220 
2221 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2222 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2223 
2224 		priv->enable = 0;
2225 		priv->wl.low = 0;
2226 		priv->wl.high = 0;
2227 		priv->buf_size = 0;
2228 
2229 		if (!(hdev->hw_tc_map & BIT(i)))
2230 			continue;
2231 
2232 		priv->enable = 1;
2233 		priv->buf_size = rx_priv;
2234 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2235 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2236 	}
2237 
2238 	buf_alloc->s_buf.buf_size = 0;
2239 
2240 	return true;
2241 }
2242 
2243 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2244  * @hdev: pointer to struct hclge_dev
2245  * @buf_alloc: pointer to buffer calculation data
2246  * @return: 0 on success, a negative error code on failure
2247  */
2248 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2249 				struct hclge_pkt_buf_alloc *buf_alloc)
2250 {
2251 	/* When DCB is not supported, rx private buffer is not allocated. */
2252 	if (!hnae3_dev_dcb_supported(hdev)) {
2253 		u32 rx_all = hdev->pkt_buf_size;
2254 
2255 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2256 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2257 			return -ENOMEM;
2258 
2259 		return 0;
2260 	}
2261 
2262 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2263 		return 0;
2264 
2265 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2266 		return 0;
2267 
2268 	/* try to decrease the buffer size */
2269 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2270 		return 0;
2271 
2272 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2273 		return 0;
2274 
2275 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2276 		return 0;
2277 
2278 	return -ENOMEM;
2279 }
2280 
2281 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2282 				   struct hclge_pkt_buf_alloc *buf_alloc)
2283 {
2284 	struct hclge_rx_priv_buff_cmd *req;
2285 	struct hclge_desc desc;
2286 	int ret;
2287 	int i;
2288 
2289 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2290 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2291 
2292 	/* Alloc the private buffer for each TC */
2293 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2294 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2295 
2296 		req->buf_num[i] =
2297 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2298 		req->buf_num[i] |=
2299 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2300 	}
2301 
2302 	req->shared_buf =
2303 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2304 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2305 
2306 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2307 	if (ret)
2308 		dev_err(&hdev->pdev->dev,
2309 			"rx private buffer alloc cmd failed %d\n", ret);
2310 
2311 	return ret;
2312 }
2313 
2314 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2315 				   struct hclge_pkt_buf_alloc *buf_alloc)
2316 {
2317 	struct hclge_rx_priv_wl_buf *req;
2318 	struct hclge_priv_buf *priv;
2319 	struct hclge_desc desc[2];
2320 	int i, j;
2321 	int ret;
2322 
2323 	for (i = 0; i < 2; i++) {
2324 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2325 					   false);
2326 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2327 
2328 		/* The first descriptor sets the NEXT bit to 1 */
2329 		if (i == 0)
2330 			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2331 		else
2332 			desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2333 
2334 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2335 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2336 
2337 			priv = &buf_alloc->priv_buf[idx];
2338 			req->tc_wl[j].high =
2339 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2340 			req->tc_wl[j].high |=
2341 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2342 			req->tc_wl[j].low =
2343 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2344 			req->tc_wl[j].low |=
2345 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2346 		}
2347 	}
2348 
2349 	/* Send 2 descriptors at one time */
2350 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2351 	if (ret)
2352 		dev_err(&hdev->pdev->dev,
2353 			"rx private waterline config cmd failed %d\n",
2354 			ret);
2355 	return ret;
2356 }
2357 
2358 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2359 				    struct hclge_pkt_buf_alloc *buf_alloc)
2360 {
2361 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2362 	struct hclge_rx_com_thrd *req;
2363 	struct hclge_desc desc[2];
2364 	struct hclge_tc_thrd *tc;
2365 	int i, j;
2366 	int ret;
2367 
2368 	for (i = 0; i < 2; i++) {
2369 		hclge_cmd_setup_basic_desc(&desc[i],
2370 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2371 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2372 
2373 		/* The first descriptor sets the NEXT bit to 1 */
2374 		if (i == 0)
2375 			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2376 		else
2377 			desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2378 
2379 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2380 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2381 
2382 			req->com_thrd[j].high =
2383 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2384 			req->com_thrd[j].high |=
2385 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2386 			req->com_thrd[j].low =
2387 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2388 			req->com_thrd[j].low |=
2389 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2390 		}
2391 	}
2392 
2393 	/* Send 2 descriptors at one time */
2394 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2395 	if (ret)
2396 		dev_err(&hdev->pdev->dev,
2397 			"common threshold config cmd failed %d\n", ret);
2398 	return ret;
2399 }
2400 
2401 static int hclge_common_wl_config(struct hclge_dev *hdev,
2402 				  struct hclge_pkt_buf_alloc *buf_alloc)
2403 {
2404 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2405 	struct hclge_rx_com_wl *req;
2406 	struct hclge_desc desc;
2407 	int ret;
2408 
2409 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2410 
2411 	req = (struct hclge_rx_com_wl *)desc.data;
2412 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2413 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2414 
2415 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2416 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2417 
2418 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2419 	if (ret)
2420 		dev_err(&hdev->pdev->dev,
2421 			"common waterline config cmd failed %d\n", ret);
2422 
2423 	return ret;
2424 }
2425 
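/* Calculate and program the whole packet buffer layout: TX buffers first,
 * then RX private buffers, per-TC waterlines and thresholds (DCB only), and
 * finally the common shared-buffer waterline.
 */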
2426 int hclge_buffer_alloc(struct hclge_dev *hdev)
2427 {
2428 	struct hclge_pkt_buf_alloc *pkt_buf;
2429 	int ret;
2430 
2431 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2432 	if (!pkt_buf)
2433 		return -ENOMEM;
2434 
2435 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2436 	if (ret) {
2437 		dev_err(&hdev->pdev->dev,
2438 			"could not calc tx buffer size for all TCs %d\n", ret);
2439 		goto out;
2440 	}
2441 
2442 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2443 	if (ret) {
2444 		dev_err(&hdev->pdev->dev,
2445 			"could not alloc tx buffers %d\n", ret);
2446 		goto out;
2447 	}
2448 
2449 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2450 	if (ret) {
2451 		dev_err(&hdev->pdev->dev,
2452 			"could not calc rx priv buffer size for all TCs %d\n",
2453 			ret);
2454 		goto out;
2455 	}
2456 
2457 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2458 	if (ret) {
2459 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2460 			ret);
2461 		goto out;
2462 	}
2463 
2464 	if (hnae3_dev_dcb_supported(hdev)) {
2465 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2466 		if (ret) {
2467 			dev_err(&hdev->pdev->dev,
2468 				"could not configure rx private waterline %d\n",
2469 				ret);
2470 			goto out;
2471 		}
2472 
2473 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2474 		if (ret) {
2475 			dev_err(&hdev->pdev->dev,
2476 				"could not configure common threshold %d\n",
2477 				ret);
2478 			goto out;
2479 		}
2480 	}
2481 
2482 	ret = hclge_common_wl_config(hdev, pkt_buf);
2483 	if (ret)
2484 		dev_err(&hdev->pdev->dev,
2485 			"could not configure common waterline %d\n", ret);
2486 
2487 out:
2488 	kfree(pkt_buf);
2489 	return ret;
2490 }
2491 
2492 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2493 {
2494 	struct hnae3_handle *roce = &vport->roce;
2495 	struct hnae3_handle *nic = &vport->nic;
2496 	struct hclge_dev *hdev = vport->back;
2497 
2498 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2499 
2500 	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2501 		return -EINVAL;
2502 
2503 	roce->rinfo.base_vector = hdev->num_nic_msi;
2504 
2505 	roce->rinfo.netdev = nic->kinfo.netdev;
2506 	roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
2507 	roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
2508 
2509 	roce->pdev = nic->pdev;
2510 	roce->ae_algo = nic->ae_algo;
2511 	bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits,
2512 		    MAX_NUMNODES);
2513 
2514 	return 0;
2515 }
2516 
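/* Allocate between HNAE3_MIN_VECTOR_NUM and hdev->num_msi MSI/MSI-X vectors
 * and set up the vector_status/vector_irq bookkeeping arrays.
 */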
2517 static int hclge_init_msi(struct hclge_dev *hdev)
2518 {
2519 	struct pci_dev *pdev = hdev->pdev;
2520 	int vectors;
2521 	int i;
2522 
2523 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2524 					hdev->num_msi,
2525 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2526 	if (vectors < 0) {
2527 		dev_err(&pdev->dev,
2528 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2529 			vectors);
2530 		return vectors;
2531 	}
2532 	if (vectors < hdev->num_msi)
2533 		dev_warn(&hdev->pdev->dev,
2534 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2535 			 hdev->num_msi, vectors);
2536 
2537 	hdev->num_msi = vectors;
2538 	hdev->num_msi_left = vectors;
2539 
2540 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2541 					   sizeof(u16), GFP_KERNEL);
2542 	if (!hdev->vector_status) {
2543 		pci_free_irq_vectors(pdev);
2544 		return -ENOMEM;
2545 	}
2546 
2547 	for (i = 0; i < hdev->num_msi; i++)
2548 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2549 
2550 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2551 					sizeof(int), GFP_KERNEL);
2552 	if (!hdev->vector_irq) {
2553 		pci_free_irq_vectors(pdev);
2554 		return -ENOMEM;
2555 	}
2556 
2557 	return 0;
2558 }
2559 
2560 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2561 {
2562 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2563 		duplex = HCLGE_MAC_FULL;
2564 
2565 	return duplex;
2566 }
2567 
2568 static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = {
2569 	{HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M},
2570 	{HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M},
2571 	{HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G},
2572 	{HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G},
2573 	{HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G},
2574 	{HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G},
2575 	{HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G},
2576 	{HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G},
2577 	{HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G},
2578 };
2579 
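/* Translate a driver speed value into the firmware speed encoding via the
 * lookup table above; returns -EINVAL for an unknown speed.
 */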
2580 static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
2581 {
2582 	u16 i;
2583 
2584 	for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) {
2585 		if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) {
2586 			*speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw;
2587 			return 0;
2588 		}
2589 	}
2590 
2591 	return -EINVAL;
2592 }
2593 
2594 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2595 				      u8 duplex, u8 lane_num)
2596 {
2597 	struct hclge_config_mac_speed_dup_cmd *req;
2598 	struct hclge_desc desc;
2599 	u32 speed_fw;
2600 	int ret;
2601 
2602 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2603 
2604 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2605 
2606 	if (duplex)
2607 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2608 
2609 	ret = hclge_convert_to_fw_speed(speed, &speed_fw);
2610 	if (ret) {
2611 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2612 		return ret;
2613 	}
2614 
2615 	hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S,
2616 			speed_fw);
2617 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2618 		      1);
2619 	req->lane_num = lane_num;
2620 
2621 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2622 	if (ret) {
2623 		dev_err(&hdev->pdev->dev,
2624 			"mac speed/duplex config cmd failed %d.\n", ret);
2625 		return ret;
2626 	}
2627 
2628 	return 0;
2629 }
2630 
2631 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num)
2632 {
2633 	struct hclge_mac *mac = &hdev->hw.mac;
2634 	int ret;
2635 
2636 	duplex = hclge_check_speed_dup(duplex, speed);
2637 	if (!mac->support_autoneg && mac->speed == speed &&
2638 	    mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0))
2639 		return 0;
2640 
2641 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num);
2642 	if (ret)
2643 		return ret;
2644 
2645 	hdev->hw.mac.speed = speed;
2646 	hdev->hw.mac.duplex = duplex;
2647 	if (!lane_num)
2648 		hdev->hw.mac.lane_num = lane_num;
2649 
2650 	return 0;
2651 }
2652 
2653 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2654 				     u8 duplex, u8 lane_num)
2655 {
2656 	struct hclge_vport *vport = hclge_get_vport(handle);
2657 	struct hclge_dev *hdev = vport->back;
2658 	int ret;
2659 
2660 	ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
2661 
2662 	if (ret)
2663 		return ret;
2664 
2665 	hdev->hw.mac.req_speed = speed;
2666 	hdev->hw.mac.req_duplex = duplex;
2667 
2668 	return 0;
2669 }
2670 
2671 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2672 {
2673 	struct hclge_config_auto_neg_cmd *req;
2674 	struct hclge_desc desc;
2675 	u32 flag = 0;
2676 	int ret;
2677 
2678 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2679 
2680 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2681 	if (enable)
2682 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2683 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2684 
2685 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2686 	if (ret)
2687 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2688 			ret);
2689 
2690 	return ret;
2691 }
2692 
2693 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2694 {
2695 	struct hclge_vport *vport = hclge_get_vport(handle);
2696 	struct hclge_dev *hdev = vport->back;
2697 
2698 	if (!hdev->hw.mac.support_autoneg) {
2699 		if (enable) {
2700 			dev_err(&hdev->pdev->dev,
2701 				"autoneg is not supported by current port\n");
2702 			return -EOPNOTSUPP;
2703 		} else {
2704 			return 0;
2705 		}
2706 	}
2707 
2708 	return hclge_set_autoneg_en(hdev, enable);
2709 }
2710 
2711 static int hclge_get_autoneg(struct hnae3_handle *handle)
2712 {
2713 	struct hclge_vport *vport = hclge_get_vport(handle);
2714 	struct hclge_dev *hdev = vport->back;
2715 	struct phy_device *phydev = hdev->hw.mac.phydev;
2716 
2717 	if (phydev)
2718 		return phydev->autoneg;
2719 
2720 	return hdev->hw.mac.autoneg;
2721 }
2722 
2723 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2724 {
2725 	struct hclge_vport *vport = hclge_get_vport(handle);
2726 	struct hclge_dev *hdev = vport->back;
2727 	int ret;
2728 
2729 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2730 
2731 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2732 	if (ret)
2733 		return ret;
2734 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2735 }
2736 
2737 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2738 {
2739 	struct hclge_vport *vport = hclge_get_vport(handle);
2740 	struct hclge_dev *hdev = vport->back;
2741 
2742 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2743 		return hclge_set_autoneg_en(hdev, !halt);
2744 
2745 	return 0;
2746 }
2747 
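/* Accumulate per-lane FEC counters that are packed back to back across the
 * data words of several command descriptors.
 */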
2748 static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev,
2749 					struct hclge_desc *desc, u32 desc_len)
2750 {
2751 	u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2;
2752 	u32 desc_index = 0;
2753 	u32 data_index = 0;
2754 	u32 i;
2755 
2756 	for (i = 0; i < lane_size; i++) {
2757 		if (data_index >= HCLGE_DESC_DATA_LEN) {
2758 			desc_index++;
2759 			data_index = 0;
2760 		}
2761 
2762 		if (desc_index >= desc_len)
2763 			return;
2764 
2765 		hdev->fec_stats.per_lanes[i] +=
2766 			le32_to_cpu(desc[desc_index].data[data_index]);
2767 		data_index++;
2768 	}
2769 }
2770 
2771 static void hclge_parse_fec_stats(struct hclge_dev *hdev,
2772 				  struct hclge_desc *desc, u32 desc_len)
2773 {
2774 	struct hclge_query_fec_stats_cmd *req;
2775 
2776 	req = (struct hclge_query_fec_stats_cmd *)desc[0].data;
2777 
2778 	hdev->fec_stats.base_r_lane_num = req->base_r_lane_num;
2779 	hdev->fec_stats.rs_corr_blocks +=
2780 		le32_to_cpu(req->rs_fec_corr_blocks);
2781 	hdev->fec_stats.rs_uncorr_blocks +=
2782 		le32_to_cpu(req->rs_fec_uncorr_blocks);
2783 	hdev->fec_stats.rs_error_blocks +=
2784 		le32_to_cpu(req->rs_fec_error_blocks);
2785 	hdev->fec_stats.base_r_corr_blocks +=
2786 		le32_to_cpu(req->base_r_fec_corr_blocks);
2787 	hdev->fec_stats.base_r_uncorr_blocks +=
2788 		le32_to_cpu(req->base_r_fec_uncorr_blocks);
2789 
2790 	hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1);
2791 }
2792 
2793 static int hclge_update_fec_stats_hw(struct hclge_dev *hdev)
2794 {
2795 	struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM];
2796 	int ret;
2797 	u32 i;
2798 
2799 	for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) {
2800 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS,
2801 					   true);
2802 		if (i != (HCLGE_FEC_STATS_CMD_NUM - 1))
2803 			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2804 	}
2805 
2806 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM);
2807 	if (ret)
2808 		return ret;
2809 
2810 	hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM);
2811 
2812 	return 0;
2813 }
2814 
2815 static void hclge_update_fec_stats(struct hclge_dev *hdev)
2816 {
2817 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2818 	int ret;
2819 
2820 	if (!hnae3_ae_dev_fec_stats_supported(ae_dev) ||
2821 	    test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state))
2822 		return;
2823 
2824 	ret = hclge_update_fec_stats_hw(hdev);
2825 	if (ret)
2826 		dev_err(&hdev->pdev->dev,
2827 			"failed to update fec stats, ret = %d\n", ret);
2828 
2829 	clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state);
2830 }
2831 
2832 static void hclge_get_fec_stats_total(struct hclge_dev *hdev,
2833 				      struct ethtool_fec_stats *fec_stats)
2834 {
2835 	fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks;
2836 	fec_stats->uncorrectable_blocks.total =
2837 		hdev->fec_stats.rs_uncorr_blocks;
2838 }
2839 
2840 static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev,
2841 				      struct ethtool_fec_stats *fec_stats)
2842 {
2843 	u32 i;
2844 
2845 	if (hdev->fec_stats.base_r_lane_num == 0 ||
2846 	    hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) {
2847 		dev_err(&hdev->pdev->dev,
2848 			"fec stats lane number(%llu) is invalid\n",
2849 			hdev->fec_stats.base_r_lane_num);
2850 		return;
2851 	}
2852 
2853 	for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) {
2854 		fec_stats->corrected_blocks.lanes[i] =
2855 			hdev->fec_stats.base_r_corr_per_lanes[i];
2856 		fec_stats->uncorrectable_blocks.lanes[i] =
2857 			hdev->fec_stats.base_r_uncorr_per_lanes[i];
2858 	}
2859 }
2860 
2861 static void hclge_comm_get_fec_stats(struct hclge_dev *hdev,
2862 				     struct ethtool_fec_stats *fec_stats)
2863 {
2864 	u32 fec_mode = hdev->hw.mac.fec_mode;
2865 
2866 	switch (fec_mode) {
2867 	case BIT(HNAE3_FEC_RS):
2868 	case BIT(HNAE3_FEC_LLRS):
2869 		hclge_get_fec_stats_total(hdev, fec_stats);
2870 		break;
2871 	case BIT(HNAE3_FEC_BASER):
2872 		hclge_get_fec_stats_lanes(hdev, fec_stats);
2873 		break;
2874 	default:
2875 		dev_err(&hdev->pdev->dev,
2876 			"fec stats is not supported by current fec mode(0x%x)\n",
2877 			fec_mode);
2878 		break;
2879 	}
2880 }
2881 
2882 static void hclge_get_fec_stats(struct hnae3_handle *handle,
2883 				struct ethtool_fec_stats *fec_stats)
2884 {
2885 	struct hclge_vport *vport = hclge_get_vport(handle);
2886 	struct hclge_dev *hdev = vport->back;
2887 	u32 fec_mode = hdev->hw.mac.fec_mode;
2888 
2889 	if (fec_mode == BIT(HNAE3_FEC_NONE) ||
2890 	    fec_mode == BIT(HNAE3_FEC_AUTO) ||
2891 	    fec_mode == BIT(HNAE3_FEC_USER_DEF))
2892 		return;
2893 
2894 	hclge_update_fec_stats(hdev);
2895 
2896 	hclge_comm_get_fec_stats(hdev, fec_stats);
2897 }
2898 
2899 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2900 {
2901 	struct hclge_config_fec_cmd *req;
2902 	struct hclge_desc desc;
2903 	int ret;
2904 
2905 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2906 
2907 	req = (struct hclge_config_fec_cmd *)desc.data;
2908 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2909 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2910 	if (fec_mode & BIT(HNAE3_FEC_RS))
2911 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2912 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2913 	if (fec_mode & BIT(HNAE3_FEC_LLRS))
2914 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2915 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS);
2916 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2917 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2918 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2919 
2920 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2921 	if (ret)
2922 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2923 
2924 	return ret;
2925 }
2926 
2927 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2928 {
2929 	struct hclge_vport *vport = hclge_get_vport(handle);
2930 	struct hclge_dev *hdev = vport->back;
2931 	struct hclge_mac *mac = &hdev->hw.mac;
2932 	int ret;
2933 
2934 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2935 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2936 		return -EINVAL;
2937 	}
2938 
2939 	ret = hclge_set_fec_hw(hdev, fec_mode);
2940 	if (ret)
2941 		return ret;
2942 
2943 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2944 	return 0;
2945 }
2946 
2947 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2948 			  u8 *fec_mode)
2949 {
2950 	struct hclge_vport *vport = hclge_get_vport(handle);
2951 	struct hclge_dev *hdev = vport->back;
2952 	struct hclge_mac *mac = &hdev->hw.mac;
2953 
2954 	if (fec_ability)
2955 		*fec_ability = mac->fec_ability;
2956 	if (fec_mode)
2957 		*fec_mode = mac->fec_mode;
2958 }
2959 
2960 static int hclge_mac_init(struct hclge_dev *hdev)
2961 {
2962 	struct hclge_mac *mac = &hdev->hw.mac;
2963 	int ret;
2964 
2965 	hdev->support_sfp_query = true;
2966 
2967 	if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
2968 		hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2969 
2970 	if (hdev->hw.mac.support_autoneg) {
2971 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2972 		if (ret)
2973 			return ret;
2974 	}
2975 
2976 	if (!hdev->hw.mac.autoneg) {
2977 		ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.req_speed,
2978 						 hdev->hw.mac.req_duplex,
2979 						 hdev->hw.mac.lane_num);
2980 		if (ret)
2981 			return ret;
2982 	}
2983 
2984 	mac->link = 0;
2985 
2986 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2987 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2988 		if (ret)
2989 			return ret;
2990 	}
2991 
2992 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2993 	if (ret) {
2994 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2995 		return ret;
2996 	}
2997 
2998 	ret = hclge_set_default_loopback(hdev);
2999 	if (ret)
3000 		return ret;
3001 
3002 	ret = hclge_buffer_alloc(hdev);
3003 	if (ret)
3004 		dev_err(&hdev->pdev->dev,
3005 			"allocate buffer fail, ret=%d\n", ret);
3006 
3007 	return ret;
3008 }
3009 
3010 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
3011 {
3012 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
3013 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) {
3014 		hdev->last_mbx_scheduled = jiffies;
3015 		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
3016 	}
3017 }
3018 
3019 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
3020 {
3021 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
3022 	    test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
3023 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) {
3024 		hdev->last_rst_scheduled = jiffies;
3025 		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
3026 	}
3027 }
3028 
3029 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
3030 {
3031 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
3032 	    !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
3033 		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
3034 }
3035 
3036 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
3037 {
3038 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
3039 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
3040 		mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
3041 }
3042 
3043 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
3044 {
3045 	struct hclge_link_status_cmd *req;
3046 	struct hclge_desc desc;
3047 	int ret;
3048 
3049 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
3050 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3051 	if (ret) {
3052 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
3053 			ret);
3054 		return ret;
3055 	}
3056 
3057 	req = (struct hclge_link_status_cmd *)desc.data;
3058 	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
3059 		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
3060 
3061 	return 0;
3062 }
3063 
3064 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
3065 {
3066 	struct phy_device *phydev = hdev->hw.mac.phydev;
3067 
3068 	*link_status = HCLGE_LINK_STATUS_DOWN;
3069 
3070 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
3071 		return 0;
3072 
3073 	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
3074 		return 0;
3075 
3076 	return hclge_get_mac_link_status(hdev, link_status);
3077 }
3078 
3079 static void hclge_push_link_status(struct hclge_dev *hdev)
3080 {
3081 	struct hclge_vport *vport;
3082 	int ret;
3083 	u16 i;
3084 
3085 	for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
3086 		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
3087 
3088 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
3089 		    vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
3090 			continue;
3091 
3092 		ret = hclge_push_vf_link_status(vport);
3093 		if (ret) {
3094 			dev_err(&hdev->pdev->dev,
3095 				"failed to push link status to vf%u, ret = %d\n",
3096 				i, ret);
3097 		}
3098 	}
3099 }
3100 
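/* Re-read the MAC/PHY link state under the LINK_UPDATING bit and, on a
 * change, notify the NIC client, the RoCE client (if registered) and the
 * VFs.
 */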
3101 static void hclge_update_link_status(struct hclge_dev *hdev)
3102 {
3103 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3104 	struct hnae3_client *client = hdev->nic_client;
3105 	int state;
3106 	int ret;
3107 
3108 	if (!client)
3109 		return;
3110 
3111 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
3112 		return;
3113 
3114 	ret = hclge_get_mac_phy_link(hdev, &state);
3115 	if (ret) {
3116 		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
3117 		return;
3118 	}
3119 
3120 	if (state != hdev->hw.mac.link) {
3121 		hdev->hw.mac.link = state;
3122 		if (state == HCLGE_LINK_STATUS_UP)
3123 			hclge_update_port_info(hdev);
3124 
3125 		client->ops->link_status_change(handle, state);
3126 		hclge_config_mac_tnl_int(hdev, state);
3127 
3128 		if (test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state)) {
3129 			struct hnae3_handle *rhandle = &hdev->vport[0].roce;
3130 			struct hnae3_client *rclient = hdev->roce_client;
3131 
3132 			if (rclient && rclient->ops->link_status_change)
3133 				rclient->ops->link_status_change(rhandle,
3134 								 state);
3135 		}
3136 
3137 		hclge_push_link_status(hdev);
3138 	}
3139 
3140 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
3141 }
3142 
3143 static void hclge_update_speed_advertising(struct hclge_mac *mac)
3144 {
3145 	u32 speed_ability;
3146 
3147 	if (hclge_get_speed_bit(mac->speed, &speed_ability))
3148 		return;
3149 
3150 	switch (mac->module_type) {
3151 	case HNAE3_MODULE_TYPE_FIBRE_LR:
3152 		hclge_convert_setting_lr(speed_ability, mac->advertising);
3153 		break;
3154 	case HNAE3_MODULE_TYPE_FIBRE_SR:
3155 	case HNAE3_MODULE_TYPE_AOC:
3156 		hclge_convert_setting_sr(speed_ability, mac->advertising);
3157 		break;
3158 	case HNAE3_MODULE_TYPE_CR:
3159 		hclge_convert_setting_cr(speed_ability, mac->advertising);
3160 		break;
3161 	case HNAE3_MODULE_TYPE_KR:
3162 		hclge_convert_setting_kr(speed_ability, mac->advertising);
3163 		break;
3164 	default:
3165 		break;
3166 	}
3167 }
3168 
3169 static void hclge_update_fec_advertising(struct hclge_mac *mac)
3170 {
3171 	if (mac->fec_mode & BIT(HNAE3_FEC_RS))
3172 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
3173 				 mac->advertising);
3174 	else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS))
3175 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
3176 				 mac->advertising);
3177 	else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
3178 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
3179 				 mac->advertising);
3180 	else
3181 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
3182 				 mac->advertising);
3183 }
3184 
3185 static void hclge_update_pause_advertising(struct hclge_dev *hdev)
3186 {
3187 	struct hclge_mac *mac = &hdev->hw.mac;
3188 	bool rx_en, tx_en;
3189 
3190 	switch (hdev->fc_mode_last_time) {
3191 	case HCLGE_FC_RX_PAUSE:
3192 		rx_en = true;
3193 		tx_en = false;
3194 		break;
3195 	case HCLGE_FC_TX_PAUSE:
3196 		rx_en = false;
3197 		tx_en = true;
3198 		break;
3199 	case HCLGE_FC_FULL:
3200 		rx_en = true;
3201 		tx_en = true;
3202 		break;
3203 	default:
3204 		rx_en = false;
3205 		tx_en = false;
3206 		break;
3207 	}
3208 
3209 	linkmode_set_pause(mac->advertising, tx_en, rx_en);
3210 }
3211 
3212 static void hclge_update_advertising(struct hclge_dev *hdev)
3213 {
3214 	struct hclge_mac *mac = &hdev->hw.mac;
3215 
3216 	linkmode_zero(mac->advertising);
3217 	hclge_update_speed_advertising(mac);
3218 	hclge_update_fec_advertising(mac);
3219 	hclge_update_pause_advertising(hdev);
3220 }
3221 
3222 static void hclge_update_port_capability(struct hclge_dev *hdev,
3223 					 struct hclge_mac *mac)
3224 {
3225 	if (hnae3_dev_fec_supported(hdev))
3226 		hclge_convert_setting_fec(mac);
3227 
3228 	/* firmware cannot identify the backplane type; the media type
3229 	 * read from the configuration helps to handle it
3230 	 */
3231 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
3232 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
3233 		mac->module_type = HNAE3_MODULE_TYPE_KR;
3234 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3235 		mac->module_type = HNAE3_MODULE_TYPE_TP;
3236 
3237 	if (mac->support_autoneg) {
3238 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
3239 		linkmode_copy(mac->advertising, mac->supported);
3240 	} else {
3241 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
3242 				   mac->supported);
3243 		hclge_update_advertising(hdev);
3244 	}
3245 }
3246 
3247 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
3248 {
3249 	struct hclge_sfp_info_cmd *resp;
3250 	struct hclge_desc desc;
3251 	int ret;
3252 
3253 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3254 	resp = (struct hclge_sfp_info_cmd *)desc.data;
3255 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3256 	if (ret == -EOPNOTSUPP) {
3257 		dev_warn(&hdev->pdev->dev,
3258 			 "IMP do not support get SFP speed %d\n", ret);
3259 		return ret;
3260 	} else if (ret) {
3261 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3262 		return ret;
3263 	}
3264 
3265 	*speed = le32_to_cpu(resp->speed);
3266 
3267 	return 0;
3268 }
3269 
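/* Query the active speed and module info from the IMP firmware; old
 * firmware reports speed_ability as 0 and then only the speed is updated.
 */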
3270 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3271 {
3272 	struct hclge_sfp_info_cmd *resp;
3273 	struct hclge_desc desc;
3274 	int ret;
3275 
3276 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3277 	resp = (struct hclge_sfp_info_cmd *)desc.data;
3278 
3279 	resp->query_type = QUERY_ACTIVE_SPEED;
3280 
3281 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3282 	if (ret == -EOPNOTSUPP) {
3283 		dev_warn(&hdev->pdev->dev,
3284 			 "IMP does not support get SFP info %d\n", ret);
3285 		return ret;
3286 	} else if (ret) {
3287 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3288 		return ret;
3289 	}
3290 
3291 	/* In some cases, the mac speed got from IMP may be 0; it should not
3292 	 * be assigned to mac->speed.
3293 	 */
3294 	if (!le32_to_cpu(resp->speed))
3295 		return 0;
3296 
3297 	mac->speed = le32_to_cpu(resp->speed);
3298 	/* if resp->speed_ability is 0, the firmware is an old version;
3299 	 * do not update these params
3300 	 */
3301 	if (resp->speed_ability) {
3302 		mac->module_type = le32_to_cpu(resp->module_type);
3303 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
3304 		mac->autoneg = resp->autoneg;
3305 		mac->support_autoneg = resp->autoneg_ability;
3306 		mac->speed_type = QUERY_ACTIVE_SPEED;
3307 		mac->lane_num = resp->lane_num;
3308 		if (!resp->active_fec)
3309 			mac->fec_mode = 0;
3310 		else
3311 			mac->fec_mode = BIT(resp->active_fec);
3312 		mac->fec_ability = resp->fec_ability;
3313 	} else {
3314 		mac->speed_type = QUERY_SFP_SPEED;
3315 	}
3316 
3317 	return 0;
3318 }
3319 
3320 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3321 					struct ethtool_link_ksettings *cmd)
3322 {
3323 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3324 	struct hclge_vport *vport = hclge_get_vport(handle);
3325 	struct hclge_phy_link_ksetting_0_cmd *req0;
3326 	struct hclge_phy_link_ksetting_1_cmd *req1;
3327 	u32 supported, advertising, lp_advertising;
3328 	struct hclge_dev *hdev = vport->back;
3329 	int ret;
3330 
3331 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3332 				   true);
3333 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
3334 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3335 				   true);
3336 
3337 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3338 	if (ret) {
3339 		dev_err(&hdev->pdev->dev,
3340 			"failed to get phy link ksetting, ret = %d.\n", ret);
3341 		return ret;
3342 	}
3343 
3344 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3345 	cmd->base.autoneg = req0->autoneg;
3346 	cmd->base.speed = le32_to_cpu(req0->speed);
3347 	cmd->base.duplex = req0->duplex;
3348 	cmd->base.port = req0->port;
3349 	cmd->base.transceiver = req0->transceiver;
3350 	cmd->base.phy_address = req0->phy_address;
3351 	cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3352 	cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3353 	supported = le32_to_cpu(req0->supported);
3354 	advertising = le32_to_cpu(req0->advertising);
3355 	lp_advertising = le32_to_cpu(req0->lp_advertising);
3356 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3357 						supported);
3358 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3359 						advertising);
3360 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3361 						lp_advertising);
3362 
3363 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3364 	cmd->base.master_slave_cfg = req1->master_slave_cfg;
3365 	cmd->base.master_slave_state = req1->master_slave_state;
3366 
3367 	return 0;
3368 }
3369 
3370 static int
3371 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3372 			     const struct ethtool_link_ksettings *cmd)
3373 {
3374 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3375 	struct hclge_vport *vport = hclge_get_vport(handle);
3376 	struct hclge_phy_link_ksetting_0_cmd *req0;
3377 	struct hclge_phy_link_ksetting_1_cmd *req1;
3378 	struct hclge_dev *hdev = vport->back;
3379 	u32 advertising;
3380 	int ret;
3381 
3382 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
3383 	    ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3384 	     (cmd->base.duplex != DUPLEX_HALF &&
3385 	      cmd->base.duplex != DUPLEX_FULL)))
3386 		return -EINVAL;
3387 
3388 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3389 				   false);
3390 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
3391 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3392 				   false);
3393 
3394 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3395 	req0->autoneg = cmd->base.autoneg;
3396 	req0->speed = cpu_to_le32(cmd->base.speed);
3397 	req0->duplex = cmd->base.duplex;
3398 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
3399 						cmd->link_modes.advertising);
3400 	req0->advertising = cpu_to_le32(advertising);
3401 	req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3402 
3403 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3404 	req1->master_slave_cfg = cmd->base.master_slave_cfg;
3405 
3406 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3407 	if (ret) {
3408 		dev_err(&hdev->pdev->dev,
3409 			"failed to set phy link ksettings, ret = %d.\n", ret);
3410 		return ret;
3411 	}
3412 
3413 	hdev->hw.mac.req_autoneg = cmd->base.autoneg;
3414 	hdev->hw.mac.req_speed = cmd->base.speed;
3415 	hdev->hw.mac.req_duplex = cmd->base.duplex;
3416 	linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3417 
3418 	return 0;
3419 }
3420 
3421 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3422 {
3423 	struct ethtool_link_ksettings cmd;
3424 	int ret;
3425 
3426 	if (!hnae3_dev_phy_imp_supported(hdev))
3427 		return 0;
3428 
3429 	ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3430 	if (ret)
3431 		return ret;
3432 
3433 	hdev->hw.mac.autoneg = cmd.base.autoneg;
3434 	hdev->hw.mac.speed = cmd.base.speed;
3435 	hdev->hw.mac.duplex = cmd.base.duplex;
3436 	linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising);
3437 
3438 	return 0;
3439 }
3440 
3441 static int hclge_tp_port_init(struct hclge_dev *hdev)
3442 {
3443 	struct ethtool_link_ksettings cmd;
3444 
3445 	if (!hnae3_dev_phy_imp_supported(hdev))
3446 		return 0;
3447 
3448 	cmd.base.autoneg = hdev->hw.mac.req_autoneg;
3449 	cmd.base.speed = hdev->hw.mac.req_speed;
3450 	cmd.base.duplex = hdev->hw.mac.req_duplex;
3451 	linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3452 
3453 	return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3454 }
3455 
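/* Refresh the port information kept in hdev->hw.mac. Copper ports are
 * handled through the PHY link ksettings query above; other media use the
 * SFP info command (device version >= V2) or the plain SFP speed command.
 * If the firmware reports -EOPNOTSUPP, support_sfp_query is cleared so the
 * query is not retried on every service task run.
 */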
3456 static int hclge_update_port_info(struct hclge_dev *hdev)
3457 {
3458 	struct hclge_mac *mac = &hdev->hw.mac;
3459 	int speed;
3460 	int ret;
3461 
3462 	/* get the port info from SFP cmd if not copper port */
3463 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3464 		return hclge_update_tp_port_info(hdev);
3465 
3466 	/* if the IMP does not support querying SFP/qSFP info, return directly */
3467 	if (!hdev->support_sfp_query)
3468 		return 0;
3469 
3470 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3471 		speed = mac->speed;
3472 		ret = hclge_get_sfp_info(hdev, mac);
3473 	} else {
3474 		speed = HCLGE_MAC_SPEED_UNKNOWN;
3475 		ret = hclge_get_sfp_speed(hdev, &speed);
3476 	}
3477 
3478 	if (ret == -EOPNOTSUPP) {
3479 		hdev->support_sfp_query = false;
3480 		return ret;
3481 	} else if (ret) {
3482 		return ret;
3483 	}
3484 
3485 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3486 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3487 			hclge_update_port_capability(hdev, mac);
3488 			if (mac->speed != speed)
3489 				(void)hclge_tm_port_shaper_cfg(hdev);
3490 			return 0;
3491 		}
3492 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3493 					       HCLGE_MAC_FULL, mac->lane_num);
3494 	} else {
3495 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3496 			return 0; /* do nothing if no SFP */
3497 
3498 		/* SFP ports must be configured as full duplex */
3499 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0);
3500 	}
3501 }
3502 
3503 static int hclge_get_status(struct hnae3_handle *handle)
3504 {
3505 	struct hclge_vport *vport = hclge_get_vport(handle);
3506 	struct hclge_dev *hdev = vport->back;
3507 
3508 	hclge_update_link_status(hdev);
3509 
3510 	return hdev->hw.mac.link;
3511 }
3512 
3513 struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3514 {
3515 	if (!pci_num_vf(hdev->pdev)) {
3516 		dev_err(&hdev->pdev->dev,
3517 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
3518 		return NULL;
3519 	}
3520 
3521 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3522 		dev_err(&hdev->pdev->dev,
3523 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
3524 			vf, pci_num_vf(hdev->pdev));
3525 		return NULL;
3526 	}
3527 
3528 	/* VF vport entries start from index 1; index 0 is the PF */
3529 	vf += HCLGE_VF_VPORT_START_NUM;
3530 	return &hdev->vport[vf];
3531 }
3532 
3533 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3534 			       struct ifla_vf_info *ivf)
3535 {
3536 	struct hclge_vport *vport = hclge_get_vport(handle);
3537 	struct hclge_dev *hdev = vport->back;
3538 
3539 	vport = hclge_get_vf_vport(hdev, vf);
3540 	if (!vport)
3541 		return -EINVAL;
3542 
3543 	ivf->vf = vf;
3544 	ivf->linkstate = vport->vf_info.link_state;
3545 	ivf->spoofchk = vport->vf_info.spoofchk;
3546 	ivf->trusted = vport->vf_info.trusted;
3547 	ivf->min_tx_rate = 0;
3548 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3549 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3550 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3551 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3552 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
3553 
3554 	return 0;
3555 }
3556 
3557 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3558 				   int link_state)
3559 {
3560 	struct hclge_vport *vport = hclge_get_vport(handle);
3561 	struct hclge_dev *hdev = vport->back;
3562 	int link_state_old;
3563 	int ret;
3564 
3565 	vport = hclge_get_vf_vport(hdev, vf);
3566 	if (!vport)
3567 		return -EINVAL;
3568 
3569 	link_state_old = vport->vf_info.link_state;
3570 	vport->vf_info.link_state = link_state;
3571 
3572 	/* return success directly if the VF is not alive; the VF will
3573 	 * query the link state itself when it starts working.
3574 	 */
3575 	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3576 		return 0;
3577 
3578 	ret = hclge_push_vf_link_status(vport);
3579 	if (ret) {
3580 		vport->vf_info.link_state = link_state_old;
3581 		dev_err(&hdev->pdev->dev,
3582 			"failed to push vf%d link status, ret = %d\n", vf, ret);
3583 	}
3584 
3585 	return ret;
3586 }
3587 
3588 static void hclge_set_reset_pending(struct hclge_dev *hdev,
3589 				    enum hnae3_reset_type reset_type)
3590 {
3591 	/* When an invalid reset type is requested, the get_reset_level
3592 	 * function returns HNAE3_NONE_RESET. As a result, this type does
3593 	 * not need to be marked pending.
3594 	 */
3595 	if (reset_type != HNAE3_NONE_RESET)
3596 		set_bit(reset_type, &hdev->reset_pending);
3597 }
3598 
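/* Decode the pending vector0 event sources in priority order: IMP reset,
 * then global reset, then MSI-X/RAS hardware errors, then PTP, then the
 * mailbox (CMDQ RX) event. For reset, PTP and mailbox events, *clearval
 * returns the bits the caller must clear for the reported event type.
 */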
3599 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3600 {
3601 	u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3602 
3603 	/* fetch the events from their corresponding regs */
3604 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3605 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3606 	hw_err_src_reg = hclge_read_dev(&hdev->hw,
3607 					HCLGE_RAS_PF_OTHER_INT_STS_REG);
3608 
3609 	/* Assumption: if reset and mailbox events happen to be reported
3610 	 * together, only the reset event is processed in this pass and the
3611 	 * mailbox events are deferred. Since the RX CMDQ event has not been
3612 	 * cleared this time, the hardware will raise another interrupt just
3613 	 * for the mailbox.
3614 	 *
3615 	 * check for vector0 reset event sources
3616 	 */
3617 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3618 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3619 		hclge_set_reset_pending(hdev, HNAE3_IMP_RESET);
3620 		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
3621 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3622 		hdev->rst_stats.imp_rst_cnt++;
3623 		return HCLGE_VECTOR0_EVENT_RST;
3624 	}
3625 
3626 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3627 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3628 		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
3629 		hclge_set_reset_pending(hdev, HNAE3_GLOBAL_RESET);
3630 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3631 		hdev->rst_stats.global_rst_cnt++;
3632 		return HCLGE_VECTOR0_EVENT_RST;
3633 	}
3634 
3635 	/* check for vector0 msix event and hardware error event source */
3636 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3637 	    hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3638 		return HCLGE_VECTOR0_EVENT_ERR;
3639 
3640 	/* check for vector0 ptp event source */
3641 	if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3642 		*clearval = msix_src_reg;
3643 		return HCLGE_VECTOR0_EVENT_PTP;
3644 	}
3645 
3646 	/* check for vector0 mailbox(=CMDQ RX) event source */
3647 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3648 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3649 		*clearval = cmdq_src_reg;
3650 		return HCLGE_VECTOR0_EVENT_MBX;
3651 	}
3652 
3653 	/* print other vector0 event source */
3654 	dev_info(&hdev->pdev->dev,
3655 		 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3656 		 cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3657 
3658 	return HCLGE_VECTOR0_EVENT_OTHER;
3659 }
3660 
3661 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3662 				    u32 regclr)
3663 {
3664 #define HCLGE_IMP_RESET_DELAY		5
3665 
3666 	switch (event_type) {
3667 	case HCLGE_VECTOR0_EVENT_PTP:
3668 	case HCLGE_VECTOR0_EVENT_RST:
3669 		if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B))
3670 			mdelay(HCLGE_IMP_RESET_DELAY);
3671 
3672 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3673 		break;
3674 	case HCLGE_VECTOR0_EVENT_MBX:
3675 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3676 		break;
3677 	default:
3678 		break;
3679 	}
3680 }
3681 
3682 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3683 {
3684 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3685 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3686 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3687 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3688 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3689 }
3690 
3691 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3692 {
3693 	writel(enable ? 1 : 0, vector->addr);
3694 }
3695 
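/* Misc (vector0) interrupt handler. The vector is masked on entry and is
 * re-enabled here only for PTP, mailbox and unknown events; for reset and
 * hardware error events it stays masked until the scheduled task has
 * handled the event and re-enables it.
 */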
3696 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3697 {
3698 	struct hclge_dev *hdev = data;
3699 	unsigned long flags;
3700 	u32 clearval = 0;
3701 	u32 event_cause;
3702 
3703 	hclge_enable_vector(&hdev->misc_vector, false);
3704 	event_cause = hclge_check_event_cause(hdev, &clearval);
3705 
3706 	/* vector 0 interrupt is shared with reset and mailbox source events. */
3707 	switch (event_cause) {
3708 	case HCLGE_VECTOR0_EVENT_ERR:
3709 		hclge_errhand_task_schedule(hdev);
3710 		break;
3711 	case HCLGE_VECTOR0_EVENT_RST:
3712 		hclge_reset_task_schedule(hdev);
3713 		break;
3714 	case HCLGE_VECTOR0_EVENT_PTP:
3715 		spin_lock_irqsave(&hdev->ptp->lock, flags);
3716 		hclge_ptp_clean_tx_hwts(hdev);
3717 		spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3718 		break;
3719 	case HCLGE_VECTOR0_EVENT_MBX:
3720 		/* If we are here, then either:
3721 		 * 1. we are not handling any mbx task and none is
3722 		 *    scheduled,
3723 		 *                        OR
3724 		 * 2. we are handling a mbx task but nothing more is
3725 		 *    scheduled.
3726 		 * In both cases, schedule the mbx task, since this interrupt
3727 		 * indicates that more mbx messages have been reported.
3728 		 */
3729 		hclge_mbx_task_schedule(hdev);
3730 		break;
3731 	default:
3732 		dev_warn(&hdev->pdev->dev,
3733 			 "received unknown or unhandled event of vector0\n");
3734 		break;
3735 	}
3736 
3737 	hclge_clear_event_cause(hdev, event_cause, clearval);
3738 
3739 	/* Enable interrupt if it is not caused by reset event or error event */
3740 	if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3741 	    event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3742 	    event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3743 		hclge_enable_vector(&hdev->misc_vector, true);
3744 
3745 	return IRQ_HANDLED;
3746 }
3747 
3748 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3749 {
3750 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3751 		dev_warn(&hdev->pdev->dev,
3752 			 "vector(vector_id %d) has been freed.\n", vector_id);
3753 		return;
3754 	}
3755 
3756 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3757 	hdev->num_msi_left += 1;
3758 	hdev->num_msi_used -= 1;
3759 }
3760 
3761 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3762 {
3763 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3764 
3765 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3766 
3767 	vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3768 	hdev->vector_status[0] = 0;
3769 
3770 	hdev->num_msi_left -= 1;
3771 	hdev->num_msi_used += 1;
3772 }
3773 
3774 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3775 {
3776 	int ret;
3777 
3778 	hclge_get_misc_vector(hdev);
3779 
3780 	/* this would be explicitly freed in the end */
3781 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3782 		 HCLGE_NAME, pci_name(hdev->pdev));
3783 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3784 			  IRQF_NO_AUTOEN, hdev->misc_vector.name, hdev);
3785 	if (ret) {
3786 		hclge_free_vector(hdev, 0);
3787 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3788 			hdev->misc_vector.vector_irq);
3789 	}
3790 
3791 	return ret;
3792 }
3793 
3794 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3795 {
3796 	free_irq(hdev->misc_vector.vector_irq, hdev);
3797 	hclge_free_vector(hdev, 0);
3798 }
3799 
3800 int hclge_notify_client(struct hclge_dev *hdev,
3801 			enum hnae3_reset_notify_type type)
3802 {
3803 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3804 	struct hnae3_client *client = hdev->nic_client;
3805 	int ret;
3806 
3807 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3808 		return 0;
3809 
3810 	if (!client->ops->reset_notify)
3811 		return -EOPNOTSUPP;
3812 
3813 	ret = client->ops->reset_notify(handle, type);
3814 	if (ret)
3815 		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3816 			type, ret);
3817 
3818 	return ret;
3819 }
3820 
3821 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3822 				    enum hnae3_reset_notify_type type)
3823 {
3824 	struct hnae3_handle *handle = &hdev->vport[0].roce;
3825 	struct hnae3_client *client = hdev->roce_client;
3826 	int ret;
3827 
3828 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3829 		return 0;
3830 
3831 	if (!client->ops->reset_notify)
3832 		return -EOPNOTSUPP;
3833 
3834 	ret = client->ops->reset_notify(handle, type);
3835 	if (ret)
3836 		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3837 			type, ret);
3838 
3839 	return ret;
3840 }
3841 
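/* Poll the reset status register for the current reset type every
 * HCLGE_RESET_WATI_MS (100 ms), up to HCLGE_RESET_WAIT_CNT (350) times,
 * i.e. roughly 35 seconds, before giving up with -EBUSY.
 */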
3842 static int hclge_reset_wait(struct hclge_dev *hdev)
3843 {
3844 #define HCLGE_RESET_WATI_MS	100
3845 #define HCLGE_RESET_WAIT_CNT	350
3846 
3847 	u32 val, reg, reg_bit;
3848 	u32 cnt = 0;
3849 
3850 	switch (hdev->reset_type) {
3851 	case HNAE3_IMP_RESET:
3852 		reg = HCLGE_GLOBAL_RESET_REG;
3853 		reg_bit = HCLGE_IMP_RESET_BIT;
3854 		break;
3855 	case HNAE3_GLOBAL_RESET:
3856 		reg = HCLGE_GLOBAL_RESET_REG;
3857 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3858 		break;
3859 	case HNAE3_FUNC_RESET:
3860 		reg = HCLGE_FUN_RST_ING;
3861 		reg_bit = HCLGE_FUN_RST_ING_B;
3862 		break;
3863 	default:
3864 		dev_err(&hdev->pdev->dev,
3865 			"Wait for unsupported reset type: %d\n",
3866 			hdev->reset_type);
3867 		return -EINVAL;
3868 	}
3869 
3870 	val = hclge_read_dev(&hdev->hw, reg);
3871 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3872 		msleep(HCLGE_RESET_WATI_MS);
3873 		val = hclge_read_dev(&hdev->hw, reg);
3874 		cnt++;
3875 	}
3876 
3877 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3878 		dev_warn(&hdev->pdev->dev,
3879 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3880 		return -EBUSY;
3881 	}
3882 
3883 	return 0;
3884 }
3885 
3886 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3887 {
3888 	struct hclge_vf_rst_cmd *req;
3889 	struct hclge_desc desc;
3890 
3891 	req = (struct hclge_vf_rst_cmd *)desc.data;
3892 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3893 	req->dest_vfid = func_id;
3894 
3895 	if (reset)
3896 		req->vf_rst = 0x1;
3897 
3898 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3899 }
3900 
3901 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3902 {
3903 	int i;
3904 
3905 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3906 		struct hclge_vport *vport = &hdev->vport[i];
3907 		int ret;
3908 
3909 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3910 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3911 		if (ret) {
3912 			dev_err(&hdev->pdev->dev,
3913 				"set vf(%u) rst failed %d!\n",
3914 				vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3915 				ret);
3916 			return ret;
3917 		}
3918 
3919 		if (!reset ||
3920 		    !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state))
3921 			continue;
3922 
3923 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) &&
3924 		    hdev->reset_type == HNAE3_FUNC_RESET) {
3925 			set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET,
3926 				&vport->need_notify);
3927 			continue;
3928 		}
3929 
3930 		/* Inform VF to process the reset.
3931 		 * hclge_inform_reset_assert_to_vf may fail if VF
3932 		 * driver is not loaded.
3933 		 */
3934 		ret = hclge_inform_reset_assert_to_vf(vport);
3935 		if (ret)
3936 			dev_warn(&hdev->pdev->dev,
3937 				 "inform reset to vf(%u) failed %d!\n",
3938 				 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3939 				 ret);
3940 	}
3941 
3942 	return 0;
3943 }
3944 
3945 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3946 {
3947 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3948 	    test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) ||
3949 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3950 		return;
3951 
3952 	if (time_is_before_jiffies(hdev->last_mbx_scheduled +
3953 				   HCLGE_MBX_SCHED_TIMEOUT))
3954 		dev_warn(&hdev->pdev->dev,
3955 			 "mbx service task is scheduled after %ums on cpu%u!\n",
3956 			 jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled),
3957 			 smp_processor_id());
3958 
3959 	hclge_mbx_handler(hdev);
3960 
3961 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3962 }
3963 
3964 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3965 {
3966 	struct hclge_pf_rst_sync_cmd *req;
3967 	struct hclge_desc desc;
3968 	int cnt = 0;
3969 	int ret;
3970 
3971 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3972 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3973 
3974 	do {
3975 		/* the VF needs to bring its netdev down via mbx during PF or FLR reset */
3976 		hclge_mailbox_service_task(hdev);
3977 
3978 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3979 		/* for compatibility with old firmware, wait
3980 		 * 100 ms for the VF to stop IO
3981 		 */
3982 		if (ret == -EOPNOTSUPP) {
3983 			msleep(HCLGE_RESET_SYNC_TIME);
3984 			return;
3985 		} else if (ret) {
3986 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3987 				 ret);
3988 			return;
3989 		} else if (req->all_vf_ready) {
3990 			return;
3991 		}
3992 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3993 		hclge_comm_cmd_reuse_desc(&desc, true);
3994 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3995 
3996 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3997 }
3998 
3999 void hclge_report_hw_error(struct hclge_dev *hdev,
4000 			   enum hnae3_hw_error_type type)
4001 {
4002 	struct hnae3_client *client = hdev->nic_client;
4003 
4004 	if (!client || !client->ops->process_hw_error ||
4005 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
4006 		return;
4007 
4008 	client->ops->process_hw_error(&hdev->vport[0].nic, type);
4009 }
4010 
4011 static void hclge_handle_imp_error(struct hclge_dev *hdev)
4012 {
4013 	u32 reg_val;
4014 
4015 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
4016 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
4017 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
4018 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
4019 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
4020 	}
4021 
4022 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
4023 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
4024 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
4025 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
4026 	}
4027 }
4028 
4029 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
4030 {
4031 	struct hclge_desc desc;
4032 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
4033 	int ret;
4034 
4035 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
4036 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
4037 	req->fun_reset_vfid = func_id;
4038 
4039 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4040 	if (ret)
4041 		dev_err(&hdev->pdev->dev,
4042 			"send function reset cmd fail, status =%d\n", ret);
4043 
4044 	return ret;
4045 }
4046 
4047 static void hclge_do_reset(struct hclge_dev *hdev)
4048 {
4049 	struct hnae3_handle *handle = &hdev->vport[0].nic;
4050 	struct pci_dev *pdev = hdev->pdev;
4051 	u32 val;
4052 
4053 	if (hclge_get_hw_reset_stat(handle)) {
4054 		dev_info(&pdev->dev, "hardware reset not finished\n");
4055 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
4056 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
4057 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
4058 		return;
4059 	}
4060 
4061 	switch (hdev->reset_type) {
4062 	case HNAE3_IMP_RESET:
4063 		dev_info(&pdev->dev, "IMP reset requested\n");
4064 		val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
4065 		hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
4066 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
4067 		break;
4068 	case HNAE3_GLOBAL_RESET:
4069 		dev_info(&pdev->dev, "global reset requested\n");
4070 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
4071 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
4072 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
4073 		break;
4074 	case HNAE3_FUNC_RESET:
4075 		dev_info(&pdev->dev, "PF reset requested\n");
4076 		/* schedule again to check later */
4077 		hclge_set_reset_pending(hdev, HNAE3_FUNC_RESET);
4078 		hclge_reset_task_schedule(hdev);
4079 		break;
4080 	default:
4081 		dev_warn(&pdev->dev,
4082 			 "unsupported reset type: %d\n", hdev->reset_type);
4083 		break;
4084 	}
4085 }
4086 
4087 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
4088 						   unsigned long *addr)
4089 {
4090 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
4091 	struct hclge_dev *hdev = ae_dev->priv;
4092 
4093 	/* return the highest priority reset level amongst all */
4094 	if (test_bit(HNAE3_IMP_RESET, addr)) {
4095 		rst_level = HNAE3_IMP_RESET;
4096 		clear_bit(HNAE3_IMP_RESET, addr);
4097 		clear_bit(HNAE3_GLOBAL_RESET, addr);
4098 		clear_bit(HNAE3_FUNC_RESET, addr);
4099 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
4100 		rst_level = HNAE3_GLOBAL_RESET;
4101 		clear_bit(HNAE3_GLOBAL_RESET, addr);
4102 		clear_bit(HNAE3_FUNC_RESET, addr);
4103 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
4104 		rst_level = HNAE3_FUNC_RESET;
4105 		clear_bit(HNAE3_FUNC_RESET, addr);
4106 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
4107 		rst_level = HNAE3_FLR_RESET;
4108 		clear_bit(HNAE3_FLR_RESET, addr);
4109 	}
4110 
4111 	clear_bit(HNAE3_NONE_RESET, addr);
4112 
4113 	if (hdev->reset_type != HNAE3_NONE_RESET &&
4114 	    rst_level < hdev->reset_type)
4115 		return HNAE3_NONE_RESET;
4116 
4117 	return rst_level;
4118 }
4119 
4120 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
4121 {
4122 	u32 clearval = 0;
4123 
4124 	switch (hdev->reset_type) {
4125 	case HNAE3_IMP_RESET:
4126 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
4127 		break;
4128 	case HNAE3_GLOBAL_RESET:
4129 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
4130 		break;
4131 	default:
4132 		break;
4133 	}
4134 
4135 	if (!clearval)
4136 		return;
4137 
4138 	/* For revision 0x20, the reset interrupt source
4139 	 * can only be cleared after the hardware reset is done
4140 	 */
4141 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4142 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
4143 				clearval);
4144 
4145 	hclge_enable_vector(&hdev->misc_vector, true);
4146 }
4147 
4148 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
4149 {
4150 	u32 reg_val;
4151 
4152 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
4153 	if (enable)
4154 		reg_val |= HCLGE_COMM_NIC_SW_RST_RDY;
4155 	else
4156 		reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY;
4157 
4158 	hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
4159 }
4160 
4161 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
4162 {
4163 	int ret;
4164 
4165 	ret = hclge_set_all_vf_rst(hdev, true);
4166 	if (ret)
4167 		return ret;
4168 
4169 	hclge_func_reset_sync_vf(hdev);
4170 
4171 	return 0;
4172 }
4173 
4174 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
4175 {
4176 	u32 reg_val;
4177 	int ret = 0;
4178 
4179 	switch (hdev->reset_type) {
4180 	case HNAE3_FUNC_RESET:
4181 		ret = hclge_func_reset_notify_vf(hdev);
4182 		if (ret)
4183 			return ret;
4184 
4185 		ret = hclge_func_reset_cmd(hdev, 0);
4186 		if (ret) {
4187 			dev_err(&hdev->pdev->dev,
4188 				"asserting function reset fail %d!\n", ret);
4189 			return ret;
4190 		}
4191 
4192 		/* After performing the PF reset, it is not necessary to do any
4193 		 * mailbox handling or send any command to the firmware, because
4194 		 * mailbox handling and firmware commands are only valid
4195 		 * after hclge_comm_cmd_init is called.
4196 		 */
4197 		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
4198 		hdev->rst_stats.pf_rst_cnt++;
4199 		break;
4200 	case HNAE3_FLR_RESET:
4201 		ret = hclge_func_reset_notify_vf(hdev);
4202 		if (ret)
4203 			return ret;
4204 		break;
4205 	case HNAE3_IMP_RESET:
4206 		hclge_handle_imp_error(hdev);
4207 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
4208 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
4209 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
4210 		break;
4211 	default:
4212 		break;
4213 	}
4214 
4215 	/* inform hardware that preparatory work is done */
4216 	msleep(HCLGE_RESET_SYNC_TIME);
4217 	hclge_reset_handshake(hdev, true);
4218 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
4219 
4220 	return ret;
4221 }
4222 
4223 static void hclge_show_rst_info(struct hclge_dev *hdev)
4224 {
4225 	char *buf;
4226 
4227 	buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
4228 	if (!buf)
4229 		return;
4230 
4231 	hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
4232 
4233 	dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
4234 
4235 	kfree(buf);
4236 }
4237 
4238 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
4239 {
4240 #define MAX_RESET_FAIL_CNT 5
4241 
4242 	if (hdev->reset_pending) {
4243 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
4244 			 hdev->reset_pending);
4245 		return true;
4246 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
4247 		   HCLGE_RESET_INT_M) {
4248 		dev_info(&hdev->pdev->dev,
4249 			 "reset failed because new reset interrupt\n");
4250 		hclge_clear_reset_cause(hdev);
4251 		return false;
4252 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
4253 		hdev->rst_stats.reset_fail_cnt++;
4254 		hclge_set_reset_pending(hdev, hdev->reset_type);
4255 		dev_info(&hdev->pdev->dev,
4256 			 "re-schedule reset task(%u)\n",
4257 			 hdev->rst_stats.reset_fail_cnt);
4258 		return true;
4259 	}
4260 
4261 	hclge_clear_reset_cause(hdev);
4262 
4263 	/* recover the handshake status when the reset fails */
4264 	hclge_reset_handshake(hdev, true);
4265 
4266 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
4267 
4268 	hclge_show_rst_info(hdev);
4269 
4270 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4271 
4272 	return false;
4273 }
4274 
4275 static void hclge_update_reset_level(struct hclge_dev *hdev)
4276 {
4277 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4278 	enum hnae3_reset_type reset_level;
4279 
4280 	/* reset request will not be set during reset, so clear
4281 	 * pending reset request to avoid unnecessary reset
4282 	 * caused by the same reason.
4283 	 */
4284 	hclge_get_reset_level(ae_dev, &hdev->reset_request);
4285 
4286 	/* if default_reset_request holds a higher level reset request,
4287 	 * it should be handled as soon as possible, since some errors
4288 	 * need this kind of reset to be fixed.
4289 	 */
4290 	reset_level = hclge_get_reset_level(ae_dev,
4291 					    &hdev->default_reset_request);
4292 	if (reset_level != HNAE3_NONE_RESET)
4293 		set_bit(reset_level, &hdev->reset_request);
4294 }
4295 
4296 static int hclge_set_rst_done(struct hclge_dev *hdev)
4297 {
4298 	struct hclge_pf_rst_done_cmd *req;
4299 	struct hclge_desc desc;
4300 	int ret;
4301 
4302 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
4303 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4304 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4305 
4306 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4307 	/* To be compatible with the old firmware, which does not support
4308 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4309 	 * return success
4310 	 */
4311 	if (ret == -EOPNOTSUPP) {
4312 		dev_warn(&hdev->pdev->dev,
4313 			 "current firmware does not support command(0x%x)!\n",
4314 			 HCLGE_OPC_PF_RST_DONE);
4315 		return 0;
4316 	} else if (ret) {
4317 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4318 			ret);
4319 	}
4320 
4321 	return ret;
4322 }
4323 
4324 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4325 {
4326 	int ret = 0;
4327 
4328 	switch (hdev->reset_type) {
4329 	case HNAE3_FUNC_RESET:
4330 	case HNAE3_FLR_RESET:
4331 		ret = hclge_set_all_vf_rst(hdev, false);
4332 		break;
4333 	case HNAE3_GLOBAL_RESET:
4334 	case HNAE3_IMP_RESET:
4335 		ret = hclge_set_rst_done(hdev);
4336 		break;
4337 	default:
4338 		break;
4339 	}
4340 
4341 	/* clear the handshake status after re-initialization is done */
4342 	hclge_reset_handshake(hdev, false);
4343 
4344 	return ret;
4345 }
4346 
4347 static int hclge_reset_stack(struct hclge_dev *hdev)
4348 {
4349 	int ret;
4350 
4351 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4352 	if (ret)
4353 		return ret;
4354 
4355 	ret = hclge_reset_ae_dev(hdev->ae_dev);
4356 	if (ret)
4357 		return ret;
4358 
4359 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4360 }
4361 
4362 static int hclge_reset_prepare(struct hclge_dev *hdev)
4363 {
4364 	int ret;
4365 
4366 	hdev->rst_stats.reset_cnt++;
4367 	/* perform reset of the stack & ae device for a client */
4368 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4369 	if (ret)
4370 		return ret;
4371 
4372 	rtnl_lock();
4373 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4374 	rtnl_unlock();
4375 	if (ret)
4376 		return ret;
4377 
4378 	return hclge_reset_prepare_wait(hdev);
4379 }
4380 
4381 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4382 {
4383 	int ret;
4384 
4385 	hdev->rst_stats.hw_reset_done_cnt++;
4386 
4387 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4388 	if (ret)
4389 		return ret;
4390 
4391 	rtnl_lock();
4392 	ret = hclge_reset_stack(hdev);
4393 	rtnl_unlock();
4394 	if (ret)
4395 		return ret;
4396 
4397 	hclge_clear_reset_cause(hdev);
4398 
4399 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4400 	/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
4401 	 * times
4402 	 */
4403 	if (ret &&
4404 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4405 		return ret;
4406 
4407 	ret = hclge_reset_prepare_up(hdev);
4408 	if (ret)
4409 		return ret;
4410 
4411 	rtnl_lock();
4412 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4413 	rtnl_unlock();
4414 	if (ret)
4415 		return ret;
4416 
4417 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4418 	if (ret)
4419 		return ret;
4420 
4421 	hdev->last_reset_time = jiffies;
4422 	hdev->rst_stats.reset_fail_cnt = 0;
4423 	hdev->rst_stats.reset_done_cnt++;
4424 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4425 
4426 	hclge_update_reset_level(hdev);
4427 
4428 	return 0;
4429 }
4430 
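/* Top-level reset flow: prepare (notify clients and assert the reset),
 * wait for the hardware to finish, then rebuild the stack. Any failure is
 * handed to hclge_reset_err_handle(), which may reschedule the reset task
 * for another attempt.
 */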
4431 static void hclge_reset(struct hclge_dev *hdev)
4432 {
4433 	if (hclge_reset_prepare(hdev))
4434 		goto err_reset;
4435 
4436 	if (hclge_reset_wait(hdev))
4437 		goto err_reset;
4438 
4439 	if (hclge_reset_rebuild(hdev))
4440 		goto err_reset;
4441 
4442 	return;
4443 
4444 err_reset:
4445 	if (hclge_reset_err_handle(hdev))
4446 		hclge_reset_task_schedule(hdev);
4447 }
4448 
4449 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4450 {
4451 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4452 	struct hclge_dev *hdev = ae_dev->priv;
4453 
4454 	/* We may get called here for two reasons:
4455 	 * 1. A recoverable error was reported through APEI and a reset is the
4456 	 *    only way to return to normal operation.
4457 	 * 2. A new reset request came from the stack due to a timeout.
4458 	 *
4459 	 * Check whether this is a new reset request, i.e. we are not here just
4460 	 * because the last reset attempt failed and the watchdog hit us again.
4461 	 * We know it is new if the last reset request did not occur very
4462 	 * recently (watchdog timer = 5*HZ; check after a sufficiently large
4463 	 * time, say 4*5*HZ). For a new request, the "reset level" is reset to
4464 	 * PF reset. If it is a repeat of the most recent request, the reset
4465 	 * request is throttled and will not be allowed again before 3*HZ has
4466 	 * passed.
4467 	 */
4468 
4469 	if (time_before(jiffies, (hdev->last_reset_time +
4470 				  HCLGE_RESET_INTERVAL))) {
4471 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4472 		return;
4473 	}
4474 
4475 	if (hdev->default_reset_request) {
4476 		hdev->reset_level =
4477 			hclge_get_reset_level(ae_dev,
4478 					      &hdev->default_reset_request);
4479 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4480 		hdev->reset_level = HNAE3_FUNC_RESET;
4481 	}
4482 
4483 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4484 		 hdev->reset_level);
4485 
4486 	/* request reset & schedule reset task */
4487 	set_bit(hdev->reset_level, &hdev->reset_request);
4488 	hclge_reset_task_schedule(hdev);
4489 
4490 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4491 		hdev->reset_level++;
4492 }
4493 
4494 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4495 					enum hnae3_reset_type rst_type)
4496 {
4497 #define HCLGE_SUPPORT_RESET_TYPE \
4498 	(BIT(HNAE3_FLR_RESET) | BIT(HNAE3_FUNC_RESET) | \
4499 	BIT(HNAE3_GLOBAL_RESET) | BIT(HNAE3_IMP_RESET))
4500 
4501 	struct hclge_dev *hdev = ae_dev->priv;
4502 
4503 	if (!(BIT(rst_type) & HCLGE_SUPPORT_RESET_TYPE)) {
4504 		/* To prevent reset triggered by hclge_reset_event */
4505 		set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
4506 		dev_warn(&hdev->pdev->dev, "unsupported reset type %d\n",
4507 			 rst_type);
4508 		return;
4509 	}
4510 
4511 	set_bit(rst_type, &hdev->default_reset_request);
4512 }
4513 
4514 static void hclge_reset_timer(struct timer_list *t)
4515 {
4516 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4517 
4518 	/* if default_reset_request has no value, it means that this reset
4519 	 * request has already been handled, so just return here
4520 	 */
4521 	if (!hdev->default_reset_request)
4522 		return;
4523 
4524 	dev_info(&hdev->pdev->dev,
4525 		 "triggering reset in reset timer\n");
4526 	hclge_reset_event(hdev->pdev, NULL);
4527 }
4528 
4529 static void hclge_reset_subtask(struct hclge_dev *hdev)
4530 {
4531 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4532 
4533 	/* check if there is any ongoing reset in the hardware. This status can
4534 	 * be checked from reset_pending. If there is one, we need to wait for
4535 	 * the hardware to complete the reset.
4536 	 *    a. If we can determine within a reasonable time that the hardware
4537 	 *       has fully reset, we can proceed with the driver and client
4538 	 *       reset.
4539 	 *    b. Otherwise, come back later to check this status, so reschedule
4540 	 *       now.
4541 	 */
4542 	hdev->last_reset_time = jiffies;
4543 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4544 	if (hdev->reset_type != HNAE3_NONE_RESET)
4545 		hclge_reset(hdev);
4546 
4547 	/* check if we got any *new* reset requests to be honored */
4548 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4549 	if (hdev->reset_type != HNAE3_NONE_RESET)
4550 		hclge_do_reset(hdev);
4551 
4552 	hdev->reset_type = HNAE3_NONE_RESET;
4553 }
4554 
4555 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4556 {
4557 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4558 	enum hnae3_reset_type reset_type;
4559 
4560 	if (ae_dev->hw_err_reset_req) {
4561 		reset_type = hclge_get_reset_level(ae_dev,
4562 						   &ae_dev->hw_err_reset_req);
4563 		hclge_set_def_reset_request(ae_dev, reset_type);
4564 	}
4565 
4566 	if (hdev->default_reset_request && ae_dev->ops->reset_event)
4567 		ae_dev->ops->reset_event(hdev->pdev, NULL);
4568 
4569 	/* enable the interrupt after error handling is complete */
4570 	hclge_enable_vector(&hdev->misc_vector, true);
4571 }
4572 
4573 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4574 {
4575 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4576 
4577 	ae_dev->hw_err_reset_req = 0;
4578 
4579 	if (hclge_find_error_source(hdev)) {
4580 		hclge_handle_error_info_log(ae_dev);
4581 		hclge_handle_mac_tnl(hdev);
4582 		hclge_handle_vf_queue_err_ras(hdev);
4583 	}
4584 
4585 	hclge_handle_err_reset_request(hdev);
4586 }
4587 
4588 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4589 {
4590 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4591 	struct device *dev = &hdev->pdev->dev;
4592 	u32 msix_sts_reg;
4593 
4594 	msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4595 	if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4596 		if (hclge_handle_hw_msix_error
4597 				(hdev, &hdev->default_reset_request))
4598 			dev_info(dev, "received msix interrupt 0x%x\n",
4599 				 msix_sts_reg);
4600 	}
4601 
4602 	hclge_handle_hw_ras_error(ae_dev);
4603 
4604 	hclge_handle_err_reset_request(hdev);
4605 }
4606 
4607 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4608 {
4609 	if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4610 		return;
4611 
4612 	if (hnae3_dev_ras_imp_supported(hdev))
4613 		hclge_handle_err_recovery(hdev);
4614 	else
4615 		hclge_misc_err_recovery(hdev);
4616 }
4617 
4618 static void hclge_reset_service_task(struct hclge_dev *hdev)
4619 {
4620 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4621 		return;
4622 
4623 	if (time_is_before_jiffies(hdev->last_rst_scheduled +
4624 				   HCLGE_RESET_SCHED_TIMEOUT))
4625 		dev_warn(&hdev->pdev->dev,
4626 			 "reset service task is scheduled after %ums on cpu%u!\n",
4627 			 jiffies_to_msecs(jiffies - hdev->last_rst_scheduled),
4628 			 smp_processor_id());
4629 
4630 	down(&hdev->reset_sem);
4631 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4632 
4633 	hclge_reset_subtask(hdev);
4634 
4635 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4636 	up(&hdev->reset_sem);
4637 }
4638 
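/* Clear the ALIVE bit of any VF vport whose last_active_jiffies timestamp
 * is more than HCLGE_ALIVE_SECONDS_NORMAL (8) seconds old, and log a
 * heartbeat timeout warning for it.
 */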
4639 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4640 {
4641 #define HCLGE_ALIVE_SECONDS_NORMAL		8
4642 
4643 	unsigned long alive_time = HCLGE_ALIVE_SECONDS_NORMAL * HZ;
4644 	int i;
4645 
4646 	/* start from vport 1, since the PF (vport 0) is always alive */
4647 	for (i = 1; i < hdev->num_alloc_vport; i++) {
4648 		struct hclge_vport *vport = &hdev->vport[i];
4649 
4650 		if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) ||
4651 		    !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4652 			continue;
4653 		if (time_after(jiffies, vport->last_active_jiffies +
4654 			       alive_time)) {
4655 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4656 			dev_warn(&hdev->pdev->dev,
4657 				 "VF %u heartbeat timeout\n",
4658 				 i - HCLGE_VF_VPORT_START_NUM);
4659 		}
4660 	}
4661 }
4662 
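/* Periodic service work. Link, MAC table, promisc mode and flow director
 * syncing run on every invocation; the remaining work (vport alive check,
 * statistics, port info, VLAN and ARFS housekeeping) is throttled to about
 * once per second via last_serv_processed, and the task reschedules itself
 * with the remaining delta.
 */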
4663 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4664 {
4665 	unsigned long delta = round_jiffies_relative(HZ);
4666 
4667 	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4668 		return;
4669 
4670 	/* Always handle link status updating to make sure the link state is
4671 	 * updated when it is triggered by mbx.
4672 	 */
4673 	hclge_update_link_status(hdev);
4674 	hclge_sync_mac_table(hdev);
4675 	hclge_sync_promisc_mode(hdev);
4676 	hclge_sync_fd_table(hdev);
4677 
4678 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4679 		delta = jiffies - hdev->last_serv_processed;
4680 
4681 		if (delta < round_jiffies_relative(HZ)) {
4682 			delta = round_jiffies_relative(HZ) - delta;
4683 			goto out;
4684 		}
4685 	}
4686 
4687 	hdev->serv_processed_cnt++;
4688 	hclge_update_vport_alive(hdev);
4689 
4690 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4691 		hdev->last_serv_processed = jiffies;
4692 		goto out;
4693 	}
4694 
4695 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4696 		hclge_update_stats_for_all(hdev);
4697 
4698 	hclge_update_port_info(hdev);
4699 	hclge_sync_vlan_filter(hdev);
4700 
4701 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4702 		hclge_rfs_filter_expire(hdev);
4703 
4704 	hdev->last_serv_processed = jiffies;
4705 
4706 out:
4707 	hclge_task_schedule(hdev, delta);
4708 }
4709 
4710 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4711 {
4712 	unsigned long flags;
4713 
4714 	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4715 	    !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4716 	    !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4717 		return;
4718 
4719 	/* to avoid racing with the irq handler */
4720 	spin_lock_irqsave(&hdev->ptp->lock, flags);
4721 
4722 	/* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4723 	 * handler may handle it just before spin_lock_irqsave().
4724 	 */
4725 	if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4726 		hclge_ptp_clean_tx_hwts(hdev);
4727 
4728 	spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4729 }
4730 
4731 static void hclge_service_task(struct work_struct *work)
4732 {
4733 	struct hclge_dev *hdev =
4734 		container_of(work, struct hclge_dev, service_task.work);
4735 
4736 	hclge_errhand_service_task(hdev);
4737 	hclge_reset_service_task(hdev);
4738 	hclge_ptp_service_task(hdev);
4739 	hclge_mailbox_service_task(hdev);
4740 	hclge_periodic_service_task(hdev);
4741 
4742 	/* Handle error recovery, reset and mbx again in case the periodic task
4743 	 * delays their handling by calling hclge_task_schedule() in
4744 	 * hclge_periodic_service_task().
4745 	 */
4746 	hclge_errhand_service_task(hdev);
4747 	hclge_reset_service_task(hdev);
4748 	hclge_mailbox_service_task(hdev);
4749 }
4750 
4751 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4752 {
4753 	/* VF handle has no client */
4754 	if (!handle->client)
4755 		return container_of(handle, struct hclge_vport, nic);
4756 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4757 		return container_of(handle, struct hclge_vport, roce);
4758 	else
4759 		return container_of(handle, struct hclge_vport, nic);
4760 }
4761 
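/* Fill in the irq number and I/O address for vector @idx. Vectors with
 * (idx - 1) < 64 use the base register region at a fixed stride; higher
 * vectors use the extended region, where the offset is split into a high
 * part ((idx - 1) / 64) and a low part ((idx - 1) % 64). For example,
 * idx 65 maps to one HCLGE_VECTOR_REG_OFFSET_H step plus zero low-offset
 * steps from HCLGE_VECTOR_EXT_REG_BASE.
 */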
4762 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4763 				  struct hnae3_vector_info *vector_info)
4764 {
4765 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64
4766 
4767 	vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4768 
4769 	/* an extended offset is needed to configure vectors >= 64 */
4770 	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4771 		vector_info->io_addr = hdev->hw.hw.io_base +
4772 				HCLGE_VECTOR_REG_BASE +
4773 				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4774 	else
4775 		vector_info->io_addr = hdev->hw.hw.io_base +
4776 				HCLGE_VECTOR_EXT_REG_BASE +
4777 				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4778 				HCLGE_VECTOR_REG_OFFSET_H +
4779 				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4780 				HCLGE_VECTOR_REG_OFFSET;
4781 
4782 	hdev->vector_status[idx] = hdev->vport[0].vport_id;
4783 	hdev->vector_irq[idx] = vector_info->vector;
4784 }
4785 
4786 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4787 			    struct hnae3_vector_info *vector_info)
4788 {
4789 	struct hclge_vport *vport = hclge_get_vport(handle);
4790 	struct hnae3_vector_info *vector = vector_info;
4791 	struct hclge_dev *hdev = vport->back;
4792 	int alloc = 0;
4793 	u16 i = 0;
4794 	u16 j;
4795 
4796 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4797 	vector_num = min(hdev->num_msi_left, vector_num);
4798 
4799 	for (j = 0; j < vector_num; j++) {
4800 		while (++i < hdev->num_nic_msi) {
4801 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4802 				hclge_get_vector_info(hdev, i, vector);
4803 				vector++;
4804 				alloc++;
4805 
4806 				break;
4807 			}
4808 		}
4809 	}
4810 	hdev->num_msi_left -= alloc;
4811 	hdev->num_msi_used += alloc;
4812 
4813 	return alloc;
4814 }
4815 
4816 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4817 {
4818 	int i;
4819 
4820 	for (i = 0; i < hdev->num_msi; i++)
4821 		if (vector == hdev->vector_irq[i])
4822 			return i;
4823 
4824 	return -EINVAL;
4825 }
4826 
4827 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4828 {
4829 	struct hclge_vport *vport = hclge_get_vport(handle);
4830 	struct hclge_dev *hdev = vport->back;
4831 	int vector_id;
4832 
4833 	vector_id = hclge_get_vector_index(hdev, vector);
4834 	if (vector_id < 0) {
4835 		dev_err(&hdev->pdev->dev,
4836 			"Get vector index fail. vector = %d\n", vector);
4837 		return vector_id;
4838 	}
4839 
4840 	hclge_free_vector(hdev, vector_id);
4841 
4842 	return 0;
4843 }
4844 
4845 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4846 			 u8 *key, u8 *hfunc)
4847 {
4848 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4849 	struct hclge_vport *vport = hclge_get_vport(handle);
4850 	struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg;
4851 
4852 	hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
4853 
4854 	hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
4855 				     ae_dev->dev_specs.rss_ind_tbl_size);
4856 
4857 	return 0;
4858 }
4859 
4860 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4861 			 const  u8 *key, const  u8 hfunc)
4862 {
4863 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4864 	struct hclge_vport *vport = hclge_get_vport(handle);
4865 	struct hclge_dev *hdev = vport->back;
4866 	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
4867 	int ret, i;
4868 
4869 	ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc);
4870 	if (ret) {
4871 		dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4872 		return ret;
4873 	}
4874 
4875 	/* Update the shadow RSS table with user specified qids */
4876 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4877 		rss_cfg->rss_indirection_tbl[i] = indir[i];
4878 
4879 	/* Update the hardware */
4880 	return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw,
4881 					      rss_cfg->rss_indirection_tbl);
4882 }
4883 
4884 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4885 			       struct ethtool_rxnfc *nfc)
4886 {
4887 	struct hclge_vport *vport = hclge_get_vport(handle);
4888 	struct hclge_dev *hdev = vport->back;
4889 	int ret;
4890 
4891 	ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
4892 				       &hdev->rss_cfg, nfc);
4893 	if (ret) {
4894 		dev_err(&hdev->pdev->dev,
4895 			"failed to set rss tuple, ret = %d.\n", ret);
4896 		return ret;
4897 	}
4898 
4899 	return 0;
4900 }
4901 
4902 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4903 			       struct ethtool_rxnfc *nfc)
4904 {
4905 	struct hclge_vport *vport = hclge_get_vport(handle);
4906 	u8 tuple_sets;
4907 	int ret;
4908 
4909 	nfc->data = 0;
4910 
4911 	ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type,
4912 				       &tuple_sets);
4913 	if (ret || !tuple_sets)
4914 		return ret;
4915 
4916 	nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);
4917 
4918 	return 0;
4919 }
4920 
4921 static int hclge_get_tc_size(struct hnae3_handle *handle)
4922 {
4923 	struct hclge_vport *vport = hclge_get_vport(handle);
4924 	struct hclge_dev *hdev = vport->back;
4925 
4926 	return hdev->pf_rss_size_max;
4927 }
4928 
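/* Program the per-TC RSS offset and size. The size handed to hardware is
 * ilog2(roundup_pow_of_two(rss_size)); for example, an rss_size of 12 is
 * rounded up to 16 and encoded as tc_size 4. An rss_size of 0 or one
 * larger than the indirection table size is rejected.
 */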
4929 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4930 {
4931 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4932 	struct hclge_vport *vport = hdev->vport;
4933 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4934 	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4935 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4936 	struct hnae3_tc_info *tc_info;
4937 	u16 roundup_size;
4938 	u16 rss_size;
4939 	int i;
4940 
4941 	tc_info = &vport->nic.kinfo.tc_info;
4942 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4943 		rss_size = tc_info->tqp_count[i];
4944 		tc_valid[i] = 0;
4945 
4946 		if (!(hdev->hw_tc_map & BIT(i)))
4947 			continue;
4948 
4949 		/* the tc_size set to hardware is the log2 of rss_size rounded
4950 		 * up to a power of two; the actual queue size is limited by the
4951 		 * indirection table.
4952 		 */
4953 		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4954 		    rss_size == 0) {
4955 			dev_err(&hdev->pdev->dev,
4956 				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
4957 				rss_size);
4958 			return -EINVAL;
4959 		}
4960 
4961 		roundup_size = roundup_pow_of_two(rss_size);
4962 		roundup_size = ilog2(roundup_size);
4963 
4964 		tc_valid[i] = 1;
4965 		tc_size[i] = roundup_size;
4966 		tc_offset[i] = tc_info->tqp_offset[i];
4967 	}
4968 
4969 	return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
4970 					  tc_size);
4971 }
4972 
4973 int hclge_rss_init_hw(struct hclge_dev *hdev)
4974 {
4975 	u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl;
4976 	u8 *key = hdev->rss_cfg.rss_hash_key;
4977 	u8 hfunc = hdev->rss_cfg.rss_algo;
4978 	int ret;
4979 
4980 	ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
4981 					     rss_indir);
4982 	if (ret)
4983 		return ret;
4984 
4985 	ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key);
4986 	if (ret)
4987 		return ret;
4988 
4989 	ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, &hdev->rss_cfg);
4990 	if (ret)
4991 		return ret;
4992 
4993 	return hclge_init_rss_tc_mode(hdev);
4994 }
4995 
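/* Map (or unmap, when @en is false) the rings in @ring_chain to vector
 * @vector_id. The chain is written into command descriptors in batches of
 * HCLGE_VECTOR_ELEMENTS_PER_CMD entries; a command is sent whenever a
 * descriptor fills up, and once more for any remaining entries.
 */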
4996 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4997 				int vector_id, bool en,
4998 				struct hnae3_ring_chain_node *ring_chain)
4999 {
5000 	struct hclge_dev *hdev = vport->back;
5001 	struct hnae3_ring_chain_node *node;
5002 	struct hclge_desc desc;
5003 	struct hclge_ctrl_vector_chain_cmd *req =
5004 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
5005 	enum hclge_comm_cmd_status status;
5006 	enum hclge_opcode_type op;
5007 	u16 tqp_type_and_id;
5008 	int i;
5009 
5010 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5011 	hclge_cmd_setup_basic_desc(&desc, op, false);
5012 	req->int_vector_id_l = hnae3_get_field(vector_id,
5013 					       HCLGE_VECTOR_ID_L_M,
5014 					       HCLGE_VECTOR_ID_L_S);
5015 	req->int_vector_id_h = hnae3_get_field(vector_id,
5016 					       HCLGE_VECTOR_ID_H_M,
5017 					       HCLGE_VECTOR_ID_H_S);
5018 
5019 	i = 0;
5020 	for (node = ring_chain; node; node = node->next) {
5021 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5022 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
5023 				HCLGE_INT_TYPE_S,
5024 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5025 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5026 				HCLGE_TQP_ID_S, node->tqp_index);
5027 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5028 				HCLGE_INT_GL_IDX_S,
5029 				hnae3_get_field(node->int_gl_idx,
5030 						HNAE3_RING_GL_IDX_M,
5031 						HNAE3_RING_GL_IDX_S));
5032 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5033 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5034 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5035 			req->vfid = vport->vport_id;
5036 
5037 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
5038 			if (status) {
5039 				dev_err(&hdev->pdev->dev,
5040 					"Map TQP fail, status is %d.\n",
5041 					status);
5042 				return -EIO;
5043 			}
5044 			i = 0;
5045 
5046 			hclge_cmd_setup_basic_desc(&desc,
5047 						   op,
5048 						   false);
5049 			req->int_vector_id_l =
5050 				hnae3_get_field(vector_id,
5051 						HCLGE_VECTOR_ID_L_M,
5052 						HCLGE_VECTOR_ID_L_S);
5053 			req->int_vector_id_h =
5054 				hnae3_get_field(vector_id,
5055 						HCLGE_VECTOR_ID_H_M,
5056 						HCLGE_VECTOR_ID_H_S);
5057 		}
5058 	}
5059 
5060 	if (i > 0) {
5061 		req->int_cause_num = i;
5062 		req->vfid = vport->vport_id;
5063 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
5064 		if (status) {
5065 			dev_err(&hdev->pdev->dev,
5066 				"Map TQP fail, status is %d.\n", status);
5067 			return -EIO;
5068 		}
5069 	}
5070 
5071 	return 0;
5072 }
5073 
5074 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5075 				    struct hnae3_ring_chain_node *ring_chain)
5076 {
5077 	struct hclge_vport *vport = hclge_get_vport(handle);
5078 	struct hclge_dev *hdev = vport->back;
5079 	int vector_id;
5080 
5081 	vector_id = hclge_get_vector_index(hdev, vector);
5082 	if (vector_id < 0) {
5083 		dev_err(&hdev->pdev->dev,
5084 			"failed to get vector index. vector=%d\n", vector);
5085 		return vector_id;
5086 	}
5087 
5088 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5089 }
5090 
5091 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5092 				       struct hnae3_ring_chain_node *ring_chain)
5093 {
5094 	struct hclge_vport *vport = hclge_get_vport(handle);
5095 	struct hclge_dev *hdev = vport->back;
5096 	int vector_id, ret;
5097 
5098 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5099 		return 0;
5100 
5101 	vector_id = hclge_get_vector_index(hdev, vector);
5102 	if (vector_id < 0) {
5103 		dev_err(&handle->pdev->dev,
5104 			"Get vector index fail. ret =%d\n", vector_id);
5105 		return vector_id;
5106 	}
5107 
5108 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5109 	if (ret)
5110 		dev_err(&handle->pdev->dev,
5111 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5112 			vector_id, ret);
5113 
5114 	return ret;
5115 }
5116 
5117 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5118 				      bool en_uc, bool en_mc, bool en_bc)
5119 {
5120 	struct hclge_vport *vport = &hdev->vport[vf_id];
5121 	struct hnae3_handle *handle = &vport->nic;
5122 	struct hclge_promisc_cfg_cmd *req;
5123 	struct hclge_desc desc;
5124 	bool uc_tx_en = en_uc;
5125 	u8 promisc_cfg = 0;
5126 	int ret;
5127 
5128 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5129 
5130 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
5131 	req->vf_id = vf_id;
5132 
5133 	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5134 		uc_tx_en = false;
5135 
5136 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5137 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5138 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5139 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5140 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5141 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5142 	req->extend_promisc = promisc_cfg;
5143 
5144 	/* to be compatible with DEVICE_VERSION_V1/2 */
5145 	promisc_cfg = 0;
5146 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5147 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5148 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5149 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5150 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5151 	req->promisc = promisc_cfg;
5152 
5153 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5154 	if (ret)
5155 		dev_err(&hdev->pdev->dev,
5156 			"failed to set vport %u promisc mode, ret = %d.\n",
5157 			vf_id, ret);
5158 
5159 	return ret;
5160 }
5161 
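/* A short sketch of the two formats filled above: extend_promisc carries
 * separate RX/TX enables per unicast/multicast/broadcast (unicast TX is
 * suppressed when HNAE3_PFLAG_LIMIT_PROMISC is set), while promisc keeps the
 * per-type enables plus global TX/RX bits for DEVICE_VERSION_V1/2 firmware.
 * An illustrative call enabling unicast and broadcast promisc for vport 0:
 *
 *	ret = hclge_cmd_set_promisc_mode(hdev, 0, true, false, true);
 */
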
5162 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5163 				 bool en_mc_pmc, bool en_bc_pmc)
5164 {
5165 	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5166 					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
5167 }
5168 
5169 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5170 				  bool en_mc_pmc)
5171 {
5172 	struct hclge_vport *vport = hclge_get_vport(handle);
5173 	struct hclge_dev *hdev = vport->back;
5174 	bool en_bc_pmc = true;
5175 
5176 	/* For devices whose version is below V2, if broadcast promisc is
5177 	 * enabled, the vlan filter is always bypassed. So broadcast promisc
5178 	 * should be disabled until the user enables promisc mode.
5179 	 */
5180 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5181 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5182 
5183 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5184 					    en_bc_pmc);
5185 }
5186 
5187 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5188 {
5189 	struct hclge_vport *vport = hclge_get_vport(handle);
5190 
5191 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5192 }
5193 
5194 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5195 {
5196 	if (hlist_empty(&hdev->fd_rule_list))
5197 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5198 }
5199 
5200 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5201 {
5202 	if (!test_bit(location, hdev->fd_bmap)) {
5203 		set_bit(location, hdev->fd_bmap);
5204 		hdev->hclge_fd_rule_num++;
5205 	}
5206 }
5207 
5208 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5209 {
5210 	if (test_bit(location, hdev->fd_bmap)) {
5211 		clear_bit(location, hdev->fd_bmap);
5212 		hdev->hclge_fd_rule_num--;
5213 	}
5214 }
5215 
5216 static void hclge_fd_free_node(struct hclge_dev *hdev,
5217 			       struct hclge_fd_rule *rule)
5218 {
5219 	hlist_del(&rule->rule_node);
5220 	kfree(rule);
5221 	hclge_sync_fd_state(hdev);
5222 }
5223 
5224 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5225 				      struct hclge_fd_rule *old_rule,
5226 				      struct hclge_fd_rule *new_rule,
5227 				      enum HCLGE_FD_NODE_STATE state)
5228 {
5229 	switch (state) {
5230 	case HCLGE_FD_TO_ADD:
5231 	case HCLGE_FD_ACTIVE:
5232 		/* 1) if the new state is TO_ADD, just replace the old rule
5233 		 * with the same location, no matter its state, because the
5234 		 * new rule will be configured to the hardware.
5235 		 * 2) if the new state is ACTIVE, it means the new rule
5236 		 * has been configured to the hardware, so just replace
5237 		 * the old rule node with the same location.
5238 		 * 3) neither case adds a new node to the list, so there is
5239 		 * no need to update the rule number or fd_bmap.
5240 		 */
5241 		new_rule->rule_node.next = old_rule->rule_node.next;
5242 		new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5243 		memcpy(old_rule, new_rule, sizeof(*old_rule));
5244 		kfree(new_rule);
5245 		break;
5246 	case HCLGE_FD_DELETED:
5247 		hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5248 		hclge_fd_free_node(hdev, old_rule);
5249 		break;
5250 	case HCLGE_FD_TO_DEL:
5251 		/* if the new request is TO_DEL and the old rule exists:
5252 		 * 1) if the old rule's state is TO_DEL, do nothing, because
5253 		 * the rule is deleted by location and the rest of the rule
5254 		 * content is irrelevant.
5255 		 * 2) if the old rule's state is ACTIVE, change it to TO_DEL,
5256 		 * so the rule will be deleted when the periodic task is
5257 		 * scheduled.
5258 		 * 3) if the old rule's state is TO_ADD, the rule hasn't been
5259 		 * added to the hardware yet, so just delete the rule node
5260 		 * from fd_rule_list directly.
5261 		 */
5262 		if (old_rule->state == HCLGE_FD_TO_ADD) {
5263 			hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5264 			hclge_fd_free_node(hdev, old_rule);
5265 			return;
5266 		}
5267 		old_rule->state = HCLGE_FD_TO_DEL;
5268 		break;
5269 	}
5270 }
5271 
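/* In short, with an existing node at the same location:
 *	new state TO_ADD/ACTIVE -> the old node's content is replaced in place
 *	new state DELETED       -> the node is removed and the counters updated
 *	new state TO_DEL        -> the node is removed at once if the old rule
 *				   was never written (TO_ADD), otherwise it is
 *				   only marked TO_DEL for the periodic task.
 */
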
5272 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5273 						u16 location,
5274 						struct hclge_fd_rule **parent)
5275 {
5276 	struct hclge_fd_rule *rule;
5277 	struct hlist_node *node;
5278 
5279 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5280 		if (rule->location == location)
5281 			return rule;
5282 		else if (rule->location > location)
5283 			return NULL;
5284 		/* record the parent node, used to keep the nodes in
5285 		 * fd_rule_list in ascending order.
5286 		 */
5287 		*parent = rule;
5288 	}
5289 
5290 	return NULL;
5291 }
5292 
5293 /* insert the fd rule node in ascending order according to rule->location */
5294 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5295 				      struct hclge_fd_rule *rule,
5296 				      struct hclge_fd_rule *parent)
5297 {
5298 	INIT_HLIST_NODE(&rule->rule_node);
5299 
5300 	if (parent)
5301 		hlist_add_behind(&rule->rule_node, &parent->rule_node);
5302 	else
5303 		hlist_add_head(&rule->rule_node, hlist);
5304 }
5305 
5306 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5307 				     struct hclge_fd_user_def_cfg *cfg)
5308 {
5309 	struct hclge_fd_user_def_cfg_cmd *req;
5310 	struct hclge_desc desc;
5311 	u16 data = 0;
5312 	int ret;
5313 
5314 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5315 
5316 	req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5317 
5318 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5319 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5320 			HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5321 	req->ol2_cfg = cpu_to_le16(data);
5322 
5323 	data = 0;
5324 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5325 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5326 			HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5327 	req->ol3_cfg = cpu_to_le16(data);
5328 
5329 	data = 0;
5330 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5331 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5332 			HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5333 	req->ol4_cfg = cpu_to_le16(data);
5334 
5335 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5336 	if (ret)
5337 		dev_err(&hdev->pdev->dev,
5338 			"failed to set fd user def data, ret= %d\n", ret);
5339 	return ret;
5340 }
5341 
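/* A minimal sketch of pushing the user-def configuration: the three slots in
 * hdev->fd_cfg.user_def_cfg[] map to the L2/L3/L4 config words
 * (ol2_cfg/ol3_cfg/ol4_cfg), and a slot is only enabled while its ref_cnt is
 * non-zero (illustrative only):
 *
 *	ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
 */
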
5342 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5343 {
5344 	int ret;
5345 
5346 	if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5347 		return;
5348 
5349 	if (!locked)
5350 		spin_lock_bh(&hdev->fd_rule_lock);
5351 
5352 	ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5353 	if (ret)
5354 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5355 
5356 	if (!locked)
5357 		spin_unlock_bh(&hdev->fd_rule_lock);
5358 }
5359 
5360 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5361 					  struct hclge_fd_rule *rule)
5362 {
5363 	struct hlist_head *hlist = &hdev->fd_rule_list;
5364 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5365 	struct hclge_fd_user_def_info *info, *old_info;
5366 	struct hclge_fd_user_def_cfg *cfg;
5367 
5368 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5369 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5370 		return 0;
5371 
5372 	/* the valid layer starts from 1, so minus 1 to get the cfg index */
5373 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5374 	info = &rule->ep.user_def;
5375 
5376 	if (!cfg->ref_cnt || cfg->offset == info->offset)
5377 		return 0;
5378 
5379 	if (cfg->ref_cnt > 1)
5380 		goto error;
5381 
5382 	fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5383 	if (fd_rule) {
5384 		old_info = &fd_rule->ep.user_def;
5385 		if (info->layer == old_info->layer)
5386 			return 0;
5387 	}
5388 
5389 error:
5390 	dev_err(&hdev->pdev->dev,
5391 		"No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5392 		info->layer + 1);
5393 	return -ENOSPC;
5394 }
5395 
5396 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5397 					 struct hclge_fd_rule *rule)
5398 {
5399 	struct hclge_fd_user_def_cfg *cfg;
5400 
5401 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5402 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5403 		return;
5404 
5405 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5406 	if (!cfg->ref_cnt) {
5407 		cfg->offset = rule->ep.user_def.offset;
5408 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5409 	}
5410 	cfg->ref_cnt++;
5411 }
5412 
5413 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5414 					 struct hclge_fd_rule *rule)
5415 {
5416 	struct hclge_fd_user_def_cfg *cfg;
5417 
5418 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5419 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5420 		return;
5421 
5422 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5423 	if (!cfg->ref_cnt)
5424 		return;
5425 
5426 	cfg->ref_cnt--;
5427 	if (!cfg->ref_cnt) {
5428 		cfg->offset = 0;
5429 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5430 	}
5431 }
5432 
5433 static void hclge_update_fd_list(struct hclge_dev *hdev,
5434 				 enum HCLGE_FD_NODE_STATE state, u16 location,
5435 				 struct hclge_fd_rule *new_rule)
5436 {
5437 	struct hlist_head *hlist = &hdev->fd_rule_list;
5438 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5439 
5440 	fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5441 	if (fd_rule) {
5442 		hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5443 		if (state == HCLGE_FD_ACTIVE)
5444 			hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5445 		hclge_sync_fd_user_def_cfg(hdev, true);
5446 
5447 		hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5448 		return;
5449 	}
5450 
5451 	/* it's unlikely to fail here, because we have already checked
5452 	 * that the rule exists.
5453 	 */
5454 	if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5455 		dev_warn(&hdev->pdev->dev,
5456 			 "failed to delete fd rule %u, it's inexistent\n",
5457 			 location);
5458 		return;
5459 	}
5460 
5461 	hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5462 	hclge_sync_fd_user_def_cfg(hdev, true);
5463 
5464 	hclge_fd_insert_rule_node(hlist, new_rule, parent);
5465 	hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5466 
5467 	if (state == HCLGE_FD_TO_ADD) {
5468 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5469 		hclge_task_schedule(hdev, 0);
5470 	}
5471 }
5472 
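/* Note on the flow above: for a brand-new location the node is inserted in
 * ascending order and the rule count is bumped; a rule passed with state
 * HCLGE_FD_TO_ADD is not written to hardware here, it only sets
 * HCLGE_STATE_FD_TBL_CHANGED and schedules the service task, which does the
 * TCAM/AD configuration later. Callers that configure hardware first (see
 * hclge_add_fd_entry_common() below) pass HCLGE_FD_ACTIVE instead.
 */
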
5473 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5474 {
5475 	struct hclge_get_fd_mode_cmd *req;
5476 	struct hclge_desc desc;
5477 	int ret;
5478 
5479 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5480 
5481 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
5482 
5483 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5484 	if (ret) {
5485 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5486 		return ret;
5487 	}
5488 
5489 	*fd_mode = req->mode;
5490 
5491 	return ret;
5492 }
5493 
5494 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5495 				   u32 *stage1_entry_num,
5496 				   u32 *stage2_entry_num,
5497 				   u16 *stage1_counter_num,
5498 				   u16 *stage2_counter_num)
5499 {
5500 	struct hclge_get_fd_allocation_cmd *req;
5501 	struct hclge_desc desc;
5502 	int ret;
5503 
5504 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5505 
5506 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5507 
5508 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5509 	if (ret) {
5510 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5511 			ret);
5512 		return ret;
5513 	}
5514 
5515 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5516 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5517 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5518 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5519 
5520 	return ret;
5521 }
5522 
5523 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5524 				   enum HCLGE_FD_STAGE stage_num)
5525 {
5526 	struct hclge_set_fd_key_config_cmd *req;
5527 	struct hclge_fd_key_cfg *stage;
5528 	struct hclge_desc desc;
5529 	int ret;
5530 
5531 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5532 
5533 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5534 	stage = &hdev->fd_cfg.key_cfg[stage_num];
5535 	req->stage = stage_num;
5536 	req->key_select = stage->key_sel;
5537 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5538 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5539 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5540 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5541 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5542 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5543 
5544 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5545 	if (ret)
5546 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5547 
5548 	return ret;
5549 }
5550 
5551 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5552 {
5553 	struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5554 
5555 	spin_lock_bh(&hdev->fd_rule_lock);
5556 	memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5557 	spin_unlock_bh(&hdev->fd_rule_lock);
5558 
5559 	hclge_fd_set_user_def_cmd(hdev, cfg);
5560 }
5561 
5562 static int hclge_init_fd_config(struct hclge_dev *hdev)
5563 {
5564 #define LOW_2_WORDS		0x03
5565 	struct hclge_fd_key_cfg *key_cfg;
5566 	int ret;
5567 
5568 	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
5569 		return 0;
5570 
5571 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5572 	if (ret)
5573 		return ret;
5574 
5575 	switch (hdev->fd_cfg.fd_mode) {
5576 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5577 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5578 		break;
5579 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5580 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5581 		break;
5582 	default:
5583 		dev_err(&hdev->pdev->dev,
5584 			"Unsupported flow director mode %u\n",
5585 			hdev->fd_cfg.fd_mode);
5586 		return -EOPNOTSUPP;
5587 	}
5588 
5589 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5590 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5591 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5592 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5593 	key_cfg->outer_sipv6_word_en = 0;
5594 	key_cfg->outer_dipv6_word_en = 0;
5595 
5596 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5597 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5598 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5599 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5600 
5601 	/* If using the max 400-bit key, we can also support the MAC address tuples */
5602 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5603 		key_cfg->tuple_active |=
5604 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5605 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5606 			key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5607 	}
5608 
5609 	/* roce_type is used to filter roce frames
5610 	 * dst_vport is used to specify the rule
5611 	 */
5612 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5613 
5614 	ret = hclge_get_fd_allocation(hdev,
5615 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5616 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5617 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5618 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5619 	if (ret)
5620 		return ret;
5621 
5622 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5623 }
5624 
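/* Summary of the key modes configured above: the stage-1 key always carries
 * the basic 5-tuple plus vlan/ether-type/tos bits; the wider
 * HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1 mode additionally enables the MAC
 * tuples (and, from DEVICE_VERSION_V3 on, the user-def tuples) at the cost of
 * half the rule depth compared with the 4K/200-bit mode.
 */
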
5625 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5626 				int loc, u8 *key, bool is_add)
5627 {
5628 	struct hclge_fd_tcam_config_1_cmd *req1;
5629 	struct hclge_fd_tcam_config_2_cmd *req2;
5630 	struct hclge_fd_tcam_config_3_cmd *req3;
5631 	struct hclge_desc desc[3];
5632 	int ret;
5633 
5634 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5635 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
5636 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5637 	desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
5638 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5639 
5640 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5641 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5642 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5643 
5644 	req1->stage = stage;
5645 	req1->xy_sel = sel_x ? 1 : 0;
5646 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5647 	req1->index = cpu_to_le32(loc);
5648 	req1->entry_vld = sel_x ? is_add : 0;
5649 
5650 	if (key) {
5651 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5652 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5653 		       sizeof(req2->tcam_data));
5654 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5655 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5656 	}
5657 
5658 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
5659 	if (ret)
5660 		dev_err(&hdev->pdev->dev,
5661 			"config tcam key fail, ret=%d\n",
5662 			ret);
5663 
5664 	return ret;
5665 }
5666 
5667 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5668 			      struct hclge_fd_ad_data *action)
5669 {
5670 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5671 	struct hclge_fd_ad_config_cmd *req;
5672 	struct hclge_desc desc;
5673 	u64 ad_data = 0;
5674 	int ret;
5675 
5676 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5677 
5678 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
5679 	req->index = cpu_to_le32(loc);
5680 	req->stage = stage;
5681 
5682 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5683 		      action->write_rule_id_to_bd);
5684 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5685 			action->rule_id);
5686 	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5687 		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5688 			      action->override_tc);
5689 		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5690 				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5691 	}
5692 	ad_data <<= 32;
5693 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5694 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5695 		      action->forward_to_direct_queue);
5696 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5697 			action->queue_id);
5698 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5699 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5700 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5701 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5702 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5703 			action->counter_id);
5704 
5705 	req->ad_data = cpu_to_le64(ad_data);
5706 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5707 	if (ret)
5708 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5709 
5710 	return ret;
5711 }
5712 
5713 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5714 				   struct hclge_fd_rule *rule)
5715 {
5716 	int offset, moffset, ip_offset;
5717 	enum HCLGE_FD_KEY_OPT key_opt;
5718 	u16 tmp_x_s, tmp_y_s;
5719 	u32 tmp_x_l, tmp_y_l;
5720 	u8 *p = (u8 *)rule;
5721 	int i;
5722 
5723 	if (rule->unused_tuple & BIT(tuple_bit))
5724 		return true;
5725 
5726 	key_opt = tuple_key_info[tuple_bit].key_opt;
5727 	offset = tuple_key_info[tuple_bit].offset;
5728 	moffset = tuple_key_info[tuple_bit].moffset;
5729 
5730 	switch (key_opt) {
5731 	case KEY_OPT_U8:
5732 		calc_x(*key_x, p[offset], p[moffset]);
5733 		calc_y(*key_y, p[offset], p[moffset]);
5734 
5735 		return true;
5736 	case KEY_OPT_LE16:
5737 		calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5738 		calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5739 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5740 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5741 
5742 		return true;
5743 	case KEY_OPT_LE32:
5744 		calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5745 		calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5746 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5747 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5748 
5749 		return true;
5750 	case KEY_OPT_MAC:
5751 		for (i = 0; i < ETH_ALEN; i++) {
5752 			calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5753 			       p[moffset + i]);
5754 			calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5755 			       p[moffset + i]);
5756 		}
5757 
5758 		return true;
5759 	case KEY_OPT_IP:
5760 		ip_offset = IPV4_INDEX * sizeof(u32);
5761 		calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5762 		       *(u32 *)(&p[moffset + ip_offset]));
5763 		calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5764 		       *(u32 *)(&p[moffset + ip_offset]));
5765 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5766 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5767 
5768 		return true;
5769 	default:
5770 		return false;
5771 	}
5772 }
5773 
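/* A minimal sketch of the caller's view (illustrative only, mirroring the
 * loop in hclge_config_key() below), assuming calc_x()/calc_y() keep the
 * usual value/mask to x/y TCAM encoding defined elsewhere in this driver:
 *
 *	if (hclge_fd_convert_tuple(i, cur_key_x, cur_key_y, rule)) {
 *		cur_key_x += tuple_size;
 *		cur_key_y += tuple_size;
 *	}
 */
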
5774 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5775 				 u8 vf_id, u8 network_port_id)
5776 {
5777 	u32 port_number = 0;
5778 
5779 	if (port_type == HOST_PORT) {
5780 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5781 				pf_id);
5782 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5783 				vf_id);
5784 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5785 	} else {
5786 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5787 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5788 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5789 	}
5790 
5791 	return port_number;
5792 }
5793 
5794 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5795 				       __le32 *key_x, __le32 *key_y,
5796 				       struct hclge_fd_rule *rule)
5797 {
5798 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5799 	u8 cur_pos = 0, tuple_size, shift_bits;
5800 	unsigned int i;
5801 
5802 	for (i = 0; i < MAX_META_DATA; i++) {
5803 		tuple_size = meta_data_key_info[i].key_length;
5804 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5805 
5806 		switch (tuple_bit) {
5807 		case BIT(ROCE_TYPE):
5808 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5809 			cur_pos += tuple_size;
5810 			break;
5811 		case BIT(DST_VPORT):
5812 			port_number = hclge_get_port_number(HOST_PORT, 0,
5813 							    rule->vf_id, 0);
5814 			hnae3_set_field(meta_data,
5815 					GENMASK(cur_pos + tuple_size, cur_pos),
5816 					cur_pos, port_number);
5817 			cur_pos += tuple_size;
5818 			break;
5819 		default:
5820 			break;
5821 		}
5822 	}
5823 
5824 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5825 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5826 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5827 
5828 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5829 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5830 }
5831 
5832 /* A complete key is a combination of the meta data key and the tuple key.
5833  * The meta data key is stored in the MSB region, the tuple key is stored in
5834  * the LSB region, and unused bits are filled with 0.
5835  */
5836 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5837 			    struct hclge_fd_rule *rule)
5838 {
5839 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5840 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5841 	u8 *cur_key_x, *cur_key_y;
5842 	u8 meta_data_region;
5843 	u8 tuple_size;
5844 	int ret;
5845 	u32 i;
5846 
5847 	memset(key_x, 0, sizeof(key_x));
5848 	memset(key_y, 0, sizeof(key_y));
5849 	cur_key_x = key_x;
5850 	cur_key_y = key_y;
5851 
5852 	for (i = 0; i < MAX_TUPLE; i++) {
5853 		bool tuple_valid;
5854 
5855 		tuple_size = tuple_key_info[i].key_length / 8;
5856 		if (!(key_cfg->tuple_active & BIT(i)))
5857 			continue;
5858 
5859 		tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5860 						     cur_key_y, rule);
5861 		if (tuple_valid) {
5862 			cur_key_x += tuple_size;
5863 			cur_key_y += tuple_size;
5864 		}
5865 	}
5866 
5867 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5868 			MAX_META_DATA_LENGTH / 8;
5869 
5870 	hclge_fd_convert_meta_data(key_cfg,
5871 				   (__le32 *)(key_x + meta_data_region),
5872 				   (__le32 *)(key_y + meta_data_region),
5873 				   rule);
5874 
5875 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5876 				   true);
5877 	if (ret) {
5878 		dev_err(&hdev->pdev->dev,
5879 			"fd key_y config fail, loc=%u, ret=%d\n",
5880 			rule->location, ret);
5881 		return ret;
5882 	}
5883 
5884 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5885 				   true);
5886 	if (ret)
5887 		dev_err(&hdev->pdev->dev,
5888 			"fd key_x config fail, loc=%u, ret=%d\n",
5889 			rule->location, ret);
5890 	return ret;
5891 }
5892 
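/* The resulting stage-1 key layout is therefore (sketch):
 *
 *	LSB ........................................................ MSB
 *	[ active tuples, packed in tuple_key_info order ][ pad ][ meta data ]
 *
 * with the meta data region sized by MAX_META_DATA_LENGTH and filled by
 * hclge_fd_convert_meta_data(); key_y is programmed first, then key_x marks
 * the entry valid via hclge_fd_tcam_config() with sel_x set.
 */
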
5893 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5894 			       struct hclge_fd_rule *rule)
5895 {
5896 	struct hclge_vport *vport = hdev->vport;
5897 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5898 	struct hclge_fd_ad_data ad_data;
5899 
5900 	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5901 	ad_data.ad_id = rule->location;
5902 
5903 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5904 		ad_data.drop_packet = true;
5905 	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5906 		ad_data.override_tc = true;
5907 		ad_data.queue_id =
5908 			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5909 		ad_data.tc_size =
5910 			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5911 	} else {
5912 		ad_data.forward_to_direct_queue = true;
5913 		ad_data.queue_id = rule->queue_id;
5914 	}
5915 
5916 	if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
5917 		ad_data.use_counter = true;
5918 		ad_data.counter_id = rule->vf_id %
5919 				     hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
5920 	} else {
5921 		ad_data.use_counter = false;
5922 		ad_data.counter_id = 0;
5923 	}
5924 
5925 	ad_data.use_next_stage = false;
5926 	ad_data.next_input_key = 0;
5927 
5928 	ad_data.write_rule_id_to_bd = true;
5929 	ad_data.rule_id = rule->location;
5930 
5931 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5932 }
5933 
5934 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5935 				       u32 *unused_tuple)
5936 {
5937 	if (!spec || !unused_tuple)
5938 		return -EINVAL;
5939 
5940 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5941 
5942 	if (!spec->ip4src)
5943 		*unused_tuple |= BIT(INNER_SRC_IP);
5944 
5945 	if (!spec->ip4dst)
5946 		*unused_tuple |= BIT(INNER_DST_IP);
5947 
5948 	if (!spec->psrc)
5949 		*unused_tuple |= BIT(INNER_SRC_PORT);
5950 
5951 	if (!spec->pdst)
5952 		*unused_tuple |= BIT(INNER_DST_PORT);
5953 
5954 	if (!spec->tos)
5955 		*unused_tuple |= BIT(INNER_IP_TOS);
5956 
5957 	return 0;
5958 }
5959 
5960 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5961 				    u32 *unused_tuple)
5962 {
5963 	if (!spec || !unused_tuple)
5964 		return -EINVAL;
5965 
5966 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5967 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5968 
5969 	if (!spec->ip4src)
5970 		*unused_tuple |= BIT(INNER_SRC_IP);
5971 
5972 	if (!spec->ip4dst)
5973 		*unused_tuple |= BIT(INNER_DST_IP);
5974 
5975 	if (!spec->tos)
5976 		*unused_tuple |= BIT(INNER_IP_TOS);
5977 
5978 	if (!spec->proto)
5979 		*unused_tuple |= BIT(INNER_IP_PROTO);
5980 
5981 	if (spec->l4_4_bytes)
5982 		return -EOPNOTSUPP;
5983 
5984 	if (spec->ip_ver != ETH_RX_NFC_IP4)
5985 		return -EOPNOTSUPP;
5986 
5987 	return 0;
5988 }
5989 
5990 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5991 				       u32 *unused_tuple)
5992 {
5993 	if (!spec || !unused_tuple)
5994 		return -EINVAL;
5995 
5996 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5997 
5998 	/* check whether the src/dst ip address is used */
5999 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6000 		*unused_tuple |= BIT(INNER_SRC_IP);
6001 
6002 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6003 		*unused_tuple |= BIT(INNER_DST_IP);
6004 
6005 	if (!spec->psrc)
6006 		*unused_tuple |= BIT(INNER_SRC_PORT);
6007 
6008 	if (!spec->pdst)
6009 		*unused_tuple |= BIT(INNER_DST_PORT);
6010 
6011 	if (!spec->tclass)
6012 		*unused_tuple |= BIT(INNER_IP_TOS);
6013 
6014 	return 0;
6015 }
6016 
6017 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6018 				    u32 *unused_tuple)
6019 {
6020 	if (!spec || !unused_tuple)
6021 		return -EINVAL;
6022 
6023 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6024 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6025 
6026 	/* check whether the src/dst ip address is used */
6027 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6028 		*unused_tuple |= BIT(INNER_SRC_IP);
6029 
6030 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6031 		*unused_tuple |= BIT(INNER_DST_IP);
6032 
6033 	if (!spec->l4_proto)
6034 		*unused_tuple |= BIT(INNER_IP_PROTO);
6035 
6036 	if (!spec->tclass)
6037 		*unused_tuple |= BIT(INNER_IP_TOS);
6038 
6039 	if (spec->l4_4_bytes)
6040 		return -EOPNOTSUPP;
6041 
6042 	return 0;
6043 }
6044 
6045 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6046 {
6047 	if (!spec || !unused_tuple)
6048 		return -EINVAL;
6049 
6050 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6051 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6052 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6053 
6054 	if (is_zero_ether_addr(spec->h_source))
6055 		*unused_tuple |= BIT(INNER_SRC_MAC);
6056 
6057 	if (is_zero_ether_addr(spec->h_dest))
6058 		*unused_tuple |= BIT(INNER_DST_MAC);
6059 
6060 	if (!spec->h_proto)
6061 		*unused_tuple |= BIT(INNER_ETH_TYPE);
6062 
6063 	return 0;
6064 }
6065 
6066 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6067 				    struct ethtool_rx_flow_spec *fs,
6068 				    u32 *unused_tuple)
6069 {
6070 	if (fs->flow_type & FLOW_EXT) {
6071 		if (fs->h_ext.vlan_etype) {
6072 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6073 			return -EOPNOTSUPP;
6074 		}
6075 
6076 		if (!fs->h_ext.vlan_tci)
6077 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6078 
6079 		if (fs->m_ext.vlan_tci &&
6080 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6081 			dev_err(&hdev->pdev->dev,
6082 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6083 				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6084 			return -EINVAL;
6085 		}
6086 	} else {
6087 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6088 	}
6089 
6090 	if (fs->flow_type & FLOW_MAC_EXT) {
6091 		if (hdev->fd_cfg.fd_mode !=
6092 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6093 			dev_err(&hdev->pdev->dev,
6094 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
6095 			return -EOPNOTSUPP;
6096 		}
6097 
6098 		if (is_zero_ether_addr(fs->h_ext.h_dest))
6099 			*unused_tuple |= BIT(INNER_DST_MAC);
6100 		else
6101 			*unused_tuple &= ~BIT(INNER_DST_MAC);
6102 	}
6103 
6104 	return 0;
6105 }
6106 
6107 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6108 				       struct hclge_fd_user_def_info *info)
6109 {
6110 	switch (flow_type) {
6111 	case ETHER_FLOW:
6112 		info->layer = HCLGE_FD_USER_DEF_L2;
6113 		*unused_tuple &= ~BIT(INNER_L2_RSV);
6114 		break;
6115 	case IP_USER_FLOW:
6116 	case IPV6_USER_FLOW:
6117 		info->layer = HCLGE_FD_USER_DEF_L3;
6118 		*unused_tuple &= ~BIT(INNER_L3_RSV);
6119 		break;
6120 	case TCP_V4_FLOW:
6121 	case UDP_V4_FLOW:
6122 	case TCP_V6_FLOW:
6123 	case UDP_V6_FLOW:
6124 		info->layer = HCLGE_FD_USER_DEF_L4;
6125 		*unused_tuple &= ~BIT(INNER_L4_RSV);
6126 		break;
6127 	default:
6128 		return -EOPNOTSUPP;
6129 	}
6130 
6131 	return 0;
6132 }
6133 
6134 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6135 {
6136 	return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6137 }
6138 
6139 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6140 					 struct ethtool_rx_flow_spec *fs,
6141 					 u32 *unused_tuple,
6142 					 struct hclge_fd_user_def_info *info)
6143 {
6144 	u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6145 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6146 	u16 data, offset, data_mask, offset_mask;
6147 	int ret;
6148 
6149 	info->layer = HCLGE_FD_USER_DEF_NONE;
6150 	*unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6151 
6152 	if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6153 		return 0;
6154 
6155 	/* the user-def data from ethtool is a 64-bit value; bits 0~15 are
6156 	 * used for the data, and bits 32~47 are used for the offset.
6157 	 */
6158 	data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6159 	data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6160 	offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6161 	offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6162 
6163 	if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6164 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6165 		return -EOPNOTSUPP;
6166 	}
6167 
6168 	if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6169 		dev_err(&hdev->pdev->dev,
6170 			"user-def offset[%u] should be no more than %u\n",
6171 			offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6172 		return -EINVAL;
6173 	}
6174 
6175 	if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6176 		dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6177 		return -EINVAL;
6178 	}
6179 
6180 	ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6181 	if (ret) {
6182 		dev_err(&hdev->pdev->dev,
6183 			"unsupported flow type for user-def bytes, ret = %d\n",
6184 			ret);
6185 		return ret;
6186 	}
6187 
6188 	info->data = data;
6189 	info->data_mask = data_mask;
6190 	info->offset = offset;
6191 
6192 	return 0;
6193 }
6194 
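/* Worked example of the split above: ethtool's 64-bit "user-def" value lands
 * in h_ext.data[1] (low word, bits 0~15 = match data) and h_ext.data[0]
 * (high word, bits 32~47 = byte offset), so a user-def value of
 * 0x000000040000abcd requests data 0xabcd at offset 4.
 */
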
6195 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6196 			       struct ethtool_rx_flow_spec *fs,
6197 			       u32 *unused_tuple,
6198 			       struct hclge_fd_user_def_info *info)
6199 {
6200 	u32 flow_type;
6201 	int ret;
6202 
6203 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6204 		dev_err(&hdev->pdev->dev,
6205 			"failed to config fd rules, invalid rule location: %u, max is %u\n.",
6206 			fs->location,
6207 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6208 		return -EINVAL;
6209 	}
6210 
6211 	ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6212 	if (ret)
6213 		return ret;
6214 
6215 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6216 	switch (flow_type) {
6217 	case SCTP_V4_FLOW:
6218 	case TCP_V4_FLOW:
6219 	case UDP_V4_FLOW:
6220 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6221 						  unused_tuple);
6222 		break;
6223 	case IP_USER_FLOW:
6224 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6225 					       unused_tuple);
6226 		break;
6227 	case SCTP_V6_FLOW:
6228 	case TCP_V6_FLOW:
6229 	case UDP_V6_FLOW:
6230 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6231 						  unused_tuple);
6232 		break;
6233 	case IPV6_USER_FLOW:
6234 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6235 					       unused_tuple);
6236 		break;
6237 	case ETHER_FLOW:
6238 		if (hdev->fd_cfg.fd_mode !=
6239 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6240 			dev_err(&hdev->pdev->dev,
6241 				"ETHER_FLOW is not supported in current fd mode!\n");
6242 			return -EOPNOTSUPP;
6243 		}
6244 
6245 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6246 						 unused_tuple);
6247 		break;
6248 	default:
6249 		dev_err(&hdev->pdev->dev,
6250 			"unsupported protocol type, protocol type = %#x\n",
6251 			flow_type);
6252 		return -EOPNOTSUPP;
6253 	}
6254 
6255 	if (ret) {
6256 		dev_err(&hdev->pdev->dev,
6257 			"failed to check flow union tuple, ret = %d\n",
6258 			ret);
6259 		return ret;
6260 	}
6261 
6262 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6263 }
6264 
6265 static void hclge_fd_get_tcpip4_tuple(struct ethtool_rx_flow_spec *fs,
6266 				      struct hclge_fd_rule *rule, u8 ip_proto)
6267 {
6268 	rule->tuples.src_ip[IPV4_INDEX] =
6269 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6270 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6271 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6272 
6273 	rule->tuples.dst_ip[IPV4_INDEX] =
6274 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6275 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6276 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6277 
6278 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6279 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6280 
6281 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6282 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6283 
6284 	rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6285 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6286 
6287 	rule->tuples.ether_proto = ETH_P_IP;
6288 	rule->tuples_mask.ether_proto = 0xFFFF;
6289 
6290 	rule->tuples.ip_proto = ip_proto;
6291 	rule->tuples_mask.ip_proto = 0xFF;
6292 }
6293 
6294 static void hclge_fd_get_ip4_tuple(struct ethtool_rx_flow_spec *fs,
6295 				   struct hclge_fd_rule *rule)
6296 {
6297 	rule->tuples.src_ip[IPV4_INDEX] =
6298 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6299 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6300 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6301 
6302 	rule->tuples.dst_ip[IPV4_INDEX] =
6303 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6304 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6305 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6306 
6307 	rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6308 	rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6309 
6310 	rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6311 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6312 
6313 	rule->tuples.ether_proto = ETH_P_IP;
6314 	rule->tuples_mask.ether_proto = 0xFFFF;
6315 }
6316 
6317 static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs,
6318 				      struct hclge_fd_rule *rule, u8 ip_proto)
6319 {
6320 	ipv6_addr_be32_to_cpu(rule->tuples.src_ip,
6321 			      fs->h_u.tcp_ip6_spec.ip6src);
6322 	ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip,
6323 			      fs->m_u.tcp_ip6_spec.ip6src);
6324 
6325 	ipv6_addr_be32_to_cpu(rule->tuples.dst_ip,
6326 			      fs->h_u.tcp_ip6_spec.ip6dst);
6327 	ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip,
6328 			      fs->m_u.tcp_ip6_spec.ip6dst);
6329 
6330 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6331 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6332 
6333 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6334 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6335 
6336 	rule->tuples.ether_proto = ETH_P_IPV6;
6337 	rule->tuples_mask.ether_proto = 0xFFFF;
6338 
6339 	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6340 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6341 
6342 	rule->tuples.ip_proto = ip_proto;
6343 	rule->tuples_mask.ip_proto = 0xFF;
6344 }
6345 
6346 static void hclge_fd_get_ip6_tuple(struct ethtool_rx_flow_spec *fs,
6347 				   struct hclge_fd_rule *rule)
6348 {
6349 	ipv6_addr_be32_to_cpu(rule->tuples.src_ip,
6350 			      fs->h_u.usr_ip6_spec.ip6src);
6351 	ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip,
6352 			      fs->m_u.usr_ip6_spec.ip6src);
6353 
6354 	ipv6_addr_be32_to_cpu(rule->tuples.dst_ip,
6355 			      fs->h_u.usr_ip6_spec.ip6dst);
6356 	ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip,
6357 			      fs->m_u.usr_ip6_spec.ip6dst);
6358 
6359 	rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6360 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6361 
6362 	rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
6363 	rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6364 
6365 	rule->tuples.ether_proto = ETH_P_IPV6;
6366 	rule->tuples_mask.ether_proto = 0xFFFF;
6367 }
6368 
6369 static void hclge_fd_get_ether_tuple(struct ethtool_rx_flow_spec *fs,
6370 				     struct hclge_fd_rule *rule)
6371 {
6372 	ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6373 	ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6374 
6375 	ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6376 	ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6377 
6378 	rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6379 	rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6380 }
6381 
6382 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6383 					struct hclge_fd_rule *rule)
6384 {
6385 	switch (info->layer) {
6386 	case HCLGE_FD_USER_DEF_L2:
6387 		rule->tuples.l2_user_def = info->data;
6388 		rule->tuples_mask.l2_user_def = info->data_mask;
6389 		break;
6390 	case HCLGE_FD_USER_DEF_L3:
6391 		rule->tuples.l3_user_def = info->data;
6392 		rule->tuples_mask.l3_user_def = info->data_mask;
6393 		break;
6394 	case HCLGE_FD_USER_DEF_L4:
6395 		rule->tuples.l4_user_def = (u32)info->data << 16;
6396 		rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6397 		break;
6398 	default:
6399 		break;
6400 	}
6401 
6402 	rule->ep.user_def = *info;
6403 }
6404 
6405 static int hclge_fd_get_tuple(struct ethtool_rx_flow_spec *fs,
6406 			      struct hclge_fd_rule *rule,
6407 			      struct hclge_fd_user_def_info *info)
6408 {
6409 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6410 
6411 	switch (flow_type) {
6412 	case SCTP_V4_FLOW:
6413 		hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_SCTP);
6414 		break;
6415 	case TCP_V4_FLOW:
6416 		hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_TCP);
6417 		break;
6418 	case UDP_V4_FLOW:
6419 		hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_UDP);
6420 		break;
6421 	case IP_USER_FLOW:
6422 		hclge_fd_get_ip4_tuple(fs, rule);
6423 		break;
6424 	case SCTP_V6_FLOW:
6425 		hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_SCTP);
6426 		break;
6427 	case TCP_V6_FLOW:
6428 		hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_TCP);
6429 		break;
6430 	case UDP_V6_FLOW:
6431 		hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_UDP);
6432 		break;
6433 	case IPV6_USER_FLOW:
6434 		hclge_fd_get_ip6_tuple(fs, rule);
6435 		break;
6436 	case ETHER_FLOW:
6437 		hclge_fd_get_ether_tuple(fs, rule);
6438 		break;
6439 	default:
6440 		return -EOPNOTSUPP;
6441 	}
6442 
6443 	if (fs->flow_type & FLOW_EXT) {
6444 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6445 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6446 		hclge_fd_get_user_def_tuple(info, rule);
6447 	}
6448 
6449 	if (fs->flow_type & FLOW_MAC_EXT) {
6450 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6451 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6452 	}
6453 
6454 	return 0;
6455 }
6456 
6457 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6458 				struct hclge_fd_rule *rule)
6459 {
6460 	int ret;
6461 
6462 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6463 	if (ret)
6464 		return ret;
6465 
6466 	return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6467 }
6468 
6469 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6470 				     struct hclge_fd_rule *rule)
6471 {
6472 	int ret;
6473 
6474 	spin_lock_bh(&hdev->fd_rule_lock);
6475 
6476 	if (hdev->fd_active_type != rule->rule_type &&
6477 	    (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6478 	     hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6479 		dev_err(&hdev->pdev->dev,
6480 			"mode conflict(new type %d, active type %d), please delete existent rules first\n",
6481 			rule->rule_type, hdev->fd_active_type);
6482 		spin_unlock_bh(&hdev->fd_rule_lock);
6483 		return -EINVAL;
6484 	}
6485 
6486 	ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6487 	if (ret)
6488 		goto out;
6489 
6490 	ret = hclge_clear_arfs_rules(hdev);
6491 	if (ret)
6492 		goto out;
6493 
6494 	ret = hclge_fd_config_rule(hdev, rule);
6495 	if (ret)
6496 		goto out;
6497 
6498 	rule->state = HCLGE_FD_ACTIVE;
6499 	hdev->fd_active_type = rule->rule_type;
6500 	hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6501 
6502 out:
6503 	spin_unlock_bh(&hdev->fd_rule_lock);
6504 	return ret;
6505 }
6506 
6507 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6508 {
6509 	struct hclge_vport *vport = hclge_get_vport(handle);
6510 	struct hclge_dev *hdev = vport->back;
6511 
6512 	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6513 }
6514 
6515 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6516 				      u16 *vport_id, u8 *action, u16 *queue_id)
6517 {
6518 	struct hclge_vport *vport = hdev->vport;
6519 
6520 	if (ring_cookie == RX_CLS_FLOW_DISC) {
6521 		*action = HCLGE_FD_ACTION_DROP_PACKET;
6522 	} else {
6523 		u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6524 		u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6525 		u16 tqps;
6526 
6527 		/* To stay consistent with the user's configuration, minus 1 when
6528 		 * printing 'vf', because the vf id from ethtool is the vf index plus 1.
6529 		 */
6530 		if (vf > hdev->num_req_vfs) {
6531 			dev_err(&hdev->pdev->dev,
6532 				"Error: vf id (%u) should be less than %u\n",
6533 				vf - 1U, hdev->num_req_vfs);
6534 			return -EINVAL;
6535 		}
6536 
6537 		*vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6538 		tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6539 
6540 		if (ring >= tqps) {
6541 			dev_err(&hdev->pdev->dev,
6542 				"Error: queue id (%u) > max tqp num (%u)\n",
6543 				ring, tqps - 1U);
6544 			return -EINVAL;
6545 		}
6546 
6547 		*action = HCLGE_FD_ACTION_SELECT_QUEUE;
6548 		*queue_id = ring;
6549 	}
6550 
6551 	return 0;
6552 }
6553 
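/* A minimal sketch of the cookie decoding (illustrative only): the
 * ring_cookie either selects the drop action (RX_CLS_FLOW_DISC) or encodes a
 * VF index plus a queue id; VF 0 means the PF itself, so the rule is steered
 * to the PF's own vport.
 *
 *	u16 vport_id = 0, queue_id = 0;
 *	u8 action;
 *
 *	ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &vport_id,
 *					 &action, &queue_id);
 */
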
6554 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6555 			      struct ethtool_rxnfc *cmd)
6556 {
6557 	struct hclge_vport *vport = hclge_get_vport(handle);
6558 	struct hclge_dev *hdev = vport->back;
6559 	struct hclge_fd_user_def_info info;
6560 	u16 dst_vport_id = 0, q_index = 0;
6561 	struct ethtool_rx_flow_spec *fs;
6562 	struct hclge_fd_rule *rule;
6563 	u32 unused = 0;
6564 	u8 action;
6565 	int ret;
6566 
6567 	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
6568 		dev_err(&hdev->pdev->dev,
6569 			"flow table director is not supported\n");
6570 		return -EOPNOTSUPP;
6571 	}
6572 
6573 	if (!hdev->fd_en) {
6574 		dev_err(&hdev->pdev->dev,
6575 			"please enable flow director first\n");
6576 		return -EOPNOTSUPP;
6577 	}
6578 
6579 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6580 
6581 	ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6582 	if (ret)
6583 		return ret;
6584 
6585 	ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6586 					 &action, &q_index);
6587 	if (ret)
6588 		return ret;
6589 
6590 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6591 	if (!rule)
6592 		return -ENOMEM;
6593 
6594 	ret = hclge_fd_get_tuple(fs, rule, &info);
6595 	if (ret) {
6596 		kfree(rule);
6597 		return ret;
6598 	}
6599 
6600 	rule->flow_type = fs->flow_type;
6601 	rule->location = fs->location;
6602 	rule->unused_tuple = unused;
6603 	rule->vf_id = dst_vport_id;
6604 	rule->queue_id = q_index;
6605 	rule->action = action;
6606 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
6607 
6608 	ret = hclge_add_fd_entry_common(hdev, rule);
6609 	if (ret)
6610 		kfree(rule);
6611 
6612 	return ret;
6613 }
6614 
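/* An end-to-end add therefore goes: ethtool spec -> hclge_fd_check_spec() ->
 * hclge_fd_parse_ring_cookie() -> hclge_fd_get_tuple() ->
 * hclge_add_fd_entry_common(), and the rule is freed on any failure. A
 * typical trigger from userspace (assuming a recent ethtool) might be:
 *
 *	ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.1 dst-port 80 \
 *		action 2 loc 3
 */
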
6615 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6616 			      struct ethtool_rxnfc *cmd)
6617 {
6618 	struct hclge_vport *vport = hclge_get_vport(handle);
6619 	struct hclge_dev *hdev = vport->back;
6620 	struct ethtool_rx_flow_spec *fs;
6621 	int ret;
6622 
6623 	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6624 		return -EOPNOTSUPP;
6625 
6626 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6627 
6628 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6629 		return -EINVAL;
6630 
6631 	spin_lock_bh(&hdev->fd_rule_lock);
6632 	if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6633 	    !test_bit(fs->location, hdev->fd_bmap)) {
6634 		dev_err(&hdev->pdev->dev,
6635 			"Delete fail, rule %u is inexistent\n", fs->location);
6636 		spin_unlock_bh(&hdev->fd_rule_lock);
6637 		return -ENOENT;
6638 	}
6639 
6640 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6641 				   NULL, false);
6642 	if (ret)
6643 		goto out;
6644 
6645 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6646 
6647 out:
6648 	spin_unlock_bh(&hdev->fd_rule_lock);
6649 	return ret;
6650 }
6651 
6652 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6653 					 bool clear_list)
6654 {
6655 	struct hclge_fd_rule *rule;
6656 	struct hlist_node *node;
6657 	u16 location;
6658 
6659 	spin_lock_bh(&hdev->fd_rule_lock);
6660 
6661 	for_each_set_bit(location, hdev->fd_bmap,
6662 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6663 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6664 				     NULL, false);
6665 
6666 	if (clear_list) {
6667 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6668 					  rule_node) {
6669 			hlist_del(&rule->rule_node);
6670 			kfree(rule);
6671 		}
6672 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6673 		hdev->hclge_fd_rule_num = 0;
6674 		bitmap_zero(hdev->fd_bmap,
6675 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6676 	}
6677 
6678 	spin_unlock_bh(&hdev->fd_rule_lock);
6679 }
6680 
6681 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6682 {
6683 	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6684 		return;
6685 
6686 	hclge_clear_fd_rules_in_list(hdev, true);
6687 	hclge_fd_disable_user_def(hdev);
6688 }
6689 
6690 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6691 {
6692 	struct hclge_vport *vport = hclge_get_vport(handle);
6693 	struct hclge_dev *hdev = vport->back;
6694 	struct hclge_fd_rule *rule;
6695 	struct hlist_node *node;
6696 
6697 	/* Return ok here, because reset error handling will check this
6698 	 * return value. If error is returned here, the reset process will
6699 	 * fail.
6700 	 */
6701 	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6702 		return 0;
6703 
6704 	/* if fd is disabled, it should not be restored during reset */
6705 	if (!hdev->fd_en)
6706 		return 0;
6707 
6708 	spin_lock_bh(&hdev->fd_rule_lock);
6709 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6710 		if (rule->state == HCLGE_FD_ACTIVE)
6711 			rule->state = HCLGE_FD_TO_ADD;
6712 	}
6713 	spin_unlock_bh(&hdev->fd_rule_lock);
6714 	set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6715 
6716 	return 0;
6717 }
6718 
6719 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6720 				 struct ethtool_rxnfc *cmd)
6721 {
6722 	struct hclge_vport *vport = hclge_get_vport(handle);
6723 	struct hclge_dev *hdev = vport->back;
6724 
6725 	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle))
6726 		return -EOPNOTSUPP;
6727 
6728 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
6729 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6730 
6731 	return 0;
6732 }
6733 
6734 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6735 				     struct ethtool_tcpip4_spec *spec,
6736 				     struct ethtool_tcpip4_spec *spec_mask)
6737 {
6738 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6739 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6740 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6741 
6742 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6743 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6744 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6745 
6746 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6747 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6748 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6749 
6750 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6751 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6752 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6753 
6754 	spec->tos = rule->tuples.ip_tos;
6755 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6756 			0 : rule->tuples_mask.ip_tos;
6757 }
6758 
6759 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6760 				  struct ethtool_usrip4_spec *spec,
6761 				  struct ethtool_usrip4_spec *spec_mask)
6762 {
6763 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6764 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6765 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6766 
6767 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6768 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6769 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6770 
6771 	spec->tos = rule->tuples.ip_tos;
6772 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6773 			0 : rule->tuples_mask.ip_tos;
6774 
6775 	spec->proto = rule->tuples.ip_proto;
6776 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6777 			0 : rule->tuples_mask.ip_proto;
6778 
6779 	spec->ip_ver = ETH_RX_NFC_IP4;
6780 }
6781 
6782 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6783 				     struct ethtool_tcpip6_spec *spec,
6784 				     struct ethtool_tcpip6_spec *spec_mask)
6785 {
6786 	ipv6_addr_cpu_to_be32(spec->ip6src, rule->tuples.src_ip);
6787 	ipv6_addr_cpu_to_be32(spec->ip6dst, rule->tuples.dst_ip);
6788 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6789 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6790 	else
6791 		ipv6_addr_cpu_to_be32(spec_mask->ip6src,
6792 				      rule->tuples_mask.src_ip);
6793 
6794 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6795 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6796 	else
6797 		ipv6_addr_cpu_to_be32(spec_mask->ip6dst,
6798 				      rule->tuples_mask.dst_ip);
6799 
6800 	spec->tclass = rule->tuples.ip_tos;
6801 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6802 			0 : rule->tuples_mask.ip_tos;
6803 
6804 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6805 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6806 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6807 
6808 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6809 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6810 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6811 }
6812 
6813 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6814 				  struct ethtool_usrip6_spec *spec,
6815 				  struct ethtool_usrip6_spec *spec_mask)
6816 {
6817 	ipv6_addr_cpu_to_be32(spec->ip6src, rule->tuples.src_ip);
6818 	ipv6_addr_cpu_to_be32(spec->ip6dst, rule->tuples.dst_ip);
6819 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6820 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6821 	else
6822 		ipv6_addr_cpu_to_be32(spec_mask->ip6src,
6823 				      rule->tuples_mask.src_ip);
6824 
6825 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6826 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6827 	else
6828 		ipv6_addr_cpu_to_be32(spec_mask->ip6dst,
6829 				      rule->tuples_mask.dst_ip);
6830 
6831 	spec->tclass = rule->tuples.ip_tos;
6832 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6833 			0 : rule->tuples_mask.ip_tos;
6834 
6835 	spec->l4_proto = rule->tuples.ip_proto;
6836 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6837 			0 : rule->tuples_mask.ip_proto;
6838 }
6839 
6840 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6841 				    struct ethhdr *spec,
6842 				    struct ethhdr *spec_mask)
6843 {
6844 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6845 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6846 
6847 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6848 		eth_zero_addr(spec_mask->h_source);
6849 	else
6850 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6851 
6852 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
6853 		eth_zero_addr(spec_mask->h_dest);
6854 	else
6855 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6856 
6857 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6858 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6859 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6860 }
6861 
6862 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6863 				       struct hclge_fd_rule *rule)
6864 {
6865 	if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6866 	    HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6867 		fs->h_ext.data[0] = 0;
6868 		fs->h_ext.data[1] = 0;
6869 		fs->m_ext.data[0] = 0;
6870 		fs->m_ext.data[1] = 0;
6871 	} else {
6872 		fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6873 		fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6874 		fs->m_ext.data[0] =
6875 				cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6876 		fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6877 	}
6878 }
6879 
6880 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6881 				  struct hclge_fd_rule *rule)
6882 {
6883 	if (fs->flow_type & FLOW_EXT) {
6884 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6885 		fs->m_ext.vlan_tci =
6886 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6887 				0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6888 
6889 		hclge_fd_get_user_def_info(fs, rule);
6890 	}
6891 
6892 	if (fs->flow_type & FLOW_MAC_EXT) {
6893 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6894 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6895 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6896 		else
6897 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
6898 					rule->tuples_mask.dst_mac);
6899 	}
6900 }
6901 
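/* Look up a rule by its location in the fd rule list. The list is kept
 * sorted by location in ascending order, so the search can stop as soon
 * as a larger location is seen.
 */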
6902 static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev,
6903 					       u16 location)
6904 {
6905 	struct hclge_fd_rule *rule = NULL;
6906 	struct hlist_node *node2;
6907 
6908 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6909 		if (rule->location == location)
6910 			return rule;
6911 		else if (rule->location > location)
6912 			return NULL;
6913 	}
6914 
6915 	return NULL;
6916 }
6917 
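/* Encode the rule action into the ethtool ring_cookie: drop rules map to
 * RX_CLS_FLOW_DISC, otherwise the queue id goes in the low bits and the
 * vf id is shifted in above ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF.
 */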
6918 static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs,
6919 				     struct hclge_fd_rule *rule)
6920 {
6921 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6922 		fs->ring_cookie = RX_CLS_FLOW_DISC;
6923 	} else {
6924 		u64 vf_id;
6925 
6926 		fs->ring_cookie = rule->queue_id;
6927 		vf_id = rule->vf_id;
6928 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6929 		fs->ring_cookie |= vf_id;
6930 	}
6931 }
6932 
6933 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6934 				  struct ethtool_rxnfc *cmd)
6935 {
6936 	struct hclge_vport *vport = hclge_get_vport(handle);
6937 	struct hclge_fd_rule *rule = NULL;
6938 	struct hclge_dev *hdev = vport->back;
6939 	struct ethtool_rx_flow_spec *fs;
6940 
6941 	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6942 		return -EOPNOTSUPP;
6943 
6944 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6945 
6946 	spin_lock_bh(&hdev->fd_rule_lock);
6947 
6948 	rule = hclge_get_fd_rule(hdev, fs->location);
6949 	if (!rule) {
6950 		spin_unlock_bh(&hdev->fd_rule_lock);
6951 		return -ENOENT;
6952 	}
6953 
6954 	fs->flow_type = rule->flow_type;
6955 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6956 	case SCTP_V4_FLOW:
6957 	case TCP_V4_FLOW:
6958 	case UDP_V4_FLOW:
6959 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6960 					 &fs->m_u.tcp_ip4_spec);
6961 		break;
6962 	case IP_USER_FLOW:
6963 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6964 				      &fs->m_u.usr_ip4_spec);
6965 		break;
6966 	case SCTP_V6_FLOW:
6967 	case TCP_V6_FLOW:
6968 	case UDP_V6_FLOW:
6969 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6970 					 &fs->m_u.tcp_ip6_spec);
6971 		break;
6972 	case IPV6_USER_FLOW:
6973 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6974 				      &fs->m_u.usr_ip6_spec);
6975 		break;
6976 	/* The flow type of the fd rule has been checked before it was added
6977 	 * to the rule list. As the other flow types have been handled, it
6978 	 * must be ETHER_FLOW for the default case
6979 	 */
6980 	default:
6981 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6982 					&fs->m_u.ether_spec);
6983 		break;
6984 	}
6985 
6986 	hclge_fd_get_ext_info(fs, rule);
6987 
6988 	hclge_fd_get_ring_cookie(fs, rule);
6989 
6990 	spin_unlock_bh(&hdev->fd_rule_lock);
6991 
6992 	return 0;
6993 }
6994 
6995 static int hclge_get_all_rules(struct hnae3_handle *handle,
6996 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
6997 {
6998 	struct hclge_vport *vport = hclge_get_vport(handle);
6999 	struct hclge_dev *hdev = vport->back;
7000 	struct hclge_fd_rule *rule;
7001 	struct hlist_node *node2;
7002 	int cnt = 0;
7003 
7004 	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
7005 		return -EOPNOTSUPP;
7006 
7007 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
7008 
7009 	spin_lock_bh(&hdev->fd_rule_lock);
7010 	hlist_for_each_entry_safe(rule, node2,
7011 				  &hdev->fd_rule_list, rule_node) {
7012 		if (cnt == cmd->rule_cnt) {
7013 			spin_unlock_bh(&hdev->fd_rule_lock);
7014 			return -EMSGSIZE;
7015 		}
7016 
7017 		if (rule->state == HCLGE_FD_TO_DEL)
7018 			continue;
7019 
7020 		rule_locs[cnt] = rule->location;
7021 		cnt++;
7022 	}
7023 
7024 	spin_unlock_bh(&hdev->fd_rule_lock);
7025 
7026 	cmd->rule_cnt = cnt;
7027 
7028 	return 0;
7029 }
7030 
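/* Convert the dissected flow_keys of an aRFS flow into the driver's tuple
 * representation. IPv4 addresses are stored in the last word of the
 * IPv6-sized address array.
 */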
7031 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7032 				     struct hclge_fd_rule_tuples *tuples)
7033 {
7034 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7035 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7036 
7037 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7038 	tuples->ip_proto = fkeys->basic.ip_proto;
7039 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7040 
7041 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7042 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7043 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7044 	} else {
7045 		int i;
7046 
7047 		for (i = 0; i < IPV6_ADDR_WORDS; i++) {
7048 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7049 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7050 		}
7051 	}
7052 }
7053 
7054 /* traverse all rules, check whether an existing rule has the same tuples */
7055 static struct hclge_fd_rule *
7056 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7057 			  const struct hclge_fd_rule_tuples *tuples)
7058 {
7059 	struct hclge_fd_rule *rule = NULL;
7060 	struct hlist_node *node;
7061 
7062 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7063 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7064 			return rule;
7065 	}
7066 
7067 	return NULL;
7068 }
7069 
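/* Build an aRFS rule from the flow tuples: only the ether proto, ip proto,
 * dst port and src/dst ip are matched; MAC, vlan, tos and src port are
 * marked as unused.
 */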
7070 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7071 				     struct hclge_fd_rule *rule)
7072 {
7073 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7074 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7075 			     BIT(INNER_SRC_PORT);
7076 	rule->action = 0;
7077 	rule->vf_id = 0;
7078 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7079 	rule->state = HCLGE_FD_TO_ADD;
7080 	if (tuples->ether_proto == ETH_P_IP) {
7081 		if (tuples->ip_proto == IPPROTO_TCP)
7082 			rule->flow_type = TCP_V4_FLOW;
7083 		else
7084 			rule->flow_type = UDP_V4_FLOW;
7085 	} else {
7086 		if (tuples->ip_proto == IPPROTO_TCP)
7087 			rule->flow_type = TCP_V6_FLOW;
7088 		else
7089 			rule->flow_type = UDP_V6_FLOW;
7090 	}
7091 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7092 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7093 }
7094 
7095 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7096 				      u16 flow_id, struct flow_keys *fkeys)
7097 {
7098 	struct hclge_vport *vport = hclge_get_vport(handle);
7099 	struct hclge_fd_rule_tuples new_tuples = {};
7100 	struct hclge_dev *hdev = vport->back;
7101 	struct hclge_fd_rule *rule;
7102 	u16 bit_id;
7103 
7104 	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
7105 		return -EOPNOTSUPP;
7106 
7107 	/* when an fd rule added by the user already exists,
7108 	 * arfs should not work
7109 	 */
7110 	spin_lock_bh(&hdev->fd_rule_lock);
7111 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7112 	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7113 		spin_unlock_bh(&hdev->fd_rule_lock);
7114 		return -EOPNOTSUPP;
7115 	}
7116 
7117 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7118 
7119 	/* check whether a flow director filter already exists for this flow:
7120 	 * if not, create a new filter for it;
7121 	 * if a filter exists with a different queue id, modify the filter;
7122 	 * if a filter exists with the same queue id, do nothing
7123 	 */
7124 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7125 	if (!rule) {
7126 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7127 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7128 			spin_unlock_bh(&hdev->fd_rule_lock);
7129 			return -ENOSPC;
7130 		}
7131 
7132 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7133 		if (!rule) {
7134 			spin_unlock_bh(&hdev->fd_rule_lock);
7135 			return -ENOMEM;
7136 		}
7137 
7138 		rule->location = bit_id;
7139 		rule->arfs.flow_id = flow_id;
7140 		rule->queue_id = queue_id;
7141 		hclge_fd_build_arfs_rule(&new_tuples, rule);
7142 		hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7143 		hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7144 	} else if (rule->queue_id != queue_id) {
7145 		rule->queue_id = queue_id;
7146 		rule->state = HCLGE_FD_TO_ADD;
7147 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7148 		hclge_task_schedule(hdev, 0);
7149 	}
7150 	spin_unlock_bh(&hdev->fd_rule_lock);
7151 	return rule->location;
7152 }
7153 
7154 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7155 {
7156 #ifdef CONFIG_RFS_ACCEL
7157 	struct hnae3_handle *handle = &hdev->vport[0].nic;
7158 	struct hclge_fd_rule *rule;
7159 	struct hlist_node *node;
7160 
7161 	spin_lock_bh(&hdev->fd_rule_lock);
7162 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7163 		spin_unlock_bh(&hdev->fd_rule_lock);
7164 		return;
7165 	}
7166 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7167 		if (rule->state != HCLGE_FD_ACTIVE)
7168 			continue;
7169 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7170 					rule->arfs.flow_id, rule->location)) {
7171 			rule->state = HCLGE_FD_TO_DEL;
7172 			set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7173 		}
7174 	}
7175 	spin_unlock_bh(&hdev->fd_rule_lock);
7176 #endif
7177 }
7178 
7179 /* must be called with the fd_rule_lock held */
7180 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7181 {
7182 #ifdef CONFIG_RFS_ACCEL
7183 	struct hclge_fd_rule *rule;
7184 	struct hlist_node *node;
7185 	int ret;
7186 
7187 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7188 		return 0;
7189 
7190 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7191 		switch (rule->state) {
7192 		case HCLGE_FD_TO_DEL:
7193 		case HCLGE_FD_ACTIVE:
7194 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7195 						   rule->location, NULL, false);
7196 			if (ret)
7197 				return ret;
7198 			fallthrough;
7199 		case HCLGE_FD_TO_ADD:
7200 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7201 			hlist_del(&rule->rule_node);
7202 			kfree(rule);
7203 			break;
7204 		default:
7205 			break;
7206 		}
7207 	}
7208 	hclge_sync_fd_state(hdev);
7209 
7210 #endif
7211 	return 0;
7212 }
7213 
7214 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7215 				    struct hclge_fd_rule *rule)
7216 {
7217 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7218 		struct flow_match_basic match;
7219 		u16 ethtype_key, ethtype_mask;
7220 
7221 		flow_rule_match_basic(flow, &match);
7222 		ethtype_key = ntohs(match.key->n_proto);
7223 		ethtype_mask = ntohs(match.mask->n_proto);
7224 
7225 		if (ethtype_key == ETH_P_ALL) {
7226 			ethtype_key = 0;
7227 			ethtype_mask = 0;
7228 		}
7229 		rule->tuples.ether_proto = ethtype_key;
7230 		rule->tuples_mask.ether_proto = ethtype_mask;
7231 		rule->tuples.ip_proto = match.key->ip_proto;
7232 		rule->tuples_mask.ip_proto = match.mask->ip_proto;
7233 	} else {
7234 		rule->unused_tuple |= BIT(INNER_IP_PROTO);
7235 		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7236 	}
7237 }
7238 
7239 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7240 				  struct hclge_fd_rule *rule)
7241 {
7242 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7243 		struct flow_match_eth_addrs match;
7244 
7245 		flow_rule_match_eth_addrs(flow, &match);
7246 		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7247 		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7248 		ether_addr_copy(rule->tuples.src_mac, match.key->src);
7249 		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7250 	} else {
7251 		rule->unused_tuple |= BIT(INNER_DST_MAC);
7252 		rule->unused_tuple |= BIT(INNER_SRC_MAC);
7253 	}
7254 }
7255 
7256 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7257 				   struct hclge_fd_rule *rule)
7258 {
7259 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7260 		struct flow_match_vlan match;
7261 
7262 		flow_rule_match_vlan(flow, &match);
7263 		rule->tuples.vlan_tag1 = match.key->vlan_id |
7264 				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
7265 		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7266 				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7267 	} else {
7268 		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7269 	}
7270 }
7271 
7272 static int hclge_get_cls_key_ip(const struct flow_rule *flow,
7273 				struct hclge_fd_rule *rule,
7274 				struct netlink_ext_ack *extack)
7275 {
7276 	u16 addr_type = 0;
7277 
7278 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7279 		struct flow_match_control match;
7280 
7281 		flow_rule_match_control(flow, &match);
7282 		addr_type = match.key->addr_type;
7283 
7284 		if (flow_rule_has_control_flags(match.mask->flags, extack))
7285 			return -EOPNOTSUPP;
7286 	}
7287 
7288 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7289 		struct flow_match_ipv4_addrs match;
7290 
7291 		flow_rule_match_ipv4_addrs(flow, &match);
7292 		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7293 		rule->tuples_mask.src_ip[IPV4_INDEX] =
7294 						be32_to_cpu(match.mask->src);
7295 		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7296 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
7297 						be32_to_cpu(match.mask->dst);
7298 	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7299 		struct flow_match_ipv6_addrs match;
7300 
7301 		flow_rule_match_ipv6_addrs(flow, &match);
7302 		ipv6_addr_be32_to_cpu(rule->tuples.src_ip,
7303 				      match.key->src.s6_addr32);
7304 		ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip,
7305 				      match.mask->src.s6_addr32);
7306 		ipv6_addr_be32_to_cpu(rule->tuples.dst_ip,
7307 				      match.key->dst.s6_addr32);
7308 		ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip,
7309 				      match.mask->dst.s6_addr32);
7310 	} else {
7311 		rule->unused_tuple |= BIT(INNER_SRC_IP);
7312 		rule->unused_tuple |= BIT(INNER_DST_IP);
7313 	}
7314 
7315 	return 0;
7316 }
7317 
7318 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7319 				   struct hclge_fd_rule *rule)
7320 {
7321 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7322 		struct flow_match_ports match;
7323 
7324 		flow_rule_match_ports(flow, &match);
7325 
7326 		rule->tuples.src_port = be16_to_cpu(match.key->src);
7327 		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7328 		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7329 		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7330 	} else {
7331 		rule->unused_tuple |= BIT(INNER_SRC_PORT);
7332 		rule->unused_tuple |= BIT(INNER_DST_PORT);
7333 	}
7334 }
7335 
7336 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7337 				  struct flow_cls_offload *cls_flower,
7338 				  struct hclge_fd_rule *rule)
7339 {
7340 	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7341 	struct netlink_ext_ack *extack = cls_flower->common.extack;
7342 	struct flow_dissector *dissector = flow->match.dissector;
7343 	int ret;
7344 
7345 	if (dissector->used_keys &
7346 	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
7347 	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
7348 	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7349 	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
7350 	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7351 	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7352 	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) {
7353 		dev_err(&hdev->pdev->dev, "unsupported key set: %#llx\n",
7354 			dissector->used_keys);
7355 		return -EOPNOTSUPP;
7356 	}
7357 
7358 	hclge_get_cls_key_basic(flow, rule);
7359 	hclge_get_cls_key_mac(flow, rule);
7360 	hclge_get_cls_key_vlan(flow, rule);
7361 
7362 	ret = hclge_get_cls_key_ip(flow, rule, extack);
7363 	if (ret)
7364 		return ret;
7365 
7366 	hclge_get_cls_key_port(flow, rule);
7367 
7368 	return 0;
7369 }
7370 
7371 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7372 				  struct flow_cls_offload *cls_flower, int tc)
7373 {
7374 	u32 prio = cls_flower->common.prio;
7375 
7376 	if (tc < 0 || tc > hdev->tc_max) {
7377 		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7378 		return -EINVAL;
7379 	}
7380 
7381 	if (prio == 0 ||
7382 	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7383 		dev_err(&hdev->pdev->dev,
7384 			"prio %u should be in range[1, %u]\n",
7385 			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7386 		return -EINVAL;
7387 	}
7388 
7389 	if (test_bit(prio - 1, hdev->fd_bmap)) {
7390 		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7391 		return -EINVAL;
7392 	}
7393 	return 0;
7394 }
7395 
7396 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7397 				struct flow_cls_offload *cls_flower,
7398 				int tc)
7399 {
7400 	struct hclge_vport *vport = hclge_get_vport(handle);
7401 	struct hclge_dev *hdev = vport->back;
7402 	struct hclge_fd_rule *rule;
7403 	int ret;
7404 
7405 	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
7406 		dev_err(&hdev->pdev->dev,
7407 			"cls flower is not supported\n");
7408 		return -EOPNOTSUPP;
7409 	}
7410 
7411 	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7412 	if (ret) {
7413 		dev_err(&hdev->pdev->dev,
7414 			"failed to check cls flower params, ret = %d\n", ret);
7415 		return ret;
7416 	}
7417 
7418 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7419 	if (!rule)
7420 		return -ENOMEM;
7421 
7422 	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7423 	if (ret) {
7424 		kfree(rule);
7425 		return ret;
7426 	}
7427 
7428 	rule->action = HCLGE_FD_ACTION_SELECT_TC;
7429 	rule->cls_flower.tc = tc;
7430 	rule->location = cls_flower->common.prio - 1;
7431 	rule->vf_id = 0;
7432 	rule->cls_flower.cookie = cls_flower->cookie;
7433 	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7434 
7435 	ret = hclge_add_fd_entry_common(hdev, rule);
7436 	if (ret)
7437 		kfree(rule);
7438 
7439 	return ret;
7440 }
7441 
7442 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7443 						   unsigned long cookie)
7444 {
7445 	struct hclge_fd_rule *rule;
7446 	struct hlist_node *node;
7447 
7448 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7449 		if (rule->cls_flower.cookie == cookie)
7450 			return rule;
7451 	}
7452 
7453 	return NULL;
7454 }
7455 
7456 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7457 				struct flow_cls_offload *cls_flower)
7458 {
7459 	struct hclge_vport *vport = hclge_get_vport(handle);
7460 	struct hclge_dev *hdev = vport->back;
7461 	struct hclge_fd_rule *rule;
7462 	int ret;
7463 
7464 	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
7465 		return -EOPNOTSUPP;
7466 
7467 	spin_lock_bh(&hdev->fd_rule_lock);
7468 
7469 	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7470 	if (!rule) {
7471 		spin_unlock_bh(&hdev->fd_rule_lock);
7472 		return -EINVAL;
7473 	}
7474 
7475 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7476 				   NULL, false);
7477 	if (ret) {
7478 		/* if the tcam config fails, set the rule state to TO_DEL,
7479 		 * so the rule will be deleted when the periodic
7480 		 * task is scheduled.
7481 		 */
7482 		hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL);
7483 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7484 		spin_unlock_bh(&hdev->fd_rule_lock);
7485 		return ret;
7486 	}
7487 
7488 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7489 	spin_unlock_bh(&hdev->fd_rule_lock);
7490 
7491 	return 0;
7492 }
7493 
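/* Flush pending rule changes to hardware: program rules in TO_ADD state and
 * remove rules in TO_DEL state. On failure the changed flag is set again so
 * the periodic task will retry.
 */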
7494 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7495 {
7496 	struct hclge_fd_rule *rule;
7497 	struct hlist_node *node;
7498 	int ret = 0;
7499 
7500 	if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7501 		return;
7502 
7503 	spin_lock_bh(&hdev->fd_rule_lock);
7504 
7505 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7506 		switch (rule->state) {
7507 		case HCLGE_FD_TO_ADD:
7508 			ret = hclge_fd_config_rule(hdev, rule);
7509 			if (ret)
7510 				goto out;
7511 			rule->state = HCLGE_FD_ACTIVE;
7512 			break;
7513 		case HCLGE_FD_TO_DEL:
7514 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7515 						   rule->location, NULL, false);
7516 			if (ret)
7517 				goto out;
7518 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7519 			hclge_fd_free_node(hdev, rule);
7520 			break;
7521 		default:
7522 			break;
7523 		}
7524 	}
7525 
7526 out:
7527 	if (ret)
7528 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7529 
7530 	spin_unlock_bh(&hdev->fd_rule_lock);
7531 }
7532 
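/* Periodic sync entry for the flow director table: clear all rules if
 * requested, then sync the user-def config and the rule list.
 */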
7533 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7534 {
7535 	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
7536 		return;
7537 
7538 	if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7539 		bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7540 
7541 		hclge_clear_fd_rules_in_list(hdev, clear_list);
7542 	}
7543 
7544 	hclge_sync_fd_user_def_cfg(hdev, false);
7545 
7546 	hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7547 }
7548 
7549 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7550 {
7551 	struct hclge_vport *vport = hclge_get_vport(handle);
7552 	struct hclge_dev *hdev = vport->back;
7553 
7554 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7555 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7556 }
7557 
7558 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7559 {
7560 	struct hclge_vport *vport = hclge_get_vport(handle);
7561 	struct hclge_dev *hdev = vport->back;
7562 
7563 	return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
7564 }
7565 
7566 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7567 {
7568 	struct hclge_vport *vport = hclge_get_vport(handle);
7569 	struct hclge_dev *hdev = vport->back;
7570 
7571 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7572 }
7573 
7574 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7575 {
7576 	struct hclge_vport *vport = hclge_get_vport(handle);
7577 	struct hclge_dev *hdev = vport->back;
7578 
7579 	return hdev->rst_stats.hw_reset_done_cnt;
7580 }
7581 
7582 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7583 {
7584 	struct hclge_vport *vport = hclge_get_vport(handle);
7585 	struct hclge_dev *hdev = vport->back;
7586 
7587 	hdev->fd_en = enable;
7588 
7589 	if (!enable)
7590 		set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7591 	else
7592 		hclge_restore_fd_entries(handle);
7593 
7594 	hclge_task_schedule(hdev, 0);
7595 }
7596 
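/* Enable or disable MAC TX/RX along with padding, FCS and truncation
 * handling. When disabling, wait for the MAC link to report down.
 */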
7597 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7598 {
7599 #define HCLGE_LINK_STATUS_WAIT_CNT  3
7600 
7601 	struct hclge_desc desc;
7602 	struct hclge_config_mac_mode_cmd *req =
7603 		(struct hclge_config_mac_mode_cmd *)desc.data;
7604 	u32 loop_en = 0;
7605 	int ret;
7606 
7607 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7608 
7609 	if (enable) {
7610 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7611 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7612 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7613 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7614 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7615 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7616 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7617 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7618 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7619 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7620 	}
7621 
7622 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7623 
7624 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7625 	if (ret) {
7626 		dev_err(&hdev->pdev->dev,
7627 			"mac enable fail, ret =%d.\n", ret);
7628 		return;
7629 	}
7630 
7631 	if (!enable)
7632 		hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN,
7633 					   HCLGE_LINK_STATUS_WAIT_CNT);
7634 }
7635 
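/* Read-modify-write the mac vlan switch parameter of the given function:
 * only the bits selected by param_mask are updated.
 */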
7636 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7637 				     u8 switch_param, u8 param_mask)
7638 {
7639 	struct hclge_mac_vlan_switch_cmd *req;
7640 	struct hclge_desc desc;
7641 	u32 func_id;
7642 	int ret;
7643 
7644 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7645 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7646 
7647 	/* read current config parameter */
7648 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7649 				   true);
7650 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7651 	req->func_id = cpu_to_le32(func_id);
7652 
7653 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7654 	if (ret) {
7655 		dev_err(&hdev->pdev->dev,
7656 			"read mac vlan switch parameter fail, ret = %d\n", ret);
7657 		return ret;
7658 	}
7659 
7660 	/* modify and write new config parameter */
7661 	hclge_comm_cmd_reuse_desc(&desc, false);
7662 	req->switch_param = (req->switch_param & param_mask) | switch_param;
7663 	req->param_mask = param_mask;
7664 
7665 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7666 	if (ret)
7667 		dev_err(&hdev->pdev->dev,
7668 			"set mac vlan switch parameter fail, ret = %d\n", ret);
7669 	return ret;
7670 }
7671 
7672 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7673 				       int link_ret)
7674 {
7675 #define HCLGE_PHY_LINK_STATUS_NUM  200
7676 
7677 	struct phy_device *phydev = hdev->hw.mac.phydev;
7678 	int i = 0;
7679 	int ret;
7680 
7681 	do {
7682 		ret = phy_read_status(phydev);
7683 		if (ret) {
7684 			dev_err(&hdev->pdev->dev,
7685 				"phy update link status fail, ret = %d\n", ret);
7686 			return;
7687 		}
7688 
7689 		if (phydev->link == link_ret)
7690 			break;
7691 
7692 		msleep(HCLGE_LINK_STATUS_MS);
7693 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7694 }
7695 
7696 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
7697 				      int wait_cnt)
7698 {
7699 	int link_status;
7700 	int i = 0;
7701 	int ret;
7702 
7703 	do {
7704 		ret = hclge_get_mac_link_status(hdev, &link_status);
7705 		if (ret)
7706 			return ret;
7707 		if (link_status == link_ret)
7708 			return 0;
7709 
7710 		msleep(HCLGE_LINK_STATUS_MS);
7711 	} while (++i < wait_cnt);
7712 	return -EBUSY;
7713 }
7714 
7715 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7716 					  bool is_phy)
7717 {
7718 #define HCLGE_MAC_LINK_STATUS_NUM  100
7719 
7720 	int link_ret;
7721 
7722 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7723 
7724 	if (is_phy)
7725 		hclge_phy_link_status_wait(hdev, link_ret);
7726 
7727 	return hclge_mac_link_status_wait(hdev, link_ret,
7728 					  HCLGE_MAC_LINK_STATUS_NUM);
7729 }
7730 
7731 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7732 {
7733 	struct hclge_config_mac_mode_cmd *req;
7734 	struct hclge_desc desc;
7735 	u32 loop_en;
7736 	int ret;
7737 
7738 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7739 	/* 1. Read out the current MAC mode config first */
7740 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7741 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7742 	if (ret) {
7743 		dev_err(&hdev->pdev->dev,
7744 			"mac loopback get fail, ret =%d.\n", ret);
7745 		return ret;
7746 	}
7747 
7748 	/* 2. Then set up the loopback flag */
7749 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7750 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7751 
7752 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7753 
7754 	/* 3. Configure the mac work mode with the loopback flag
7755 	 * and its original configuration parameters
7756 	 */
7757 	hclge_comm_cmd_reuse_desc(&desc, false);
7758 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7759 	if (ret)
7760 		dev_err(&hdev->pdev->dev,
7761 			"mac loopback set fail, ret =%d.\n", ret);
7762 	return ret;
7763 }
7764 
7765 static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en,
7766 					      enum hnae3_loop loop_mode)
7767 {
7768 	struct hclge_common_lb_cmd *req;
7769 	struct hclge_desc desc;
7770 	u8 loop_mode_b;
7771 	int ret;
7772 
7773 	req = (struct hclge_common_lb_cmd *)desc.data;
7774 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7775 
7776 	switch (loop_mode) {
7777 	case HNAE3_LOOP_SERIAL_SERDES:
7778 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7779 		break;
7780 	case HNAE3_LOOP_PARALLEL_SERDES:
7781 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7782 		break;
7783 	case HNAE3_LOOP_PHY:
7784 		loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7785 		break;
7786 	default:
7787 		dev_err(&hdev->pdev->dev,
7788 			"unsupported loopback mode %d\n", loop_mode);
7789 		return -ENOTSUPP;
7790 	}
7791 
7792 	req->mask = loop_mode_b;
7793 	if (en)
7794 		req->enable = loop_mode_b;
7795 
7796 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7797 	if (ret)
7798 		dev_err(&hdev->pdev->dev,
7799 			"failed to send loopback cmd, loop_mode = %d, ret = %d\n",
7800 			loop_mode, ret);
7801 
7802 	return ret;
7803 }
7804 
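/* Poll the loopback status until the firmware reports the configuration is
 * done (up to HCLGE_COMMON_LB_RETRY_NUM * HCLGE_COMMON_LB_RETRY_MS), then
 * check whether it succeeded.
 */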
7805 static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev)
7806 {
7807 #define HCLGE_COMMON_LB_RETRY_MS	10
7808 #define HCLGE_COMMON_LB_RETRY_NUM	100
7809 
7810 	struct hclge_common_lb_cmd *req;
7811 	struct hclge_desc desc;
7812 	u32 i = 0;
7813 	int ret;
7814 
7815 	req = (struct hclge_common_lb_cmd *)desc.data;
7816 
7817 	do {
7818 		msleep(HCLGE_COMMON_LB_RETRY_MS);
7819 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7820 					   true);
7821 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7822 		if (ret) {
7823 			dev_err(&hdev->pdev->dev,
7824 				"failed to get loopback done status, ret = %d\n",
7825 				ret);
7826 			return ret;
7827 		}
7828 	} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7829 		 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7830 
7831 	if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7832 		dev_err(&hdev->pdev->dev, "wait loopback timeout\n");
7833 		return -EBUSY;
7834 	} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7835 		dev_err(&hdev->pdev->dev, "failed to do loopback test\n");
7836 		return -EIO;
7837 	}
7838 
7839 	return 0;
7840 }
7841 
7842 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7843 				     enum hnae3_loop loop_mode)
7844 {
7845 	int ret;
7846 
7847 	ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode);
7848 	if (ret)
7849 		return ret;
7850 
7851 	return hclge_cfg_common_loopback_wait(hdev);
7852 }
7853 
7854 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7855 				     enum hnae3_loop loop_mode)
7856 {
7857 	int ret;
7858 
7859 	ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7860 	if (ret)
7861 		return ret;
7862 
7863 	hclge_cfg_mac_mode(hdev, en);
7864 
7865 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7866 	if (ret)
7867 		dev_err(&hdev->pdev->dev,
7868 			"serdes loopback config mac mode timeout\n");
7869 
7870 	return ret;
7871 }
7872 
7873 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7874 				     struct phy_device *phydev)
7875 {
7876 	int ret;
7877 
7878 	if (!phydev->suspended) {
7879 		ret = phy_suspend(phydev);
7880 		if (ret)
7881 			return ret;
7882 	}
7883 
7884 	ret = phy_resume(phydev);
7885 	if (ret)
7886 		return ret;
7887 
7888 	return phy_loopback(phydev, true);
7889 }
7890 
7891 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7892 				      struct phy_device *phydev)
7893 {
7894 	int ret;
7895 
7896 	ret = phy_loopback(phydev, false);
7897 	if (ret)
7898 		return ret;
7899 
7900 	return phy_suspend(phydev);
7901 }
7902 
7903 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7904 {
7905 	struct phy_device *phydev = hdev->hw.mac.phydev;
7906 	int ret;
7907 
7908 	if (!phydev) {
7909 		if (hnae3_dev_phy_imp_supported(hdev))
7910 			return hclge_set_common_loopback(hdev, en,
7911 							 HNAE3_LOOP_PHY);
7912 		return -ENOTSUPP;
7913 	}
7914 
7915 	if (en)
7916 		ret = hclge_enable_phy_loopback(hdev, phydev);
7917 	else
7918 		ret = hclge_disable_phy_loopback(hdev, phydev);
7919 	if (ret) {
7920 		dev_err(&hdev->pdev->dev,
7921 			"set phy loopback fail, ret = %d\n", ret);
7922 		return ret;
7923 	}
7924 
7925 	hclge_cfg_mac_mode(hdev, en);
7926 
7927 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7928 	if (ret)
7929 		dev_err(&hdev->pdev->dev,
7930 			"phy loopback config mac mode timeout\n");
7931 
7932 	return ret;
7933 }
7934 
7935 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7936 				     u16 stream_id, bool enable)
7937 {
7938 	struct hclge_desc desc;
7939 	struct hclge_cfg_com_tqp_queue_cmd *req =
7940 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7941 
7942 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7943 	req->tqp_id = cpu_to_le16(tqp_id);
7944 	req->stream_id = cpu_to_le16(stream_id);
7945 	if (enable)
7946 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7947 
7948 	return hclge_cmd_send(&hdev->hw, &desc, 1);
7949 }
7950 
7951 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7952 {
7953 	struct hclge_vport *vport = hclge_get_vport(handle);
7954 	struct hclge_dev *hdev = vport->back;
7955 	int ret;
7956 	u16 i;
7957 
7958 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
7959 		ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7960 		if (ret)
7961 			return ret;
7962 	}
7963 	return 0;
7964 }
7965 
7966 static int hclge_set_loopback(struct hnae3_handle *handle,
7967 			      enum hnae3_loop loop_mode, bool en)
7968 {
7969 	struct hclge_vport *vport = hclge_get_vport(handle);
7970 	struct hclge_dev *hdev = vport->back;
7971 	int ret = 0;
7972 
7973 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7974 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7975 	 * the same, the packets are looped back in the SSU. If SSU loopback
7976 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7977 	 */
7978 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7979 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7980 
7981 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7982 						HCLGE_SWITCH_ALW_LPBK_MASK);
7983 		if (ret)
7984 			return ret;
7985 	}
7986 
7987 	switch (loop_mode) {
7988 	case HNAE3_LOOP_APP:
7989 		ret = hclge_set_app_loopback(hdev, en);
7990 		break;
7991 	case HNAE3_LOOP_SERIAL_SERDES:
7992 	case HNAE3_LOOP_PARALLEL_SERDES:
7993 		ret = hclge_set_common_loopback(hdev, en, loop_mode);
7994 		break;
7995 	case HNAE3_LOOP_PHY:
7996 		ret = hclge_set_phy_loopback(hdev, en);
7997 		break;
7998 	case HNAE3_LOOP_EXTERNAL:
7999 		break;
8000 	default:
8001 		ret = -ENOTSUPP;
8002 		dev_err(&hdev->pdev->dev,
8003 			"loop_mode %d is not supported\n", loop_mode);
8004 		break;
8005 	}
8006 
8007 	if (ret)
8008 		return ret;
8009 
8010 	ret = hclge_tqp_enable(handle, en);
8011 	if (ret)
8012 		dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
8013 			en ? "enable" : "disable", ret);
8014 
8015 	return ret;
8016 }
8017 
8018 static int hclge_set_default_loopback(struct hclge_dev *hdev)
8019 {
8020 	int ret;
8021 
8022 	ret = hclge_set_app_loopback(hdev, false);
8023 	if (ret)
8024 		return ret;
8025 
8026 	ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
8027 	if (ret)
8028 		return ret;
8029 
8030 	return hclge_cfg_common_loopback(hdev, false,
8031 					 HNAE3_LOOP_PARALLEL_SERDES);
8032 }
8033 
8034 static void hclge_flush_link_update(struct hclge_dev *hdev)
8035 {
8036 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
8037 
8038 	unsigned long last = hdev->serv_processed_cnt;
8039 	int i = 0;
8040 
8041 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8042 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8043 	       last == hdev->serv_processed_cnt)
8044 		usleep_range(1, 1);
8045 }
8046 
8047 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8048 {
8049 	struct hclge_vport *vport = hclge_get_vport(handle);
8050 	struct hclge_dev *hdev = vport->back;
8051 
8052 	if (enable) {
8053 		hclge_task_schedule(hdev, 0);
8054 	} else {
8055 		/* Set the DOWN flag here to disable link updating */
8056 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
8057 
8058 		smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */
8059 		hclge_flush_link_update(hdev);
8060 	}
8061 }
8062 
8063 static int hclge_ae_start(struct hnae3_handle *handle)
8064 {
8065 	struct hclge_vport *vport = hclge_get_vport(handle);
8066 	struct hclge_dev *hdev = vport->back;
8067 
8068 	/* mac enable */
8069 	hclge_cfg_mac_mode(hdev, true);
8070 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8071 	hdev->hw.mac.link = 0;
8072 
8073 	/* reset tqp stats */
8074 	hclge_comm_reset_tqp_stats(handle);
8075 
8076 	hclge_mac_start_phy(hdev);
8077 
8078 	return 0;
8079 }
8080 
8081 static void hclge_ae_stop(struct hnae3_handle *handle)
8082 {
8083 	struct hclge_vport *vport = hclge_get_vport(handle);
8084 	struct hclge_dev *hdev = vport->back;
8085 
8086 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
8087 	spin_lock_bh(&hdev->fd_rule_lock);
8088 	hclge_clear_arfs_rules(hdev);
8089 	spin_unlock_bh(&hdev->fd_rule_lock);
8090 
8091 	/* If it is not a PF reset or FLR, the firmware will disable the MAC,
8092 	 * so it only needs to stop the phy here.
8093 	 */
8094 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
8095 		hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE,
8096 				       HCLGE_PFC_DISABLE);
8097 		if (hdev->reset_type != HNAE3_FUNC_RESET &&
8098 		    hdev->reset_type != HNAE3_FLR_RESET) {
8099 			hclge_mac_stop_phy(hdev);
8100 			hclge_update_link_status(hdev);
8101 			return;
8102 		}
8103 	}
8104 
8105 	hclge_reset_tqp(handle);
8106 
8107 	hclge_config_mac_tnl_int(hdev, false);
8108 
8109 	/* Mac disable */
8110 	hclge_cfg_mac_mode(hdev, false);
8111 
8112 	hclge_mac_stop_phy(hdev);
8113 
8114 	/* reset tqp stats */
8115 	hclge_comm_reset_tqp_stats(handle);
8116 	hclge_update_link_status(hdev);
8117 }
8118 
8119 int hclge_vport_start(struct hclge_vport *vport)
8120 {
8121 	struct hclge_dev *hdev = vport->back;
8122 
8123 	set_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
8124 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8125 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8126 	vport->last_active_jiffies = jiffies;
8127 	vport->need_notify = 0;
8128 
8129 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8130 		if (vport->vport_id) {
8131 			hclge_restore_mac_table_common(vport);
8132 			hclge_restore_vport_vlan_table(vport);
8133 		} else {
8134 			hclge_restore_hw_table(hdev);
8135 		}
8136 	}
8137 
8138 	clear_bit(vport->vport_id, hdev->vport_config_block);
8139 
8140 	return 0;
8141 }
8142 
8143 void hclge_vport_stop(struct hclge_vport *vport)
8144 {
8145 	clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
8146 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8147 	vport->need_notify = 0;
8148 }
8149 
8150 static int hclge_client_start(struct hnae3_handle *handle)
8151 {
8152 	struct hclge_vport *vport = hclge_get_vport(handle);
8153 
8154 	return hclge_vport_start(vport);
8155 }
8156 
8157 static void hclge_client_stop(struct hnae3_handle *handle)
8158 {
8159 	struct hclge_vport *vport = hclge_get_vport(handle);
8160 
8161 	hclge_vport_stop(vport);
8162 }
8163 
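/* Translate the firmware response of a mac vlan table command into an errno
 * according to the operation type (add, remove or lookup).
 */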
8164 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8165 					 u16 cmdq_resp, u8  resp_code,
8166 					 enum hclge_mac_vlan_tbl_opcode op)
8167 {
8168 	struct hclge_dev *hdev = vport->back;
8169 
8170 	if (cmdq_resp) {
8171 		dev_err(&hdev->pdev->dev,
8172 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8173 			cmdq_resp);
8174 		return -EIO;
8175 	}
8176 
8177 	if (op == HCLGE_MAC_VLAN_ADD) {
8178 		if (!resp_code || resp_code == 1)
8179 			return 0;
8180 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8181 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
8182 			return -ENOSPC;
8183 
8184 		dev_err(&hdev->pdev->dev,
8185 			"add mac addr failed for undefined, code=%u.\n",
8186 			resp_code);
8187 		return -EIO;
8188 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
8189 		if (!resp_code) {
8190 			return 0;
8191 		} else if (resp_code == 1) {
8192 			dev_dbg(&hdev->pdev->dev,
8193 				"remove mac addr failed for miss.\n");
8194 			return -ENOENT;
8195 		}
8196 
8197 		dev_err(&hdev->pdev->dev,
8198 			"remove mac addr failed for undefined, code=%u.\n",
8199 			resp_code);
8200 		return -EIO;
8201 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
8202 		if (!resp_code) {
8203 			return 0;
8204 		} else if (resp_code == 1) {
8205 			dev_dbg(&hdev->pdev->dev,
8206 				"lookup mac addr failed for miss.\n");
8207 			return -ENOENT;
8208 		}
8209 
8210 		dev_err(&hdev->pdev->dev,
8211 			"lookup mac addr failed for undefined, code=%u.\n",
8212 			resp_code);
8213 		return -EIO;
8214 	}
8215 
8216 	dev_err(&hdev->pdev->dev,
8217 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8218 
8219 	return -EINVAL;
8220 }
8221 
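/* Set or clear the bit of the given function id in the mac vlan table
 * descriptor bitmap: the first 192 function ids live in desc[1], the rest
 * in desc[2].
 */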
8222 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8223 {
8224 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8225 
8226 	unsigned int word_num;
8227 	unsigned int bit_num;
8228 
8229 	if (vfid > 255 || vfid < 0)
8230 		return -EIO;
8231 
8232 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8233 		word_num = vfid / 32;
8234 		bit_num  = vfid % 32;
8235 		if (clr)
8236 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8237 		else
8238 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8239 	} else {
8240 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8241 		bit_num  = vfid % 32;
8242 		if (clr)
8243 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8244 		else
8245 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8246 	}
8247 
8248 	return 0;
8249 }
8250 
8251 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8252 {
8253 #define HCLGE_DESC_NUMBER 3
8254 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8255 	int i, j;
8256 
8257 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8258 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8259 			if (desc[i].data[j])
8260 				return false;
8261 
8262 	return true;
8263 }
8264 
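/* Pack the MAC address into the table entry: bytes 0-3 go into
 * mac_addr_hi32 and bytes 4-5 into mac_addr_lo16. Multicast entries
 * additionally set the entry type and mc_mac_en bits.
 */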
8265 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8266 				   const u8 *addr, bool is_mc)
8267 {
8268 	const unsigned char *mac_addr = addr;
8269 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8270 		       (mac_addr[0]) | (mac_addr[1] << 8);
8271 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
8272 
8273 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8274 	if (is_mc) {
8275 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8276 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8277 	}
8278 
8279 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8280 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8281 }
8282 
8283 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8284 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
8285 {
8286 	struct hclge_dev *hdev = vport->back;
8287 	struct hclge_desc desc;
8288 	u8 resp_code;
8289 	u16 retval;
8290 	int ret;
8291 
8292 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8293 
8294 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8295 
8296 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8297 	if (ret) {
8298 		dev_err(&hdev->pdev->dev,
8299 			"del mac addr failed for cmd_send, ret =%d.\n",
8300 			ret);
8301 		return ret;
8302 	}
8303 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8304 	retval = le16_to_cpu(desc.retval);
8305 
8306 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8307 					     HCLGE_MAC_VLAN_REMOVE);
8308 }
8309 
8310 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8311 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
8312 				     struct hclge_desc *desc,
8313 				     bool is_mc)
8314 {
8315 	struct hclge_dev *hdev = vport->back;
8316 	u8 resp_code;
8317 	u16 retval;
8318 	int ret;
8319 
8320 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8321 	if (is_mc) {
8322 		desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8323 		memcpy(desc[0].data,
8324 		       req,
8325 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8326 		hclge_cmd_setup_basic_desc(&desc[1],
8327 					   HCLGE_OPC_MAC_VLAN_ADD,
8328 					   true);
8329 		desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8330 		hclge_cmd_setup_basic_desc(&desc[2],
8331 					   HCLGE_OPC_MAC_VLAN_ADD,
8332 					   true);
8333 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
8334 	} else {
8335 		memcpy(desc[0].data,
8336 		       req,
8337 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8338 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
8339 	}
8340 	if (ret) {
8341 		dev_err(&hdev->pdev->dev,
8342 			"lookup mac addr failed for cmd_send, ret =%d.\n",
8343 			ret);
8344 		return ret;
8345 	}
8346 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8347 	retval = le16_to_cpu(desc[0].retval);
8348 
8349 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8350 					     HCLGE_MAC_VLAN_LKUP);
8351 }
8352 
8353 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8354 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
8355 				  struct hclge_desc *mc_desc)
8356 {
8357 	struct hclge_dev *hdev = vport->back;
8358 	int cfg_status;
8359 	u8 resp_code;
8360 	u16 retval;
8361 	int ret;
8362 
8363 	if (!mc_desc) {
8364 		struct hclge_desc desc;
8365 
8366 		hclge_cmd_setup_basic_desc(&desc,
8367 					   HCLGE_OPC_MAC_VLAN_ADD,
8368 					   false);
8369 		memcpy(desc.data, req,
8370 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8371 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8372 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8373 		retval = le16_to_cpu(desc.retval);
8374 
8375 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8376 							   resp_code,
8377 							   HCLGE_MAC_VLAN_ADD);
8378 	} else {
8379 		hclge_comm_cmd_reuse_desc(&mc_desc[0], false);
8380 		mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8381 		hclge_comm_cmd_reuse_desc(&mc_desc[1], false);
8382 		mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8383 		hclge_comm_cmd_reuse_desc(&mc_desc[2], false);
8384 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT);
8385 		memcpy(mc_desc[0].data, req,
8386 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8387 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8388 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8389 		retval = le16_to_cpu(mc_desc[0].retval);
8390 
8391 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8392 							   resp_code,
8393 							   HCLGE_MAC_VLAN_ADD);
8394 	}
8395 
8396 	if (ret) {
8397 		dev_err(&hdev->pdev->dev,
8398 			"add mac addr failed for cmd_send, ret =%d.\n",
8399 			ret);
8400 		return ret;
8401 	}
8402 
8403 	return cfg_status;
8404 }
8405 
8406 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8407 			       u16 *allocated_size)
8408 {
8409 	struct hclge_umv_spc_alc_cmd *req;
8410 	struct hclge_desc desc;
8411 	int ret;
8412 
8413 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8414 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8415 
8416 	req->space_size = cpu_to_le32(space_size);
8417 
8418 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8419 	if (ret) {
8420 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8421 			ret);
8422 		return ret;
8423 	}
8424 
8425 	*allocated_size = le32_to_cpu(desc.data[1]);
8426 
8427 	return 0;
8428 }
8429 
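/* Request unicast MAC vlan (UMV) table space from the firmware and split it
 * into a private quota per vport plus a shared pool used once a vport has
 * exhausted its private quota.
 */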
8430 static int hclge_init_umv_space(struct hclge_dev *hdev)
8431 {
8432 	u16 allocated_size = 0;
8433 	int ret;
8434 
8435 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8436 	if (ret)
8437 		return ret;
8438 
8439 	if (allocated_size < hdev->wanted_umv_size)
8440 		dev_warn(&hdev->pdev->dev,
8441 			 "failed to alloc umv space, want %u, get %u\n",
8442 			 hdev->wanted_umv_size, allocated_size);
8443 
8444 	hdev->max_umv_size = allocated_size;
8445 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8446 	hdev->share_umv_size = hdev->priv_umv_size +
8447 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8448 
8449 	if (hdev->ae_dev->dev_specs.mc_mac_size)
8450 		set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
8451 
8452 	return 0;
8453 }
8454 
8455 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8456 {
8457 	struct hclge_vport *vport;
8458 	int i;
8459 
8460 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8461 		vport = &hdev->vport[i];
8462 		vport->used_umv_num = 0;
8463 	}
8464 
8465 	mutex_lock(&hdev->vport_lock);
8466 	hdev->share_umv_size = hdev->priv_umv_size +
8467 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8468 	mutex_unlock(&hdev->vport_lock);
8469 
8470 	hdev->used_mc_mac_num = 0;
8471 }
8472 
8473 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8474 {
8475 	struct hclge_dev *hdev = vport->back;
8476 	bool is_full;
8477 
8478 	if (need_lock)
8479 		mutex_lock(&hdev->vport_lock);
8480 
8481 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8482 		   hdev->share_umv_size == 0);
8483 
8484 	if (need_lock)
8485 		mutex_unlock(&hdev->vport_lock);
8486 
8487 	return is_full;
8488 }
8489 
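/* Account a UMV entry add or free: the private quota of the vport is
 * consumed first, the shared pool only once the private quota is used up.
 */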
8490 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8491 {
8492 	struct hclge_dev *hdev = vport->back;
8493 
8494 	if (is_free) {
8495 		if (vport->used_umv_num > hdev->priv_umv_size)
8496 			hdev->share_umv_size++;
8497 
8498 		if (vport->used_umv_num > 0)
8499 			vport->used_umv_num--;
8500 	} else {
8501 		if (vport->used_umv_num >= hdev->priv_umv_size &&
8502 		    hdev->share_umv_size > 0)
8503 			hdev->share_umv_size--;
8504 		vport->used_umv_num++;
8505 	}
8506 }
8507 
8508 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8509 						  const u8 *mac_addr)
8510 {
8511 	struct hclge_mac_node *mac_node, *tmp;
8512 
8513 	list_for_each_entry_safe(mac_node, tmp, list, node)
8514 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8515 			return mac_node;
8516 
8517 	return NULL;
8518 }
8519 
8520 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8521 				  enum HCLGE_MAC_NODE_STATE state)
8522 {
8523 	switch (state) {
8524 	/* from set_rx_mode or tmp_add_list */
8525 	case HCLGE_MAC_TO_ADD:
8526 		if (mac_node->state == HCLGE_MAC_TO_DEL)
8527 			mac_node->state = HCLGE_MAC_ACTIVE;
8528 		break;
8529 	/* only from set_rx_mode */
8530 	case HCLGE_MAC_TO_DEL:
8531 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
8532 			list_del(&mac_node->node);
8533 			kfree(mac_node);
8534 		} else {
8535 			mac_node->state = HCLGE_MAC_TO_DEL;
8536 		}
8537 		break;
8538 	/* only from tmp_add_list; the mac_node->state won't be
8539 	 * ACTIVE.
8540 	 */
8541 	case HCLGE_MAC_ACTIVE:
8542 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8543 			mac_node->state = HCLGE_MAC_ACTIVE;
8544 
8545 		break;
8546 	}
8547 }
8548 
8549 int hclge_update_mac_list(struct hclge_vport *vport,
8550 			  enum HCLGE_MAC_NODE_STATE state,
8551 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
8552 			  const unsigned char *addr)
8553 {
8554 	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8555 	struct hclge_dev *hdev = vport->back;
8556 	struct hclge_mac_node *mac_node;
8557 	struct list_head *list;
8558 
8559 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8560 		&vport->uc_mac_list : &vport->mc_mac_list;
8561 
8562 	spin_lock_bh(&vport->mac_list_lock);
8563 
8564 	/* if the mac addr is already in the mac list, there is no need to
8565 	 * add a new node; just check the mac addr state and convert it to a
8566 	 * new state, remove it, or do nothing.
8567 	 */
8568 	mac_node = hclge_find_mac_node(list, addr);
8569 	if (mac_node) {
8570 		hclge_update_mac_node(mac_node, state);
8571 		spin_unlock_bh(&vport->mac_list_lock);
8572 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8573 		return 0;
8574 	}
8575 
8576 	/* if this address has never been added, there is nothing to delete */
8577 	if (state == HCLGE_MAC_TO_DEL) {
8578 		spin_unlock_bh(&vport->mac_list_lock);
8579 		hnae3_format_mac_addr(format_mac_addr, addr);
8580 		dev_err(&hdev->pdev->dev,
8581 			"failed to delete address %s from mac list\n",
8582 			format_mac_addr);
8583 		return -ENOENT;
8584 	}
8585 
8586 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8587 	if (!mac_node) {
8588 		spin_unlock_bh(&vport->mac_list_lock);
8589 		return -ENOMEM;
8590 	}
8591 
8592 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8593 
8594 	mac_node->state = state;
8595 	ether_addr_copy(mac_node->mac_addr, addr);
8596 	list_add_tail(&mac_node->node, list);
8597 
8598 	spin_unlock_bh(&vport->mac_list_lock);
8599 
8600 	return 0;
8601 }
8602 
8603 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8604 			     const unsigned char *addr)
8605 {
8606 	struct hclge_vport *vport = hclge_get_vport(handle);
8607 
8608 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8609 				     addr);
8610 }
8611 
8612 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8613 			     const unsigned char *addr)
8614 {
8615 	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8616 	struct hclge_dev *hdev = vport->back;
8617 	struct hclge_mac_vlan_tbl_entry_cmd req;
8618 	struct hclge_desc desc;
8619 	u16 egress_port = 0;
8620 	int ret;
8621 
8622 	/* mac addr check */
8623 	if (is_zero_ether_addr(addr) ||
8624 	    is_broadcast_ether_addr(addr) ||
8625 	    is_multicast_ether_addr(addr)) {
8626 		hnae3_format_mac_addr(format_mac_addr, addr);
8627 		dev_err(&hdev->pdev->dev,
8628 			"Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n",
8629 			 format_mac_addr, is_zero_ether_addr(addr),
8630 			 is_broadcast_ether_addr(addr),
8631 			 is_multicast_ether_addr(addr));
8632 		return -EINVAL;
8633 	}
8634 
8635 	memset(&req, 0, sizeof(req));
8636 
8637 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8638 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8639 
8640 	req.egress_port = cpu_to_le16(egress_port);
8641 
8642 	hclge_prepare_mac_addr(&req, addr, false);
8643 
8644 	/* Look up the mac address in the mac_vlan table, and add
8645 	 * it if the entry does not exist. Duplicate unicast entries
8646 	 * are not allowed in the mac vlan table.
8647 	 */
8648 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8649 	if (ret == -ENOENT) {
8650 		mutex_lock(&hdev->vport_lock);
8651 		if (!hclge_is_umv_space_full(vport, false)) {
8652 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8653 			if (!ret)
8654 				hclge_update_umv_space(vport, false);
8655 			mutex_unlock(&hdev->vport_lock);
8656 			return ret;
8657 		}
8658 		mutex_unlock(&hdev->vport_lock);
8659 
8660 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8661 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8662 				hdev->priv_umv_size);
8663 
8664 		return -ENOSPC;
8665 	}
8666 
8667 	/* check if we just hit the duplicate */
8668 	if (!ret)
8669 		return -EEXIST;
8670 
8671 	return ret;
8672 }
8673 
8674 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8675 			    const unsigned char *addr)
8676 {
8677 	struct hclge_vport *vport = hclge_get_vport(handle);
8678 
8679 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8680 				     addr);
8681 }
8682 
8683 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8684 			    const unsigned char *addr)
8685 {
8686 	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8687 	struct hclge_dev *hdev = vport->back;
8688 	struct hclge_mac_vlan_tbl_entry_cmd req;
8689 	int ret;
8690 
8691 	/* mac addr check */
8692 	if (is_zero_ether_addr(addr) ||
8693 	    is_broadcast_ether_addr(addr) ||
8694 	    is_multicast_ether_addr(addr)) {
8695 		hnae3_format_mac_addr(format_mac_addr, addr);
8696 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n",
8697 			format_mac_addr);
8698 		return -EINVAL;
8699 	}
8700 
8701 	memset(&req, 0, sizeof(req));
8702 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8703 	hclge_prepare_mac_addr(&req, addr, false);
8704 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
8705 	if (!ret || ret == -ENOENT) {
8706 		mutex_lock(&hdev->vport_lock);
8707 		hclge_update_umv_space(vport, true);
8708 		mutex_unlock(&hdev->vport_lock);
8709 		return 0;
8710 	}
8711 
8712 	return ret;
8713 }
8714 
8715 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8716 			     const unsigned char *addr)
8717 {
8718 	struct hclge_vport *vport = hclge_get_vport(handle);
8719 
8720 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8721 				     addr);
8722 }
8723 
8724 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8725 			     const unsigned char *addr)
8726 {
8727 	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8728 	struct hclge_dev *hdev = vport->back;
8729 	struct hclge_mac_vlan_tbl_entry_cmd req;
8730 	struct hclge_desc desc[3];
8731 	bool is_new_addr = false;
8732 	int status;
8733 
8734 	/* mac addr check */
8735 	if (!is_multicast_ether_addr(addr)) {
8736 		hnae3_format_mac_addr(format_mac_addr, addr);
8737 		dev_err(&hdev->pdev->dev,
8738 			"Add mc mac err! invalid mac:%s.\n",
8739 			 format_mac_addr);
8740 		return -EINVAL;
8741 	}
8742 	memset(&req, 0, sizeof(req));
8743 	hclge_prepare_mac_addr(&req, addr, true);
8744 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8745 	if (status) {
8746 		if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
8747 		    hdev->used_mc_mac_num >=
8748 		    hdev->ae_dev->dev_specs.mc_mac_size)
8749 			goto err_no_space;
8750 
8751 		is_new_addr = true;
8752 
8753 		/* This mac addr does not exist; add a new entry for it */
8754 		memset(desc[0].data, 0, sizeof(desc[0].data));
8755 		memset(desc[1].data, 0, sizeof(desc[0].data));
8756 		memset(desc[2].data, 0, sizeof(desc[0].data));
8757 	}
8758 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8759 	if (status)
8760 		return status;
8761 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8762 	if (status == -ENOSPC)
8763 		goto err_no_space;
8764 	else if (!status && is_new_addr)
8765 		hdev->used_mc_mac_num++;
8766 
8767 	return status;
8768 
8769 err_no_space:
8770 	/* if already overflowed, do not print on every attempt */
8771 	if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) {
8772 		vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8773 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8774 	}
8775 
8776 	return -ENOSPC;
8777 }
8778 
8779 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8780 			    const unsigned char *addr)
8781 {
8782 	struct hclge_vport *vport = hclge_get_vport(handle);
8783 
8784 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8785 				     addr);
8786 }
8787 
8788 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8789 			    const unsigned char *addr)
8790 {
8791 	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8792 	struct hclge_dev *hdev = vport->back;
8793 	struct hclge_mac_vlan_tbl_entry_cmd req;
8794 	enum hclge_comm_cmd_status status;
8795 	struct hclge_desc desc[3];
8796 
8797 	/* mac addr check */
8798 	if (!is_multicast_ether_addr(addr)) {
8799 		hnae3_format_mac_addr(format_mac_addr, addr);
8800 		dev_dbg(&hdev->pdev->dev,
8801 			"Remove mc mac err! invalid mac:%s.\n",
8802 			 format_mac_addr);
8803 		return -EINVAL;
8804 	}
8805 
8806 	memset(&req, 0, sizeof(req));
8807 	hclge_prepare_mac_addr(&req, addr, true);
8808 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8809 	if (!status) {
8810 		/* This mac addr exists; remove this handle's VFID from it */
8811 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8812 		if (status)
8813 			return status;
8814 
8815 		if (hclge_is_all_function_id_zero(desc)) {
8816 			/* All the vfids are zero, so delete this entry */
8817 			status = hclge_remove_mac_vlan_tbl(vport, &req);
8818 			if (!status)
8819 				hdev->used_mc_mac_num--;
8820 		} else {
8821 			/* Not all the vfids are zero, so just update the vfids */
8822 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8823 		}
8824 	} else if (status == -ENOENT) {
8825 		status = 0;
8826 	}
8827 
8828 	return status;
8829 }
8830 
8831 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8832 				      struct list_head *list,
8833 				      enum HCLGE_MAC_ADDR_TYPE mac_type)
8834 {
8835 	int (*sync)(struct hclge_vport *vport, const unsigned char *addr);
8836 	struct hclge_mac_node *mac_node, *tmp;
8837 	int ret;
8838 
8839 	if (mac_type == HCLGE_MAC_ADDR_UC)
8840 		sync = hclge_add_uc_addr_common;
8841 	else
8842 		sync = hclge_add_mc_addr_common;
8843 
8844 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8845 		ret = sync(vport, mac_node->mac_addr);
8846 		if (!ret) {
8847 			mac_node->state = HCLGE_MAC_ACTIVE;
8848 		} else {
8849 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8850 				&vport->state);
8851 
8852 			/* If a unicast mac address already exists in hardware,
8853 			 * keep trying the remaining unicast addresses, since
8854 			 * they may still be new and addable.
8855 			 * Multicast mac addresses are reusable: even when
8856 			 * there is no space to add a new multicast address,
8857 			 * keep checking whether the remaining addresses
8858 			 * already exist in hardware and can be reused.
8859 			 */
8860 			if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) ||
8861 			    (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC))
8862 				break;
8863 		}
8864 	}
8865 }
8866 
8867 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8868 					struct list_head *list,
8869 					enum HCLGE_MAC_ADDR_TYPE mac_type)
8870 {
8871 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8872 	struct hclge_mac_node *mac_node, *tmp;
8873 	int ret;
8874 
8875 	if (mac_type == HCLGE_MAC_ADDR_UC)
8876 		unsync = hclge_rm_uc_addr_common;
8877 	else
8878 		unsync = hclge_rm_mc_addr_common;
8879 
8880 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8881 		ret = unsync(vport, mac_node->mac_addr);
8882 		if (!ret || ret == -ENOENT) {
8883 			list_del(&mac_node->node);
8884 			kfree(mac_node);
8885 		} else {
8886 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8887 				&vport->state);
8888 			break;
8889 		}
8890 	}
8891 }
8892 
8893 static bool hclge_sync_from_add_list(struct list_head *add_list,
8894 				     struct list_head *mac_list)
8895 {
8896 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8897 	bool all_added = true;
8898 
8899 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8900 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8901 			all_added = false;
8902 
8903 		/* If the mac address from tmp_add_list is not in the
8904 		 * uc/mc_mac_list, a TO_DEL request was received during the
8905 		 * time window of adding the mac address to the mac table.
8906 		 * If the mac_node state is ACTIVE, change it to TO_DEL so it
8907 		 * is removed next time; otherwise it must be TO_ADD, meaning
8908 		 * this address was never added to the mac table, so just
8909 		 * remove the mac node.
8910 		 */
8911 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8912 		if (new_node) {
8913 			hclge_update_mac_node(new_node, mac_node->state);
8914 			list_del(&mac_node->node);
8915 			kfree(mac_node);
8916 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8917 			mac_node->state = HCLGE_MAC_TO_DEL;
8918 			list_move_tail(&mac_node->node, mac_list);
8919 		} else {
8920 			list_del(&mac_node->node);
8921 			kfree(mac_node);
8922 		}
8923 	}
8924 
8925 	return all_added;
8926 }
8927 
8928 static void hclge_sync_from_del_list(struct list_head *del_list,
8929 				     struct list_head *mac_list)
8930 {
8931 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8932 
8933 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8934 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8935 		if (new_node) {
8936 			/* If the mac addr exists in the mac list, a new
8937 			 * TO_ADD request was received during the time window
8938 			 * of configuring the mac address. Since the mac node
8939 			 * state is TO_ADD and the address is still present in
8940 			 * hardware (the delete failed), just change the mac
8941 			 * node state to ACTIVE.
8942 			 */
8943 			new_node->state = HCLGE_MAC_ACTIVE;
8944 			list_del(&mac_node->node);
8945 			kfree(mac_node);
8946 		} else {
8947 			list_move_tail(&mac_node->node, mac_list);
8948 		}
8949 	}
8950 }
8951 
8952 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8953 					enum HCLGE_MAC_ADDR_TYPE mac_type,
8954 					bool is_all_added)
8955 {
8956 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8957 		if (is_all_added)
8958 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8959 		else if (hclge_is_umv_space_full(vport, true))
8960 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8961 	} else {
8962 		if (is_all_added)
8963 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8964 		else
8965 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8966 	}
8967 }
8968 
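/* Sync one vport's mac list with hardware in three steps:
 * 1) under mac_list_lock, move TO_DEL nodes to tmp_del_list and copy TO_ADD
 *    nodes to tmp_add_list;
 * 2) program hardware outside the lock, deleting first to free table space;
 * 3) under the lock again, merge the results back into the main list and
 *    update the overflow promisc flags.
 */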
8969 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8970 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
8971 {
8972 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8973 	struct list_head tmp_add_list, tmp_del_list;
8974 	struct list_head *list;
8975 	bool all_added;
8976 
8977 	INIT_LIST_HEAD(&tmp_add_list);
8978 	INIT_LIST_HEAD(&tmp_del_list);
8979 
8980 	/* move the mac addrs to tmp_add_list and tmp_del_list, so that
8981 	 * they can be added/deleted outside the spin lock
8982 	 */
8983 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8984 		&vport->uc_mac_list : &vport->mc_mac_list;
8985 
8986 	spin_lock_bh(&vport->mac_list_lock);
8987 
8988 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8989 		switch (mac_node->state) {
8990 		case HCLGE_MAC_TO_DEL:
8991 			list_move_tail(&mac_node->node, &tmp_del_list);
8992 			break;
8993 		case HCLGE_MAC_TO_ADD:
8994 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8995 			if (!new_node)
8996 				goto stop_traverse;
8997 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8998 			new_node->state = mac_node->state;
8999 			list_add_tail(&new_node->node, &tmp_add_list);
9000 			break;
9001 		default:
9002 			break;
9003 		}
9004 	}
9005 
9006 stop_traverse:
9007 	spin_unlock_bh(&vport->mac_list_lock);
9008 
9009 	/* delete first, in order to get max mac table space for adding */
9010 	hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
9011 	hclge_sync_vport_mac_list(vport, &tmp_add_list, mac_type);
9012 
9013 	/* if adding/deleting some mac addresses failed, move them back to
9014 	 * the mac_list and retry next time.
9015 	 */
9016 	spin_lock_bh(&vport->mac_list_lock);
9017 
9018 	hclge_sync_from_del_list(&tmp_del_list, list);
9019 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
9020 
9021 	spin_unlock_bh(&vport->mac_list_lock);
9022 
9023 	hclge_update_overflow_flags(vport, mac_type, all_added);
9024 }
9025 
9026 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
9027 {
9028 	struct hclge_dev *hdev = vport->back;
9029 
9030 	if (test_bit(vport->vport_id, hdev->vport_config_block))
9031 		return false;
9032 
9033 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
9034 		return true;
9035 
9036 	return false;
9037 }
9038 
9039 static void hclge_sync_mac_table(struct hclge_dev *hdev)
9040 {
9041 	int i;
9042 
9043 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9044 		struct hclge_vport *vport = &hdev->vport[i];
9045 
9046 		if (!hclge_need_sync_mac_table(vport))
9047 			continue;
9048 
9049 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
9050 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
9051 	}
9052 }
9053 
9054 static void hclge_build_del_list(struct list_head *list,
9055 				 bool is_del_list,
9056 				 struct list_head *tmp_del_list)
9057 {
9058 	struct hclge_mac_node *mac_cfg, *tmp;
9059 
9060 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
9061 		switch (mac_cfg->state) {
9062 		case HCLGE_MAC_TO_DEL:
9063 		case HCLGE_MAC_ACTIVE:
9064 			list_move_tail(&mac_cfg->node, tmp_del_list);
9065 			break;
9066 		case HCLGE_MAC_TO_ADD:
9067 			if (is_del_list) {
9068 				list_del(&mac_cfg->node);
9069 				kfree(mac_cfg);
9070 			}
9071 			break;
9072 		}
9073 	}
9074 }
9075 
9076 static void hclge_unsync_del_list(struct hclge_vport *vport,
9077 				  int (*unsync)(struct hclge_vport *vport,
9078 						const unsigned char *addr),
9079 				  bool is_del_list,
9080 				  struct list_head *tmp_del_list)
9081 {
9082 	struct hclge_mac_node *mac_cfg, *tmp;
9083 	int ret;
9084 
9085 	list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9086 		ret = unsync(vport, mac_cfg->mac_addr);
9087 		if (!ret || ret == -ENOENT) {
9088 			/* clear the mac addr from hardware, but keep it in
9089 			 * the mac list so that it can be restored after the
9090 			 * vf reset finishes.
9091 			 */
9092 			if (!is_del_list &&
9093 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
9094 				mac_cfg->state = HCLGE_MAC_TO_ADD;
9095 			} else {
9096 				list_del(&mac_cfg->node);
9097 				kfree(mac_cfg);
9098 			}
9099 		} else if (is_del_list) {
9100 			mac_cfg->state = HCLGE_MAC_TO_DEL;
9101 		}
9102 	}
9103 }
9104 
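/* Remove a vport's mac addresses from hardware. With is_del_list set, entries
 * that are successfully removed are also dropped from the software list;
 * otherwise ACTIVE entries stay in the list marked TO_ADD so they can be
 * restored after the VF reset, and syncing is paused via vport_config_block.
 */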
9105 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9106 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
9107 {
9108 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9109 	struct hclge_dev *hdev = vport->back;
9110 	struct list_head tmp_del_list, *list;
9111 
9112 	if (mac_type == HCLGE_MAC_ADDR_UC) {
9113 		list = &vport->uc_mac_list;
9114 		unsync = hclge_rm_uc_addr_common;
9115 	} else {
9116 		list = &vport->mc_mac_list;
9117 		unsync = hclge_rm_mc_addr_common;
9118 	}
9119 
9120 	INIT_LIST_HEAD(&tmp_del_list);
9121 
9122 	if (!is_del_list)
9123 		set_bit(vport->vport_id, hdev->vport_config_block);
9124 
9125 	spin_lock_bh(&vport->mac_list_lock);
9126 
9127 	hclge_build_del_list(list, is_del_list, &tmp_del_list);
9128 
9129 	spin_unlock_bh(&vport->mac_list_lock);
9130 
9131 	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9132 
9133 	spin_lock_bh(&vport->mac_list_lock);
9134 
9135 	hclge_sync_from_del_list(&tmp_del_list, list);
9136 
9137 	spin_unlock_bh(&vport->mac_list_lock);
9138 }
9139 
9140 /* remove all mac addresses when uninitializing */
9141 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9142 					enum HCLGE_MAC_ADDR_TYPE mac_type)
9143 {
9144 	struct hclge_mac_node *mac_node, *tmp;
9145 	struct hclge_dev *hdev = vport->back;
9146 	struct list_head tmp_del_list, *list;
9147 
9148 	INIT_LIST_HEAD(&tmp_del_list);
9149 
9150 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9151 		&vport->uc_mac_list : &vport->mc_mac_list;
9152 
9153 	spin_lock_bh(&vport->mac_list_lock);
9154 
9155 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9156 		switch (mac_node->state) {
9157 		case HCLGE_MAC_TO_DEL:
9158 		case HCLGE_MAC_ACTIVE:
9159 			list_move_tail(&mac_node->node, &tmp_del_list);
9160 			break;
9161 		case HCLGE_MAC_TO_ADD:
9162 			list_del(&mac_node->node);
9163 			kfree(mac_node);
9164 			break;
9165 		}
9166 	}
9167 
9168 	spin_unlock_bh(&vport->mac_list_lock);
9169 
9170 	hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
9171 
9172 	if (!list_empty(&tmp_del_list))
9173 		dev_warn(&hdev->pdev->dev,
9174 			 "uninit %s mac list for vport %u not completely.\n",
9175 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9176 			 vport->vport_id);
9177 
9178 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9179 		list_del(&mac_node->node);
9180 		kfree(mac_node);
9181 	}
9182 }
9183 
9184 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9185 {
9186 	struct hclge_vport *vport;
9187 	int i;
9188 
9189 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9190 		vport = &hdev->vport[i];
9191 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9192 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9193 	}
9194 }
9195 
9196 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9197 					      u16 cmdq_resp, u8 resp_code)
9198 {
9199 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
9200 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
9201 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
9202 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
9203 
9204 	int return_status;
9205 
9206 	if (cmdq_resp) {
9207 		dev_err(&hdev->pdev->dev,
9208 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9209 			cmdq_resp);
9210 		return -EIO;
9211 	}
9212 
9213 	switch (resp_code) {
9214 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
9215 	case HCLGE_ETHERTYPE_ALREADY_ADD:
9216 		return_status = 0;
9217 		break;
9218 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9219 		dev_err(&hdev->pdev->dev,
9220 			"add mac ethertype failed for manager table overflow.\n");
9221 		return_status = -EIO;
9222 		break;
9223 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
9224 		dev_err(&hdev->pdev->dev,
9225 			"add mac ethertype failed for key conflict.\n");
9226 		return_status = -EIO;
9227 		break;
9228 	default:
9229 		dev_err(&hdev->pdev->dev,
9230 			"add mac ethertype failed for undefined, code=%u.\n",
9231 			resp_code);
9232 		return_status = -EIO;
9233 	}
9234 
9235 	return return_status;
9236 }
9237 
9238 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9239 			    u8 *mac_addr)
9240 {
9241 	struct hclge_vport *vport = hclge_get_vport(handle);
9242 	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
9243 	struct hclge_dev *hdev = vport->back;
9244 
9245 	vport = hclge_get_vf_vport(hdev, vf);
9246 	if (!vport)
9247 		return -EINVAL;
9248 
9249 	hnae3_format_mac_addr(format_mac_addr, mac_addr);
9250 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9251 		dev_info(&hdev->pdev->dev,
9252 			 "Specified MAC(=%s) is same as before, no change committed!\n",
9253 			 format_mac_addr);
9254 		return 0;
9255 	}
9256 
9257 	ether_addr_copy(vport->vf_info.mac, mac_addr);
9258 
9259 	/* there is a time window before the PF knows the VF is not alive,
9260 	 * which may cause the mailbox send to fail; that doesn't matter,
9261 	 * the VF will query the MAC when it reinitializes.
9262 	 */
9263 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9264 		dev_info(&hdev->pdev->dev,
9265 			 "MAC of VF %d has been set to %s, and it will be reinitialized!\n",
9266 			 vf, format_mac_addr);
9267 		(void)hclge_inform_reset_assert_to_vf(vport);
9268 		return 0;
9269 	}
9270 
9271 	dev_info(&hdev->pdev->dev,
9272 		 "MAC of VF %d has been set to %s, will be active after VF reset\n",
9273 		 vf, format_mac_addr);
9274 	return 0;
9275 }
9276 
9277 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9278 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
9279 {
9280 	struct hclge_desc desc;
9281 	u8 resp_code;
9282 	u16 retval;
9283 	int ret;
9284 
9285 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9286 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9287 
9288 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9289 	if (ret) {
9290 		dev_err(&hdev->pdev->dev,
9291 			"add mac ethertype failed for cmd_send, ret =%d.\n",
9292 			ret);
9293 		return ret;
9294 	}
9295 
9296 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9297 	retval = le16_to_cpu(desc.retval);
9298 
9299 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9300 }
9301 
9302 static int init_mgr_tbl(struct hclge_dev *hdev)
9303 {
9304 	int ret;
9305 	int i;
9306 
9307 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9308 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9309 		if (ret) {
9310 			dev_err(&hdev->pdev->dev,
9311 				"add mac ethertype failed, ret =%d.\n",
9312 				ret);
9313 			return ret;
9314 		}
9315 	}
9316 
9317 	return 0;
9318 }
9319 
9320 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9321 {
9322 	struct hclge_vport *vport = hclge_get_vport(handle);
9323 	struct hclge_dev *hdev = vport->back;
9324 
9325 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
9326 }
9327 
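/* Update the unicast mac list when the device address changes: keep the new
 * address tracked at the list head so it is re-added first when the table is
 * restored, and schedule the old address for deletion if it differs.
 */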
9328 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9329 				       const u8 *old_addr, const u8 *new_addr)
9330 {
9331 	struct list_head *list = &vport->uc_mac_list;
9332 	struct hclge_mac_node *old_node, *new_node;
9333 
9334 	new_node = hclge_find_mac_node(list, new_addr);
9335 	if (!new_node) {
9336 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9337 		if (!new_node)
9338 			return -ENOMEM;
9339 
9340 		new_node->state = HCLGE_MAC_TO_ADD;
9341 		ether_addr_copy(new_node->mac_addr, new_addr);
9342 		list_add(&new_node->node, list);
9343 	} else {
9344 		if (new_node->state == HCLGE_MAC_TO_DEL)
9345 			new_node->state = HCLGE_MAC_ACTIVE;
9346 
9347 		/* make sure the new addr is at the list head; otherwise,
9348 		 * after a global/imp reset clears the hardware mac table,
9349 		 * the dev addr might not be re-added because of the umv
9350 		 * space limitation.
9351 		 */
9352 		list_move(&new_node->node, list);
9353 	}
9354 
9355 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9356 		old_node = hclge_find_mac_node(list, old_addr);
9357 		if (old_node) {
9358 			if (old_node->state == HCLGE_MAC_TO_ADD) {
9359 				list_del(&old_node->node);
9360 				kfree(old_node);
9361 			} else {
9362 				old_node->state = HCLGE_MAC_TO_DEL;
9363 			}
9364 		}
9365 	}
9366 
9367 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9368 
9369 	return 0;
9370 }
9371 
9372 static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
9373 			      bool is_first)
9374 {
9375 	const unsigned char *new_addr = (const unsigned char *)p;
9376 	struct hclge_vport *vport = hclge_get_vport(handle);
9377 	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
9378 	struct hclge_dev *hdev = vport->back;
9379 	unsigned char *old_addr = NULL;
9380 	int ret;
9381 
9382 	/* mac addr check */
9383 	if (is_zero_ether_addr(new_addr) ||
9384 	    is_broadcast_ether_addr(new_addr) ||
9385 	    is_multicast_ether_addr(new_addr)) {
9386 		hnae3_format_mac_addr(format_mac_addr, new_addr);
9387 		dev_err(&hdev->pdev->dev,
9388 			"change uc mac err! invalid mac: %s.\n",
9389 			 format_mac_addr);
9390 		return -EINVAL;
9391 	}
9392 
9393 	ret = hclge_pause_addr_cfg(hdev, new_addr);
9394 	if (ret) {
9395 		dev_err(&hdev->pdev->dev,
9396 			"failed to configure mac pause address, ret = %d\n",
9397 			ret);
9398 		return ret;
9399 	}
9400 
9401 	if (!is_first)
9402 		old_addr = hdev->hw.mac.mac_addr;
9403 
9404 	spin_lock_bh(&vport->mac_list_lock);
9405 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9406 	if (ret) {
9407 		hnae3_format_mac_addr(format_mac_addr, new_addr);
9408 		dev_err(&hdev->pdev->dev,
9409 			"failed to change the mac addr:%s, ret = %d\n",
9410 			format_mac_addr, ret);
9411 		spin_unlock_bh(&vport->mac_list_lock);
9412 
9413 		if (!is_first)
9414 			hclge_pause_addr_cfg(hdev, old_addr);
9415 
9416 		return ret;
9417 	}
9418 	/* we must update the dev addr under the spin lock to prevent it
9419 	 * from being removed by the set_rx_mode path.
9420 	 */
9421 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9422 	spin_unlock_bh(&vport->mac_list_lock);
9423 
9424 	hclge_task_schedule(hdev, 0);
9425 
9426 	return 0;
9427 }
9428 
9429 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9430 {
9431 	struct mii_ioctl_data *data = if_mii(ifr);
9432 
9433 	if (!hnae3_dev_phy_imp_supported(hdev))
9434 		return -EOPNOTSUPP;
9435 
9436 	switch (cmd) {
9437 	case SIOCGMIIPHY:
9438 		data->phy_id = hdev->hw.mac.phy_addr;
9439 		/* this command reads phy id and register at the same time */
9440 		fallthrough;
9441 	case SIOCGMIIREG:
9442 		data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9443 		return 0;
9444 
9445 	case SIOCSMIIREG:
9446 		return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9447 	default:
9448 		return -EOPNOTSUPP;
9449 	}
9450 }
9451 
9452 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9453 			  int cmd)
9454 {
9455 	struct hclge_vport *vport = hclge_get_vport(handle);
9456 	struct hclge_dev *hdev = vport->back;
9457 
9458 	switch (cmd) {
9459 	case SIOCGHWTSTAMP:
9460 		return hclge_ptp_get_cfg(hdev, ifr);
9461 	case SIOCSHWTSTAMP:
9462 		return hclge_ptp_set_cfg(hdev, ifr);
9463 	default:
9464 		if (!hdev->hw.mac.phydev)
9465 			return hclge_mii_ioctl(hdev, ifr, cmd);
9466 	}
9467 
9468 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9469 }
9470 
9471 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9472 					     bool bypass_en)
9473 {
9474 	struct hclge_port_vlan_filter_bypass_cmd *req;
9475 	struct hclge_desc desc;
9476 	int ret;
9477 
9478 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9479 	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9480 	req->vf_id = vf_id;
9481 	hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9482 		      bypass_en ? 1 : 0);
9483 
9484 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9485 	if (ret)
9486 		dev_err(&hdev->pdev->dev,
9487 			"failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9488 			vf_id, ret);
9489 
9490 	return ret;
9491 }
9492 
9493 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9494 				      u8 fe_type, bool filter_en, u8 vf_id)
9495 {
9496 	struct hclge_vlan_filter_ctrl_cmd *req;
9497 	struct hclge_desc desc;
9498 	int ret;
9499 
9500 	/* read current vlan filter parameter */
9501 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9502 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9503 	req->vlan_type = vlan_type;
9504 	req->vf_id = vf_id;
9505 
9506 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9507 	if (ret) {
9508 		dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n",
9509 			vf_id, ret);
9510 		return ret;
9511 	}
9512 
9513 	/* modify and write new config parameter */
9514 	hclge_comm_cmd_reuse_desc(&desc, false);
9515 	req->vlan_fe = filter_en ?
9516 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9517 
9518 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9519 	if (ret)
9520 		dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n",
9521 			vf_id, ret);
9522 
9523 	return ret;
9524 }
9525 
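/* Apply the egress vlan filter setting for a vport. On newer hardware this
 * may also toggle the port vlan filter bypass for the vport, or, for the PF
 * (vport 0) when bypass is not supported, the port-level ingress filter.
 */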
9526 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9527 {
9528 	struct hclge_dev *hdev = vport->back;
9529 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9530 	int ret;
9531 
9532 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9533 		return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9534 						  HCLGE_FILTER_FE_EGRESS_V1_B,
9535 						  enable, vport->vport_id);
9536 
9537 	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9538 					 HCLGE_FILTER_FE_EGRESS, enable,
9539 					 vport->vport_id);
9540 	if (ret)
9541 		return ret;
9542 
9543 	if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9544 		ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9545 							!enable);
9546 	} else if (!vport->vport_id) {
9547 		if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9548 			enable = false;
9549 
9550 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9551 						 HCLGE_FILTER_FE_INGRESS,
9552 						 enable, 0);
9553 	}
9554 
9555 	return ret;
9556 }
9557 
9558 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9559 {
9560 	struct hnae3_handle *handle = &vport->nic;
9561 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9562 	struct hclge_dev *hdev = vport->back;
9563 
9564 	if (vport->vport_id) {
9565 		if (vport->port_base_vlan_cfg.state !=
9566 			HNAE3_PORT_BASE_VLAN_DISABLE)
9567 			return true;
9568 
9569 		if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9570 			return false;
9571 	} else if (handle->netdev_flags & HNAE3_USER_UPE) {
9572 		return false;
9573 	}
9574 
9575 	if (!vport->req_vlan_fltr_en)
9576 		return false;
9577 
9578 	/* for compatibility with older devices, always enable the vlan filter */
9579 	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9580 		return true;
9581 
9582 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9583 		if (vlan->vlan_id != 0)
9584 			return true;
9585 
9586 	return false;
9587 }
9588 
9589 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9590 {
9591 	struct hclge_dev *hdev = vport->back;
9592 	bool need_en;
9593 	int ret;
9594 
9595 	mutex_lock(&hdev->vport_lock);
9596 
9597 	vport->req_vlan_fltr_en = request_en;
9598 
9599 	need_en = hclge_need_enable_vport_vlan_filter(vport);
9600 	if (need_en == vport->cur_vlan_fltr_en) {
9601 		mutex_unlock(&hdev->vport_lock);
9602 		return 0;
9603 	}
9604 
9605 	ret = hclge_set_vport_vlan_filter(vport, need_en);
9606 	if (ret) {
9607 		mutex_unlock(&hdev->vport_lock);
9608 		return ret;
9609 	}
9610 
9611 	vport->cur_vlan_fltr_en = need_en;
9612 
9613 	mutex_unlock(&hdev->vport_lock);
9614 
9615 	return 0;
9616 }
9617 
9618 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9619 {
9620 	struct hclge_vport *vport = hclge_get_vport(handle);
9621 
9622 	return hclge_enable_vport_vlan_filter(vport, enable);
9623 }
9624 
9625 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9626 					bool is_kill, u16 vlan,
9627 					struct hclge_desc *desc)
9628 {
9629 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
9630 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
9631 	u8 vf_byte_val;
9632 	u8 vf_byte_off;
9633 	int ret;
9634 
9635 	hclge_cmd_setup_basic_desc(&desc[0],
9636 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9637 	hclge_cmd_setup_basic_desc(&desc[1],
9638 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9639 
9640 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
9641 
9642 	vf_byte_off = vfid / 8;
9643 	vf_byte_val = 1 << (vfid % 8);
9644 
9645 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9646 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9647 
9648 	req0->vlan_id  = cpu_to_le16(vlan);
9649 	req0->vlan_cfg = is_kill;
9650 
9651 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9652 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9653 	else
9654 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9655 
9656 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
9657 	if (ret) {
9658 		dev_err(&hdev->pdev->dev,
9659 			"Send vf vlan command fail, ret =%d.\n",
9660 			ret);
9661 		return ret;
9662 	}
9663 
9664 	return 0;
9665 }
9666 
9667 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9668 					  bool is_kill, struct hclge_desc *desc)
9669 {
9670 	struct hclge_vlan_filter_vf_cfg_cmd *req;
9671 
9672 	req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9673 
9674 	if (!is_kill) {
9675 #define HCLGE_VF_VLAN_NO_ENTRY	2
9676 		if (!req->resp_code || req->resp_code == 1)
9677 			return 0;
9678 
9679 		if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9680 			set_bit(vfid, hdev->vf_vlan_full);
9681 			dev_warn(&hdev->pdev->dev,
9682 				 "vf vlan table is full, vf vlan filter is disabled\n");
9683 			return 0;
9684 		}
9685 
9686 		dev_err(&hdev->pdev->dev,
9687 			"Add vf vlan filter fail, ret =%u.\n",
9688 			req->resp_code);
9689 	} else {
9690 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
9691 		if (!req->resp_code)
9692 			return 0;
9693 
9694 		/* vf vlan filter is disabled when the vf vlan table is full,
9695 		 * so new vlan ids will not be added to the vf vlan table.
9696 		 * Just return 0 without a warning to avoid a flood of
9697 		 * verbose logs on unload.
9698 		 */
9699 		if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9700 			return 0;
9701 
9702 		dev_err(&hdev->pdev->dev,
9703 			"Kill vf vlan filter fail, ret =%u.\n",
9704 			req->resp_code);
9705 	}
9706 
9707 	return -EIO;
9708 }
9709 
9710 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9711 				    bool is_kill, u16 vlan)
9712 {
9713 	struct hclge_vport *vport = &hdev->vport[vfid];
9714 	struct hclge_desc desc[2];
9715 	int ret;
9716 
9717 	/* if the vf vlan table is full, firmware disables the vf vlan filter,
9718 	 * making it neither possible nor necessary to add new vlan ids to it.
9719 	 * If spoof check is enabled and the vf vlan table is full, a new vlan
9720 	 * must not be added, since tx packets with that vlan id would be dropped.
9721 	 */
9722 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9723 		if (vport->vf_info.spoofchk && vlan) {
9724 			dev_err(&hdev->pdev->dev,
9725 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
9726 			return -EPERM;
9727 		}
9728 		return 0;
9729 	}
9730 
9731 	ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9732 	if (ret)
9733 		return ret;
9734 
9735 	return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9736 }
9737 
9738 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9739 				      u16 vlan_id, bool is_kill)
9740 {
9741 	struct hclge_vlan_filter_pf_cfg_cmd *req;
9742 	struct hclge_desc desc;
9743 	u8 vlan_offset_byte_val;
9744 	u8 vlan_offset_byte;
9745 	u8 vlan_offset_160;
9746 	int ret;
9747 
9748 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9749 
9750 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9751 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9752 			   HCLGE_VLAN_BYTE_SIZE;
9753 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9754 
9755 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9756 	req->vlan_offset = vlan_offset_160;
9757 	req->vlan_cfg = is_kill;
9758 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9759 
9760 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9761 	if (ret)
9762 		dev_err(&hdev->pdev->dev,
9763 			"port vlan command, send fail, ret =%d.\n", ret);
9764 	return ret;
9765 }
9766 
9767 static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id,
9768 					u16 vlan_id, bool is_kill)
9769 {
9770 	/* vlan 0 may be added twice when 8021q module is enabled */
9771 	if (!is_kill && !vlan_id &&
9772 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
9773 		return false;
9774 
9775 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9776 		dev_warn(&hdev->pdev->dev,
9777 			 "Add port vlan failed, vport %u is already in vlan %u\n",
9778 			 vport_id, vlan_id);
9779 		return false;
9780 	}
9781 
9782 	if (is_kill &&
9783 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9784 		dev_warn(&hdev->pdev->dev,
9785 			 "Delete port vlan failed, vport %u is not in vlan %u\n",
9786 			 vport_id, vlan_id);
9787 		return false;
9788 	}
9789 
9790 	return true;
9791 }
9792 
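/* Program a vlan filter entry in hardware: always update the per-VF vlan
 * table, then touch the port vlan table only when this vport is the first
 * one to add the vlan or the last one to remove it.
 */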
9793 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9794 				    u16 vport_id, u16 vlan_id,
9795 				    bool is_kill)
9796 {
9797 	u16 vport_idx, vport_num = 0;
9798 	int ret;
9799 
9800 	if (is_kill && !vlan_id)
9801 		return 0;
9802 
9803 	if (vlan_id >= VLAN_N_VID)
9804 		return -EINVAL;
9805 
9806 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9807 	if (ret) {
9808 		dev_err(&hdev->pdev->dev,
9809 			"Set %u vport vlan filter config fail, ret =%d.\n",
9810 			vport_id, ret);
9811 		return ret;
9812 	}
9813 
9814 	if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill))
9815 		return 0;
9816 
9817 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9818 		vport_num++;
9819 
9820 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9821 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9822 						 is_kill);
9823 
9824 	return ret;
9825 }
9826 
9827 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9828 {
9829 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9830 	struct hclge_vport_vtag_tx_cfg_cmd *req;
9831 	struct hclge_dev *hdev = vport->back;
9832 	struct hclge_desc desc;
9833 	u16 bmap_index;
9834 	int status;
9835 
9836 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9837 
9838 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9839 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9840 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9841 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9842 		      vcfg->accept_tag1 ? 1 : 0);
9843 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9844 		      vcfg->accept_untag1 ? 1 : 0);
9845 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9846 		      vcfg->accept_tag2 ? 1 : 0);
9847 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9848 		      vcfg->accept_untag2 ? 1 : 0);
9849 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9850 		      vcfg->insert_tag1_en ? 1 : 0);
9851 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9852 		      vcfg->insert_tag2_en ? 1 : 0);
9853 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9854 		      vcfg->tag_shift_mode_en ? 1 : 0);
9855 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9856 
9857 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9858 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9859 			HCLGE_VF_NUM_PER_BYTE;
9860 	req->vf_bitmap[bmap_index] =
9861 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9862 
9863 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9864 	if (status)
9865 		dev_err(&hdev->pdev->dev,
9866 			"Send port txvlan cfg command fail, ret =%d\n",
9867 			status);
9868 
9869 	return status;
9870 }
9871 
9872 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9873 {
9874 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9875 	struct hclge_vport_vtag_rx_cfg_cmd *req;
9876 	struct hclge_dev *hdev = vport->back;
9877 	struct hclge_desc desc;
9878 	u16 bmap_index;
9879 	int status;
9880 
9881 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9882 
9883 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9884 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9885 		      vcfg->strip_tag1_en ? 1 : 0);
9886 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9887 		      vcfg->strip_tag2_en ? 1 : 0);
9888 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9889 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
9890 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9891 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
9892 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9893 		      vcfg->strip_tag1_discard_en ? 1 : 0);
9894 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9895 		      vcfg->strip_tag2_discard_en ? 1 : 0);
9896 
9897 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9898 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9899 			HCLGE_VF_NUM_PER_BYTE;
9900 	req->vf_bitmap[bmap_index] =
9901 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9902 
9903 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9904 	if (status)
9905 		dev_err(&hdev->pdev->dev,
9906 			"Send port rxvlan cfg command fail, ret =%d\n",
9907 			status);
9908 
9909 	return status;
9910 }
9911 
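/* Configure how vlan tags are inserted/accepted on tx and stripped on rx for
 * a vport, based on its port base vlan state and its rx vlan offload setting.
 */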
9912 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9913 				  u16 port_base_vlan_state,
9914 				  u16 vlan_tag, u8 qos)
9915 {
9916 	int ret;
9917 
9918 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9919 		vport->txvlan_cfg.accept_tag1 = true;
9920 		vport->txvlan_cfg.insert_tag1_en = false;
9921 		vport->txvlan_cfg.default_tag1 = 0;
9922 	} else {
9923 		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9924 
9925 		vport->txvlan_cfg.accept_tag1 =
9926 			ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9927 		vport->txvlan_cfg.insert_tag1_en = true;
9928 		vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9929 						 vlan_tag;
9930 	}
9931 
9932 	vport->txvlan_cfg.accept_untag1 = true;
9933 
9934 	/* accept_tag2 and accept_untag2 are not supported on device
9935 	 * revision 0x20; newer revisions support them. These two
9936 	 * fields cannot be configured by the user.
9937 	 */
9938 	vport->txvlan_cfg.accept_tag2 = true;
9939 	vport->txvlan_cfg.accept_untag2 = true;
9940 	vport->txvlan_cfg.insert_tag2_en = false;
9941 	vport->txvlan_cfg.default_tag2 = 0;
9942 	vport->txvlan_cfg.tag_shift_mode_en = true;
9943 
9944 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9945 		vport->rxvlan_cfg.strip_tag1_en = false;
9946 		vport->rxvlan_cfg.strip_tag2_en =
9947 				vport->rxvlan_cfg.rx_vlan_offload_en;
9948 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
9949 	} else {
9950 		vport->rxvlan_cfg.strip_tag1_en =
9951 				vport->rxvlan_cfg.rx_vlan_offload_en;
9952 		vport->rxvlan_cfg.strip_tag2_en = true;
9953 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
9954 	}
9955 
9956 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
9957 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9958 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9959 
9960 	ret = hclge_set_vlan_tx_offload_cfg(vport);
9961 	if (ret)
9962 		return ret;
9963 
9964 	return hclge_set_vlan_rx_offload_cfg(vport);
9965 }
9966 
9967 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9968 {
9969 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9970 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9971 	struct hclge_desc desc;
9972 	int status;
9973 
9974 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9975 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9976 	rx_req->ot_fst_vlan_type =
9977 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9978 	rx_req->ot_sec_vlan_type =
9979 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9980 	rx_req->in_fst_vlan_type =
9981 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9982 	rx_req->in_sec_vlan_type =
9983 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9984 
9985 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9986 	if (status) {
9987 		dev_err(&hdev->pdev->dev,
9988 			"Send rxvlan protocol type command fail, ret =%d\n",
9989 			status);
9990 		return status;
9991 	}
9992 
9993 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9994 
9995 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9996 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9997 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9998 
9999 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
10000 	if (status)
10001 		dev_err(&hdev->pdev->dev,
10002 			"Send txvlan protocol type command fail, ret =%d\n",
10003 			status);
10004 
10005 	return status;
10006 }
10007 
10008 static int hclge_init_vlan_filter(struct hclge_dev *hdev)
10009 {
10010 	struct hclge_vport *vport;
10011 	bool enable = true;
10012 	int ret;
10013 	int i;
10014 
10015 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10016 		return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10017 						  HCLGE_FILTER_FE_EGRESS_V1_B,
10018 						  true, 0);
10019 
10020 	/* for revision 0x21, vf vlan filter is per function */
10021 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10022 		vport = &hdev->vport[i];
10023 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10024 						 HCLGE_FILTER_FE_EGRESS, true,
10025 						 vport->vport_id);
10026 		if (ret)
10027 			return ret;
10028 		vport->cur_vlan_fltr_en = true;
10029 	}
10030 
10031 	if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps) &&
10032 	    !test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
10033 		enable = false;
10034 
10035 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
10036 					  HCLGE_FILTER_FE_INGRESS, enable, 0);
10037 }
10038 
10039 static int hclge_init_vlan_type(struct hclge_dev *hdev)
10040 {
10041 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q;
10042 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q;
10043 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q;
10044 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q;
10045 	hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q;
10046 	hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q;
10047 
10048 	return hclge_set_vlan_protocol_type(hdev);
10049 }
10050 
10051 static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev)
10052 {
10053 	struct hclge_port_base_vlan_config *cfg;
10054 	struct hclge_vport *vport;
10055 	int ret;
10056 	int i;
10057 
10058 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10059 		vport = &hdev->vport[i];
10060 		cfg = &vport->port_base_vlan_cfg;
10061 
10062 		ret = hclge_vlan_offload_cfg(vport, cfg->state,
10063 					     cfg->vlan_info.vlan_tag,
10064 					     cfg->vlan_info.qos);
10065 		if (ret)
10066 			return ret;
10067 	}
10068 	return 0;
10069 }
10070 
10071 static int hclge_init_vlan_config(struct hclge_dev *hdev)
10072 {
10073 	struct hnae3_handle *handle = &hdev->vport[0].nic;
10074 	int ret;
10075 
10076 	ret = hclge_init_vlan_filter(hdev);
10077 	if (ret)
10078 		return ret;
10079 
10080 	ret = hclge_init_vlan_type(hdev);
10081 	if (ret)
10082 		return ret;
10083 
10084 	ret = hclge_init_vport_vlan_offload(hdev);
10085 	if (ret)
10086 		return ret;
10087 
10088 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
10089 }
10090 
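/* Each vport keeps a vlan_list of the vlan ids it uses; hd_tbl_status records
 * whether an entry has been written to the hardware vlan filter, so the list
 * can be replayed when the filter needs to be restored.
 */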
10091 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10092 				       bool writen_to_tbl)
10093 {
10094 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10095 	struct hclge_dev *hdev = vport->back;
10096 
10097 	mutex_lock(&hdev->vport_lock);
10098 
10099 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10100 		if (vlan->vlan_id == vlan_id) {
10101 			mutex_unlock(&hdev->vport_lock);
10102 			return;
10103 		}
10104 	}
10105 
10106 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10107 	if (!vlan) {
10108 		mutex_unlock(&hdev->vport_lock);
10109 		return;
10110 	}
10111 
10112 	vlan->hd_tbl_status = writen_to_tbl;
10113 	vlan->vlan_id = vlan_id;
10114 
10115 	list_add_tail(&vlan->node, &vport->vlan_list);
10116 	mutex_unlock(&hdev->vport_lock);
10117 }
10118 
10119 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10120 {
10121 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10122 	struct hclge_dev *hdev = vport->back;
10123 	int ret;
10124 
10125 	mutex_lock(&hdev->vport_lock);
10126 
10127 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10128 		if (!vlan->hd_tbl_status) {
10129 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10130 						       vport->vport_id,
10131 						       vlan->vlan_id, false);
10132 			if (ret) {
10133 				dev_err(&hdev->pdev->dev,
10134 					"restore vport vlan list failed, ret=%d\n",
10135 					ret);
10136 
10137 				mutex_unlock(&hdev->vport_lock);
10138 				return ret;
10139 			}
10140 		}
10141 		vlan->hd_tbl_status = true;
10142 	}
10143 
10144 	mutex_unlock(&hdev->vport_lock);
10145 
10146 	return 0;
10147 }
10148 
10149 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10150 				      bool is_write_tbl)
10151 {
10152 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10153 	struct hclge_dev *hdev = vport->back;
10154 
10155 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10156 		if (vlan->vlan_id == vlan_id) {
10157 			if (is_write_tbl && vlan->hd_tbl_status)
10158 				hclge_set_vlan_filter_hw(hdev,
10159 							 htons(ETH_P_8021Q),
10160 							 vport->vport_id,
10161 							 vlan_id,
10162 							 true);
10163 
10164 			list_del(&vlan->node);
10165 			kfree(vlan);
10166 			break;
10167 		}
10168 	}
10169 }
10170 
10171 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10172 {
10173 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10174 	struct hclge_dev *hdev = vport->back;
10175 
10176 	mutex_lock(&hdev->vport_lock);
10177 
10178 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10179 		if (vlan->hd_tbl_status)
10180 			hclge_set_vlan_filter_hw(hdev,
10181 						 htons(ETH_P_8021Q),
10182 						 vport->vport_id,
10183 						 vlan->vlan_id,
10184 						 true);
10185 
10186 		vlan->hd_tbl_status = false;
10187 		if (is_del_list) {
10188 			list_del(&vlan->node);
10189 			kfree(vlan);
10190 		}
10191 	}
10192 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
10193 	mutex_unlock(&hdev->vport_lock);
10194 }
10195 
10196 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10197 {
10198 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10199 	struct hclge_vport *vport;
10200 	int i;
10201 
10202 	mutex_lock(&hdev->vport_lock);
10203 
10204 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10205 		vport = &hdev->vport[i];
10206 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10207 			list_del(&vlan->node);
10208 			kfree(vlan);
10209 		}
10210 	}
10211 
10212 	mutex_unlock(&hdev->vport_lock);
10213 }
10214 
10215 void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev)
10216 {
10217 	struct hclge_vlan_info *vlan_info;
10218 	struct hclge_vport *vport;
10219 	u16 vlan_proto;
10220 	u16 vlan_id;
10221 	u16 state;
10222 	int vf_id;
10223 	int ret;
10224 
10225 	/* the PF should restore the port base vlan of all VFs */
10226 	for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) {
10227 		vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM];
10228 		vlan_info = vport->port_base_vlan_cfg.tbl_sta ?
10229 			    &vport->port_base_vlan_cfg.vlan_info :
10230 			    &vport->port_base_vlan_cfg.old_vlan_info;
10231 
10232 		vlan_id = vlan_info->vlan_tag;
10233 		vlan_proto = vlan_info->vlan_proto;
10234 		state = vport->port_base_vlan_cfg.state;
10235 
10236 		if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10237 			clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10238 			ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10239 						       vport->vport_id,
10240 						       vlan_id, false);
10241 			vport->port_base_vlan_cfg.tbl_sta = ret == 0;
10242 		}
10243 	}
10244 }
10245 
10246 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10247 {
10248 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10249 	struct hclge_dev *hdev = vport->back;
10250 	int ret;
10251 
10252 	mutex_lock(&hdev->vport_lock);
10253 
10254 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10255 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10256 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10257 						       vport->vport_id,
10258 						       vlan->vlan_id, false);
10259 			if (ret)
10260 				break;
10261 			vlan->hd_tbl_status = true;
10262 		}
10263 	}
10264 
10265 	mutex_unlock(&hdev->vport_lock);
10266 }
10267 
10268 /* For global reset and imp reset, hardware will clear the mac table, so we
10269  * change the mac address state from ACTIVE to TO_ADD so that it can be
10270  * restored by the service task after the reset completes. Furthermore, mac
10271  * addresses in the TO_DEL or DEL_FAIL state do not need to be restored after
10272  * reset, so just remove those mac nodes from mac_list.
10273  */
10274 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10275 {
10276 	struct hclge_mac_node *mac_node, *tmp;
10277 
10278 	list_for_each_entry_safe(mac_node, tmp, list, node) {
10279 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
10280 			mac_node->state = HCLGE_MAC_TO_ADD;
10281 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10282 			list_del(&mac_node->node);
10283 			kfree(mac_node);
10284 		}
10285 	}
10286 }
10287 
10288 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10289 {
10290 	spin_lock_bh(&vport->mac_list_lock);
10291 
10292 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10293 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10294 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10295 
10296 	spin_unlock_bh(&vport->mac_list_lock);
10297 }
10298 
10299 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10300 {
10301 	struct hclge_vport *vport = &hdev->vport[0];
10302 	struct hnae3_handle *handle = &vport->nic;
10303 
10304 	hclge_restore_mac_table_common(vport);
10305 	hclge_restore_vport_port_base_vlan_config(hdev);
10306 	hclge_restore_vport_vlan_table(vport);
10307 	set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10308 	hclge_restore_fd_entries(handle);
10309 }
10310 
10311 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10312 {
10313 	struct hclge_vport *vport = hclge_get_vport(handle);
10314 
10315 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10316 		vport->rxvlan_cfg.strip_tag1_en = false;
10317 		vport->rxvlan_cfg.strip_tag2_en = enable;
10318 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
10319 	} else {
10320 		vport->rxvlan_cfg.strip_tag1_en = enable;
10321 		vport->rxvlan_cfg.strip_tag2_en = true;
10322 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
10323 	}
10324 
10325 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
10326 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10327 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10328 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10329 
10330 	return hclge_set_vlan_rx_offload_cfg(vport);
10331 }
10332 
10333 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10334 {
10335 	struct hclge_dev *hdev = vport->back;
10336 
10337 	if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10338 		set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10339 }
10340 
10341 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10342 					    u16 port_base_vlan_state,
10343 					    struct hclge_vlan_info *new_info,
10344 					    struct hclge_vlan_info *old_info)
10345 {
10346 	struct hclge_dev *hdev = vport->back;
10347 	int ret;
10348 
10349 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10350 		hclge_rm_vport_all_vlan_table(vport, false);
10351 		/* force clear VLAN 0 */
10352 		ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10353 		if (ret)
10354 			return ret;
10355 		return hclge_set_vlan_filter_hw(hdev,
10356 						 htons(new_info->vlan_proto),
10357 						 vport->vport_id,
10358 						 new_info->vlan_tag,
10359 						 false);
10360 	}
10361 
10362 	vport->port_base_vlan_cfg.tbl_sta = false;
10363 
10364 	/* force add VLAN 0 */
10365 	ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10366 	if (ret)
10367 		return ret;
10368 
10369 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10370 				       vport->vport_id, old_info->vlan_tag,
10371 				       true);
10372 	if (ret)
10373 		return ret;
10374 
10375 	return hclge_add_vport_all_vlan_table(vport);
10376 }
10377 
10378 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10379 					  const struct hclge_vlan_info *old_cfg)
10380 {
10381 	if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10382 		return true;
10383 
10384 	if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10385 		return true;
10386 
10387 	return false;
10388 }
10389 
10390 static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
10391 					   struct hclge_vlan_info *new_info,
10392 					   struct hclge_vlan_info *old_info)
10393 {
10394 	struct hclge_dev *hdev = vport->back;
10395 	int ret;
10396 
10397 	/* add new VLAN tag */
10398 	ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto),
10399 				       vport->vport_id, new_info->vlan_tag,
10400 				       false);
10401 	if (ret)
10402 		return ret;
10403 
10404 	vport->port_base_vlan_cfg.tbl_sta = false;
10405 	/* remove old VLAN tag */
10406 	if (old_info->vlan_tag == 0)
10407 		ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10408 					       true, 0);
10409 	else
10410 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10411 					       vport->vport_id,
10412 					       old_info->vlan_tag, true);
10413 	if (ret)
10414 		dev_err(&hdev->pdev->dev,
10415 			"failed to clear vport%u port base vlan %u, ret = %d.\n",
10416 			vport->vport_id, old_info->vlan_tag, ret);
10417 
10418 	return ret;
10419 }
10420 
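/* Apply a new port based VLAN configuration to a vport: program the TX/RX
 * VLAN offload first, rewrite the hardware VLAN filter only when the tag or
 * qos actually changed, then record the new state and VLAN info in the
 * vport and mark its VLAN filter state as needing a sync.
 */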
10421 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10422 				    struct hclge_vlan_info *vlan_info)
10423 {
10424 	struct hnae3_handle *nic = &vport->nic;
10425 	struct hclge_vlan_info *old_vlan_info;
10426 	int ret;
10427 
10428 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10429 
10430 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10431 				     vlan_info->qos);
10432 	if (ret)
10433 		return ret;
10434 
10435 	if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10436 		goto out;
10437 
10438 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY)
10439 		ret = hclge_modify_port_base_vlan_tag(vport, vlan_info,
10440 						      old_vlan_info);
10441 	else
10442 		ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10443 						       old_vlan_info);
10444 	if (ret)
10445 		return ret;
10446 
10447 out:
10448 	vport->port_base_vlan_cfg.state = state;
10449 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10450 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10451 	else
10452 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10453 
10454 	vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info;
10455 	vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10456 	vport->port_base_vlan_cfg.tbl_sta = true;
10457 	hclge_set_vport_vlan_fltr_change(vport);
10458 
10459 	return 0;
10460 }
10461 
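/* Derive the next port based VLAN state from the current state and the
 * requested vlan/qos pair:
 *   currently disabled, vlan and qos both zero -> NOCHANGE
 *   currently disabled, non-zero vlan or qos   -> ENABLE
 *   currently enabled,  vlan and qos both zero -> DISABLE
 *   currently enabled,  same vlan and qos      -> NOCHANGE
 *   currently enabled,  different vlan or qos  -> MODIFY
 */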
10462 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10463 					  enum hnae3_port_base_vlan_state state,
10464 					  u16 vlan, u8 qos)
10465 {
10466 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10467 		if (!vlan && !qos)
10468 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10469 
10470 		return HNAE3_PORT_BASE_VLAN_ENABLE;
10471 	}
10472 
10473 	if (!vlan && !qos)
10474 		return HNAE3_PORT_BASE_VLAN_DISABLE;
10475 
10476 	if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10477 	    vport->port_base_vlan_cfg.vlan_info.qos == qos)
10478 		return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10479 
10480 	return HNAE3_PORT_BASE_VLAN_MODIFY;
10481 }
10482 
10483 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10484 				    u16 vlan, u8 qos, __be16 proto)
10485 {
10486 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10487 	struct hclge_vport *vport = hclge_get_vport(handle);
10488 	struct hclge_dev *hdev = vport->back;
10489 	struct hclge_vlan_info vlan_info;
10490 	u16 state;
10491 	int ret;
10492 
10493 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10494 		return -EOPNOTSUPP;
10495 
10496 	vport = hclge_get_vf_vport(hdev, vfid);
10497 	if (!vport)
10498 		return -EINVAL;
10499 
10500 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
10501 	if (vlan > VLAN_N_VID - 1 || qos > 7)
10502 		return -EINVAL;
10503 	if (proto != htons(ETH_P_8021Q))
10504 		return -EPROTONOSUPPORT;
10505 
10506 	state = hclge_get_port_base_vlan_state(vport,
10507 					       vport->port_base_vlan_cfg.state,
10508 					       vlan, qos);
10509 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10510 		return 0;
10511 
10512 	vlan_info.vlan_tag = vlan;
10513 	vlan_info.qos = qos;
10514 	vlan_info.vlan_proto = ntohs(proto);
10515 
10516 	ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10517 	if (ret) {
10518 		dev_err(&hdev->pdev->dev,
10519 			"failed to update port base vlan for vf %d, ret = %d\n",
10520 			vfid, ret);
10521 		return ret;
10522 	}
10523 
10524 	/* There is a time window before the PF knows the VF is not alive,
10525 	 * which may cause the mailbox send to fail. That does not matter,
10526 	 * since the VF will query the state again when it reinitializes.
10527 	 * For DEVICE_VERSION_V3, the VF does not need to know about the
10528 	 * port based VLAN state.
10529 	 */
10530 	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
10531 		if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10532 			(void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10533 								vport->vport_id,
10534 								state,
10535 								&vlan_info);
10536 		else
10537 			set_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN,
10538 				&vport->need_notify);
10539 	}
10540 	return 0;
10541 }
10542 
10543 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10544 {
10545 	struct hclge_vlan_info *vlan_info;
10546 	struct hclge_vport *vport;
10547 	int ret;
10548 	int vf;
10549 
10550 	/* clear the port based VLAN for all VFs */
10551 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10552 		vport = &hdev->vport[vf];
10553 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10554 
10555 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10556 					       vport->vport_id,
10557 					       vlan_info->vlan_tag, true);
10558 		if (ret)
10559 			dev_err(&hdev->pdev->dev,
10560 				"failed to clear vf vlan for vf%d, ret = %d\n",
10561 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10562 	}
10563 }
10564 
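/* Add or remove a VLAN filter entry for the vport. While a reset is in
 * progress or has failed, deletions are only recorded in vlan_del_fail_bmap
 * and retried later by hclge_sync_vlan_filter(). The hardware table is
 * written directly only while the port based VLAN is disabled; otherwise
 * just the vport VLAN list is updated.
 */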
10565 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10566 			  u16 vlan_id, bool is_kill)
10567 {
10568 	struct hclge_vport *vport = hclge_get_vport(handle);
10569 	struct hclge_dev *hdev = vport->back;
10570 	bool writen_to_tbl = false;
10571 	int ret = 0;
10572 
10573 	/* When the device is resetting or the reset has failed, the firmware
10574 	 * is unable to handle the mailbox. Just record the VLAN id, and
10575 	 * remove it after the reset has finished.
10576 	 */
10577 	mutex_lock(&hdev->vport_lock);
10578 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10579 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10580 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10581 		mutex_unlock(&hdev->vport_lock);
10582 		return -EBUSY;
10583 	} else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) {
10584 		clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10585 	}
10586 	mutex_unlock(&hdev->vport_lock);
10587 
10588 	/* When the port based VLAN is enabled, it is used as the VLAN filter
10589 	 * entry. In this case, we don't update the VLAN filter table when the
10590 	 * user adds or removes a VLAN; we only update the vport VLAN list.
10591 	 * The VLAN ids in the list are not written to the VLAN filter table
10592 	 * until the port based VLAN is disabled.
10593 	 */
10594 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10595 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10596 					       vlan_id, is_kill);
10597 		writen_to_tbl = true;
10598 	}
10599 
10600 	if (!ret) {
10601 		if (!is_kill) {
10602 			hclge_add_vport_vlan_table(vport, vlan_id,
10603 						   writen_to_tbl);
10604 		} else if (is_kill && vlan_id != 0) {
10605 			mutex_lock(&hdev->vport_lock);
10606 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10607 			mutex_unlock(&hdev->vport_lock);
10608 		}
10609 	} else if (is_kill) {
10610 		/* When removing the hw VLAN filter failed, record the VLAN id
10611 		 * and try to remove it from hw later, to stay consistent
10612 		 * with the stack.
10613 		 */
10614 		mutex_lock(&hdev->vport_lock);
10615 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10616 		mutex_unlock(&hdev->vport_lock);
10617 	}
10618 
10619 	hclge_set_vport_vlan_fltr_change(vport);
10620 
10621 	return ret;
10622 }
10623 
10624 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10625 {
10626 	struct hclge_vport *vport;
10627 	int ret;
10628 	u16 i;
10629 
10630 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10631 		vport = &hdev->vport[i];
10632 		if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10633 					&vport->state))
10634 			continue;
10635 
10636 		ret = hclge_enable_vport_vlan_filter(vport,
10637 						     vport->req_vlan_fltr_en);
10638 		if (ret) {
10639 			dev_err(&hdev->pdev->dev,
10640 				"failed to sync vlan filter state for vport%u, ret = %d\n",
10641 				vport->vport_id, ret);
10642 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10643 				&vport->state);
10644 			return;
10645 		}
10646 	}
10647 }
10648 
10649 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10650 {
10651 #define HCLGE_MAX_SYNC_COUNT	60
10652 
10653 	int i, ret, sync_cnt = 0;
10654 	u16 vlan_id;
10655 
10656 	mutex_lock(&hdev->vport_lock);
10657 	/* sync the VLAN entries that previously failed to be removed, for all vports */
10658 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10659 		struct hclge_vport *vport = &hdev->vport[i];
10660 
10661 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10662 					 VLAN_N_VID);
10663 		while (vlan_id != VLAN_N_VID) {
10664 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10665 						       vport->vport_id, vlan_id,
10666 						       true);
10667 			if (ret && ret != -EINVAL) {
10668 				mutex_unlock(&hdev->vport_lock);
10669 				return;
10670 			}
10671 
10672 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10673 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10674 			hclge_set_vport_vlan_fltr_change(vport);
10675 
10676 			sync_cnt++;
10677 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) {
10678 				mutex_unlock(&hdev->vport_lock);
10679 				return;
10680 			}
10681 
10682 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10683 						 VLAN_N_VID);
10684 		}
10685 	}
10686 	mutex_unlock(&hdev->vport_lock);
10687 
10688 	hclge_sync_vlan_fltr_state(hdev);
10689 }
10690 
10691 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10692 {
10693 	struct hclge_config_max_frm_size_cmd *req;
10694 	struct hclge_desc desc;
10695 
10696 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10697 
10698 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10699 	req->max_frm_size = cpu_to_le16(new_mps);
10700 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10701 
10702 	return hclge_cmd_send(&hdev->hw, &desc, 1);
10703 }
10704 
10705 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10706 {
10707 	struct hclge_vport *vport = hclge_get_vport(handle);
10708 
10709 	return hclge_set_vport_mtu(vport, new_mtu);
10710 }
10711 
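/* Translate the requested MTU into a maximum frame size and program it.
 * The frame size covers the Ethernet header, the FCS and two VLAN tags,
 * e.g. an MTU of 1500 gives 1500 + 14 + 4 + 2 * 4 = 1526 bytes. For a VF
 * the result only needs to fit within the PF's mps; for the PF the MAC and
 * the packet buffers are reconfigured while the nic client is brought down.
 */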
10712 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10713 {
10714 	struct hclge_dev *hdev = vport->back;
10715 	int i, max_frm_size, ret;
10716 
10717 	/* HW supports two layers of VLAN tags */
10718 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10719 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10720 	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10721 		return -EINVAL;
10722 
10723 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10724 	mutex_lock(&hdev->vport_lock);
10725 	/* VF's mps must fit within hdev->mps */
10726 	if (vport->vport_id && max_frm_size > hdev->mps) {
10727 		mutex_unlock(&hdev->vport_lock);
10728 		return -EINVAL;
10729 	} else if (vport->vport_id) {
10730 		vport->mps = max_frm_size;
10731 		mutex_unlock(&hdev->vport_lock);
10732 		return 0;
10733 	}
10734 
10735 	/* PF's mps must not be less than any VF's mps */
10736 	for (i = 1; i < hdev->num_alloc_vport; i++)
10737 		if (max_frm_size < hdev->vport[i].mps) {
10738 			dev_err(&hdev->pdev->dev,
10739 				"failed to set pf mtu: smaller than the mps of vport %d, mps = %u.\n",
10740 				i, hdev->vport[i].mps);
10741 			mutex_unlock(&hdev->vport_lock);
10742 			return -EINVAL;
10743 		}
10744 
10745 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10746 
10747 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
10748 	if (ret) {
10749 		dev_err(&hdev->pdev->dev,
10750 			"Change mtu fail, ret =%d\n", ret);
10751 		goto out;
10752 	}
10753 
10754 	hdev->mps = max_frm_size;
10755 	vport->mps = max_frm_size;
10756 
10757 	ret = hclge_buffer_alloc(hdev);
10758 	if (ret)
10759 		dev_err(&hdev->pdev->dev,
10760 			"Allocate buffer fail, ret =%d\n", ret);
10761 
10762 out:
10763 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10764 	mutex_unlock(&hdev->vport_lock);
10765 	return ret;
10766 }
10767 
10768 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10769 				    bool enable)
10770 {
10771 	struct hclge_reset_tqp_queue_cmd *req;
10772 	struct hclge_desc desc;
10773 	int ret;
10774 
10775 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10776 
10777 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10778 	req->tqp_id = cpu_to_le16(queue_id);
10779 	if (enable)
10780 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10781 
10782 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10783 	if (ret) {
10784 		dev_err(&hdev->pdev->dev,
10785 			"Send tqp reset cmd error, status =%d\n", ret);
10786 		return ret;
10787 	}
10788 
10789 	return 0;
10790 }
10791 
10792 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
10793 				  u8 *reset_status)
10794 {
10795 	struct hclge_reset_tqp_queue_cmd *req;
10796 	struct hclge_desc desc;
10797 	int ret;
10798 
10799 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10800 
10801 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10802 	req->tqp_id = cpu_to_le16(queue_id);
10803 
10804 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10805 	if (ret) {
10806 		dev_err(&hdev->pdev->dev,
10807 			"Get reset status error, status =%d\n", ret);
10808 		return ret;
10809 	}
10810 
10811 	*reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10812 
10813 	return 0;
10814 }
10815 
10816 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10817 {
10818 	struct hclge_comm_tqp *tqp;
10819 	struct hnae3_queue *queue;
10820 
10821 	queue = handle->kinfo.tqp[queue_id];
10822 	tqp = container_of(queue, struct hclge_comm_tqp, q);
10823 
10824 	return tqp->index;
10825 }
10826 
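/* Reset every tqp of the handle one by one: assert the queue reset, poll
 * the ready_to_reset status (sleeping about 1ms between reads, up to
 * HCLGE_TQP_RESET_TRY_TIMES attempts) and then deassert the reset again.
 * Returns -ETIME if a queue never reports itself ready.
 */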
10827 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10828 {
10829 	struct hclge_vport *vport = hclge_get_vport(handle);
10830 	struct hclge_dev *hdev = vport->back;
10831 	u16 reset_try_times = 0;
10832 	u8 reset_status;
10833 	u16 queue_gid;
10834 	int ret;
10835 	u16 i;
10836 
10837 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
10838 		queue_gid = hclge_covert_handle_qid_global(handle, i);
10839 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10840 		if (ret) {
10841 			dev_err(&hdev->pdev->dev,
10842 				"failed to send reset tqp cmd, ret = %d\n",
10843 				ret);
10844 			return ret;
10845 		}
10846 
10847 		while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10848 			ret = hclge_get_reset_status(hdev, queue_gid,
10849 						     &reset_status);
10850 			if (ret)
10851 				return ret;
10852 
10853 			if (reset_status)
10854 				break;
10855 
10856 			/* Wait for tqp hw reset */
10857 			usleep_range(1000, 1200);
10858 		}
10859 
10860 		if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10861 			dev_err(&hdev->pdev->dev,
10862 				"wait for tqp hw reset timeout\n");
10863 			return -ETIME;
10864 		}
10865 
10866 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10867 		if (ret) {
10868 			dev_err(&hdev->pdev->dev,
10869 				"failed to deassert soft reset, ret = %d\n",
10870 				ret);
10871 			return ret;
10872 		}
10873 		reset_try_times = 0;
10874 	}
10875 	return 0;
10876 }
10877 
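/* Reset the RCB hardware of all the handle's tqps with a single command.
 * Firmware that does not support the command reports
 * HCLGE_RESET_RCB_NOT_SUPPORT, in which case we fall back to resetting
 * each tqp individually via hclge_reset_tqp_cmd().
 */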
10878 static int hclge_reset_rcb(struct hnae3_handle *handle)
10879 {
10880 #define HCLGE_RESET_RCB_NOT_SUPPORT	0U
10881 #define HCLGE_RESET_RCB_SUCCESS		1U
10882 
10883 	struct hclge_vport *vport = hclge_get_vport(handle);
10884 	struct hclge_dev *hdev = vport->back;
10885 	struct hclge_reset_cmd *req;
10886 	struct hclge_desc desc;
10887 	u8 return_status;
10888 	u16 queue_gid;
10889 	int ret;
10890 
10891 	queue_gid = hclge_covert_handle_qid_global(handle, 0);
10892 
10893 	req = (struct hclge_reset_cmd *)desc.data;
10894 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10895 	hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10896 	req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10897 	req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10898 
10899 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10900 	if (ret) {
10901 		dev_err(&hdev->pdev->dev,
10902 			"failed to send rcb reset cmd, ret = %d\n", ret);
10903 		return ret;
10904 	}
10905 
10906 	return_status = req->fun_reset_rcb_return_status;
10907 	if (return_status == HCLGE_RESET_RCB_SUCCESS)
10908 		return 0;
10909 
10910 	if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10911 		dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10912 			return_status);
10913 		return -EIO;
10914 	}
10915 
10916 	/* if the reset rcb command is unsupported, fall back to sending the
10917 	 * reset tqp command to reset each tqp individually
10918 	 */
10919 	return hclge_reset_tqp_cmd(handle);
10920 }
10921 
10922 int hclge_reset_tqp(struct hnae3_handle *handle)
10923 {
10924 	struct hclge_vport *vport = hclge_get_vport(handle);
10925 	struct hclge_dev *hdev = vport->back;
10926 	int ret;
10927 
10928 	/* only need to disable PF's tqp */
10929 	if (!vport->vport_id) {
10930 		ret = hclge_tqp_enable(handle, false);
10931 		if (ret) {
10932 			dev_err(&hdev->pdev->dev,
10933 				"failed to disable tqp, ret = %d\n", ret);
10934 			return ret;
10935 		}
10936 	}
10937 
10938 	return hclge_reset_rcb(handle);
10939 }
10940 
10941 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10942 {
10943 	struct hclge_vport *vport = hclge_get_vport(handle);
10944 	struct hclge_dev *hdev = vport->back;
10945 
10946 	return hdev->fw_version;
10947 }
10948 
10949 int hclge_query_scc_version(struct hclge_dev *hdev, u32 *scc_version)
10950 {
10951 	struct hclge_comm_query_scc_cmd *resp;
10952 	struct hclge_desc desc;
10953 	int ret;
10954 
10955 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_SCC_VER, true);
10956 	resp = (struct hclge_comm_query_scc_cmd *)desc.data;
10957 
10958 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10959 	if (ret)
10960 		return ret;
10961 
10962 	*scc_version = le32_to_cpu(resp->scc_version);
10963 
10964 	return 0;
10965 }
10966 
10967 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10968 {
10969 	struct phy_device *phydev = hdev->hw.mac.phydev;
10970 
10971 	if (!phydev)
10972 		return;
10973 
10974 	phy_set_asym_pause(phydev, rx_en, tx_en);
10975 }
10976 
10977 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10978 {
10979 	int ret;
10980 
10981 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10982 		return 0;
10983 
10984 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10985 	if (ret)
10986 		dev_err(&hdev->pdev->dev,
10987 			"configure pauseparam error, ret = %d.\n", ret);
10988 
10989 	return ret;
10990 }
10991 
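/* Resolve the pause configuration from the PHY autoneg result: the local
 * and link partner advertisements are fed to mii_resolve_flowctrl_fdx(),
 * e.g. when both sides advertise symmetric pause, rx and tx pause are both
 * enabled, while half duplex forces both off. When autoneg is off,
 * hclge_mac_pause_setup_hw() is used instead.
 */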
10992 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10993 {
10994 	struct phy_device *phydev = hdev->hw.mac.phydev;
10995 	u16 remote_advertising = 0;
10996 	u16 local_advertising;
10997 	u32 rx_pause, tx_pause;
10998 	u8 flowctl;
10999 
11000 	if (!phydev->link)
11001 		return 0;
11002 
11003 	if (!phydev->autoneg)
11004 		return hclge_mac_pause_setup_hw(hdev);
11005 
11006 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
11007 
11008 	if (phydev->pause)
11009 		remote_advertising = LPA_PAUSE_CAP;
11010 
11011 	if (phydev->asym_pause)
11012 		remote_advertising |= LPA_PAUSE_ASYM;
11013 
11014 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
11015 					   remote_advertising);
11016 	tx_pause = flowctl & FLOW_CTRL_TX;
11017 	rx_pause = flowctl & FLOW_CTRL_RX;
11018 
11019 	if (phydev->duplex == HCLGE_MAC_HALF) {
11020 		tx_pause = 0;
11021 		rx_pause = 0;
11022 	}
11023 
11024 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
11025 }
11026 
11027 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
11028 				 u32 *rx_en, u32 *tx_en)
11029 {
11030 	struct hclge_vport *vport = hclge_get_vport(handle);
11031 	struct hclge_dev *hdev = vport->back;
11032 	u8 media_type = hdev->hw.mac.media_type;
11033 
11034 	*auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
11035 		    hclge_get_autoneg(handle) : 0;
11036 
11037 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
11038 		*rx_en = 0;
11039 		*tx_en = 0;
11040 		return;
11041 	}
11042 
11043 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
11044 		*rx_en = 1;
11045 		*tx_en = 0;
11046 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
11047 		*tx_en = 1;
11048 		*rx_en = 0;
11049 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
11050 		*rx_en = 1;
11051 		*tx_en = 1;
11052 	} else {
11053 		*rx_en = 0;
11054 		*tx_en = 0;
11055 	}
11056 }
11057 
11058 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
11059 					 u32 rx_en, u32 tx_en)
11060 {
11061 	if (rx_en && tx_en)
11062 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
11063 	else if (rx_en && !tx_en)
11064 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
11065 	else if (!rx_en && tx_en)
11066 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
11067 	else
11068 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
11069 
11070 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
11071 }
11072 
11073 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
11074 				u32 rx_en, u32 tx_en)
11075 {
11076 	struct hclge_vport *vport = hclge_get_vport(handle);
11077 	struct hclge_dev *hdev = vport->back;
11078 	struct phy_device *phydev = hdev->hw.mac.phydev;
11079 	u32 fc_autoneg;
11080 
11081 	if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
11082 		fc_autoneg = hclge_get_autoneg(handle);
11083 		if (auto_neg != fc_autoneg) {
11084 			dev_info(&hdev->pdev->dev,
11085 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
11086 			return -EOPNOTSUPP;
11087 		}
11088 	}
11089 
11090 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
11091 		dev_info(&hdev->pdev->dev,
11092 			 "Priority flow control enabled. Cannot set link flow control.\n");
11093 		return -EOPNOTSUPP;
11094 	}
11095 
11096 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
11097 
11098 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
11099 
11100 	if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
11101 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
11102 
11103 	if (phydev)
11104 		return phy_start_aneg(phydev);
11105 
11106 	return -EOPNOTSUPP;
11107 }
11108 
11109 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
11110 					  u8 *auto_neg, u32 *speed, u8 *duplex, u32 *lane_num)
11111 {
11112 	struct hclge_vport *vport = hclge_get_vport(handle);
11113 	struct hclge_dev *hdev = vport->back;
11114 
11115 	if (speed)
11116 		*speed = hdev->hw.mac.speed;
11117 	if (duplex)
11118 		*duplex = hdev->hw.mac.duplex;
11119 	if (auto_neg)
11120 		*auto_neg = hdev->hw.mac.autoneg;
11121 	if (lane_num)
11122 		*lane_num = hdev->hw.mac.lane_num;
11123 }
11124 
11125 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
11126 				 u8 *module_type)
11127 {
11128 	struct hclge_vport *vport = hclge_get_vport(handle);
11129 	struct hclge_dev *hdev = vport->back;
11130 
11131 	/* When the nic is down, the service task is not running and does not
11132 	 * update the port information every second. Query the port information
11133 	 * before returning the media type to ensure it is up to date.
11134 	 */
11135 	hclge_update_port_info(hdev);
11136 
11137 	if (media_type)
11138 		*media_type = hdev->hw.mac.media_type;
11139 
11140 	if (module_type)
11141 		*module_type = hdev->hw.mac.module_type;
11142 }
11143 
11144 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
11145 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
11146 {
11147 	struct hclge_vport *vport = hclge_get_vport(handle);
11148 	struct hclge_dev *hdev = vport->back;
11149 	struct phy_device *phydev = hdev->hw.mac.phydev;
11150 	int mdix_ctrl, mdix, is_resolved;
11151 	unsigned int retval;
11152 
11153 	if (!phydev) {
11154 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11155 		*tp_mdix = ETH_TP_MDI_INVALID;
11156 		return;
11157 	}
11158 
11159 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11160 
11161 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11162 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11163 				    HCLGE_PHY_MDIX_CTRL_S);
11164 
11165 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11166 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11167 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11168 
11169 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11170 
11171 	switch (mdix_ctrl) {
11172 	case 0x0:
11173 		*tp_mdix_ctrl = ETH_TP_MDI;
11174 		break;
11175 	case 0x1:
11176 		*tp_mdix_ctrl = ETH_TP_MDI_X;
11177 		break;
11178 	case 0x3:
11179 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11180 		break;
11181 	default:
11182 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11183 		break;
11184 	}
11185 
11186 	if (!is_resolved)
11187 		*tp_mdix = ETH_TP_MDI_INVALID;
11188 	else if (mdix)
11189 		*tp_mdix = ETH_TP_MDI_X;
11190 	else
11191 		*tp_mdix = ETH_TP_MDI;
11192 }
11193 
11194 static void hclge_info_show(struct hclge_dev *hdev)
11195 {
11196 	struct hnae3_handle *handle = &hdev->vport->nic;
11197 	struct device *dev = &hdev->pdev->dev;
11198 
11199 	dev_info(dev, "PF info begin:\n");
11200 
11201 	dev_info(dev, "Number of task queue pairs: %u\n", hdev->num_tqps);
11202 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11203 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11204 	dev_info(dev, "Number of vports: %u\n", hdev->num_alloc_vport);
11205 	dev_info(dev, "Number of VFs for this PF: %u\n", hdev->num_req_vfs);
11206 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11207 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11208 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11209 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11210 	dev_info(dev, "This is %s PF\n",
11211 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11212 	dev_info(dev, "DCB %s\n",
11213 		 handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable");
11214 	dev_info(dev, "MQPRIO %s\n",
11215 		 handle->kinfo.tc_info.mqprio_active ? "enable" : "disable");
11216 	dev_info(dev, "Default tx spare buffer size: %u\n",
11217 		 hdev->tx_spare_buf_size);
11218 
11219 	dev_info(dev, "PF info end.\n");
11220 }
11221 
11222 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11223 					  struct hclge_vport *vport)
11224 {
11225 	struct hnae3_client *client = vport->nic.client;
11226 	struct hclge_dev *hdev = ae_dev->priv;
11227 	int rst_cnt = hdev->rst_stats.reset_cnt;
11228 	int ret;
11229 
11230 	ret = client->ops->init_instance(&vport->nic);
11231 	if (ret)
11232 		return ret;
11233 
11234 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11235 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11236 	    rst_cnt != hdev->rst_stats.reset_cnt) {
11237 		ret = -EBUSY;
11238 		goto init_nic_err;
11239 	}
11240 
11241 	/* Enable nic hw error interrupts */
11242 	ret = hclge_config_nic_hw_error(hdev, true);
11243 	if (ret) {
11244 		dev_err(&ae_dev->pdev->dev,
11245 			"fail(%d) to enable hw error interrupts\n", ret);
11246 		goto init_nic_err;
11247 	}
11248 
11249 	hnae3_set_client_init_flag(client, ae_dev, 1);
11250 
11251 	if (netif_msg_drv(&hdev->vport->nic))
11252 		hclge_info_show(hdev);
11253 
11254 	return ret;
11255 
11256 init_nic_err:
11257 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11258 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11259 		msleep(HCLGE_WAIT_RESET_DONE);
11260 
11261 	client->ops->uninit_instance(&vport->nic, 0);
11262 
11263 	return ret;
11264 }
11265 
11266 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11267 					   struct hclge_vport *vport)
11268 {
11269 	struct hclge_dev *hdev = ae_dev->priv;
11270 	struct hnae3_client *client;
11271 	int rst_cnt;
11272 	int ret;
11273 
11274 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11275 	    !hdev->nic_client)
11276 		return 0;
11277 
11278 	client = hdev->roce_client;
11279 	ret = hclge_init_roce_base_info(vport);
11280 	if (ret)
11281 		return ret;
11282 
11283 	rst_cnt = hdev->rst_stats.reset_cnt;
11284 	ret = client->ops->init_instance(&vport->roce);
11285 	if (ret)
11286 		return ret;
11287 
11288 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11289 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11290 	    rst_cnt != hdev->rst_stats.reset_cnt) {
11291 		ret = -EBUSY;
11292 		goto init_roce_err;
11293 	}
11294 
11295 	/* Enable roce ras interrupts */
11296 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
11297 	if (ret) {
11298 		dev_err(&ae_dev->pdev->dev,
11299 			"fail(%d) to enable roce ras interrupts\n", ret);
11300 		goto init_roce_err;
11301 	}
11302 
11303 	hnae3_set_client_init_flag(client, ae_dev, 1);
11304 
11305 	return 0;
11306 
11307 init_roce_err:
11308 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11309 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11310 		msleep(HCLGE_WAIT_RESET_DONE);
11311 
11312 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11313 
11314 	return ret;
11315 }
11316 
11317 static int hclge_init_client_instance(struct hnae3_client *client,
11318 				      struct hnae3_ae_dev *ae_dev)
11319 {
11320 	struct hclge_dev *hdev = ae_dev->priv;
11321 	struct hclge_vport *vport = &hdev->vport[0];
11322 	int ret;
11323 
11324 	switch (client->type) {
11325 	case HNAE3_CLIENT_KNIC:
11326 		hdev->nic_client = client;
11327 		vport->nic.client = client;
11328 		ret = hclge_init_nic_client_instance(ae_dev, vport);
11329 		if (ret)
11330 			goto clear_nic;
11331 
11332 		ret = hclge_init_roce_client_instance(ae_dev, vport);
11333 		if (ret)
11334 			goto clear_roce;
11335 
11336 		break;
11337 	case HNAE3_CLIENT_ROCE:
11338 		if (hnae3_dev_roce_supported(hdev)) {
11339 			hdev->roce_client = client;
11340 			vport->roce.client = client;
11341 		}
11342 
11343 		ret = hclge_init_roce_client_instance(ae_dev, vport);
11344 		if (ret)
11345 			goto clear_roce;
11346 
11347 		break;
11348 	default:
11349 		return -EINVAL;
11350 	}
11351 
11352 	return 0;
11353 
11354 clear_nic:
11355 	hdev->nic_client = NULL;
11356 	vport->nic.client = NULL;
11357 	return ret;
11358 clear_roce:
11359 	hdev->roce_client = NULL;
11360 	vport->roce.client = NULL;
11361 	return ret;
11362 }
11363 
11364 static bool hclge_uninit_need_wait(struct hclge_dev *hdev)
11365 {
11366 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11367 	       test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
11368 }
11369 
11370 static void hclge_uninit_client_instance(struct hnae3_client *client,
11371 					 struct hnae3_ae_dev *ae_dev)
11372 {
11373 	struct hclge_dev *hdev = ae_dev->priv;
11374 	struct hclge_vport *vport = &hdev->vport[0];
11375 
11376 	if (hdev->roce_client) {
11377 		clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11378 		while (hclge_uninit_need_wait(hdev))
11379 			msleep(HCLGE_WAIT_RESET_DONE);
11380 
11381 		hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11382 		hdev->roce_client = NULL;
11383 		vport->roce.client = NULL;
11384 	}
11385 	if (client->type == HNAE3_CLIENT_ROCE)
11386 		return;
11387 	if (hdev->nic_client && client->ops->uninit_instance) {
11388 		clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11389 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11390 			msleep(HCLGE_WAIT_RESET_DONE);
11391 
11392 		client->ops->uninit_instance(&vport->nic, 0);
11393 		hdev->nic_client = NULL;
11394 		vport->nic.client = NULL;
11395 	}
11396 }
11397 
11398 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11399 {
11400 	struct pci_dev *pdev = hdev->pdev;
11401 	struct hclge_hw *hw = &hdev->hw;
11402 
11403 	/* if the device does not have device memory, return directly */
11404 	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11405 		return 0;
11406 
11407 	hw->hw.mem_base =
11408 		devm_ioremap_wc(&pdev->dev,
11409 				pci_resource_start(pdev, HCLGE_MEM_BAR),
11410 				pci_resource_len(pdev, HCLGE_MEM_BAR));
11411 	if (!hw->hw.mem_base) {
11412 		dev_err(&pdev->dev, "failed to map device memory\n");
11413 		return -EFAULT;
11414 	}
11415 
11416 	return 0;
11417 }
11418 
11419 static int hclge_pci_init(struct hclge_dev *hdev)
11420 {
11421 	struct pci_dev *pdev = hdev->pdev;
11422 	struct hclge_hw *hw;
11423 	int ret;
11424 
11425 	ret = pci_enable_device(pdev);
11426 	if (ret) {
11427 		dev_err(&pdev->dev, "failed to enable PCI device\n");
11428 		return ret;
11429 	}
11430 
11431 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11432 	if (ret) {
11433 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11434 		if (ret) {
11435 			dev_err(&pdev->dev,
11436 				"can't set consistent PCI DMA");
11437 			goto err_disable_device;
11438 		}
11439 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11440 	}
11441 
11442 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11443 	if (ret) {
11444 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11445 		goto err_disable_device;
11446 	}
11447 
11448 	pci_set_master(pdev);
11449 	hw = &hdev->hw;
11450 	hw->hw.io_base = pcim_iomap(pdev, 2, 0);
11451 	if (!hw->hw.io_base) {
11452 		dev_err(&pdev->dev, "Can't map configuration register space\n");
11453 		ret = -ENOMEM;
11454 		goto err_release_regions;
11455 	}
11456 
11457 	ret = hclge_dev_mem_map(hdev);
11458 	if (ret)
11459 		goto err_unmap_io_base;
11460 
11461 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11462 
11463 	return 0;
11464 
11465 err_unmap_io_base:
11466 	pcim_iounmap(pdev, hdev->hw.hw.io_base);
11467 err_release_regions:
11468 	pci_release_regions(pdev);
11469 err_disable_device:
11470 	pci_disable_device(pdev);
11471 
11472 	return ret;
11473 }
11474 
11475 static void hclge_pci_uninit(struct hclge_dev *hdev)
11476 {
11477 	struct pci_dev *pdev = hdev->pdev;
11478 
11479 	if (hdev->hw.hw.mem_base)
11480 		devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
11481 
11482 	pcim_iounmap(pdev, hdev->hw.hw.io_base);
11483 	pci_free_irq_vectors(pdev);
11484 	pci_release_regions(pdev);
11485 	pci_disable_device(pdev);
11486 }
11487 
11488 static void hclge_state_init(struct hclge_dev *hdev)
11489 {
11490 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11491 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11492 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11493 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11494 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11495 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11496 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11497 }
11498 
11499 static void hclge_state_uninit(struct hclge_dev *hdev)
11500 {
11501 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11502 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11503 
11504 	if (hdev->reset_timer.function)
11505 		del_timer_sync(&hdev->reset_timer);
11506 	if (hdev->service_task.work.func)
11507 		cancel_delayed_work_sync(&hdev->service_task);
11508 }
11509 
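/* Prepare the device for an externally triggered reset such as FLR. The
 * prepare step is retried up to HCLGE_RESET_RETRY_CNT times, waiting
 * HCLGE_RESET_RETRY_WAIT_MS between attempts; the misc vector is then
 * disabled and the command queue marked as disabled until
 * hclge_reset_done() rebuilds the device.
 */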
11510 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11511 					enum hnae3_reset_type rst_type)
11512 {
11513 #define HCLGE_RESET_RETRY_WAIT_MS	500
11514 #define HCLGE_RESET_RETRY_CNT	5
11515 
11516 	struct hclge_dev *hdev = ae_dev->priv;
11517 	int retry_cnt = 0;
11518 	int ret;
11519 
11520 	while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11521 		down(&hdev->reset_sem);
11522 		set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11523 		hdev->reset_type = rst_type;
11524 		ret = hclge_reset_prepare(hdev);
11525 		if (!ret && !hdev->reset_pending)
11526 			break;
11527 
11528 		dev_err(&hdev->pdev->dev,
11529 			"failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
11530 			ret, hdev->reset_pending, retry_cnt);
11531 		clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11532 		up(&hdev->reset_sem);
11533 		msleep(HCLGE_RESET_RETRY_WAIT_MS);
11534 	}
11535 
11536 	/* disable the misc vector before the reset is done */
11537 	hclge_enable_vector(&hdev->misc_vector, false);
11538 	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
11539 
11540 	if (hdev->reset_type == HNAE3_FLR_RESET)
11541 		hdev->rst_stats.flr_rst_cnt++;
11542 }
11543 
11544 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11545 {
11546 	struct hclge_dev *hdev = ae_dev->priv;
11547 	int ret;
11548 
11549 	hclge_enable_vector(&hdev->misc_vector, true);
11550 
11551 	ret = hclge_reset_rebuild(hdev);
11552 	if (ret)
11553 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11554 
11555 	hdev->reset_type = HNAE3_NONE_RESET;
11556 	if (test_and_clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11557 		up(&hdev->reset_sem);
11558 }
11559 
11560 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11561 {
11562 	u16 i;
11563 
11564 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11565 		struct hclge_vport *vport = &hdev->vport[i];
11566 		int ret;
11567 
11568 		/* Send cmd to clear vport's FUNC_RST_ING */
11569 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11570 		if (ret)
11571 			dev_warn(&hdev->pdev->dev,
11572 				 "clear vport(%u) rst failed %d!\n",
11573 				 vport->vport_id, ret);
11574 	}
11575 }
11576 
11577 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11578 {
11579 	struct hclge_desc desc;
11580 	int ret;
11581 
11582 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
11583 
11584 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11585 	/* This new command is only supported by new firmware; it will
11586 	 * fail with older firmware. The error value -EOPNOTSUPP can only
11587 	 * be returned by older firmware running this command. To keep the
11588 	 * code backward compatible, we override this value and return
11589 	 * success.
11590 	 */
11591 	if (ret && ret != -EOPNOTSUPP) {
11592 		dev_err(&hdev->pdev->dev,
11593 			"failed to clear hw resource, ret = %d\n", ret);
11594 		return ret;
11595 	}
11596 	return 0;
11597 }
11598 
11599 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11600 {
11601 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11602 		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11603 }
11604 
11605 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11606 {
11607 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11608 		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11609 }
11610 
11611 static struct hclge_wol_info *hclge_get_wol_info(struct hnae3_handle *handle)
11612 {
11613 	struct hclge_vport *vport = hclge_get_vport(handle);
11614 
11615 	return &vport->back->hw.mac.wol;
11616 }
11617 
11618 static int hclge_get_wol_supported_mode(struct hclge_dev *hdev,
11619 					u32 *wol_supported)
11620 {
11621 	struct hclge_query_wol_supported_cmd *wol_supported_cmd;
11622 	struct hclge_desc desc;
11623 	int ret;
11624 
11625 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_GET_SUPPORTED_MODE,
11626 				   true);
11627 	wol_supported_cmd = (struct hclge_query_wol_supported_cmd *)desc.data;
11628 
11629 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11630 	if (ret) {
11631 		dev_err(&hdev->pdev->dev,
11632 			"failed to query wol supported, ret = %d\n", ret);
11633 		return ret;
11634 	}
11635 
11636 	*wol_supported = le32_to_cpu(wol_supported_cmd->supported_wake_mode);
11637 
11638 	return 0;
11639 }
11640 
11641 static int hclge_set_wol_cfg(struct hclge_dev *hdev,
11642 			     struct hclge_wol_info *wol_info)
11643 {
11644 	struct hclge_wol_cfg_cmd *wol_cfg_cmd;
11645 	struct hclge_desc desc;
11646 	int ret;
11647 
11648 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_CFG, false);
11649 	wol_cfg_cmd = (struct hclge_wol_cfg_cmd *)desc.data;
11650 	wol_cfg_cmd->wake_on_lan_mode = cpu_to_le32(wol_info->wol_current_mode);
11651 	wol_cfg_cmd->sopass_size = wol_info->wol_sopass_size;
11652 	memcpy(wol_cfg_cmd->sopass, wol_info->wol_sopass, SOPASS_MAX);
11653 
11654 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11655 	if (ret)
11656 		dev_err(&hdev->pdev->dev,
11657 			"failed to set wol config, ret = %d\n", ret);
11658 
11659 	return ret;
11660 }
11661 
11662 static int hclge_update_wol(struct hclge_dev *hdev)
11663 {
11664 	struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;
11665 
11666 	if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
11667 		return 0;
11668 
11669 	return hclge_set_wol_cfg(hdev, wol_info);
11670 }
11671 
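/* Query the wake-on-lan modes supported by the firmware and write back a
 * cleared configuration. Devices without WoL support simply return 0 so
 * that initialization can continue.
 */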
11672 static int hclge_init_wol(struct hclge_dev *hdev)
11673 {
11674 	struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;
11675 	int ret;
11676 
11677 	if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
11678 		return 0;
11679 
11680 	memset(wol_info, 0, sizeof(struct hclge_wol_info));
11681 	ret = hclge_get_wol_supported_mode(hdev,
11682 					   &wol_info->wol_support_mode);
11683 	if (ret) {
11684 		wol_info->wol_support_mode = 0;
11685 		return ret;
11686 	}
11687 
11688 	return hclge_update_wol(hdev);
11689 }
11690 
11691 static void hclge_get_wol(struct hnae3_handle *handle,
11692 			  struct ethtool_wolinfo *wol)
11693 {
11694 	struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);
11695 
11696 	wol->supported = wol_info->wol_support_mode;
11697 	wol->wolopts = wol_info->wol_current_mode;
11698 	if (wol_info->wol_current_mode & WAKE_MAGICSECURE)
11699 		memcpy(wol->sopass, wol_info->wol_sopass, SOPASS_MAX);
11700 }
11701 
11702 static int hclge_set_wol(struct hnae3_handle *handle,
11703 			 struct ethtool_wolinfo *wol)
11704 {
11705 	struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);
11706 	struct hclge_vport *vport = hclge_get_vport(handle);
11707 	u32 wol_mode;
11708 	int ret;
11709 
11710 	wol_mode = wol->wolopts;
11711 	if (wol_mode & ~wol_info->wol_support_mode)
11712 		return -EINVAL;
11713 
11714 	wol_info->wol_current_mode = wol_mode;
11715 	if (wol_mode & WAKE_MAGICSECURE) {
11716 		memcpy(wol_info->wol_sopass, wol->sopass, SOPASS_MAX);
11717 		wol_info->wol_sopass_size = SOPASS_MAX;
11718 	} else {
11719 		wol_info->wol_sopass_size = 0;
11720 	}
11721 
11722 	ret = hclge_set_wol_cfg(vport->back, wol_info);
11723 	if (ret)
11724 		wol_info->wol_current_mode = 0;
11725 
11726 	return ret;
11727 }
11728 
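/* Probe-time initialization of the PF: map the PCI resources, bring up the
 * firmware command queue, query capabilities and device specifications,
 * then set up MSI-X, tqps, vports, MAC, VLAN, TM, RSS, the flow director
 * and PTP before enabling the misc vector and scheduling the service task.
 */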
11729 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11730 {
11731 	struct pci_dev *pdev = ae_dev->pdev;
11732 	struct hclge_dev *hdev;
11733 	int ret;
11734 
11735 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11736 	if (!hdev)
11737 		return -ENOMEM;
11738 
11739 	hdev->pdev = pdev;
11740 	hdev->ae_dev = ae_dev;
11741 	hdev->reset_type = HNAE3_NONE_RESET;
11742 	hdev->reset_level = HNAE3_FUNC_RESET;
11743 	ae_dev->priv = hdev;
11744 
11745 	/* HW supports two layers of VLAN tags */
11746 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11747 
11748 	mutex_init(&hdev->vport_lock);
11749 	spin_lock_init(&hdev->fd_rule_lock);
11750 	sema_init(&hdev->reset_sem, 1);
11751 
11752 	ret = hclge_pci_init(hdev);
11753 	if (ret)
11754 		goto out;
11755 
11756 	/* Initialize the firmware command queue */
11757 	ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
11758 	if (ret)
11759 		goto err_pci_uninit;
11760 
11761 	/* Initialize firmware command handling */
11762 	hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclge_cmq_ops);
11763 	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
11764 				  true, hdev->reset_pending);
11765 	if (ret)
11766 		goto err_cmd_uninit;
11767 
11768 	ret  = hclge_clear_hw_resource(hdev);
11769 	if (ret)
11770 		goto err_cmd_uninit;
11771 
11772 	ret = hclge_get_cap(hdev);
11773 	if (ret)
11774 		goto err_cmd_uninit;
11775 
11776 	ret = hclge_query_dev_specs(hdev);
11777 	if (ret) {
11778 		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11779 			ret);
11780 		goto err_cmd_uninit;
11781 	}
11782 
11783 	ret = hclge_configure(hdev);
11784 	if (ret) {
11785 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11786 		goto err_cmd_uninit;
11787 	}
11788 
11789 	ret = hclge_init_msi(hdev);
11790 	if (ret) {
11791 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11792 		goto err_cmd_uninit;
11793 	}
11794 
11795 	ret = hclge_misc_irq_init(hdev);
11796 	if (ret)
11797 		goto err_msi_uninit;
11798 
11799 	ret = hclge_alloc_tqps(hdev);
11800 	if (ret) {
11801 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11802 		goto err_msi_irq_uninit;
11803 	}
11804 
11805 	ret = hclge_alloc_vport(hdev);
11806 	if (ret)
11807 		goto err_msi_irq_uninit;
11808 
11809 	ret = hclge_map_tqp(hdev);
11810 	if (ret)
11811 		goto err_msi_irq_uninit;
11812 
11813 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
11814 		clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
11815 		if (hnae3_dev_phy_imp_supported(hdev))
11816 			ret = hclge_update_tp_port_info(hdev);
11817 		else
11818 			ret = hclge_mac_mdio_config(hdev);
11819 
11820 		if (ret)
11821 			goto err_msi_irq_uninit;
11822 	}
11823 
11824 	ret = hclge_init_umv_space(hdev);
11825 	if (ret)
11826 		goto err_mdiobus_unreg;
11827 
11828 	ret = hclge_mac_init(hdev);
11829 	if (ret) {
11830 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11831 		goto err_mdiobus_unreg;
11832 	}
11833 
11834 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11835 	if (ret) {
11836 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11837 		goto err_mdiobus_unreg;
11838 	}
11839 
11840 	ret = hclge_config_gro(hdev);
11841 	if (ret)
11842 		goto err_mdiobus_unreg;
11843 
11844 	ret = hclge_init_vlan_config(hdev);
11845 	if (ret) {
11846 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11847 		goto err_mdiobus_unreg;
11848 	}
11849 
11850 	ret = hclge_tm_schd_init(hdev);
11851 	if (ret) {
11852 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11853 		goto err_mdiobus_unreg;
11854 	}
11855 
11856 	ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev,
11857 				      &hdev->rss_cfg);
11858 	if (ret) {
11859 		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11860 		goto err_mdiobus_unreg;
11861 	}
11862 
11863 	ret = hclge_rss_init_hw(hdev);
11864 	if (ret) {
11865 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11866 		goto err_mdiobus_unreg;
11867 	}
11868 
11869 	ret = init_mgr_tbl(hdev);
11870 	if (ret) {
11871 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11872 		goto err_mdiobus_unreg;
11873 	}
11874 
11875 	ret = hclge_init_fd_config(hdev);
11876 	if (ret) {
11877 		dev_err(&pdev->dev,
11878 			"fd table init fail, ret=%d\n", ret);
11879 		goto err_mdiobus_unreg;
11880 	}
11881 
11882 	ret = hclge_ptp_init(hdev);
11883 	if (ret)
11884 		goto err_mdiobus_unreg;
11885 
11886 	ret = hclge_update_port_info(hdev);
11887 	if (ret)
11888 		goto err_ptp_uninit;
11889 
11890 	INIT_KFIFO(hdev->mac_tnl_log);
11891 
11892 	hclge_dcb_ops_set(hdev);
11893 
11894 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11895 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11896 
11897 	hclge_clear_all_event_cause(hdev);
11898 	hclge_clear_resetting_state(hdev);
11899 
11900 	/* Log and clear the hw errors that have already occurred */
11901 	if (hnae3_dev_ras_imp_supported(hdev))
11902 		hclge_handle_occurred_error(hdev);
11903 	else
11904 		hclge_handle_all_hns_hw_errors(ae_dev);
11905 
11906 	/* request a delayed reset for error recovery, because an immediate
11907 	 * global reset on one PF may affect pending initialization of other PFs
11908 	 */
11909 	if (ae_dev->hw_err_reset_req) {
11910 		enum hnae3_reset_type reset_level;
11911 
11912 		reset_level = hclge_get_reset_level(ae_dev,
11913 						    &ae_dev->hw_err_reset_req);
11914 		hclge_set_def_reset_request(ae_dev, reset_level);
11915 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11916 	}
11917 
11918 	hclge_init_rxd_adv_layout(hdev);
11919 
11920 	ret = hclge_init_wol(hdev);
11921 	if (ret)
11922 		dev_warn(&pdev->dev,
11923 			 "failed to init wake on lan, ret = %d\n", ret);
11924 
11925 	ret = hclge_devlink_init(hdev);
11926 	if (ret)
11927 		goto err_ptp_uninit;
11928 
11929 	hclge_state_init(hdev);
11930 	hdev->last_reset_time = jiffies;
11931 
11932 	/* Enable MISC vector(vector0) */
11933 	enable_irq(hdev->misc_vector.vector_irq);
11934 	hclge_enable_vector(&hdev->misc_vector, true);
11935 
11936 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11937 		 HCLGE_DRIVER_NAME);
11938 
11939 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11940 	return 0;
11941 
11942 err_ptp_uninit:
11943 	hclge_ptp_uninit(hdev);
11944 err_mdiobus_unreg:
11945 	if (hdev->hw.mac.phydev)
11946 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
11947 err_msi_irq_uninit:
11948 	hclge_misc_irq_uninit(hdev);
11949 err_msi_uninit:
11950 	pci_free_irq_vectors(pdev);
11951 err_cmd_uninit:
11952 	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
11953 err_pci_uninit:
11954 	pcim_iounmap(pdev, hdev->hw.hw.io_base);
11955 	pci_release_regions(pdev);
11956 	pci_disable_device(pdev);
11957 out:
11958 	mutex_destroy(&hdev->vport_lock);
11959 	return ret;
11960 }
11961 
11962 static void hclge_stats_clear(struct hclge_dev *hdev)
11963 {
11964 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11965 	memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats));
11966 }
11967 
11968 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11969 {
11970 	return hclge_config_switch_param(hdev, vf, enable,
11971 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11972 }
11973 
11974 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11975 {
11976 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11977 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
11978 					  enable, vf);
11979 }
11980 
11981 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11982 {
11983 	int ret;
11984 
11985 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11986 	if (ret) {
11987 		dev_err(&hdev->pdev->dev,
11988 			"Set vf %d mac spoof check %s failed, ret=%d\n",
11989 			vf, enable ? "on" : "off", ret);
11990 		return ret;
11991 	}
11992 
11993 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11994 	if (ret)
11995 		dev_err(&hdev->pdev->dev,
11996 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
11997 			vf, enable ? "on" : "off", ret);
11998 
11999 	return ret;
12000 }
12001 
12002 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
12003 				 bool enable)
12004 {
12005 	struct hclge_vport *vport = hclge_get_vport(handle);
12006 	struct hclge_dev *hdev = vport->back;
12007 	u32 new_spoofchk = enable ? 1 : 0;
12008 	int ret;
12009 
12010 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
12011 		return -EOPNOTSUPP;
12012 
12013 	vport = hclge_get_vf_vport(hdev, vf);
12014 	if (!vport)
12015 		return -EINVAL;
12016 
12017 	if (vport->vf_info.spoofchk == new_spoofchk)
12018 		return 0;
12019 
12020 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
12021 		dev_warn(&hdev->pdev->dev,
12022 			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
12023 			 vf);
12024 	else if (enable && hclge_is_umv_space_full(vport, true))
12025 		dev_warn(&hdev->pdev->dev,
12026 			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
12027 			 vf);
12028 
12029 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
12030 	if (ret)
12031 		return ret;
12032 
12033 	vport->vf_info.spoofchk = new_spoofchk;
12034 	return 0;
12035 }
12036 
12037 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
12038 {
12039 	struct hclge_vport *vport = hdev->vport;
12040 	int ret;
12041 	int i;
12042 
12043 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
12044 		return 0;
12045 
12046 	/* resume the vf spoof check state after reset */
12047 	for (i = 0; i < hdev->num_alloc_vport; i++) {
12048 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
12049 					       vport->vf_info.spoofchk);
12050 		if (ret)
12051 			return ret;
12052 
12053 		vport++;
12054 	}
12055 
12056 	return 0;
12057 }
12058 
12059 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
12060 {
12061 	struct hclge_vport *vport = hclge_get_vport(handle);
12062 	struct hclge_dev *hdev = vport->back;
12063 	u32 new_trusted = enable ? 1 : 0;
12064 
12065 	vport = hclge_get_vf_vport(hdev, vf);
12066 	if (!vport)
12067 		return -EINVAL;
12068 
12069 	if (vport->vf_info.trusted == new_trusted)
12070 		return 0;
12071 
12072 	vport->vf_info.trusted = new_trusted;
12073 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12074 	hclge_task_schedule(hdev, 0);
12075 
12076 	return 0;
12077 }
12078 
12079 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
12080 {
12081 	int ret;
12082 	int vf;
12083 
12084 	/* reset vf rate to default value */
12085 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
12086 		struct hclge_vport *vport = &hdev->vport[vf];
12087 
12088 		vport->vf_info.max_tx_rate = 0;
12089 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
12090 		if (ret)
12091 			dev_err(&hdev->pdev->dev,
12092 				"vf%d failed to reset to default, ret=%d\n",
12093 				vf - HCLGE_VF_VPORT_START_NUM, ret);
12094 	}
12095 }
12096 
12097 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
12098 				     int min_tx_rate, int max_tx_rate)
12099 {
12100 	if (min_tx_rate != 0 ||
12101 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
12102 		dev_err(&hdev->pdev->dev,
12103 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
12104 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
12105 		return -EINVAL;
12106 	}
12107 
12108 	return 0;
12109 }
12110 
12111 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
12112 			     int min_tx_rate, int max_tx_rate, bool force)
12113 {
12114 	struct hclge_vport *vport = hclge_get_vport(handle);
12115 	struct hclge_dev *hdev = vport->back;
12116 	int ret;
12117 
12118 	ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
12119 	if (ret)
12120 		return ret;
12121 
12122 	vport = hclge_get_vf_vport(hdev, vf);
12123 	if (!vport)
12124 		return -EINVAL;
12125 
12126 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
12127 		return 0;
12128 
12129 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
12130 	if (ret)
12131 		return ret;
12132 
12133 	vport->vf_info.max_tx_rate = max_tx_rate;
12134 
12135 	return 0;
12136 }
12137 
12138 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
12139 {
12140 	struct hnae3_handle *handle = &hdev->vport->nic;
12141 	struct hclge_vport *vport;
12142 	int ret;
12143 	int vf;
12144 
12145 	/* resume the vf max_tx_rate after reset */
12146 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
12147 		vport = hclge_get_vf_vport(hdev, vf);
12148 		if (!vport)
12149 			return -EINVAL;
12150 
12151 		/* zero means max rate; after reset, the firmware has already
12152 		 * set it to max rate, so just continue.
12153 		 */
12154 		if (!vport->vf_info.max_tx_rate)
12155 			continue;
12156 
12157 		ret = hclge_set_vf_rate(handle, vf, 0,
12158 					vport->vf_info.max_tx_rate, true);
12159 		if (ret) {
12160 			dev_err(&hdev->pdev->dev,
12161 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
12162 				vf, vport->vf_info.max_tx_rate, ret);
12163 			return ret;
12164 		}
12165 	}
12166 
12167 	return 0;
12168 }
12169 
12170 static void hclge_reset_vport_state(struct hclge_dev *hdev)
12171 {
12172 	struct hclge_vport *vport = hdev->vport;
12173 	int i;
12174 
12175 	for (i = 0; i < hdev->num_alloc_vport; i++) {
12176 		clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
12177 		vport++;
12178 	}
12179 }
12180 
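/* Re-initialize the PF after a reset. Unlike hclge_init_ae_dev(), the PCI
 * and interrupt resources are kept; the command queue, MAC, VLAN, TM, RSS,
 * flow director and PTP configuration are rebuilt, and the hw error
 * interrupts are re-enabled since a global reset disables them.
 */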
12181 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
12182 {
12183 	struct hclge_dev *hdev = ae_dev->priv;
12184 	struct pci_dev *pdev = ae_dev->pdev;
12185 	int ret;
12186 
12187 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
12188 
12189 	hclge_stats_clear(hdev);
12190 	/* NOTE: a pf reset does not need to clear or restore the pf and vf
12191 	 * table entries, so the tables in memory should not be cleaned here.
12192 	 */
12193 	if (hdev->reset_type == HNAE3_IMP_RESET ||
12194 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
12195 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
12196 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
12197 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
12198 		hclge_reset_umv_space(hdev);
12199 	}
12200 
12201 	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
12202 				  true, hdev->reset_pending);
12203 	if (ret) {
12204 		dev_err(&pdev->dev, "Cmd queue init failed\n");
12205 		return ret;
12206 	}
12207 
12208 	ret = hclge_map_tqp(hdev);
12209 	if (ret) {
12210 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
12211 		return ret;
12212 	}
12213 
12214 	ret = hclge_mac_init(hdev);
12215 	if (ret) {
12216 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
12217 		return ret;
12218 	}
12219 
12220 	ret = hclge_tp_port_init(hdev);
12221 	if (ret) {
12222 		dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
12223 			ret);
12224 		return ret;
12225 	}
12226 
12227 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
12228 	if (ret) {
12229 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
12230 		return ret;
12231 	}
12232 
12233 	ret = hclge_config_gro(hdev);
12234 	if (ret)
12235 		return ret;
12236 
12237 	ret = hclge_init_vlan_config(hdev);
12238 	if (ret) {
12239 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
12240 		return ret;
12241 	}
12242 
12243 	hclge_reset_tc_config(hdev);
12244 
12245 	ret = hclge_tm_init_hw(hdev, true);
12246 	if (ret) {
12247 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
12248 		return ret;
12249 	}
12250 
12251 	ret = hclge_rss_init_hw(hdev);
12252 	if (ret) {
12253 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
12254 		return ret;
12255 	}
12256 
12257 	ret = init_mgr_tbl(hdev);
12258 	if (ret) {
12259 		dev_err(&pdev->dev,
12260 			"failed to reinit manager table, ret = %d\n", ret);
12261 		return ret;
12262 	}
12263 
12264 	ret = hclge_init_fd_config(hdev);
12265 	if (ret) {
12266 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
12267 		return ret;
12268 	}
12269 
12270 	ret = hclge_ptp_init(hdev);
12271 	if (ret)
12272 		return ret;
12273 
12274 	/* Log and clear the hw errors that have already occurred */
12275 	if (hnae3_dev_ras_imp_supported(hdev))
12276 		hclge_handle_occurred_error(hdev);
12277 	else
12278 		hclge_handle_all_hns_hw_errors(ae_dev);
12279 
12280 	/* Re-enable the hw error interrupts because
12281 	 * the interrupts get disabled on global reset.
12282 	 */
12283 	ret = hclge_config_nic_hw_error(hdev, true);
12284 	if (ret) {
12285 		dev_err(&pdev->dev,
12286 			"fail(%d) to re-enable NIC hw error interrupts\n",
12287 			ret);
12288 		return ret;
12289 	}
12290 
12291 	if (hdev->roce_client) {
12292 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
12293 		if (ret) {
12294 			dev_err(&pdev->dev,
12295 				"fail(%d) to re-enable roce ras interrupts\n",
12296 				ret);
12297 			return ret;
12298 		}
12299 	}
12300 
12301 	hclge_reset_vport_state(hdev);
12302 	ret = hclge_reset_vport_spoofchk(hdev);
12303 	if (ret)
12304 		return ret;
12305 
12306 	ret = hclge_resume_vf_rate(hdev);
12307 	if (ret)
12308 		return ret;
12309 
12310 	hclge_init_rxd_adv_layout(hdev);
12311 
12312 	ret = hclge_update_wol(hdev);
12313 	if (ret)
12314 		dev_warn(&pdev->dev,
12315 			 "failed to update wol config, ret = %d\n", ret);
12316 
12317 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
12318 		 HCLGE_DRIVER_NAME);
12319 
12320 	return 0;
12321 }
12322 
12323 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
12324 {
12325 	struct hclge_dev *hdev = ae_dev->priv;
12326 	struct hclge_mac *mac = &hdev->hw.mac;
12327 
12328 	hclge_reset_vf_rate(hdev);
12329 	hclge_clear_vf_vlan(hdev);
12330 	hclge_state_uninit(hdev);
12331 	hclge_ptp_uninit(hdev);
12332 	hclge_uninit_rxd_adv_layout(hdev);
12333 	hclge_uninit_mac_table(hdev);
12334 	hclge_del_all_fd_entries(hdev);
12335 
12336 	if (mac->phydev)
12337 		mdiobus_unregister(mac->mdio_bus);
12338 
12339 	/* Disable the MISC vector (vector 0) */
12340 	hclge_enable_vector(&hdev->misc_vector, false);
12341 	disable_irq(hdev->misc_vector.vector_irq);
12342 
12343 	/* Disable all hw interrupts */
12344 	hclge_config_mac_tnl_int(hdev, false);
12345 	hclge_config_nic_hw_error(hdev, false);
12346 	hclge_config_rocee_ras_interrupt(hdev, false);
12347 
12348 	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
12349 	hclge_misc_irq_uninit(hdev);
12350 	hclge_devlink_uninit(hdev);
12351 	hclge_pci_uninit(hdev);
12352 	hclge_uninit_vport_vlan_table(hdev);
12353 	mutex_destroy(&hdev->vport_lock);
12354 	ae_dev->priv = NULL;
12355 }
12356 
12357 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
12358 {
12359 	struct hclge_vport *vport = hclge_get_vport(handle);
12360 	struct hclge_dev *hdev = vport->back;
12361 
12362 	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12363 }
12364 
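/* Report channel counts through the .get_channels callback below (assuming
 * the usual hns3 wiring to "ethtool -l <netdev>"): "combined" is the active
 * RSS queue count, capped by hclge_get_max_channels() above, and exactly one
 * "other" channel is always reported.
 */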
12365 static void hclge_get_channels(struct hnae3_handle *handle,
12366 			       struct ethtool_channels *ch)
12367 {
12368 	ch->max_combined = hclge_get_max_channels(handle);
12369 	ch->other_count = 1;
12370 	ch->max_other = 1;
12371 	ch->combined_count = handle->kinfo.rss_size;
12372 }
12373 
12374 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12375 					u16 *alloc_tqps, u16 *max_rss_size)
12376 {
12377 	struct hclge_vport *vport = hclge_get_vport(handle);
12378 	struct hclge_dev *hdev = vport->back;
12379 
12380 	*alloc_tqps = vport->alloc_tqps;
12381 	*max_rss_size = hdev->pf_rss_size_max;
12382 }
12383 
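/* Worked example of the TC size/offset computation below (illustrative
 * values, not from the original source): with rss_size = 24 and TCs 0-3
 * enabled, roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so each valid TC
 * gets tc_size = 5 (i.e. 2^5 = 32 queue slots) and tc_offset = 0, 24, 48, 72.
 */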
12384 static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle)
12385 {
12386 	struct hclge_vport *vport = hclge_get_vport(handle);
12387 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12388 	struct hclge_dev *hdev = vport->back;
12389 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12390 	u16 tc_valid[HCLGE_MAX_TC_NUM];
12391 	u16 roundup_size;
12392 	unsigned int i;
12393 
12394 	roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size);
12395 	roundup_size = ilog2(roundup_size);
12396 	/* Set the RSS TC mode according to the new RSS size */
12397 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12398 		tc_valid[i] = 0;
12399 
12400 		if (!(hdev->hw_tc_map & BIT(i)))
12401 			continue;
12402 
12403 		tc_valid[i] = 1;
12404 		tc_size[i] = roundup_size;
12405 		tc_offset[i] = vport->nic.kinfo.rss_size * i;
12406 	}
12407 
12408 	return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
12409 					  tc_size);
12410 }
12411 
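/* Handle a channel-count change via the .set_channels callback below
 * (presumably reached from "ethtool -L <netdev> combined N"): update the
 * requested RSS size, remap vport TQPs, refresh the RSS TC mode and, unless
 * the user has pinned an RSS indirection table (rxfh_configured), rebuild the
 * table by striping the new rss_size across all indirection entries.
 */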
12412 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12413 			      bool rxfh_configured)
12414 {
12415 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12416 	struct hclge_vport *vport = hclge_get_vport(handle);
12417 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12418 	struct hclge_dev *hdev = vport->back;
12419 	u16 cur_rss_size = kinfo->rss_size;
12420 	u16 cur_tqps = kinfo->num_tqps;
12421 	u32 *rss_indir;
12422 	unsigned int i;
12423 	int ret;
12424 
12425 	kinfo->req_rss_size = new_tqps_num;
12426 
12427 	ret = hclge_tm_vport_map_update(hdev);
12428 	if (ret) {
12429 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12430 		return ret;
12431 	}
12432 
12433 	ret = hclge_set_rss_tc_mode_cfg(handle);
12434 	if (ret)
12435 		return ret;
12436 
12437 	/* RSS indirection table has been configured by the user */
12438 	if (rxfh_configured)
12439 		goto out;
12440 
12441 	/* Reinitialize the RSS indirection table according to the new RSS size */
12442 	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12443 			    GFP_KERNEL);
12444 	if (!rss_indir)
12445 		return -ENOMEM;
12446 
12447 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12448 		rss_indir[i] = i % kinfo->rss_size;
12449 
12450 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12451 	if (ret)
12452 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12453 			ret);
12454 
12455 	kfree(rss_indir);
12456 
12457 out:
12458 	if (!ret)
12459 		dev_info(&hdev->pdev->dev,
12460 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
12461 			 cur_rss_size, kinfo->rss_size,
12462 			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12463 
12464 	return ret;
12465 }
12466 
12467 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12468 {
12469 	struct hclge_set_led_state_cmd *req;
12470 	struct hclge_desc desc;
12471 	int ret;
12472 
12473 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12474 
12475 	req = (struct hclge_set_led_state_cmd *)desc.data;
12476 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12477 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12478 
12479 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12480 	if (ret)
12481 		dev_err(&hdev->pdev->dev,
12482 			"Send set led state cmd error, ret =%d\n", ret);
12483 
12484 	return ret;
12485 }
12486 
12487 enum hclge_led_status {
12488 	HCLGE_LED_OFF,
12489 	HCLGE_LED_ON,
12490 	HCLGE_LED_NO_CHANGE = 0xFF,
12491 };
12492 
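/* Drive the locate LED through the .set_led_id callback below (assuming the
 * usual wiring to "ethtool --identify <netdev>"): ETHTOOL_ID_ACTIVE turns the
 * LED on, ETHTOOL_ID_INACTIVE turns it off, and any other phys_id state is
 * rejected.
 */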
12493 static int hclge_set_led_id(struct hnae3_handle *handle,
12494 			    enum ethtool_phys_id_state status)
12495 {
12496 	struct hclge_vport *vport = hclge_get_vport(handle);
12497 	struct hclge_dev *hdev = vport->back;
12498 
12499 	switch (status) {
12500 	case ETHTOOL_ID_ACTIVE:
12501 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
12502 	case ETHTOOL_ID_INACTIVE:
12503 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12504 	default:
12505 		return -EINVAL;
12506 	}
12507 }
12508 
12509 static void hclge_get_link_mode(struct hnae3_handle *handle,
12510 				unsigned long *supported,
12511 				unsigned long *advertising)
12512 {
12513 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12514 	struct hclge_vport *vport = hclge_get_vport(handle);
12515 	struct hclge_dev *hdev = vport->back;
12516 	unsigned int idx = 0;
12517 
12518 	for (; idx < size; idx++) {
12519 		supported[idx] = hdev->hw.mac.supported[idx];
12520 		advertising[idx] = hdev->hw.mac.advertising[idx];
12521 	}
12522 }
12523 
12524 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12525 {
12526 	struct hclge_vport *vport = hclge_get_vport(handle);
12527 	struct hclge_dev *hdev = vport->back;
12528 	bool gro_en_old = hdev->gro_en;
12529 	int ret;
12530 
12531 	hdev->gro_en = enable;
12532 	ret = hclge_config_gro(hdev);
12533 	if (ret)
12534 		hdev->gro_en = gro_en_old;
12535 
12536 	return ret;
12537 }
12538 
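/* Synchronize one vport's promiscuous configuration into hardware. For the
 * PF (vport_id 0) the requested mode is the netdev flags OR'ed with any
 * overflow promiscuous flags; for a VF the unicast/multicast requests are
 * honoured only when the VF is trusted, while broadcast follows the VF's
 * request directly. On failure the PROMISC_CHANGE bit is set again so the
 * next sync retries.
 */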
12539 static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport)
12540 {
12541 	struct hnae3_handle *handle = &vport->nic;
12542 	struct hclge_dev *hdev = vport->back;
12543 	bool uc_en = false;
12544 	bool mc_en = false;
12545 	u8 tmp_flags;
12546 	bool bc_en;
12547 	int ret;
12548 
12549 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12550 		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12551 		vport->last_promisc_flags = vport->overflow_promisc_flags;
12552 	}
12553 
12554 	if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12555 				&vport->state))
12556 		return 0;
12557 
12558 	/* for PF */
12559 	if (!vport->vport_id) {
12560 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12561 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12562 					     tmp_flags & HNAE3_MPE);
12563 		if (!ret)
12564 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12565 				&vport->state);
12566 		else
12567 			set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12568 				&vport->state);
12569 		return ret;
12570 	}
12571 
12572 	/* for VF */
12573 	if (vport->vf_info.trusted) {
12574 		uc_en = vport->vf_info.request_uc_en > 0 ||
12575 			vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE;
12576 		mc_en = vport->vf_info.request_mc_en > 0 ||
12577 			vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE;
12578 	}
12579 	bc_en = vport->vf_info.request_bc_en > 0;
12580 
12581 	ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12582 					 mc_en, bc_en);
12583 	if (ret) {
12584 		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12585 		return ret;
12586 	}
12587 	hclge_set_vport_vlan_fltr_change(vport);
12588 
12589 	return 0;
12590 }
12591 
12592 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12593 {
12594 	struct hclge_vport *vport;
12595 	int ret;
12596 	u16 i;
12597 
12598 	for (i = 0; i < hdev->num_alloc_vport; i++) {
12599 		vport = &hdev->vport[i];
12600 
12601 		ret = hclge_sync_vport_promisc_mode(vport);
12602 		if (ret)
12603 			return;
12604 	}
12605 }
12606 
12607 static bool hclge_module_existed(struct hclge_dev *hdev)
12608 {
12609 	struct hclge_desc desc;
12610 	u32 existed;
12611 	int ret;
12612 
12613 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12614 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12615 	if (ret) {
12616 		dev_err(&hdev->pdev->dev,
12617 			"failed to get SFP exist state, ret = %d\n", ret);
12618 		return false;
12619 	}
12620 
12621 	existed = le32_to_cpu(desc.data[0]);
12622 
12623 	return existed != 0;
12624 }
12625 
12626 /* One read needs 6 BDs (140 bytes in total).
12627  * Returns the number of bytes actually read; 0 means the read failed.
12628  */
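/* Illustrative BD layout (macro values assumed from the driver headers, not
 * shown here): BD0 carries the request fields (offset, read_len) plus the
 * first HCLGE_SFP_INFO_BD0_LEN bytes of EEPROM data, and each of BD1~BD5
 * carries up to HCLGE_SFP_INFO_BDX_LEN further bytes, which together
 * presumably make up the HCLGE_SFP_INFO_MAX_LEN (140) bytes one command can
 * return.
 */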
12629 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12630 				     u32 len, u8 *data)
12631 {
12632 	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12633 	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12634 	u16 read_len;
12635 	u16 copy_len;
12636 	int ret;
12637 	int i;
12638 
12639 	/* setup all 6 bds to read module eeprom info. */
12640 	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12641 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12642 					   true);
12643 
12644 		/* bd0~bd4 need next flag */
12645 		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12646 			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
12647 	}
12648 
12649 	/* setup bd0, this bd contains offset and read length. */
12650 	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12651 	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12652 	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12653 	sfp_info_bd0->read_len = cpu_to_le16(read_len);
12654 
12655 	ret = hclge_cmd_send(&hdev->hw, desc, i);
12656 	if (ret) {
12657 		dev_err(&hdev->pdev->dev,
12658 			"failed to get SFP eeprom info, ret = %d\n", ret);
12659 		return 0;
12660 	}
12661 
12662 	/* copy sfp info from bd0 to out buffer. */
12663 	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12664 	memcpy(data, sfp_info_bd0->data, copy_len);
12665 	read_len = copy_len;
12666 
12667 	/* copy sfp info from bd1~bd5 to out buffer if needed. */
12668 	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12669 		if (read_len >= len)
12670 			return read_len;
12671 
12672 		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12673 		memcpy(data + read_len, desc[i].data, copy_len);
12674 		read_len += copy_len;
12675 	}
12676 
12677 	return read_len;
12678 }
12679 
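/* Example of the chunked read below (illustrative numbers): a 256-byte
 * module-EEPROM dump is satisfied by two firmware commands, the first
 * returning up to 140 bytes (the 6-BD limit noted above) and the second the
 * remainder, with read_len advancing the offset between calls. Only fiber
 * media is supported, and a missing module fails early with -ENXIO.
 */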
12680 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12681 				   u32 len, u8 *data)
12682 {
12683 	struct hclge_vport *vport = hclge_get_vport(handle);
12684 	struct hclge_dev *hdev = vport->back;
12685 	u32 read_len = 0;
12686 	u16 data_len;
12687 
12688 	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12689 		return -EOPNOTSUPP;
12690 
12691 	if (!hclge_module_existed(hdev))
12692 		return -ENXIO;
12693 
12694 	while (read_len < len) {
12695 		data_len = hclge_get_sfp_eeprom_info(hdev,
12696 						     offset + read_len,
12697 						     len - read_len,
12698 						     data + read_len);
12699 		if (!data_len)
12700 			return -EIO;
12701 
12702 		read_len += data_len;
12703 	}
12704 
12705 	return 0;
12706 }
12707 
12708 static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
12709 					 u32 *status_code)
12710 {
12711 	struct hclge_vport *vport = hclge_get_vport(handle);
12712 	struct hclge_dev *hdev = vport->back;
12713 	struct hclge_desc desc;
12714 	int ret;
12715 
12716 	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
12717 		return -EOPNOTSUPP;
12718 
12719 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
12720 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12721 	if (ret) {
12722 		dev_err(&hdev->pdev->dev,
12723 			"failed to query link diagnosis info, ret = %d\n", ret);
12724 		return ret;
12725 	}
12726 
12727 	*status_code = le32_to_cpu(desc.data[0]);
12728 	return 0;
12729 }
12730 
12731 /* After SR-IOV is disabled, the VF still has some configuration and info,
12732  * set up by the PF, that needs to be cleaned.
12733  */
12734 static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid)
12735 {
12736 	struct hclge_dev *hdev = vport->back;
12737 	struct hclge_vlan_info vlan_info;
12738 	int ret;
12739 
12740 	clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
12741 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
12742 	vport->need_notify = 0;
12743 	vport->mps = 0;
12744 
12745 	/* after SR-IOV is disabled, clean the VF rate configured by the PF */
12746 	ret = hclge_tm_qs_shaper_cfg(vport, 0);
12747 	if (ret)
12748 		dev_err(&hdev->pdev->dev,
12749 			"failed to clean vf%d rate config, ret = %d\n",
12750 			vfid, ret);
12751 
12752 	vlan_info.vlan_tag = 0;
12753 	vlan_info.qos = 0;
12754 	vlan_info.vlan_proto = ETH_P_8021Q;
12755 	ret = hclge_update_port_base_vlan_cfg(vport,
12756 					      HNAE3_PORT_BASE_VLAN_DISABLE,
12757 					      &vlan_info);
12758 	if (ret)
12759 		dev_err(&hdev->pdev->dev,
12760 			"failed to clean vf%d port base vlan, ret = %d\n",
12761 			vfid, ret);
12762 
12763 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false);
12764 	if (ret)
12765 		dev_err(&hdev->pdev->dev,
12766 			"failed to clean vf%d spoof config, ret = %d\n",
12767 			vfid, ret);
12768 
12769 	memset(&vport->vf_info, 0, sizeof(vport->vf_info));
12770 }
12771 
12772 static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
12773 {
12774 	struct hclge_dev *hdev = ae_dev->priv;
12775 	struct hclge_vport *vport;
12776 	int i;
12777 
12778 	for (i = 0; i < num_vfs; i++) {
12779 		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
12780 
12781 		hclge_clear_vport_vf_info(vport, i);
12782 	}
12783 }
12784 
12785 static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode,
12786 			       u8 *priority)
12787 {
12788 	struct hclge_vport *vport = hclge_get_vport(h);
12789 
12790 	if (dscp >= HNAE3_MAX_DSCP)
12791 		return -EINVAL;
12792 
12793 	if (tc_mode)
12794 		*tc_mode = vport->nic.kinfo.tc_map_mode;
12795 	if (priority)
12796 		*priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 :
12797 			    vport->nic.kinfo.dscp_prio[dscp];
12798 
12799 	return 0;
12800 }
12801 
12802 static const struct hnae3_ae_ops hclge_ops = {
12803 	.init_ae_dev = hclge_init_ae_dev,
12804 	.uninit_ae_dev = hclge_uninit_ae_dev,
12805 	.reset_prepare = hclge_reset_prepare_general,
12806 	.reset_done = hclge_reset_done,
12807 	.init_client_instance = hclge_init_client_instance,
12808 	.uninit_client_instance = hclge_uninit_client_instance,
12809 	.map_ring_to_vector = hclge_map_ring_to_vector,
12810 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12811 	.get_vector = hclge_get_vector,
12812 	.put_vector = hclge_put_vector,
12813 	.set_promisc_mode = hclge_set_promisc_mode,
12814 	.request_update_promisc_mode = hclge_request_update_promisc_mode,
12815 	.set_loopback = hclge_set_loopback,
12816 	.start = hclge_ae_start,
12817 	.stop = hclge_ae_stop,
12818 	.client_start = hclge_client_start,
12819 	.client_stop = hclge_client_stop,
12820 	.get_status = hclge_get_status,
12821 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
12822 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12823 	.get_media_type = hclge_get_media_type,
12824 	.check_port_speed = hclge_check_port_speed,
12825 	.get_fec_stats = hclge_get_fec_stats,
12826 	.get_fec = hclge_get_fec,
12827 	.set_fec = hclge_set_fec,
12828 	.get_rss_key_size = hclge_comm_get_rss_key_size,
12829 	.get_rss = hclge_get_rss,
12830 	.set_rss = hclge_set_rss,
12831 	.set_rss_tuple = hclge_set_rss_tuple,
12832 	.get_rss_tuple = hclge_get_rss_tuple,
12833 	.get_tc_size = hclge_get_tc_size,
12834 	.get_mac_addr = hclge_get_mac_addr,
12835 	.set_mac_addr = hclge_set_mac_addr,
12836 	.do_ioctl = hclge_do_ioctl,
12837 	.add_uc_addr = hclge_add_uc_addr,
12838 	.rm_uc_addr = hclge_rm_uc_addr,
12839 	.add_mc_addr = hclge_add_mc_addr,
12840 	.rm_mc_addr = hclge_rm_mc_addr,
12841 	.set_autoneg = hclge_set_autoneg,
12842 	.get_autoneg = hclge_get_autoneg,
12843 	.restart_autoneg = hclge_restart_autoneg,
12844 	.halt_autoneg = hclge_halt_autoneg,
12845 	.get_pauseparam = hclge_get_pauseparam,
12846 	.set_pauseparam = hclge_set_pauseparam,
12847 	.set_mtu = hclge_set_mtu,
12848 	.reset_queue = hclge_reset_tqp,
12849 	.get_stats = hclge_get_stats,
12850 	.get_mac_stats = hclge_get_mac_stat,
12851 	.update_stats = hclge_update_stats,
12852 	.get_strings = hclge_get_strings,
12853 	.get_sset_count = hclge_get_sset_count,
12854 	.get_fw_version = hclge_get_fw_version,
12855 	.get_mdix_mode = hclge_get_mdix_mode,
12856 	.enable_vlan_filter = hclge_enable_vlan_filter,
12857 	.set_vlan_filter = hclge_set_vlan_filter,
12858 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12859 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12860 	.reset_event = hclge_reset_event,
12861 	.get_reset_level = hclge_get_reset_level,
12862 	.set_default_reset_request = hclge_set_def_reset_request,
12863 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12864 	.set_channels = hclge_set_channels,
12865 	.get_channels = hclge_get_channels,
12866 	.get_regs_len = hclge_get_regs_len,
12867 	.get_regs = hclge_get_regs,
12868 	.set_led_id = hclge_set_led_id,
12869 	.get_link_mode = hclge_get_link_mode,
12870 	.add_fd_entry = hclge_add_fd_entry,
12871 	.del_fd_entry = hclge_del_fd_entry,
12872 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12873 	.get_fd_rule_info = hclge_get_fd_rule_info,
12874 	.get_fd_all_rules = hclge_get_all_rules,
12875 	.enable_fd = hclge_enable_fd,
12876 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
12877 	.dbg_read_cmd = hclge_dbg_read_cmd,
12878 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
12879 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
12880 	.ae_dev_resetting = hclge_ae_dev_resetting,
12881 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12882 	.set_gro_en = hclge_gro_en,
12883 	.get_global_queue_id = hclge_covert_handle_qid_global,
12884 	.set_timer_task = hclge_set_timer_task,
12885 	.mac_connect_phy = hclge_mac_connect_phy,
12886 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
12887 	.get_vf_config = hclge_get_vf_config,
12888 	.set_vf_link_state = hclge_set_vf_link_state,
12889 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
12890 	.set_vf_trust = hclge_set_vf_trust,
12891 	.set_vf_rate = hclge_set_vf_rate,
12892 	.set_vf_mac = hclge_set_vf_mac,
12893 	.get_module_eeprom = hclge_get_module_eeprom,
12894 	.get_cmdq_stat = hclge_get_cmdq_stat,
12895 	.add_cls_flower = hclge_add_cls_flower,
12896 	.del_cls_flower = hclge_del_cls_flower,
12897 	.cls_flower_active = hclge_is_cls_flower_active,
12898 	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
12899 	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
12900 	.set_tx_hwts_info = hclge_ptp_set_tx_info,
12901 	.get_rx_hwts = hclge_ptp_get_rx_hwts,
12902 	.get_ts_info = hclge_ptp_get_ts_info,
12903 	.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
12904 	.clean_vf_config = hclge_clean_vport_config,
12905 	.get_dscp_prio = hclge_get_dscp_prio,
12906 	.get_wol = hclge_get_wol,
12907 	.set_wol = hclge_set_wol,
12908 };
12909 
12910 static struct hnae3_ae_algo ae_algo = {
12911 	.ops = &hclge_ops,
12912 	.pdev_id_table = ae_algo_pci_tbl,
12913 };
12914 
12915 static int __init hclge_init(void)
12916 {
12917 	pr_info("%s is initializing\n", HCLGE_NAME);
12918 
12919 	hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
12920 	if (!hclge_wq) {
12921 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
12922 		return -ENOMEM;
12923 	}
12924 
12925 	hnae3_register_ae_algo(&ae_algo);
12926 
12927 	return 0;
12928 }
12929 
12930 static void __exit hclge_exit(void)
12931 {
12932 	hnae3_unregister_ae_algo_prepare(&ae_algo);
12933 	hnae3_unregister_ae_algo(&ae_algo);
12934 	destroy_workqueue(hclge_wq);
12935 }
12936 module_init(hclge_init);
12937 module_exit(hclge_exit);
12938 
12939 MODULE_LICENSE("GPL");
12940 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12941 MODULE_DESCRIPTION("HCLGE Driver");
12942 MODULE_VERSION(HCLGE_MOD_VERSION);
12943