1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16
17 #include <net/rtnetlink.h>
18
19 #include "hclge_cmd.h"
20 #include "hclge_dcb.h"
21 #include "hclge_main.h"
22 #include "hclge_mbx.h"
23 #include "hclge_mdio.h"
24 #include "hclge_regs.h"
25 #include "hclge_tm.h"
26 #include "hclge_err.h"
27 #include "hnae3.h"
28 #include "hclge_devlink.h"
29 #include "hclge_comm_cmd.h"
30
31 #include "hclge_trace.h"
32
33 #define HCLGE_NAME "hclge"
34
35 #define HCLGE_BUF_SIZE_UNIT 256U
36 #define HCLGE_BUF_MUL_BY 2
37 #define HCLGE_BUF_DIV_BY 2
38 #define NEED_RESERVE_TC_NUM 2
39 #define BUF_MAX_PERCENT 100
40 #define BUF_RESERVE_PERCENT 90
41
42 #define HCLGE_RESET_MAX_FAIL_CNT 5
43 #define HCLGE_RESET_SYNC_TIME 100
44 #define HCLGE_PF_RESET_SYNC_TIME 20
45 #define HCLGE_PF_RESET_SYNC_CNT 1500
46
47 #define HCLGE_LINK_STATUS_MS 10
48
49 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
50 static int hclge_init_vlan_config(struct hclge_dev *hdev);
51 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
52 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
53 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
54 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
55 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
56 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
57 unsigned long *addr);
58 static int hclge_set_default_loopback(struct hclge_dev *hdev);
59
60 static void hclge_sync_mac_table(struct hclge_dev *hdev);
61 static void hclge_restore_hw_table(struct hclge_dev *hdev);
62 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
63 static void hclge_sync_fd_table(struct hclge_dev *hdev);
64 static void hclge_update_fec_stats(struct hclge_dev *hdev);
65 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
66 int wait_cnt);
67 static int hclge_update_port_info(struct hclge_dev *hdev);
68
69 static struct hnae3_ae_algo ae_algo;
70
71 static struct workqueue_struct *hclge_wq;
72
73 static const struct pci_device_id ae_algo_pci_tbl[] = {
74 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
75 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
76 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
77 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
78 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
79 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
80 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
81 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
82 /* required last entry */
83 {0, }
84 };
85
86 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
87
88 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
89 "External Loopback test",
90 "App Loopback test",
91 "Serdes serial Loopback test",
92 "Serdes parallel Loopback test",
93 "Phy Loopback test"
94 };
95
96 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
97 {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
98 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
99 {"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
100 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
101 {"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
102 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)},
103 {"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
104 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)},
105 {"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
106 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
107 {"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
108 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
109 {"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
110 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
111 {"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
112 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
113 {"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
114 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
115 {"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
116 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
117 {"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
118 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
119 {"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
120 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
121 {"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
122 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
123 {"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
124 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
125 {"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
126 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
127 {"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
128 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)},
129 {"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
130 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)},
131 {"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
132 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)},
133 {"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
134 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)},
135 {"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
136 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)},
137 {"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
138 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)},
139 {"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
140 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)},
141 {"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
142 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)},
143 {"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
144 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
145 {"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
146 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
147 {"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
148 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
149 {"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
150 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
151 {"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
152 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
153 {"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
154 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
155 {"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
156 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
157 {"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
158 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
159 {"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
160 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
161 {"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
162 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)},
163 {"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
164 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)},
165 {"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
166 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)},
167 {"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
168 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)},
169 {"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
170 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)},
171 {"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
172 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)},
173 {"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
174 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)},
175 {"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
176 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)},
177 {"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
178 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
179 {"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
180 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
181 {"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
182 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
183 {"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
184 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
185 {"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
186 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
187 {"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
188 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
189 {"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
190 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
191 {"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
192 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
193 {"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
194 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
195 {"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
196 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
197 {"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
198 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
199 {"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
200 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
201 {"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
202 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
203 {"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
204 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
205 {"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
206 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
207 {"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
208 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
209 {"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
210 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
211 {"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
212 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
213 {"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
214 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
215 {"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
216 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
217 {"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
218 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
219 {"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
220 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
221 {"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
222 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
223 {"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
224 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
225 {"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
226 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
227 {"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
228 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
229 {"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
230 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
231 {"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
232 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
233 {"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
234 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
235 {"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
236 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
237 {"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
238 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
239 {"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
240 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
241 {"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
242 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
243 {"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
244 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
245 {"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
246 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
247 {"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
248 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
249 {"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
250 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
251 {"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
252 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
253 {"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
254 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
255 {"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
256 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
257 {"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
258 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
259 {"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
260 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
261 {"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
262 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
263 {"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
264 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
265 {"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
266 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
267 {"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
268 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
269 {"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
270 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
271 {"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
272 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
273 {"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
274 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
275 {"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
276 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
277
278 {"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
279 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
280 {"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
281 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
282 {"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
283 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
284 {"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
285 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
286 {"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
287 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
288 {"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
289 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
290 {"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
291 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
292 {"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
293 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
294 {"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
295 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
296 {"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
297 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
298 {"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
299 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
300 {"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
301 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
302 };
303
304 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
305 {
306 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
307 .ethter_type = cpu_to_le16(ETH_P_LLDP),
308 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
309 .i_port_bitmap = 0x1,
310 },
311 };
312
313 static const struct key_info meta_data_key_info[] = {
314 { PACKET_TYPE_ID, 6 },
315 { IP_FRAGEMENT, 1 },
316 { ROCE_TYPE, 1 },
317 { NEXT_KEY, 5 },
318 { VLAN_NUMBER, 2 },
319 { SRC_VPORT, 12 },
320 { DST_VPORT, 12 },
321 { TUNNEL_PACKET, 1 },
322 };
323
324 static const struct key_info tuple_key_info[] = {
325 { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
326 { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
327 { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
328 { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
329 { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
330 { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
331 { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
332 { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
333 { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
334 { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
335 { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
336 { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
337 { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
338 { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
339 { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
340 { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
341 { INNER_DST_MAC, 48, KEY_OPT_MAC,
342 offsetof(struct hclge_fd_rule, tuples.dst_mac),
343 offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
344 { INNER_SRC_MAC, 48, KEY_OPT_MAC,
345 offsetof(struct hclge_fd_rule, tuples.src_mac),
346 offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
347 { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
348 offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
349 offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
350 { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
351 { INNER_ETH_TYPE, 16, KEY_OPT_LE16,
352 offsetof(struct hclge_fd_rule, tuples.ether_proto),
353 offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
354 { INNER_L2_RSV, 16, KEY_OPT_LE16,
355 offsetof(struct hclge_fd_rule, tuples.l2_user_def),
356 offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
357 { INNER_IP_TOS, 8, KEY_OPT_U8,
358 offsetof(struct hclge_fd_rule, tuples.ip_tos),
359 offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
360 { INNER_IP_PROTO, 8, KEY_OPT_U8,
361 offsetof(struct hclge_fd_rule, tuples.ip_proto),
362 offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
363 { INNER_SRC_IP, 32, KEY_OPT_IP,
364 offsetof(struct hclge_fd_rule, tuples.src_ip),
365 offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
366 { INNER_DST_IP, 32, KEY_OPT_IP,
367 offsetof(struct hclge_fd_rule, tuples.dst_ip),
368 offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
369 { INNER_L3_RSV, 16, KEY_OPT_LE16,
370 offsetof(struct hclge_fd_rule, tuples.l3_user_def),
371 offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
372 { INNER_SRC_PORT, 16, KEY_OPT_LE16,
373 offsetof(struct hclge_fd_rule, tuples.src_port),
374 offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
375 { INNER_DST_PORT, 16, KEY_OPT_LE16,
376 offsetof(struct hclge_fd_rule, tuples.dst_port),
377 offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
378 { INNER_L4_RSV, 32, KEY_OPT_LE32,
379 offsetof(struct hclge_fd_rule, tuples.l4_user_def),
380 offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
381 };
382
383 /**
384 * hclge_cmd_send - send command to command queue
385 * @hw: pointer to the hw struct
386 * @desc: prefilled descriptor for describing the command
387  * @num: the number of descriptors to be sent
388  *
389  * This is the main send routine for the command queue: it posts the
390  * descriptors to the queue and cleans the queue afterwards.
391 **/
392 int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
393 {
394 return hclge_comm_cmd_send(&hw->hw, desc, num);
395 }
396
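/* Trace the descriptors of a command being sent to firmware. The first
 * descriptor is always traced as a structured descriptor; for special
 * commands the remaining descriptors are traced as raw __le32 words.
 */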
397 static void hclge_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
398 int num, bool is_special)
399 {
400 int i;
401
402 trace_hclge_pf_cmd_send(hw, desc, 0, num);
403
404 if (!is_special) {
405 for (i = 1; i < num; i++)
406 trace_hclge_pf_cmd_send(hw, &desc[i], i, num);
407 } else {
408 for (i = 1; i < num; i++)
409 trace_hclge_pf_special_cmd_send(hw, (__le32 *)&desc[i],
410 i, num);
411 }
412 }
413
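/* Trace the descriptors returned by firmware for a synchronous command;
 * this is a no-op for commands sent without the sync flag.
 */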
414 static void hclge_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc,
415 int num, bool is_special)
416 {
417 int i;
418
419 if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
420 return;
421
422 trace_hclge_pf_cmd_get(hw, desc, 0, num);
423
424 if (!is_special) {
425 for (i = 1; i < num; i++)
426 trace_hclge_pf_cmd_get(hw, &desc[i], i, num);
427 } else {
428 for (i = 1; i < num; i++)
429 trace_hclge_pf_special_cmd_get(hw, (__le32 *)&desc[i],
430 i, num);
431 }
432 }
433
434 static const struct hclge_comm_cmq_ops hclge_cmq_ops = {
435 .trace_cmd_send = hclge_trace_cmd_send,
436 .trace_cmd_get = hclge_trace_cmd_get,
437 };
438
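/* Read the MAC statistics with the fixed-size HCLGE_OPC_STATS_MAC command
 * (HCLGE_MAC_CMD_NUM descriptors) and accumulate them into hdev->mac_stats.
 * Used when the firmware does not report the number of MAC stats registers.
 */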
439 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
440 {
441 #define HCLGE_MAC_CMD_NUM 21
442
443 u64 *data = (u64 *)(&hdev->mac_stats);
444 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
445 __le64 *desc_data;
446 u32 data_size;
447 int ret;
448 u32 i;
449
450 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
451 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
452 if (ret) {
453 dev_err(&hdev->pdev->dev,
454 "Get MAC pkt stats fail, status = %d.\n", ret);
455
456 return ret;
457 }
458
459 /* The first desc has a 64-bit header, so the data size needs to be reduced by 1 */
460 data_size = sizeof(desc) / (sizeof(u64)) - 1;
461
462 desc_data = (__le64 *)(&desc[0].data[0]);
463 for (i = 0; i < data_size; i++) {
464 /* data memory is continuous because only the first desc has a
465 * header in this command
466 */
467 *data += le64_to_cpu(*desc_data);
468 data++;
469 desc_data++;
470 }
471
472 return 0;
473 }
474
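/* Read all MAC statistics registers reported by the firmware via the
 * HCLGE_OPC_STATS_MAC_ALL command and accumulate them into hdev->mac_stats.
 * The number of descriptors is derived from dev_specs.mac_stats_num.
 */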
475 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
476 {
477 #define HCLGE_REG_NUM_PER_DESC 4
478
479 u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
480 u64 *data = (u64 *)(&hdev->mac_stats);
481 struct hclge_desc *desc;
482 __le64 *desc_data;
483 u32 data_size;
484 u32 desc_num;
485 int ret;
486 u32 i;
487
488 /* The first desc has a 64-bit header, so it must be taken into account */
489 desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;
490
491 /* This may be called inside atomic sections,
492 * so GFP_ATOMIC is more suitable here
493 */
494 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
495 if (!desc)
496 return -ENOMEM;
497
498 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
499 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
500 if (ret) {
501 kfree(desc);
502 return ret;
503 }
504
505 data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);
506
507 desc_data = (__le64 *)(&desc[0].data[0]);
508 for (i = 0; i < data_size; i++) {
509 /* data memory is continuous because only the first desc has a
510 * header in this command
511 */
512 *data += le64_to_cpu(*desc_data);
513 data++;
514 desc_data++;
515 }
516
517 kfree(desc);
518
519 return 0;
520 }
521
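/* Query the number of MAC statistics registers from the firmware. V2
 * devices use a fixed value for compatibility with old firmware.
 */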
522 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
523 {
524 struct hclge_desc desc;
525 int ret;
526
527 /* The driver needs the total number of registers, including both
528 * valid and reserved ones, but old firmware on V2 devices only
529 * returns the number of valid registers. To stay compatible with
530 * such devices, the driver uses a fixed value.
531 */
532 if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
533 *reg_num = HCLGE_MAC_STATS_MAX_NUM_V1;
534 return 0;
535 }
536
537 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
538 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
539 if (ret) {
540 dev_err(&hdev->pdev->dev,
541 "failed to query mac statistic reg number, ret = %d\n",
542 ret);
543 return ret;
544 }
545
546 *reg_num = le32_to_cpu(desc.data[0]);
547 if (*reg_num == 0) {
548 dev_err(&hdev->pdev->dev,
549 "mac statistic reg number is invalid!\n");
550 return -ENODATA;
551 }
552
553 return 0;
554 }
555
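/* Update the MAC statistics, using the full register read when the firmware
 * reports the register count and the fixed-size read otherwise.
 */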
556 int hclge_mac_update_stats(struct hclge_dev *hdev)
557 {
558 /* Use the new statistics acquisition method if the firmware supports it */
559 if (hdev->ae_dev->dev_specs.mac_stats_num)
560 return hclge_mac_update_stats_complete(hdev);
561 else
562 return hclge_mac_update_stats_defective(hdev);
563 }
564
565 static int hclge_comm_get_count(struct hclge_dev *hdev,
566 const struct hclge_comm_stats_str strs[],
567 u32 size)
568 {
569 int count = 0;
570 u32 i;
571
572 for (i = 0; i < size; i++)
573 if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
574 count++;
575
576 return count;
577 }
578
579 static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
580 const struct hclge_comm_stats_str strs[],
581 int size, u64 *data)
582 {
583 u64 *buf = data;
584 u32 i;
585
586 for (i = 0; i < size; i++) {
587 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
588 continue;
589
590 *buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
591 buf++;
592 }
593
594 return buf;
595 }
596
597 static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
598 const struct hclge_comm_stats_str strs[],
599 int size, u8 *data)
600 {
601 char *buff = (char *)data;
602 u32 i;
603
604 if (stringset != ETH_SS_STATS)
605 return buff;
606
607 for (i = 0; i < size; i++) {
608 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
609 continue;
610
611 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
612 buff = buff + ETH_GSTRING_LEN;
613 }
614
615 return (u8 *)buff;
616 }
617
618 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
619 {
620 struct hnae3_handle *handle;
621 int status;
622
623 handle = &hdev->vport[0].nic;
624 if (handle->client) {
625 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
626 if (status) {
627 dev_err(&hdev->pdev->dev,
628 "Update TQPS stats fail, status = %d.\n",
629 status);
630 }
631 }
632
633 hclge_update_fec_stats(hdev);
634
635 status = hclge_mac_update_stats(hdev);
636 if (status)
637 dev_err(&hdev->pdev->dev,
638 "Update MAC stats fail, status = %d.\n", status);
639 }
640
641 static void hclge_update_stats(struct hnae3_handle *handle)
642 {
643 struct hclge_vport *vport = hclge_get_vport(handle);
644 struct hclge_dev *hdev = vport->back;
645 int status;
646
647 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
648 return;
649
650 status = hclge_mac_update_stats(hdev);
651 if (status)
652 dev_err(&hdev->pdev->dev,
653 "Update MAC stats fail, status = %d.\n",
654 status);
655
656 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
657 if (status)
658 dev_err(&hdev->pdev->dev,
659 "Update TQPS stats fail, status = %d.\n",
660 status);
661
662 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
663 }
664
665 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
666 {
667 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
668 HNAE3_SUPPORT_PHY_LOOPBACK | \
669 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
670 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \
671 HNAE3_SUPPORT_EXTERNAL_LOOPBACK)
672
673 struct hclge_vport *vport = hclge_get_vport(handle);
674 struct hclge_dev *hdev = vport->back;
675 int count = 0;
676
677 /* Loopback test support rules:
678 * mac: only supported in GE mode
679 * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
680 * phy: only supported when a phy device exists on the board
681 */
682 if (stringset == ETH_SS_TEST) {
683 /* clear the loopback bit flags first */
684 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
685 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
686 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
687 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
688 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
689 count += 1;
690 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
691 }
692
693 if (hdev->ae_dev->dev_specs.hilink_version !=
694 HCLGE_HILINK_H60) {
695 count += 1;
696 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
697 }
698
699 count += 1;
700 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
701 count += 1;
702 handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK;
703
704 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
705 hdev->hw.mac.phydev->drv->set_loopback) ||
706 hnae3_dev_phy_imp_supported(hdev)) {
707 count += 1;
708 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
709 }
710 } else if (stringset == ETH_SS_STATS) {
711 count = hclge_comm_get_count(hdev, g_mac_stats_string,
712 ARRAY_SIZE(g_mac_stats_string)) +
713 hclge_comm_tqps_get_sset_count(handle);
714 }
715
716 return count;
717 }
718
719 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
720 u8 *data)
721 {
722 struct hclge_vport *vport = hclge_get_vport(handle);
723 struct hclge_dev *hdev = vport->back;
724 u8 *p = (char *)data;
725 int size;
726
727 if (stringset == ETH_SS_STATS) {
728 size = ARRAY_SIZE(g_mac_stats_string);
729 p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
730 size, p);
731 p = hclge_comm_tqps_get_strings(handle, p);
732 } else if (stringset == ETH_SS_TEST) {
733 if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) {
734 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL],
735 ETH_GSTRING_LEN);
736 p += ETH_GSTRING_LEN;
737 }
738 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
739 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
740 ETH_GSTRING_LEN);
741 p += ETH_GSTRING_LEN;
742 }
743 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
744 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
745 ETH_GSTRING_LEN);
746 p += ETH_GSTRING_LEN;
747 }
748 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
749 memcpy(p,
750 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
751 ETH_GSTRING_LEN);
752 p += ETH_GSTRING_LEN;
753 }
754 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
755 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
756 ETH_GSTRING_LEN);
757 p += ETH_GSTRING_LEN;
758 }
759 }
760 }
761
762 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
763 {
764 struct hclge_vport *vport = hclge_get_vport(handle);
765 struct hclge_dev *hdev = vport->back;
766 u64 *p;
767
768 p = hclge_comm_get_stats(hdev, g_mac_stats_string,
769 ARRAY_SIZE(g_mac_stats_string), data);
770 p = hclge_comm_tqps_get_stats(handle, p);
771 }
772
773 static void hclge_get_mac_stat(struct hnae3_handle *handle,
774 struct hns3_mac_stats *mac_stats)
775 {
776 struct hclge_vport *vport = hclge_get_vport(handle);
777 struct hclge_dev *hdev = vport->back;
778
779 hclge_update_stats(handle);
780
781 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
782 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
783 }
784
785 static int hclge_parse_func_status(struct hclge_dev *hdev,
786 struct hclge_func_status_cmd *status)
787 {
788 #define HCLGE_MAC_ID_MASK 0xF
789
790 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
791 return -EINVAL;
792
793 /* Record whether this pf is the main pf */
794 if (status->pf_state & HCLGE_PF_STATE_MAIN)
795 hdev->flag |= HCLGE_FLAG_MAIN;
796 else
797 hdev->flag &= ~HCLGE_FLAG_MAIN;
798
799 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
800 return 0;
801 }
802
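/* Query the PF function status from firmware, polling up to
 * HCLGE_QUERY_MAX_CNT times until the PF reset is reported as done,
 * then record whether this PF is the main PF and its MAC id.
 */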
803 static int hclge_query_function_status(struct hclge_dev *hdev)
804 {
805 #define HCLGE_QUERY_MAX_CNT 5
806
807 struct hclge_func_status_cmd *req;
808 struct hclge_desc desc;
809 int timeout = 0;
810 int ret;
811
812 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
813 req = (struct hclge_func_status_cmd *)desc.data;
814
815 do {
816 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
817 if (ret) {
818 dev_err(&hdev->pdev->dev,
819 "query function status failed %d.\n", ret);
820 return ret;
821 }
822
823 /* Check whether the pf reset is done */
824 if (req->pf_state)
825 break;
826 usleep_range(1000, 2000);
827 } while (timeout++ < HCLGE_QUERY_MAX_CNT);
828
829 return hclge_parse_func_status(hdev, req);
830 }
831
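/* Query the PF resources from firmware: the number of TQPs, the packet,
 * tx and dv buffer sizes, and the NIC/RoCE MSI-X vector counts.
 */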
832 static int hclge_query_pf_resource(struct hclge_dev *hdev)
833 {
834 struct hclge_pf_res_cmd *req;
835 struct hclge_desc desc;
836 int ret;
837
838 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
839 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
840 if (ret) {
841 dev_err(&hdev->pdev->dev,
842 "query pf resource failed %d.\n", ret);
843 return ret;
844 }
845
846 req = (struct hclge_pf_res_cmd *)desc.data;
847 hdev->num_tqps = le16_to_cpu(req->tqp_num) +
848 le16_to_cpu(req->ext_tqp_num);
849 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
850
851 if (req->tx_buf_size)
852 hdev->tx_buf_size =
853 le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
854 else
855 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
856
857 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
858
859 if (req->dv_buf_size)
860 hdev->dv_buf_size =
861 le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
862 else
863 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
864
865 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
866
867 hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
868 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
869 dev_err(&hdev->pdev->dev,
870 "only %u msi resources available, not enough for pf(min:2).\n",
871 hdev->num_nic_msi);
872 return -EINVAL;
873 }
874
875 if (hnae3_dev_roce_supported(hdev)) {
876 hdev->num_roce_msi =
877 le16_to_cpu(req->pf_intr_vector_number_roce);
878
879 /* The PF should have both NIC vectors and RoCE vectors;
880 * NIC vectors are queued before RoCE vectors.
881 */
882 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
883 } else {
884 hdev->num_msi = hdev->num_nic_msi;
885 }
886
887 return 0;
888 }
889
890 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
891 {
892 switch (speed_cmd) {
893 case HCLGE_FW_MAC_SPEED_10M:
894 *speed = HCLGE_MAC_SPEED_10M;
895 break;
896 case HCLGE_FW_MAC_SPEED_100M:
897 *speed = HCLGE_MAC_SPEED_100M;
898 break;
899 case HCLGE_FW_MAC_SPEED_1G:
900 *speed = HCLGE_MAC_SPEED_1G;
901 break;
902 case HCLGE_FW_MAC_SPEED_10G:
903 *speed = HCLGE_MAC_SPEED_10G;
904 break;
905 case HCLGE_FW_MAC_SPEED_25G:
906 *speed = HCLGE_MAC_SPEED_25G;
907 break;
908 case HCLGE_FW_MAC_SPEED_40G:
909 *speed = HCLGE_MAC_SPEED_40G;
910 break;
911 case HCLGE_FW_MAC_SPEED_50G:
912 *speed = HCLGE_MAC_SPEED_50G;
913 break;
914 case HCLGE_FW_MAC_SPEED_100G:
915 *speed = HCLGE_MAC_SPEED_100G;
916 break;
917 case HCLGE_FW_MAC_SPEED_200G:
918 *speed = HCLGE_MAC_SPEED_200G;
919 break;
920 default:
921 return -EINVAL;
922 }
923
924 return 0;
925 }
926
927 static const struct hclge_speed_bit_map speed_bit_map[] = {
928 {HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
929 {HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
930 {HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
931 {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
932 {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
933 {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
934 {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BITS},
935 {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BITS},
936 {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BITS},
937 };
938
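/* Map a MAC speed to its corresponding bit in the speed ability bitmap;
 * returns -EINVAL for unsupported speeds.
 */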
939 static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
940 {
941 u16 i;
942
943 for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
944 if (speed == speed_bit_map[i].speed) {
945 *speed_bit = speed_bit_map[i].speed_bit;
946 return 0;
947 }
948 }
949
950 return -EINVAL;
951 }
952
953 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
954 {
955 struct hclge_vport *vport = hclge_get_vport(handle);
956 struct hclge_dev *hdev = vport->back;
957 u32 speed_ability = hdev->hw.mac.speed_ability;
958 u32 speed_bit = 0;
959 int ret;
960
961 ret = hclge_get_speed_bit(speed, &speed_bit);
962 if (ret)
963 return ret;
964
965 if (speed_bit & speed_ability)
966 return 0;
967
968 return -EINVAL;
969 }
970
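/* Rebuild the FEC bits in mac->supported from the current fec_ability. */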
971 static void hclge_update_fec_support(struct hclge_mac *mac)
972 {
973 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
974 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
975 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported);
976 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
977
978 if (mac->fec_ability & BIT(HNAE3_FEC_BASER))
979 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
980 mac->supported);
981 if (mac->fec_ability & BIT(HNAE3_FEC_RS))
982 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
983 mac->supported);
984 if (mac->fec_ability & BIT(HNAE3_FEC_LLRS))
985 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
986 mac->supported);
987 if (mac->fec_ability & BIT(HNAE3_FEC_NONE))
988 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
989 mac->supported);
990 }
991
992 static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[] = {
993 {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT},
994 {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT},
995 {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT},
996 {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT},
997 {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT},
998 {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT},
999 {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT},
1000 {HCLGE_SUPPORT_200G_R4_EXT_BIT,
1001 ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
1002 {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
1003 };
1004
1005 static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[] = {
1006 {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT},
1007 {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
1008 {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT},
1009 {HCLGE_SUPPORT_100G_R4_BIT,
1010 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT},
1011 {HCLGE_SUPPORT_100G_R2_BIT,
1012 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT},
1013 {HCLGE_SUPPORT_200G_R4_EXT_BIT,
1014 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
1015 {HCLGE_SUPPORT_200G_R4_BIT,
1016 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
1017 };
1018
1019 static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[] = {
1020 {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT},
1021 {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT},
1022 {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT},
1023 {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT},
1024 {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT},
1025 {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT},
1026 {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT},
1027 {HCLGE_SUPPORT_200G_R4_EXT_BIT,
1028 ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
1029 {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
1030 };
1031
1032 static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[] = {
1033 {HCLGE_SUPPORT_1G_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT},
1034 {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
1035 {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
1036 {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT},
1037 {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT},
1038 {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT},
1039 {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
1040 {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT},
1041 {HCLGE_SUPPORT_200G_R4_EXT_BIT,
1042 ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
1043 {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
1044 };
1045
1046 static void hclge_convert_setting_sr(u16 speed_ability,
1047 unsigned long *link_mode)
1048 {
1049 int i;
1050
1051 for (i = 0; i < ARRAY_SIZE(hclge_sr_link_mode_bmap); i++) {
1052 if (speed_ability & hclge_sr_link_mode_bmap[i].support_bit)
1053 linkmode_set_bit(hclge_sr_link_mode_bmap[i].link_mode,
1054 link_mode);
1055 }
1056 }
1057
1058 static void hclge_convert_setting_lr(u16 speed_ability,
1059 unsigned long *link_mode)
1060 {
1061 int i;
1062
1063 for (i = 0; i < ARRAY_SIZE(hclge_lr_link_mode_bmap); i++) {
1064 if (speed_ability & hclge_lr_link_mode_bmap[i].support_bit)
1065 linkmode_set_bit(hclge_lr_link_mode_bmap[i].link_mode,
1066 link_mode);
1067 }
1068 }
1069
1070 static void hclge_convert_setting_cr(u16 speed_ability,
1071 unsigned long *link_mode)
1072 {
1073 int i;
1074
1075 for (i = 0; i < ARRAY_SIZE(hclge_cr_link_mode_bmap); i++) {
1076 if (speed_ability & hclge_cr_link_mode_bmap[i].support_bit)
1077 linkmode_set_bit(hclge_cr_link_mode_bmap[i].link_mode,
1078 link_mode);
1079 }
1080 }
1081
1082 static void hclge_convert_setting_kr(u16 speed_ability,
1083 unsigned long *link_mode)
1084 {
1085 int i;
1086
1087 for (i = 0; i < ARRAY_SIZE(hclge_kr_link_mode_bmap); i++) {
1088 if (speed_ability & hclge_kr_link_mode_bmap[i].support_bit)
1089 linkmode_set_bit(hclge_kr_link_mode_bmap[i].link_mode,
1090 link_mode);
1091 }
1092 }
1093
1094 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1095 {
1096 /* If the firmware has reported fec_ability, there is no need to derive it from the speed */
1097 if (mac->fec_ability)
1098 goto out;
1099
1100 switch (mac->speed) {
1101 case HCLGE_MAC_SPEED_10G:
1102 case HCLGE_MAC_SPEED_40G:
1103 mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) |
1104 BIT(HNAE3_FEC_NONE);
1105 break;
1106 case HCLGE_MAC_SPEED_25G:
1107 case HCLGE_MAC_SPEED_50G:
1108 mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1109 BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE);
1110 break;
1111 case HCLGE_MAC_SPEED_100G:
1112 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
1113 BIT(HNAE3_FEC_NONE);
1114 break;
1115 case HCLGE_MAC_SPEED_200G:
1116 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
1117 BIT(HNAE3_FEC_LLRS);
1118 break;
1119 default:
1120 mac->fec_ability = 0;
1121 break;
1122 }
1123
1124 out:
1125 hclge_update_fec_support(mac);
1126 }
1127
1128 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1129 u16 speed_ability)
1130 {
1131 struct hclge_mac *mac = &hdev->hw.mac;
1132
1133 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1134 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1135 mac->supported);
1136
1137 hclge_convert_setting_sr(speed_ability, mac->supported);
1138 hclge_convert_setting_lr(speed_ability, mac->supported);
1139 hclge_convert_setting_cr(speed_ability, mac->supported);
1140 if (hnae3_dev_fec_supported(hdev))
1141 hclge_convert_setting_fec(mac);
1142
1143 if (hnae3_dev_pause_supported(hdev))
1144 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1145
1146 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1147 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1148 }
1149
1150 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1151 u16 speed_ability)
1152 {
1153 struct hclge_mac *mac = &hdev->hw.mac;
1154
1155 hclge_convert_setting_kr(speed_ability, mac->supported);
1156 if (hnae3_dev_fec_supported(hdev))
1157 hclge_convert_setting_fec(mac);
1158
1159 if (hnae3_dev_pause_supported(hdev))
1160 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1161
1162 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1163 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1164 }
1165
1166 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1167 u16 speed_ability)
1168 {
1169 unsigned long *supported = hdev->hw.mac.supported;
1170
1171 /* default to supporting all speeds for a GE port */
1172 if (!speed_ability)
1173 speed_ability = HCLGE_SUPPORT_GE;
1174
1175 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1176 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1177 supported);
1178
1179 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1180 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1181 supported);
1182 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1183 supported);
1184 }
1185
1186 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1187 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1188 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1189 }
1190
1191 if (hnae3_dev_pause_supported(hdev)) {
1192 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1193 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1194 }
1195
1196 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1197 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1198 }
1199
1200 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1201 {
1202 u8 media_type = hdev->hw.mac.media_type;
1203
1204 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1205 hclge_parse_fiber_link_mode(hdev, speed_ability);
1206 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1207 hclge_parse_copper_link_mode(hdev, speed_ability);
1208 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1209 hclge_parse_backplane_link_mode(hdev, speed_ability);
1210 }
1211
1212 static u32 hclge_get_max_speed(u16 speed_ability)
1213 {
1214 if (speed_ability & HCLGE_SUPPORT_200G_BITS)
1215 return HCLGE_MAC_SPEED_200G;
1216
1217 if (speed_ability & HCLGE_SUPPORT_100G_BITS)
1218 return HCLGE_MAC_SPEED_100G;
1219
1220 if (speed_ability & HCLGE_SUPPORT_50G_BITS)
1221 return HCLGE_MAC_SPEED_50G;
1222
1223 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1224 return HCLGE_MAC_SPEED_40G;
1225
1226 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1227 return HCLGE_MAC_SPEED_25G;
1228
1229 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1230 return HCLGE_MAC_SPEED_10G;
1231
1232 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1233 return HCLGE_MAC_SPEED_1G;
1234
1235 if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1236 return HCLGE_MAC_SPEED_100M;
1237
1238 if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1239 return HCLGE_MAC_SPEED_10M;
1240
1241 return HCLGE_MAC_SPEED_1G;
1242 }
1243
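/* Unpack the configuration parameters returned by the firmware into the
 * hclge_cfg structure: TC/TQP numbers, PHY address, media type, MAC
 * address, default speed, RSS sizes, speed ability and UMV space.
 */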
1244 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1245 {
1246 #define HCLGE_TX_SPARE_SIZE_UNIT 4096
1247 #define SPEED_ABILITY_EXT_SHIFT 8
1248
1249 struct hclge_cfg_param_cmd *req;
1250 u64 mac_addr_tmp_high;
1251 u16 speed_ability_ext;
1252 u64 mac_addr_tmp;
1253 unsigned int i;
1254
1255 req = (struct hclge_cfg_param_cmd *)desc[0].data;
1256
1257 /* get the configuration */
1258 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1259 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1260 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1261 HCLGE_CFG_TQP_DESC_N_M,
1262 HCLGE_CFG_TQP_DESC_N_S);
1263
1264 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1265 HCLGE_CFG_PHY_ADDR_M,
1266 HCLGE_CFG_PHY_ADDR_S);
1267 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1268 HCLGE_CFG_MEDIA_TP_M,
1269 HCLGE_CFG_MEDIA_TP_S);
1270 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1271 HCLGE_CFG_RX_BUF_LEN_M,
1272 HCLGE_CFG_RX_BUF_LEN_S);
1273 /* get mac_address */
1274 mac_addr_tmp = __le32_to_cpu(req->param[2]);
1275 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1276 HCLGE_CFG_MAC_ADDR_H_M,
1277 HCLGE_CFG_MAC_ADDR_H_S);
1278
1279 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1280
1281 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1282 HCLGE_CFG_DEFAULT_SPEED_M,
1283 HCLGE_CFG_DEFAULT_SPEED_S);
1284 cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1285 HCLGE_CFG_RSS_SIZE_M,
1286 HCLGE_CFG_RSS_SIZE_S);
1287
1288 for (i = 0; i < ETH_ALEN; i++)
1289 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1290
1291 req = (struct hclge_cfg_param_cmd *)desc[1].data;
1292 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1293
1294 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1295 HCLGE_CFG_SPEED_ABILITY_M,
1296 HCLGE_CFG_SPEED_ABILITY_S);
1297 speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1298 HCLGE_CFG_SPEED_ABILITY_EXT_M,
1299 HCLGE_CFG_SPEED_ABILITY_EXT_S);
1300 cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1301
1302 cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
1303 HCLGE_CFG_VLAN_FLTR_CAP_M,
1304 HCLGE_CFG_VLAN_FLTR_CAP_S);
1305
1306 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1307 HCLGE_CFG_UMV_TBL_SPACE_M,
1308 HCLGE_CFG_UMV_TBL_SPACE_S);
1309
1310 cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1311 HCLGE_CFG_PF_RSS_SIZE_M,
1312 HCLGE_CFG_PF_RSS_SIZE_S);
1313
1314 /* The HCLGE_CFG_PF_RSS_SIZE_M field stores the PF max rss size as a
1315 * power-of-2 exponent rather than the size itself, which is more
1316 * flexible for future changes and expansions.
1317 * The VF max rss size field (HCLGE_CFG_RSS_SIZE_S) is read out
1318 * directly; a PF field of 0 is not meaningful, so in that case the
1319 * PF uses the same max rss size as the VF.
1320 */
1321 cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1322 1U << cfg->pf_rss_size_max :
1323 cfg->vf_rss_size_max;
1324
1325 /* The unit of the tx spare buffer size queried from configuration
1326 * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
1327 * needed here.
1328 */
1329 cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
1330 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
1331 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
1332 cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
1333 }
1334
1335 /* hclge_get_cfg: query the static parameters from flash
1336 * @hdev: pointer to struct hclge_dev
1337 * @hcfg: the config structure to be filled
1338 */
1339 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1340 {
1341 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1342 struct hclge_cfg_param_cmd *req;
1343 unsigned int i;
1344 int ret;
1345
1346 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1347 u32 offset = 0;
1348
1349 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1350 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1351 true);
1352 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1353 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1354 /* The length is expressed in units of 4 bytes when sent to hardware */
1355 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1356 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1357 req->offset = cpu_to_le32(offset);
1358 }
1359
1360 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1361 if (ret) {
1362 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1363 return ret;
1364 }
1365
1366 hclge_parse_cfg(hcfg, desc);
1367
1368 return 0;
1369 }
1370
1371 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1372 {
1373 #define HCLGE_MAX_NON_TSO_BD_NUM 8U
1374
1375 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1376
1377 ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1378 ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1379 ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
1380 ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1381 ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1382 ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1383 ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1384 ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1385 ae_dev->dev_specs.tnl_num = 0;
1386 }
1387
1388 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1389 struct hclge_desc *desc)
1390 {
1391 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1392 struct hclge_dev_specs_0_cmd *req0;
1393 struct hclge_dev_specs_1_cmd *req1;
1394
1395 req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1396 req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1397
1398 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1399 ae_dev->dev_specs.rss_ind_tbl_size =
1400 le16_to_cpu(req0->rss_ind_tbl_size);
1401 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1402 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1403 ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1404 ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1405 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1406 ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1407 ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
1408 ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
1409 ae_dev->dev_specs.tnl_num = req1->tnl_num;
1410 ae_dev->dev_specs.hilink_version = req1->hilink_version;
1411 }
1412
1413 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1414 {
1415 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1416
1417 if (!dev_specs->max_non_tso_bd_num)
1418 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1419 if (!dev_specs->rss_ind_tbl_size)
1420 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1421 if (!dev_specs->rss_key_size)
1422 dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
1423 if (!dev_specs->max_tm_rate)
1424 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1425 if (!dev_specs->max_qset_num)
1426 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1427 if (!dev_specs->max_int_gl)
1428 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1429 if (!dev_specs->max_frm_size)
1430 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1431 if (!dev_specs->umv_size)
1432 dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1433 }
1434
1435 static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
1436 {
1437 u32 reg_num = 0;
1438 int ret;
1439
1440 ret = hclge_mac_query_reg_num(hdev, &reg_num);
1441 if (ret && ret != -EOPNOTSUPP)
1442 return ret;
1443
1444 hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
1445 return 0;
1446 }
1447
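/* Query the device specifications from firmware. Devices below version V3
 * do not support this command, so defaults are used for them, and any
 * field the firmware leaves at zero is filled with its default value.
 */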
1448 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1449 {
1450 struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1451 int ret;
1452 int i;
1453
1454 ret = hclge_query_mac_stats_num(hdev);
1455 if (ret)
1456 return ret;
1457
1458 /* set default specifications as devices lower than version V3 do not
1459 * support querying specifications from firmware.
1460 */
1461 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1462 hclge_set_default_dev_specs(hdev);
1463 return 0;
1464 }
1465
1466 for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1467 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1468 true);
1469 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1470 }
1471 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1472
1473 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1474 if (ret)
1475 return ret;
1476
1477 hclge_parse_dev_specs(hdev, desc);
1478 hclge_check_dev_specs(hdev);
1479
1480 return 0;
1481 }
1482
1483 static int hclge_get_cap(struct hclge_dev *hdev)
1484 {
1485 int ret;
1486
1487 ret = hclge_query_function_status(hdev);
1488 if (ret) {
1489 dev_err(&hdev->pdev->dev,
1490 "query function status error %d.\n", ret);
1491 return ret;
1492 }
1493
1494 /* get pf resource */
1495 return hclge_query_pf_resource(hdev);
1496 }
1497
1498 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1499 {
1500 #define HCLGE_MIN_TX_DESC 64
1501 #define HCLGE_MIN_RX_DESC 64
1502
1503 if (!is_kdump_kernel())
1504 return;
1505
1506 dev_info(&hdev->pdev->dev,
1507 "Running kdump kernel. Using minimal resources\n");
1508
1509 /* the minimal number of queue pairs equals the number of vports */
1510 hdev->num_tqps = hdev->num_req_vfs + 1;
1511 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1512 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1513 }
1514
1515 static void hclge_init_tc_config(struct hclge_dev *hdev)
1516 {
1517 unsigned int i;
1518
1519 if (hdev->tc_max > HNAE3_MAX_TC ||
1520 hdev->tc_max < 1) {
1521 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1522 hdev->tc_max);
1523 hdev->tc_max = 1;
1524 }
1525
1526 /* Dev does not support DCB */
1527 if (!hnae3_dev_dcb_supported(hdev)) {
1528 hdev->tc_max = 1;
1529 hdev->pfc_max = 0;
1530 } else {
1531 hdev->pfc_max = hdev->tc_max;
1532 }
1533
1534 hdev->tm_info.num_tc = 1;
1535
1536 /* Currently, non-contiguous TCs are not supported */
1537 for (i = 0; i < hdev->tm_info.num_tc; i++)
1538 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1539
1540 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1541 }
1542
1543 static int hclge_configure(struct hclge_dev *hdev)
1544 {
1545 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1546 struct hclge_cfg cfg;
1547 int ret;
1548
1549 ret = hclge_get_cfg(hdev, &cfg);
1550 if (ret)
1551 return ret;
1552
1553 hdev->base_tqp_pid = 0;
1554 hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1555 hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1556 hdev->rx_buf_len = cfg.rx_buf_len;
1557 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1558 hdev->hw.mac.media_type = cfg.media_type;
1559 hdev->hw.mac.phy_addr = cfg.phy_addr;
1560 hdev->num_tx_desc = cfg.tqp_desc_num;
1561 hdev->num_rx_desc = cfg.tqp_desc_num;
1562 hdev->tm_info.num_pg = 1;
1563 hdev->tc_max = cfg.tc_num;
1564 hdev->tm_info.hw_pfc_map = 0;
1565 if (cfg.umv_space)
1566 hdev->wanted_umv_size = cfg.umv_space;
1567 else
1568 hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
1569 hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
1570 hdev->gro_en = true;
1571 if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
1572 set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1573
1574 if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
1575 hdev->fd_en = true;
1576 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1577 }
1578
1579 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1580 if (ret) {
1581 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1582 cfg.default_speed, ret);
1583 return ret;
1584 }
1585 hdev->hw.mac.req_speed = hdev->hw.mac.speed;
1586 hdev->hw.mac.req_autoneg = AUTONEG_ENABLE;
1587 hdev->hw.mac.req_duplex = DUPLEX_FULL;
1588
1589 hclge_parse_link_mode(hdev, cfg.speed_ability);
1590
1591 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1592
1593 hclge_init_tc_config(hdev);
1594 hclge_init_kdump_kernel_config(hdev);
1595
1596 return ret;
1597 }
1598
1599 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1600 u16 tso_mss_max)
1601 {
1602 struct hclge_cfg_tso_status_cmd *req;
1603 struct hclge_desc desc;
1604
1605 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1606
1607 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1608 req->tso_mss_min = cpu_to_le16(tso_mss_min);
1609 req->tso_mss_max = cpu_to_le16(tso_mss_max);
1610
1611 return hclge_cmd_send(&hdev->hw, &desc, 1);
1612 }
1613
1614 static int hclge_config_gro(struct hclge_dev *hdev)
1615 {
1616 struct hclge_cfg_gro_status_cmd *req;
1617 struct hclge_desc desc;
1618 int ret;
1619
1620 if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
1621 return 0;
1622
1623 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1624 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1625
1626 req->gro_en = hdev->gro_en ? 1 : 0;
1627
1628 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1629 if (ret)
1630 dev_err(&hdev->pdev->dev,
1631 "GRO hardware config cmd failed, ret = %d\n", ret);
1632
1633 return ret;
1634 }
1635
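/* allocate the TQP array and set up each queue's descriptor numbers and
 * register/memory base addresses
 */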
1636 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1637 {
1638 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1639 struct hclge_comm_tqp *tqp;
1640 int i;
1641
1642 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1643 sizeof(struct hclge_comm_tqp), GFP_KERNEL);
1644 if (!hdev->htqp)
1645 return -ENOMEM;
1646
1647 tqp = hdev->htqp;
1648
1649 for (i = 0; i < hdev->num_tqps; i++) {
1650 tqp->dev = &hdev->pdev->dev;
1651 tqp->index = i;
1652
1653 tqp->q.ae_algo = &ae_algo;
1654 tqp->q.buf_size = hdev->rx_buf_len;
1655 tqp->q.tx_desc_num = hdev->num_tx_desc;
1656 tqp->q.rx_desc_num = hdev->num_rx_desc;
1657
1658 /* need an extended offset to configure queues >=
1659 * HCLGE_TQP_MAX_SIZE_DEV_V2
1660 */
1661 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1662 tqp->q.io_base = hdev->hw.hw.io_base +
1663 HCLGE_TQP_REG_OFFSET +
1664 i * HCLGE_TQP_REG_SIZE;
1665 else
1666 tqp->q.io_base = hdev->hw.hw.io_base +
1667 HCLGE_TQP_REG_OFFSET +
1668 HCLGE_TQP_EXT_REG_OFFSET +
1669 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1670 HCLGE_TQP_REG_SIZE;
1671
1672 /* when device supports tx push and has device memory,
1673 * the queue can execute push mode or doorbell mode on
1674 * device memory.
1675 */
1676 if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
1677 tqp->q.mem_base = hdev->hw.hw.mem_base +
1678 HCLGE_TQP_MEM_OFFSET(hdev, i);
1679
1680 tqp++;
1681 }
1682
1683 return 0;
1684 }
1685
1686 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1687 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1688 {
1689 struct hclge_tqp_map_cmd *req;
1690 struct hclge_desc desc;
1691 int ret;
1692
1693 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1694
1695 req = (struct hclge_tqp_map_cmd *)desc.data;
1696 req->tqp_id = cpu_to_le16(tqp_pid);
1697 req->tqp_vf = func_id;
1698 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1699 if (!is_pf)
1700 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1701 req->tqp_vid = cpu_to_le16(tqp_vid);
1702
1703 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1704 if (ret)
1705 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1706
1707 return ret;
1708 }
1709
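/* assign up to num_tqps unused TQPs to the vport and derive its rss_size
 * from the number of TCs and the available NIC interrupt vectors
 */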
1710 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1711 {
1712 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1713 struct hclge_dev *hdev = vport->back;
1714 int i, alloced;
1715
1716 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1717 alloced < num_tqps; i++) {
1718 if (!hdev->htqp[i].alloced) {
1719 hdev->htqp[i].q.handle = &vport->nic;
1720 hdev->htqp[i].q.tqp_index = alloced;
1721 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1722 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1723 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1724 hdev->htqp[i].alloced = true;
1725 alloced++;
1726 }
1727 }
1728 vport->alloc_tqps = alloced;
1729 kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1730 vport->alloc_tqps / hdev->tm_info.num_tc);
1731
1732 /* ensure one-to-one mapping between irq and queue by default */
1733 kinfo->rss_size = min_t(u16, kinfo->rss_size,
1734 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1735
1736 return 0;
1737 }
1738
1739 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1740 u16 num_tx_desc, u16 num_rx_desc)
1741
1742 {
1743 struct hnae3_handle *nic = &vport->nic;
1744 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1745 struct hclge_dev *hdev = vport->back;
1746 int ret;
1747
1748 kinfo->num_tx_desc = num_tx_desc;
1749 kinfo->num_rx_desc = num_rx_desc;
1750
1751 kinfo->rx_buf_len = hdev->rx_buf_len;
1752 kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
1753
1754 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1755 sizeof(struct hnae3_queue *), GFP_KERNEL);
1756 if (!kinfo->tqp)
1757 return -ENOMEM;
1758
1759 ret = hclge_assign_tqp(vport, num_tqps);
1760 if (ret)
1761 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1762
1763 return ret;
1764 }
1765
1766 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1767 struct hclge_vport *vport)
1768 {
1769 struct hnae3_handle *nic = &vport->nic;
1770 struct hnae3_knic_private_info *kinfo;
1771 u16 i;
1772
1773 kinfo = &nic->kinfo;
1774 for (i = 0; i < vport->alloc_tqps; i++) {
1775 struct hclge_comm_tqp *q =
1776 container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
1777 bool is_pf;
1778 int ret;
1779
1780 is_pf = !(vport->vport_id);
1781 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1782 i, is_pf);
1783 if (ret)
1784 return ret;
1785 }
1786
1787 return 0;
1788 }
1789
1790 static int hclge_map_tqp(struct hclge_dev *hdev)
1791 {
1792 struct hclge_vport *vport = hdev->vport;
1793 u16 i, num_vport;
1794
1795 num_vport = hdev->num_req_vfs + 1;
1796 for (i = 0; i < num_vport; i++) {
1797 int ret;
1798
1799 ret = hclge_map_tqp_to_vport(hdev, vport);
1800 if (ret)
1801 return ret;
1802
1803 vport++;
1804 }
1805
1806 return 0;
1807 }
1808
1809 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1810 {
1811 struct hnae3_handle *nic = &vport->nic;
1812 struct hclge_dev *hdev = vport->back;
1813 int ret;
1814
1815 nic->pdev = hdev->pdev;
1816 nic->ae_algo = &ae_algo;
1817 bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
1818 MAX_NUMNODES);
1819 nic->kinfo.io_base = hdev->hw.hw.io_base;
1820
1821 ret = hclge_knic_setup(vport, num_tqps,
1822 hdev->num_tx_desc, hdev->num_rx_desc);
1823 if (ret)
1824 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1825
1826 return ret;
1827 }
1828
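/* allocate one vport for the PF plus one per requested VF and distribute
 * the TQPs evenly, the PF vport taking the remainder
 */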
1829 static int hclge_alloc_vport(struct hclge_dev *hdev)
1830 {
1831 struct pci_dev *pdev = hdev->pdev;
1832 struct hclge_vport *vport;
1833 u32 tqp_main_vport;
1834 u32 tqp_per_vport;
1835 int num_vport, i;
1836 int ret;
1837
1838 /* We need to alloc a vport for the main NIC of the PF */
1839 num_vport = hdev->num_req_vfs + 1;
1840
1841 if (hdev->num_tqps < num_vport) {
1842 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1843 hdev->num_tqps, num_vport);
1844 return -EINVAL;
1845 }
1846
1847 /* Alloc the same number of TQPs for every vport */
1848 tqp_per_vport = hdev->num_tqps / num_vport;
1849 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1850
1851 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1852 GFP_KERNEL);
1853 if (!vport)
1854 return -ENOMEM;
1855
1856 hdev->vport = vport;
1857 hdev->num_alloc_vport = num_vport;
1858
1859 if (IS_ENABLED(CONFIG_PCI_IOV))
1860 hdev->num_alloc_vfs = hdev->num_req_vfs;
1861
1862 for (i = 0; i < num_vport; i++) {
1863 vport->back = hdev;
1864 vport->vport_id = i;
1865 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1866 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1867 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1868 vport->port_base_vlan_cfg.tbl_sta = true;
1869 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1870 vport->req_vlan_fltr_en = true;
1871 INIT_LIST_HEAD(&vport->vlan_list);
1872 INIT_LIST_HEAD(&vport->uc_mac_list);
1873 INIT_LIST_HEAD(&vport->mc_mac_list);
1874 spin_lock_init(&vport->mac_list_lock);
1875
1876 if (i == 0)
1877 ret = hclge_vport_setup(vport, tqp_main_vport);
1878 else
1879 ret = hclge_vport_setup(vport, tqp_per_vport);
1880 if (ret) {
1881 dev_err(&pdev->dev,
1882 "vport setup failed for vport %d, %d\n",
1883 i, ret);
1884 return ret;
1885 }
1886
1887 vport++;
1888 }
1889
1890 return 0;
1891 }
1892
1893 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1894 struct hclge_pkt_buf_alloc *buf_alloc)
1895 {
1896 /* TX buffer size is in units of 128 bytes */
1897 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1898 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1899 struct hclge_tx_buff_alloc_cmd *req;
1900 struct hclge_desc desc;
1901 int ret;
1902 u8 i;
1903
1904 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1905
1906 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1907 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1908 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1909
1910 req->tx_pkt_buff[i] =
1911 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1912 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1913 }
1914
1915 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1916 if (ret)
1917 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1918 ret);
1919
1920 return ret;
1921 }
1922
1923 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1924 struct hclge_pkt_buf_alloc *buf_alloc)
1925 {
1926 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1927
1928 if (ret)
1929 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1930
1931 return ret;
1932 }
1933
1934 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1935 {
1936 unsigned int i;
1937 u32 cnt = 0;
1938
1939 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1940 if (hdev->hw_tc_map & BIT(i))
1941 cnt++;
1942 return cnt;
1943 }
1944
1945 /* Get the number of pfc enabled TCs, which have private buffer */
1946 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1947 struct hclge_pkt_buf_alloc *buf_alloc)
1948 {
1949 struct hclge_priv_buf *priv;
1950 unsigned int i;
1951 int cnt = 0;
1952
1953 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1954 priv = &buf_alloc->priv_buf[i];
1955 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1956 priv->enable)
1957 cnt++;
1958 }
1959
1960 return cnt;
1961 }
1962
1963 /* Get the number of pfc disabled TCs, which have private buffer */
1964 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1965 struct hclge_pkt_buf_alloc *buf_alloc)
1966 {
1967 struct hclge_priv_buf *priv;
1968 unsigned int i;
1969 int cnt = 0;
1970
1971 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1972 priv = &buf_alloc->priv_buf[i];
1973 if (hdev->hw_tc_map & BIT(i) &&
1974 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1975 priv->enable)
1976 cnt++;
1977 }
1978
1979 return cnt;
1980 }
1981
1982 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1983 {
1984 struct hclge_priv_buf *priv;
1985 u32 rx_priv = 0;
1986 int i;
1987
1988 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1989 priv = &buf_alloc->priv_buf[i];
1990 if (priv->enable)
1991 rx_priv += priv->buf_size;
1992 }
1993 return rx_priv;
1994 }
1995
1996 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1997 {
1998 u32 i, total_tx_size = 0;
1999
2000 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
2001 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
2002
2003 return total_tx_size;
2004 }
2005
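/* check whether rx_all can hold the private buffers plus the shared buffer;
 * if so, fill in the shared buffer size and the per-TC thresholds
 */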
2006 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
2007 struct hclge_pkt_buf_alloc *buf_alloc,
2008 u32 rx_all)
2009 {
2010 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2011 u32 tc_num = hclge_get_tc_num(hdev);
2012 u32 shared_buf, aligned_mps;
2013 u32 rx_priv;
2014 int i;
2015
2016 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2017
2018 if (hnae3_dev_dcb_supported(hdev))
2019 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2020 hdev->dv_buf_size;
2021 else
2022 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2023 + hdev->dv_buf_size;
2024
2025 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2026 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2027 HCLGE_BUF_SIZE_UNIT);
2028
2029 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2030 if (rx_all < rx_priv + shared_std)
2031 return false;
2032
2033 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2034 buf_alloc->s_buf.buf_size = shared_buf;
2035 if (hnae3_dev_dcb_supported(hdev)) {
2036 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2037 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2038 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2039 HCLGE_BUF_SIZE_UNIT);
2040 } else {
2041 buf_alloc->s_buf.self.high = aligned_mps +
2042 HCLGE_NON_DCB_ADDITIONAL_BUF;
2043 buf_alloc->s_buf.self.low = aligned_mps;
2044 }
2045
2046 if (hnae3_dev_dcb_supported(hdev)) {
2047 hi_thrd = shared_buf - hdev->dv_buf_size;
2048
2049 if (tc_num <= NEED_RESERVE_TC_NUM)
2050 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2051 / BUF_MAX_PERCENT;
2052
2053 if (tc_num)
2054 hi_thrd = hi_thrd / tc_num;
2055
2056 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2057 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2058 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2059 } else {
2060 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2061 lo_thrd = aligned_mps;
2062 }
2063
2064 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2065 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2066 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2067 }
2068
2069 return true;
2070 }
2071
2072 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2073 struct hclge_pkt_buf_alloc *buf_alloc)
2074 {
2075 u32 i, total_size;
2076
2077 total_size = hdev->pkt_buf_size;
2078
2079 /* alloc tx buffer for all enabled tc */
2080 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2081 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2082
2083 if (hdev->hw_tc_map & BIT(i)) {
2084 if (total_size < hdev->tx_buf_size)
2085 return -ENOMEM;
2086
2087 priv->tx_buf_size = hdev->tx_buf_size;
2088 } else {
2089 priv->tx_buf_size = 0;
2090 }
2091
2092 total_size -= priv->tx_buf_size;
2093 }
2094
2095 return 0;
2096 }
2097
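/* calculate the private rx buffer size and waterlines for each enabled TC,
 * using larger (@max is true) or smaller watermarks, then check the result
 * still fits in the remaining rx buffer
 */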
2098 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2099 struct hclge_pkt_buf_alloc *buf_alloc)
2100 {
2101 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2102 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2103 unsigned int i;
2104
2105 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2106 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2107
2108 priv->enable = 0;
2109 priv->wl.low = 0;
2110 priv->wl.high = 0;
2111 priv->buf_size = 0;
2112
2113 if (!(hdev->hw_tc_map & BIT(i)))
2114 continue;
2115
2116 priv->enable = 1;
2117
2118 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2119 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2120 priv->wl.high = roundup(priv->wl.low + aligned_mps,
2121 HCLGE_BUF_SIZE_UNIT);
2122 } else {
2123 priv->wl.low = 0;
2124 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2125 aligned_mps;
2126 }
2127
2128 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2129 }
2130
2131 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2132 }
2133
2134 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2135 struct hclge_pkt_buf_alloc *buf_alloc)
2136 {
2137 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2138 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2139 int i;
2140
2141 /* let the last one be cleared first */
2142 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2143 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2144 unsigned int mask = BIT((unsigned int)i);
2145
2146 if (hdev->hw_tc_map & mask &&
2147 !(hdev->tm_info.hw_pfc_map & mask)) {
2148 /* Clear the no pfc TC private buffer */
2149 priv->wl.low = 0;
2150 priv->wl.high = 0;
2151 priv->buf_size = 0;
2152 priv->enable = 0;
2153 no_pfc_priv_num--;
2154 }
2155
2156 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2157 no_pfc_priv_num == 0)
2158 break;
2159 }
2160
2161 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2162 }
2163
2164 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2165 struct hclge_pkt_buf_alloc *buf_alloc)
2166 {
2167 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2168 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2169 int i;
2170
2171 /* let the last one be cleared first */
2172 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2173 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2174 unsigned int mask = BIT((unsigned int)i);
2175
2176 if (hdev->hw_tc_map & mask &&
2177 hdev->tm_info.hw_pfc_map & mask) {
2178 /* Reduce the number of pfc TC with private buffer */
2179 priv->wl.low = 0;
2180 priv->enable = 0;
2181 priv->wl.high = 0;
2182 priv->buf_size = 0;
2183 pfc_priv_num--;
2184 }
2185
2186 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2187 pfc_priv_num == 0)
2188 break;
2189 }
2190
2191 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2192 }
2193
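/* try to give every enabled TC a private rx buffer and leave no shared
 * buffer; fails when the per-TC share is below the required minimum
 */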
2194 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2195 struct hclge_pkt_buf_alloc *buf_alloc)
2196 {
2197 #define COMPENSATE_BUFFER 0x3C00
2198 #define COMPENSATE_HALF_MPS_NUM 5
2199 #define PRIV_WL_GAP 0x1800
2200
2201 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2202 u32 tc_num = hclge_get_tc_num(hdev);
2203 u32 half_mps = hdev->mps >> 1;
2204 u32 min_rx_priv;
2205 unsigned int i;
2206
2207 if (tc_num)
2208 rx_priv = rx_priv / tc_num;
2209
2210 if (tc_num <= NEED_RESERVE_TC_NUM)
2211 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2212
2213 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2214 COMPENSATE_HALF_MPS_NUM * half_mps;
2215 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2216 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2217 if (rx_priv < min_rx_priv)
2218 return false;
2219
2220 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2221 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2222
2223 priv->enable = 0;
2224 priv->wl.low = 0;
2225 priv->wl.high = 0;
2226 priv->buf_size = 0;
2227
2228 if (!(hdev->hw_tc_map & BIT(i)))
2229 continue;
2230
2231 priv->enable = 1;
2232 priv->buf_size = rx_priv;
2233 priv->wl.high = rx_priv - hdev->dv_buf_size;
2234 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2235 }
2236
2237 buf_alloc->s_buf.buf_size = 0;
2238
2239 return true;
2240 }
2241
2242 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2243 * @hdev: pointer to struct hclge_dev
2244 * @buf_alloc: pointer to buffer calculation data
2245 * @return: 0: calculate successful, negative: fail
2246 */
2247 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2248 struct hclge_pkt_buf_alloc *buf_alloc)
2249 {
2250 /* When DCB is not supported, rx private buffer is not allocated. */
2251 if (!hnae3_dev_dcb_supported(hdev)) {
2252 u32 rx_all = hdev->pkt_buf_size;
2253
2254 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2255 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2256 return -ENOMEM;
2257
2258 return 0;
2259 }
2260
2261 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2262 return 0;
2263
2264 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2265 return 0;
2266
2267 /* try to decrease the buffer size */
2268 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2269 return 0;
2270
2271 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2272 return 0;
2273
2274 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2275 return 0;
2276
2277 return -ENOMEM;
2278 }
2279
2280 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2281 struct hclge_pkt_buf_alloc *buf_alloc)
2282 {
2283 struct hclge_rx_priv_buff_cmd *req;
2284 struct hclge_desc desc;
2285 int ret;
2286 int i;
2287
2288 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2289 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2290
2291 /* Alloc private buffer TCs */
2292 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2293 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2294
2295 req->buf_num[i] =
2296 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2297 req->buf_num[i] |=
2298 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2299 }
2300
2301 req->shared_buf =
2302 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2303 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2304
2305 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2306 if (ret)
2307 dev_err(&hdev->pdev->dev,
2308 "rx private buffer alloc cmd failed %d\n", ret);
2309
2310 return ret;
2311 }
2312
2313 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2314 struct hclge_pkt_buf_alloc *buf_alloc)
2315 {
2316 struct hclge_rx_priv_wl_buf *req;
2317 struct hclge_priv_buf *priv;
2318 struct hclge_desc desc[2];
2319 int i, j;
2320 int ret;
2321
2322 for (i = 0; i < 2; i++) {
2323 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2324 false);
2325 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2326
2327 /* The first descriptor sets the NEXT bit to 1 */
2328 if (i == 0)
2329 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2330 else
2331 desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2332
2333 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2334 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2335
2336 priv = &buf_alloc->priv_buf[idx];
2337 req->tc_wl[j].high =
2338 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2339 req->tc_wl[j].high |=
2340 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2341 req->tc_wl[j].low =
2342 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2343 req->tc_wl[j].low |=
2344 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2345 }
2346 }
2347
2348 /* Send 2 descriptors at one time */
2349 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2350 if (ret)
2351 dev_err(&hdev->pdev->dev,
2352 "rx private waterline config cmd failed %d\n",
2353 ret);
2354 return ret;
2355 }
2356
2357 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2358 struct hclge_pkt_buf_alloc *buf_alloc)
2359 {
2360 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2361 struct hclge_rx_com_thrd *req;
2362 struct hclge_desc desc[2];
2363 struct hclge_tc_thrd *tc;
2364 int i, j;
2365 int ret;
2366
2367 for (i = 0; i < 2; i++) {
2368 hclge_cmd_setup_basic_desc(&desc[i],
2369 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2370 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2371
2372 /* The first descriptor sets the NEXT bit to 1 */
2373 if (i == 0)
2374 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2375 else
2376 desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2377
2378 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2379 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2380
2381 req->com_thrd[j].high =
2382 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2383 req->com_thrd[j].high |=
2384 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2385 req->com_thrd[j].low =
2386 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2387 req->com_thrd[j].low |=
2388 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2389 }
2390 }
2391
2392 /* Send 2 descriptors at one time */
2393 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2394 if (ret)
2395 dev_err(&hdev->pdev->dev,
2396 "common threshold config cmd failed %d\n", ret);
2397 return ret;
2398 }
2399
2400 static int hclge_common_wl_config(struct hclge_dev *hdev,
2401 struct hclge_pkt_buf_alloc *buf_alloc)
2402 {
2403 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2404 struct hclge_rx_com_wl *req;
2405 struct hclge_desc desc;
2406 int ret;
2407
2408 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2409
2410 req = (struct hclge_rx_com_wl *)desc.data;
2411 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2412 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2413
2414 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2415 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2416
2417 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2418 if (ret)
2419 dev_err(&hdev->pdev->dev,
2420 "common waterline config cmd failed %d\n", ret);
2421
2422 return ret;
2423 }
2424
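/* hclge_buffer_alloc: calculate tx/rx buffer sizes for all TCs and write the
 * buffer, waterline and threshold configuration to hardware
 * @hdev: pointer to struct hclge_dev
 * @return: 0: configure successful, negative: fail
 */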
2425 int hclge_buffer_alloc(struct hclge_dev *hdev)
2426 {
2427 struct hclge_pkt_buf_alloc *pkt_buf;
2428 int ret;
2429
2430 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2431 if (!pkt_buf)
2432 return -ENOMEM;
2433
2434 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2435 if (ret) {
2436 dev_err(&hdev->pdev->dev,
2437 "could not calc tx buffer size for all TCs %d\n", ret);
2438 goto out;
2439 }
2440
2441 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2442 if (ret) {
2443 dev_err(&hdev->pdev->dev,
2444 "could not alloc tx buffers %d\n", ret);
2445 goto out;
2446 }
2447
2448 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2449 if (ret) {
2450 dev_err(&hdev->pdev->dev,
2451 "could not calc rx priv buffer size for all TCs %d\n",
2452 ret);
2453 goto out;
2454 }
2455
2456 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2457 if (ret) {
2458 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2459 ret);
2460 goto out;
2461 }
2462
2463 if (hnae3_dev_dcb_supported(hdev)) {
2464 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2465 if (ret) {
2466 dev_err(&hdev->pdev->dev,
2467 "could not configure rx private waterline %d\n",
2468 ret);
2469 goto out;
2470 }
2471
2472 ret = hclge_common_thrd_config(hdev, pkt_buf);
2473 if (ret) {
2474 dev_err(&hdev->pdev->dev,
2475 "could not configure common threshold %d\n",
2476 ret);
2477 goto out;
2478 }
2479 }
2480
2481 ret = hclge_common_wl_config(hdev, pkt_buf);
2482 if (ret)
2483 dev_err(&hdev->pdev->dev,
2484 "could not configure common waterline %d\n", ret);
2485
2486 out:
2487 kfree(pkt_buf);
2488 return ret;
2489 }
2490
2491 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2492 {
2493 struct hnae3_handle *roce = &vport->roce;
2494 struct hnae3_handle *nic = &vport->nic;
2495 struct hclge_dev *hdev = vport->back;
2496
2497 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2498
2499 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2500 return -EINVAL;
2501
2502 roce->rinfo.base_vector = hdev->num_nic_msi;
2503
2504 roce->rinfo.netdev = nic->kinfo.netdev;
2505 roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
2506 roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
2507
2508 roce->pdev = nic->pdev;
2509 roce->ae_algo = nic->ae_algo;
2510 bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits,
2511 MAX_NUMNODES);
2512
2513 return 0;
2514 }
2515
2516 static int hclge_init_msi(struct hclge_dev *hdev)
2517 {
2518 struct pci_dev *pdev = hdev->pdev;
2519 int vectors;
2520 int i;
2521
2522 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2523 hdev->num_msi,
2524 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2525 if (vectors < 0) {
2526 dev_err(&pdev->dev,
2527 "failed(%d) to allocate MSI/MSI-X vectors\n",
2528 vectors);
2529 return vectors;
2530 }
2531 if (vectors < hdev->num_msi)
2532 dev_warn(&hdev->pdev->dev,
2533 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2534 hdev->num_msi, vectors);
2535
2536 hdev->num_msi = vectors;
2537 hdev->num_msi_left = vectors;
2538
2539 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2540 sizeof(u16), GFP_KERNEL);
2541 if (!hdev->vector_status) {
2542 pci_free_irq_vectors(pdev);
2543 return -ENOMEM;
2544 }
2545
2546 for (i = 0; i < hdev->num_msi; i++)
2547 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2548
2549 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2550 sizeof(int), GFP_KERNEL);
2551 if (!hdev->vector_irq) {
2552 pci_free_irq_vectors(pdev);
2553 return -ENOMEM;
2554 }
2555
2556 return 0;
2557 }
2558
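/* only 10M and 100M support half duplex; force full duplex for higher speeds */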
2559 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2560 {
2561 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2562 duplex = HCLGE_MAC_FULL;
2563
2564 return duplex;
2565 }
2566
2567 static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = {
2568 {HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M},
2569 {HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M},
2570 {HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G},
2571 {HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G},
2572 {HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G},
2573 {HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G},
2574 {HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G},
2575 {HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G},
2576 {HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G},
2577 };
2578
2579 static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
2580 {
2581 u16 i;
2582
2583 for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) {
2584 if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) {
2585 *speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw;
2586 return 0;
2587 }
2588 }
2589
2590 return -EINVAL;
2591 }
2592
2593 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2594 u8 duplex, u8 lane_num)
2595 {
2596 struct hclge_config_mac_speed_dup_cmd *req;
2597 struct hclge_desc desc;
2598 u32 speed_fw;
2599 int ret;
2600
2601 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2602
2603 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2604
2605 if (duplex)
2606 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2607
2608 ret = hclge_convert_to_fw_speed(speed, &speed_fw);
2609 if (ret) {
2610 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2611 return ret;
2612 }
2613
2614 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S,
2615 speed_fw);
2616 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2617 1);
2618 req->lane_num = lane_num;
2619
2620 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2621 if (ret) {
2622 dev_err(&hdev->pdev->dev,
2623 "mac speed/duplex config cmd failed %d.\n", ret);
2624 return ret;
2625 }
2626
2627 return 0;
2628 }
2629
2630 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num)
2631 {
2632 struct hclge_mac *mac = &hdev->hw.mac;
2633 int ret;
2634
2635 duplex = hclge_check_speed_dup(duplex, speed);
2636 if (!mac->support_autoneg && mac->speed == speed &&
2637 mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0))
2638 return 0;
2639
2640 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num);
2641 if (ret)
2642 return ret;
2643
2644 hdev->hw.mac.speed = speed;
2645 hdev->hw.mac.duplex = duplex;
2646 if (!lane_num)
2647 hdev->hw.mac.lane_num = lane_num;
2648
2649 return 0;
2650 }
2651
2652 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2653 u8 duplex, u8 lane_num)
2654 {
2655 struct hclge_vport *vport = hclge_get_vport(handle);
2656 struct hclge_dev *hdev = vport->back;
2657 int ret;
2658
2659 ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
2660
2661 if (ret)
2662 return ret;
2663
2664 hdev->hw.mac.req_speed = speed;
2665 hdev->hw.mac.req_duplex = duplex;
2666
2667 return 0;
2668 }
2669
2670 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2671 {
2672 struct hclge_config_auto_neg_cmd *req;
2673 struct hclge_desc desc;
2674 u32 flag = 0;
2675 int ret;
2676
2677 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2678
2679 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2680 if (enable)
2681 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2682 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2683
2684 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2685 if (ret)
2686 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2687 ret);
2688
2689 return ret;
2690 }
2691
2692 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2693 {
2694 struct hclge_vport *vport = hclge_get_vport(handle);
2695 struct hclge_dev *hdev = vport->back;
2696
2697 if (!hdev->hw.mac.support_autoneg) {
2698 if (enable) {
2699 dev_err(&hdev->pdev->dev,
2700 "autoneg is not supported by current port\n");
2701 return -EOPNOTSUPP;
2702 } else {
2703 return 0;
2704 }
2705 }
2706
2707 return hclge_set_autoneg_en(hdev, enable);
2708 }
2709
2710 static int hclge_get_autoneg(struct hnae3_handle *handle)
2711 {
2712 struct hclge_vport *vport = hclge_get_vport(handle);
2713 struct hclge_dev *hdev = vport->back;
2714 struct phy_device *phydev = hdev->hw.mac.phydev;
2715
2716 if (phydev)
2717 return phydev->autoneg;
2718
2719 return hdev->hw.mac.autoneg;
2720 }
2721
2722 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2723 {
2724 struct hclge_vport *vport = hclge_get_vport(handle);
2725 struct hclge_dev *hdev = vport->back;
2726 int ret;
2727
2728 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2729
2730 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2731 if (ret)
2732 return ret;
2733 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2734 }
2735
2736 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2737 {
2738 struct hclge_vport *vport = hclge_get_vport(handle);
2739 struct hclge_dev *hdev = vport->back;
2740
2741 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2742 return hclge_set_autoneg_en(hdev, !halt);
2743
2744 return 0;
2745 }
2746
2747 static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev,
2748 struct hclge_desc *desc, u32 desc_len)
2749 {
2750 u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2;
2751 u32 desc_index = 0;
2752 u32 data_index = 0;
2753 u32 i;
2754
2755 for (i = 0; i < lane_size; i++) {
2756 if (data_index >= HCLGE_DESC_DATA_LEN) {
2757 desc_index++;
2758 data_index = 0;
2759 }
2760
2761 if (desc_index >= desc_len)
2762 return;
2763
2764 hdev->fec_stats.per_lanes[i] +=
2765 le32_to_cpu(desc[desc_index].data[data_index]);
2766 data_index++;
2767 }
2768 }
2769
2770 static void hclge_parse_fec_stats(struct hclge_dev *hdev,
2771 struct hclge_desc *desc, u32 desc_len)
2772 {
2773 struct hclge_query_fec_stats_cmd *req;
2774
2775 req = (struct hclge_query_fec_stats_cmd *)desc[0].data;
2776
2777 hdev->fec_stats.base_r_lane_num = req->base_r_lane_num;
2778 hdev->fec_stats.rs_corr_blocks +=
2779 le32_to_cpu(req->rs_fec_corr_blocks);
2780 hdev->fec_stats.rs_uncorr_blocks +=
2781 le32_to_cpu(req->rs_fec_uncorr_blocks);
2782 hdev->fec_stats.rs_error_blocks +=
2783 le32_to_cpu(req->rs_fec_error_blocks);
2784 hdev->fec_stats.base_r_corr_blocks +=
2785 le32_to_cpu(req->base_r_fec_corr_blocks);
2786 hdev->fec_stats.base_r_uncorr_blocks +=
2787 le32_to_cpu(req->base_r_fec_uncorr_blocks);
2788
2789 hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1);
2790 }
2791
2792 static int hclge_update_fec_stats_hw(struct hclge_dev *hdev)
2793 {
2794 struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM];
2795 int ret;
2796 u32 i;
2797
2798 for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) {
2799 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS,
2800 true);
2801 if (i != (HCLGE_FEC_STATS_CMD_NUM - 1))
2802 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2803 }
2804
2805 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM);
2806 if (ret)
2807 return ret;
2808
2809 hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM);
2810
2811 return 0;
2812 }
2813
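/* read FEC statistics from firmware and accumulate them, skipping the update
 * if the device lacks support or another update is already in progress
 */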
2814 static void hclge_update_fec_stats(struct hclge_dev *hdev)
2815 {
2816 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2817 int ret;
2818
2819 if (!hnae3_ae_dev_fec_stats_supported(ae_dev) ||
2820 test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state))
2821 return;
2822
2823 ret = hclge_update_fec_stats_hw(hdev);
2824 if (ret)
2825 dev_err(&hdev->pdev->dev,
2826 "failed to update fec stats, ret = %d\n", ret);
2827
2828 clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state);
2829 }
2830
2831 static void hclge_get_fec_stats_total(struct hclge_dev *hdev,
2832 struct ethtool_fec_stats *fec_stats)
2833 {
2834 fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks;
2835 fec_stats->uncorrectable_blocks.total =
2836 hdev->fec_stats.rs_uncorr_blocks;
2837 }
2838
2839 static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev,
2840 struct ethtool_fec_stats *fec_stats)
2841 {
2842 u32 i;
2843
2844 if (hdev->fec_stats.base_r_lane_num == 0 ||
2845 hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) {
2846 dev_err(&hdev->pdev->dev,
2847 "fec stats lane number(%llu) is invalid\n",
2848 hdev->fec_stats.base_r_lane_num);
2849 return;
2850 }
2851
2852 for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) {
2853 fec_stats->corrected_blocks.lanes[i] =
2854 hdev->fec_stats.base_r_corr_per_lanes[i];
2855 fec_stats->uncorrectable_blocks.lanes[i] =
2856 hdev->fec_stats.base_r_uncorr_per_lanes[i];
2857 }
2858 }
2859
2860 static void hclge_comm_get_fec_stats(struct hclge_dev *hdev,
2861 struct ethtool_fec_stats *fec_stats)
2862 {
2863 u32 fec_mode = hdev->hw.mac.fec_mode;
2864
2865 switch (fec_mode) {
2866 case BIT(HNAE3_FEC_RS):
2867 case BIT(HNAE3_FEC_LLRS):
2868 hclge_get_fec_stats_total(hdev, fec_stats);
2869 break;
2870 case BIT(HNAE3_FEC_BASER):
2871 hclge_get_fec_stats_lanes(hdev, fec_stats);
2872 break;
2873 default:
2874 dev_err(&hdev->pdev->dev,
2875 "fec stats is not supported by current fec mode(0x%x)\n",
2876 fec_mode);
2877 break;
2878 }
2879 }
2880
2881 static void hclge_get_fec_stats(struct hnae3_handle *handle,
2882 struct ethtool_fec_stats *fec_stats)
2883 {
2884 struct hclge_vport *vport = hclge_get_vport(handle);
2885 struct hclge_dev *hdev = vport->back;
2886 u32 fec_mode = hdev->hw.mac.fec_mode;
2887
2888 if (fec_mode == BIT(HNAE3_FEC_NONE) ||
2889 fec_mode == BIT(HNAE3_FEC_AUTO) ||
2890 fec_mode == BIT(HNAE3_FEC_USER_DEF))
2891 return;
2892
2893 hclge_update_fec_stats(hdev);
2894
2895 hclge_comm_get_fec_stats(hdev, fec_stats);
2896 }
2897
2898 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2899 {
2900 struct hclge_config_fec_cmd *req;
2901 struct hclge_desc desc;
2902 int ret;
2903
2904 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2905
2906 req = (struct hclge_config_fec_cmd *)desc.data;
2907 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2908 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2909 if (fec_mode & BIT(HNAE3_FEC_RS))
2910 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2911 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2912 if (fec_mode & BIT(HNAE3_FEC_LLRS))
2913 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2914 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS);
2915 if (fec_mode & BIT(HNAE3_FEC_BASER))
2916 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2917 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2918
2919 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2920 if (ret)
2921 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2922
2923 return ret;
2924 }
2925
2926 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2927 {
2928 struct hclge_vport *vport = hclge_get_vport(handle);
2929 struct hclge_dev *hdev = vport->back;
2930 struct hclge_mac *mac = &hdev->hw.mac;
2931 int ret;
2932
2933 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2934 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2935 return -EINVAL;
2936 }
2937
2938 ret = hclge_set_fec_hw(hdev, fec_mode);
2939 if (ret)
2940 return ret;
2941
2942 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2943 return 0;
2944 }
2945
2946 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2947 u8 *fec_mode)
2948 {
2949 struct hclge_vport *vport = hclge_get_vport(handle);
2950 struct hclge_dev *hdev = vport->back;
2951 struct hclge_mac *mac = &hdev->hw.mac;
2952
2953 if (fec_ability)
2954 *fec_ability = mac->fec_ability;
2955 if (fec_mode)
2956 *fec_mode = mac->fec_mode;
2957 }
2958
2959 static int hclge_mac_init(struct hclge_dev *hdev)
2960 {
2961 struct hclge_mac *mac = &hdev->hw.mac;
2962 int ret;
2963
2964 hdev->support_sfp_query = true;
2965
2966 if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
2967 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2968
2969 if (hdev->hw.mac.support_autoneg) {
2970 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2971 if (ret)
2972 return ret;
2973 }
2974
2975 if (!hdev->hw.mac.autoneg) {
2976 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.req_speed,
2977 hdev->hw.mac.req_duplex,
2978 hdev->hw.mac.lane_num);
2979 if (ret)
2980 return ret;
2981 }
2982
2983 mac->link = 0;
2984
2985 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2986 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2987 if (ret)
2988 return ret;
2989 }
2990
2991 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2992 if (ret) {
2993 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2994 return ret;
2995 }
2996
2997 ret = hclge_set_default_loopback(hdev);
2998 if (ret)
2999 return ret;
3000
3001 ret = hclge_buffer_alloc(hdev);
3002 if (ret)
3003 dev_err(&hdev->pdev->dev,
3004 "allocate buffer fail, ret=%d\n", ret);
3005
3006 return ret;
3007 }
3008
3009 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
3010 {
3011 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
3012 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) {
3013 hdev->last_mbx_scheduled = jiffies;
3014 mod_delayed_work(hclge_wq, &hdev->service_task, 0);
3015 }
3016 }
3017
3018 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
3019 {
3020 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
3021 test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
3022 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) {
3023 hdev->last_rst_scheduled = jiffies;
3024 mod_delayed_work(hclge_wq, &hdev->service_task, 0);
3025 }
3026 }
3027
3028 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
3029 {
3030 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
3031 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
3032 mod_delayed_work(hclge_wq, &hdev->service_task, 0);
3033 }
3034
3035 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
3036 {
3037 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
3038 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
3039 mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
3040 }
3041
3042 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
3043 {
3044 struct hclge_link_status_cmd *req;
3045 struct hclge_desc desc;
3046 int ret;
3047
3048 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
3049 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3050 if (ret) {
3051 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
3052 ret);
3053 return ret;
3054 }
3055
3056 req = (struct hclge_link_status_cmd *)desc.data;
3057 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
3058 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
3059
3060 return 0;
3061 }
3062
3063 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
3064 {
3065 struct phy_device *phydev = hdev->hw.mac.phydev;
3066
3067 *link_status = HCLGE_LINK_STATUS_DOWN;
3068
3069 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
3070 return 0;
3071
3072 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
3073 return 0;
3074
3075 return hclge_get_mac_link_status(hdev, link_status);
3076 }
3077
3078 static void hclge_push_link_status(struct hclge_dev *hdev)
3079 {
3080 struct hclge_vport *vport;
3081 int ret;
3082 u16 i;
3083
3084 for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
3085 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
3086
3087 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
3088 vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
3089 continue;
3090
3091 ret = hclge_push_vf_link_status(vport);
3092 if (ret) {
3093 dev_err(&hdev->pdev->dev,
3094 "failed to push link status to vf%u, ret = %d\n",
3095 i, ret);
3096 }
3097 }
3098 }
3099
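/* query the current MAC/PHY link state and, on a change, notify the NIC and
 * RoCE clients and push the new state to the VFs
 */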
3100 static void hclge_update_link_status(struct hclge_dev *hdev)
3101 {
3102 struct hnae3_handle *handle = &hdev->vport[0].nic;
3103 struct hnae3_client *client = hdev->nic_client;
3104 int state;
3105 int ret;
3106
3107 if (!client)
3108 return;
3109
3110 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
3111 return;
3112
3113 ret = hclge_get_mac_phy_link(hdev, &state);
3114 if (ret) {
3115 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
3116 return;
3117 }
3118
3119 if (state != hdev->hw.mac.link) {
3120 hdev->hw.mac.link = state;
3121 if (state == HCLGE_LINK_STATUS_UP)
3122 hclge_update_port_info(hdev);
3123
3124 client->ops->link_status_change(handle, state);
3125 hclge_config_mac_tnl_int(hdev, state);
3126
3127 if (test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state)) {
3128 struct hnae3_handle *rhandle = &hdev->vport[0].roce;
3129 struct hnae3_client *rclient = hdev->roce_client;
3130
3131 if (rclient && rclient->ops->link_status_change)
3132 rclient->ops->link_status_change(rhandle,
3133 state);
3134 }
3135
3136 hclge_push_link_status(hdev);
3137 }
3138
3139 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
3140 }
3141
3142 static void hclge_update_speed_advertising(struct hclge_mac *mac)
3143 {
3144 u32 speed_ability;
3145
3146 if (hclge_get_speed_bit(mac->speed, &speed_ability))
3147 return;
3148
3149 switch (mac->module_type) {
3150 case HNAE3_MODULE_TYPE_FIBRE_LR:
3151 hclge_convert_setting_lr(speed_ability, mac->advertising);
3152 break;
3153 case HNAE3_MODULE_TYPE_FIBRE_SR:
3154 case HNAE3_MODULE_TYPE_AOC:
3155 hclge_convert_setting_sr(speed_ability, mac->advertising);
3156 break;
3157 case HNAE3_MODULE_TYPE_CR:
3158 hclge_convert_setting_cr(speed_ability, mac->advertising);
3159 break;
3160 case HNAE3_MODULE_TYPE_KR:
3161 hclge_convert_setting_kr(speed_ability, mac->advertising);
3162 break;
3163 default:
3164 break;
3165 }
3166 }
3167
3168 static void hclge_update_fec_advertising(struct hclge_mac *mac)
3169 {
3170 if (mac->fec_mode & BIT(HNAE3_FEC_RS))
3171 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
3172 mac->advertising);
3173 else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS))
3174 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
3175 mac->advertising);
3176 else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
3177 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
3178 mac->advertising);
3179 else
3180 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
3181 mac->advertising);
3182 }
3183
3184 static void hclge_update_pause_advertising(struct hclge_dev *hdev)
3185 {
3186 struct hclge_mac *mac = &hdev->hw.mac;
3187 bool rx_en, tx_en;
3188
3189 switch (hdev->fc_mode_last_time) {
3190 case HCLGE_FC_RX_PAUSE:
3191 rx_en = true;
3192 tx_en = false;
3193 break;
3194 case HCLGE_FC_TX_PAUSE:
3195 rx_en = false;
3196 tx_en = true;
3197 break;
3198 case HCLGE_FC_FULL:
3199 rx_en = true;
3200 tx_en = true;
3201 break;
3202 default:
3203 rx_en = false;
3204 tx_en = false;
3205 break;
3206 }
3207
3208 linkmode_set_pause(mac->advertising, tx_en, rx_en);
3209 }
3210
3211 static void hclge_update_advertising(struct hclge_dev *hdev)
3212 {
3213 struct hclge_mac *mac = &hdev->hw.mac;
3214
3215 linkmode_zero(mac->advertising);
3216 hclge_update_speed_advertising(mac);
3217 hclge_update_fec_advertising(mac);
3218 hclge_update_pause_advertising(hdev);
3219 }
3220
3221 static void hclge_update_port_capability(struct hclge_dev *hdev,
3222 struct hclge_mac *mac)
3223 {
3224 if (hnae3_dev_fec_supported(hdev))
3225 hclge_convert_setting_fec(mac);
3226
3227 /* firmware cannot identify the backplane type, so the media type
3228 * read from the configuration helps to deal with it
3229 */
3230 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
3231 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
3232 mac->module_type = HNAE3_MODULE_TYPE_KR;
3233 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3234 mac->module_type = HNAE3_MODULE_TYPE_TP;
3235
3236 if (mac->support_autoneg) {
3237 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
3238 linkmode_copy(mac->advertising, mac->supported);
3239 } else {
3240 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
3241 mac->supported);
3242 hclge_update_advertising(hdev);
3243 }
3244 }
3245
3246 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
3247 {
3248 struct hclge_sfp_info_cmd *resp;
3249 struct hclge_desc desc;
3250 int ret;
3251
3252 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3253 resp = (struct hclge_sfp_info_cmd *)desc.data;
3254 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3255 if (ret == -EOPNOTSUPP) {
3256 dev_warn(&hdev->pdev->dev,
3257 "IMP do not support get SFP speed %d\n", ret);
3258 return ret;
3259 } else if (ret) {
3260 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3261 return ret;
3262 }
3263
3264 *speed = le32_to_cpu(resp->speed);
3265
3266 return 0;
3267 }
3268
3269 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3270 {
3271 struct hclge_sfp_info_cmd *resp;
3272 struct hclge_desc desc;
3273 int ret;
3274
3275 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3276 resp = (struct hclge_sfp_info_cmd *)desc.data;
3277
3278 resp->query_type = QUERY_ACTIVE_SPEED;
3279
3280 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3281 if (ret == -EOPNOTSUPP) {
3282 dev_warn(&hdev->pdev->dev,
3283 "IMP does not support get SFP info %d\n", ret);
3284 return ret;
3285 } else if (ret) {
3286 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3287 return ret;
3288 }
3289
3290 /* In some cases, the MAC speed got from the IMP may be 0; it shouldn't
3291 * be assigned to mac->speed.
3292 */
3293 if (!le32_to_cpu(resp->speed))
3294 return 0;
3295
3296 mac->speed = le32_to_cpu(resp->speed);
3297 /* if resp->speed_ability is 0, it means the firmware is an old
3298 * version, so do not update these params
3299 */
3300 if (resp->speed_ability) {
3301 mac->module_type = le32_to_cpu(resp->module_type);
3302 mac->speed_ability = le32_to_cpu(resp->speed_ability);
3303 mac->autoneg = resp->autoneg;
3304 mac->support_autoneg = resp->autoneg_ability;
3305 mac->speed_type = QUERY_ACTIVE_SPEED;
3306 mac->lane_num = resp->lane_num;
3307 if (!resp->active_fec)
3308 mac->fec_mode = 0;
3309 else
3310 mac->fec_mode = BIT(resp->active_fec);
3311 mac->fec_ability = resp->fec_ability;
3312 } else {
3313 mac->speed_type = QUERY_SFP_SPEED;
3314 }
3315
3316 return 0;
3317 }
3318
3319 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3320 struct ethtool_link_ksettings *cmd)
3321 {
3322 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3323 struct hclge_vport *vport = hclge_get_vport(handle);
3324 struct hclge_phy_link_ksetting_0_cmd *req0;
3325 struct hclge_phy_link_ksetting_1_cmd *req1;
3326 u32 supported, advertising, lp_advertising;
3327 struct hclge_dev *hdev = vport->back;
3328 int ret;
3329
3330 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3331 true);
3332 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
3333 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3334 true);
3335
3336 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3337 if (ret) {
3338 dev_err(&hdev->pdev->dev,
3339 "failed to get phy link ksetting, ret = %d.\n", ret);
3340 return ret;
3341 }
3342
3343 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3344 cmd->base.autoneg = req0->autoneg;
3345 cmd->base.speed = le32_to_cpu(req0->speed);
3346 cmd->base.duplex = req0->duplex;
3347 cmd->base.port = req0->port;
3348 cmd->base.transceiver = req0->transceiver;
3349 cmd->base.phy_address = req0->phy_address;
3350 cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3351 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3352 supported = le32_to_cpu(req0->supported);
3353 advertising = le32_to_cpu(req0->advertising);
3354 lp_advertising = le32_to_cpu(req0->lp_advertising);
3355 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3356 supported);
3357 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3358 advertising);
3359 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3360 lp_advertising);
3361
3362 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3363 cmd->base.master_slave_cfg = req1->master_slave_cfg;
3364 cmd->base.master_slave_state = req1->master_slave_state;
3365
3366 return 0;
3367 }
3368
3369 static int
3370 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3371 const struct ethtool_link_ksettings *cmd)
3372 {
3373 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3374 struct hclge_vport *vport = hclge_get_vport(handle);
3375 struct hclge_phy_link_ksetting_0_cmd *req0;
3376 struct hclge_phy_link_ksetting_1_cmd *req1;
3377 struct hclge_dev *hdev = vport->back;
3378 u32 advertising;
3379 int ret;
3380
3381 if (cmd->base.autoneg == AUTONEG_DISABLE &&
3382 ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3383 (cmd->base.duplex != DUPLEX_HALF &&
3384 cmd->base.duplex != DUPLEX_FULL)))
3385 return -EINVAL;
3386
3387 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3388 false);
3389 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
3390 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3391 false);
3392
3393 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3394 req0->autoneg = cmd->base.autoneg;
3395 req0->speed = cpu_to_le32(cmd->base.speed);
3396 req0->duplex = cmd->base.duplex;
3397 ethtool_convert_link_mode_to_legacy_u32(&advertising,
3398 cmd->link_modes.advertising);
3399 req0->advertising = cpu_to_le32(advertising);
3400 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3401
3402 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3403 req1->master_slave_cfg = cmd->base.master_slave_cfg;
3404
3405 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3406 if (ret) {
3407 dev_err(&hdev->pdev->dev,
3408 "failed to set phy link ksettings, ret = %d.\n", ret);
3409 return ret;
3410 }
3411
3412 hdev->hw.mac.req_autoneg = cmd->base.autoneg;
3413 hdev->hw.mac.req_speed = cmd->base.speed;
3414 hdev->hw.mac.req_duplex = cmd->base.duplex;
3415 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3416
3417 return 0;
3418 }
3419
3420 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3421 {
3422 struct ethtool_link_ksettings cmd;
3423 int ret;
3424
3425 if (!hnae3_dev_phy_imp_supported(hdev))
3426 return 0;
3427
3428 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3429 if (ret)
3430 return ret;
3431
3432 hdev->hw.mac.autoneg = cmd.base.autoneg;
3433 hdev->hw.mac.speed = cmd.base.speed;
3434 hdev->hw.mac.duplex = cmd.base.duplex;
3435 linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising);
3436
3437 return 0;
3438 }
3439
3440 static int hclge_tp_port_init(struct hclge_dev *hdev)
3441 {
3442 struct ethtool_link_ksettings cmd;
3443
3444 if (!hnae3_dev_phy_imp_supported(hdev))
3445 return 0;
3446
3447 cmd.base.autoneg = hdev->hw.mac.req_autoneg;
3448 cmd.base.speed = hdev->hw.mac.req_speed;
3449 cmd.base.duplex = hdev->hw.mac.req_duplex;
3450 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3451
3452 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3453 }
3454
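/* Periodically refresh the port info: copper ports read their settings from
 * the PHY managed by the IMP firmware, other ports query the SFP. On V2+
 * devices the full SFP info is used to update the port capability or to
 * reconfigure the MAC speed/duplex; older devices only query the SFP speed
 * and always configure full duplex.
 */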
3455 static int hclge_update_port_info(struct hclge_dev *hdev)
3456 {
3457 struct hclge_mac *mac = &hdev->hw.mac;
3458 int speed;
3459 int ret;
3460
3461 /* get the port info from SFP cmd if not copper port */
3462 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3463 return hclge_update_tp_port_info(hdev);
3464
3465 /* if IMP does not support getting SFP/qSFP info, return directly */
3466 if (!hdev->support_sfp_query)
3467 return 0;
3468
3469 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3470 speed = mac->speed;
3471 ret = hclge_get_sfp_info(hdev, mac);
3472 } else {
3473 speed = HCLGE_MAC_SPEED_UNKNOWN;
3474 ret = hclge_get_sfp_speed(hdev, &speed);
3475 }
3476
3477 if (ret == -EOPNOTSUPP) {
3478 hdev->support_sfp_query = false;
3479 return ret;
3480 } else if (ret) {
3481 return ret;
3482 }
3483
3484 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3485 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3486 hclge_update_port_capability(hdev, mac);
3487 if (mac->speed != speed)
3488 (void)hclge_tm_port_shaper_cfg(hdev);
3489 return 0;
3490 }
3491 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3492 HCLGE_MAC_FULL, mac->lane_num);
3493 } else {
3494 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3495 return 0; /* do nothing if no SFP */
3496
3497 /* must config full duplex for SFP */
3498 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0);
3499 }
3500 }
3501
3502 static int hclge_get_status(struct hnae3_handle *handle)
3503 {
3504 struct hclge_vport *vport = hclge_get_vport(handle);
3505 struct hclge_dev *hdev = vport->back;
3506
3507 hclge_update_link_status(hdev);
3508
3509 return hdev->hw.mac.link;
3510 }
3511
3512 struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3513 {
3514 if (!pci_num_vf(hdev->pdev)) {
3515 dev_err(&hdev->pdev->dev,
3516 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3517 return NULL;
3518 }
3519
3520 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3521 dev_err(&hdev->pdev->dev,
3522 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3523 vf, pci_num_vf(hdev->pdev));
3524 return NULL;
3525 }
3526
3527 /* VFs start from 1 in the vport array */
3528 vf += HCLGE_VF_VPORT_START_NUM;
3529 return &hdev->vport[vf];
3530 }
3531
3532 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3533 struct ifla_vf_info *ivf)
3534 {
3535 struct hclge_vport *vport = hclge_get_vport(handle);
3536 struct hclge_dev *hdev = vport->back;
3537
3538 vport = hclge_get_vf_vport(hdev, vf);
3539 if (!vport)
3540 return -EINVAL;
3541
3542 ivf->vf = vf;
3543 ivf->linkstate = vport->vf_info.link_state;
3544 ivf->spoofchk = vport->vf_info.spoofchk;
3545 ivf->trusted = vport->vf_info.trusted;
3546 ivf->min_tx_rate = 0;
3547 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3548 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3549 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3550 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3551 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3552
3553 return 0;
3554 }
3555
3556 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3557 int link_state)
3558 {
3559 struct hclge_vport *vport = hclge_get_vport(handle);
3560 struct hclge_dev *hdev = vport->back;
3561 int link_state_old;
3562 int ret;
3563
3564 vport = hclge_get_vf_vport(hdev, vf);
3565 if (!vport)
3566 return -EINVAL;
3567
3568 link_state_old = vport->vf_info.link_state;
3569 vport->vf_info.link_state = link_state;
3570
3571 /* return success directly if the VF is not alive; the VF will
3572 * query the link state itself when it starts working.
3573 */
3574 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3575 return 0;
3576
3577 ret = hclge_push_vf_link_status(vport);
3578 if (ret) {
3579 vport->vf_info.link_state = link_state_old;
3580 dev_err(&hdev->pdev->dev,
3581 "failed to push vf%d link status, ret = %d\n", vf, ret);
3582 }
3583
3584 return ret;
3585 }
3586
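/* Decode the misc (vector0) interrupt into a single event class, checked in
 * priority order: IMP/global reset, MSI-X/RAS hardware error, PTP, then
 * mailbox. For reset, PTP and mailbox events *clearval carries the source
 * bits that the caller acknowledges later.
 */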
3587 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3588 {
3589 u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3590
3591 /* fetch the events from their corresponding regs */
3592 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3593 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3594 hw_err_src_reg = hclge_read_dev(&hdev->hw,
3595 HCLGE_RAS_PF_OTHER_INT_STS_REG);
3596
3597 /* Assumption: if by any chance reset and mailbox events are reported
3598 * together, then we only process the reset event in this pass and
3599 * defer the processing of the mailbox events. Since we have not
3600 * cleared the RX CMDQ event this time, we will receive another
3601 * interrupt from the hardware just for the mailbox.
3602 *
3603 * check for vector0 reset event sources
3604 */
3605 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3606 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3607 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3608 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
3609 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3610 hdev->rst_stats.imp_rst_cnt++;
3611 return HCLGE_VECTOR0_EVENT_RST;
3612 }
3613
3614 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3615 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3616 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
3617 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3618 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3619 hdev->rst_stats.global_rst_cnt++;
3620 return HCLGE_VECTOR0_EVENT_RST;
3621 }
3622
3623 /* check for vector0 msix event and hardware error event source */
3624 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3625 hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3626 return HCLGE_VECTOR0_EVENT_ERR;
3627
3628 /* check for vector0 ptp event source */
3629 if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3630 *clearval = msix_src_reg;
3631 return HCLGE_VECTOR0_EVENT_PTP;
3632 }
3633
3634 /* check for vector0 mailbox(=CMDQ RX) event source */
3635 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3636 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3637 *clearval = cmdq_src_reg;
3638 return HCLGE_VECTOR0_EVENT_MBX;
3639 }
3640
3641 /* print other vector0 event source */
3642 dev_info(&hdev->pdev->dev,
3643 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3644 cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3645
3646 return HCLGE_VECTOR0_EVENT_OTHER;
3647 }
3648
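/* Acknowledge a vector0 event by writing its source bits back: reset/PTP
 * events go to the reset status register, mailbox events to the CMDQ source
 * register. An IMP reset is acknowledged only after a short delay.
 */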
3649 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3650 u32 regclr)
3651 {
3652 #define HCLGE_IMP_RESET_DELAY 5
3653
3654 switch (event_type) {
3655 case HCLGE_VECTOR0_EVENT_PTP:
3656 case HCLGE_VECTOR0_EVENT_RST:
3657 if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B))
3658 mdelay(HCLGE_IMP_RESET_DELAY);
3659
3660 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3661 break;
3662 case HCLGE_VECTOR0_EVENT_MBX:
3663 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3664 break;
3665 default:
3666 break;
3667 }
3668 }
3669
3670 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3671 {
3672 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3673 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3674 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3675 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3676 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3677 }
3678
3679 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3680 {
3681 writel(enable ? 1 : 0, vector->addr);
3682 }
3683
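/* The misc vector is masked on entry and re-enabled here only for PTP,
 * mailbox and unknown events; reset and error events keep it masked until
 * their handling re-enables it (see hclge_clear_reset_cause() and
 * hclge_handle_err_reset_request()).
 */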
3684 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3685 {
3686 struct hclge_dev *hdev = data;
3687 unsigned long flags;
3688 u32 clearval = 0;
3689 u32 event_cause;
3690
3691 hclge_enable_vector(&hdev->misc_vector, false);
3692 event_cause = hclge_check_event_cause(hdev, &clearval);
3693
3694 /* vector 0 interrupt is shared with reset and mailbox source events. */
3695 switch (event_cause) {
3696 case HCLGE_VECTOR0_EVENT_ERR:
3697 hclge_errhand_task_schedule(hdev);
3698 break;
3699 case HCLGE_VECTOR0_EVENT_RST:
3700 hclge_reset_task_schedule(hdev);
3701 break;
3702 case HCLGE_VECTOR0_EVENT_PTP:
3703 spin_lock_irqsave(&hdev->ptp->lock, flags);
3704 hclge_ptp_clean_tx_hwts(hdev);
3705 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3706 break;
3707 case HCLGE_VECTOR0_EVENT_MBX:
3708 /* If we are here then,
3709 * 1. Either we are not handling any mbx task and we are not
3710 * scheduled as well
3711 * OR
3712 * 2. We could be handling an mbx task but nothing more is
3713 * scheduled.
3714 * In both cases, we should schedule mbx task as there are more
3715 * mbx messages reported by this interrupt.
3716 */
3717 hclge_mbx_task_schedule(hdev);
3718 break;
3719 default:
3720 dev_warn(&hdev->pdev->dev,
3721 "received unknown or unhandled event of vector0\n");
3722 break;
3723 }
3724
3725 hclge_clear_event_cause(hdev, event_cause, clearval);
3726
3727 /* Enable interrupt if it is not caused by reset event or error event */
3728 if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3729 event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3730 event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3731 hclge_enable_vector(&hdev->misc_vector, true);
3732
3733 return IRQ_HANDLED;
3734 }
3735
3736 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3737 {
3738 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3739 dev_warn(&hdev->pdev->dev,
3740 "vector(vector_id %d) has been freed.\n", vector_id);
3741 return;
3742 }
3743
3744 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3745 hdev->num_msi_left += 1;
3746 hdev->num_msi_used -= 1;
3747 }
3748
3749 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3750 {
3751 struct hclge_misc_vector *vector = &hdev->misc_vector;
3752
3753 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3754
3755 vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3756 hdev->vector_status[0] = 0;
3757
3758 hdev->num_msi_left -= 1;
3759 hdev->num_msi_used += 1;
3760 }
3761
3762 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3763 {
3764 int ret;
3765
3766 hclge_get_misc_vector(hdev);
3767
3768 /* this would be explicitly freed in the end */
3769 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3770 HCLGE_NAME, pci_name(hdev->pdev));
3771 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3772 0, hdev->misc_vector.name, hdev);
3773 if (ret) {
3774 hclge_free_vector(hdev, 0);
3775 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3776 hdev->misc_vector.vector_irq);
3777 }
3778
3779 return ret;
3780 }
3781
3782 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3783 {
3784 free_irq(hdev->misc_vector.vector_irq, hdev);
3785 hclge_free_vector(hdev, 0);
3786 }
3787
3788 int hclge_notify_client(struct hclge_dev *hdev,
3789 enum hnae3_reset_notify_type type)
3790 {
3791 struct hnae3_handle *handle = &hdev->vport[0].nic;
3792 struct hnae3_client *client = hdev->nic_client;
3793 int ret;
3794
3795 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3796 return 0;
3797
3798 if (!client->ops->reset_notify)
3799 return -EOPNOTSUPP;
3800
3801 ret = client->ops->reset_notify(handle, type);
3802 if (ret)
3803 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3804 type, ret);
3805
3806 return ret;
3807 }
3808
3809 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3810 enum hnae3_reset_notify_type type)
3811 {
3812 struct hnae3_handle *handle = &hdev->vport[0].roce;
3813 struct hnae3_client *client = hdev->roce_client;
3814 int ret;
3815
3816 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3817 return 0;
3818
3819 if (!client->ops->reset_notify)
3820 return -EOPNOTSUPP;
3821
3822 ret = client->ops->reset_notify(handle, type);
3823 if (ret)
3824 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3825 type, ret);
3826
3827 return ret;
3828 }
3829
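/* Poll the reset status register selected by the current reset type every
 * 100 ms, up to 350 times (about 35 seconds), until the hardware clears the
 * reset-in-progress bit.
 */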
3830 static int hclge_reset_wait(struct hclge_dev *hdev)
3831 {
3832 #define HCLGE_RESET_WAIT_MS 100
3833 #define HCLGE_RESET_WAIT_CNT 350
3834
3835 u32 val, reg, reg_bit;
3836 u32 cnt = 0;
3837
3838 switch (hdev->reset_type) {
3839 case HNAE3_IMP_RESET:
3840 reg = HCLGE_GLOBAL_RESET_REG;
3841 reg_bit = HCLGE_IMP_RESET_BIT;
3842 break;
3843 case HNAE3_GLOBAL_RESET:
3844 reg = HCLGE_GLOBAL_RESET_REG;
3845 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3846 break;
3847 case HNAE3_FUNC_RESET:
3848 reg = HCLGE_FUN_RST_ING;
3849 reg_bit = HCLGE_FUN_RST_ING_B;
3850 break;
3851 default:
3852 dev_err(&hdev->pdev->dev,
3853 "Wait for unsupported reset type: %d\n",
3854 hdev->reset_type);
3855 return -EINVAL;
3856 }
3857
3858 val = hclge_read_dev(&hdev->hw, reg);
3859 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3860 msleep(HCLGE_RESET_WAIT_MS);
3861 val = hclge_read_dev(&hdev->hw, reg);
3862 cnt++;
3863 }
3864
3865 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3866 dev_warn(&hdev->pdev->dev,
3867 "Wait for reset timeout: %d\n", hdev->reset_type);
3868 return -EBUSY;
3869 }
3870
3871 return 0;
3872 }
3873
3874 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3875 {
3876 struct hclge_vf_rst_cmd *req;
3877 struct hclge_desc desc;
3878
3879 req = (struct hclge_vf_rst_cmd *)desc.data;
3880 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3881 req->dest_vfid = func_id;
3882
3883 if (reset)
3884 req->vf_rst = 0x1;
3885
3886 return hclge_cmd_send(&hdev->hw, &desc, 1);
3887 }
3888
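/* Walk all VF vports and ask the firmware to set or clear their FUNC_RST_ING
 * flag. When asserting a reset, live VFs are informed immediately so they can
 * stop IO, while inited but not alive VFs are only marked to be notified
 * later for a PF function reset.
 */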
3889 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3890 {
3891 int i;
3892
3893 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3894 struct hclge_vport *vport = &hdev->vport[i];
3895 int ret;
3896
3897 /* Send cmd to set/clear VF's FUNC_RST_ING */
3898 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3899 if (ret) {
3900 dev_err(&hdev->pdev->dev,
3901 "set vf(%u) rst failed %d!\n",
3902 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3903 ret);
3904 return ret;
3905 }
3906
3907 if (!reset ||
3908 !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state))
3909 continue;
3910
3911 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) &&
3912 hdev->reset_type == HNAE3_FUNC_RESET) {
3913 set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET,
3914 &vport->need_notify);
3915 continue;
3916 }
3917
3918 /* Inform VF to process the reset.
3919 * hclge_inform_reset_assert_to_vf may fail if VF
3920 * driver is not loaded.
3921 */
3922 ret = hclge_inform_reset_assert_to_vf(vport);
3923 if (ret)
3924 dev_warn(&hdev->pdev->dev,
3925 "inform reset to vf(%u) failed %d!\n",
3926 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3927 ret);
3928 }
3929
3930 return 0;
3931 }
3932
3933 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3934 {
3935 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3936 test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) ||
3937 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3938 return;
3939
3940 if (time_is_before_jiffies(hdev->last_mbx_scheduled +
3941 HCLGE_MBX_SCHED_TIMEOUT))
3942 dev_warn(&hdev->pdev->dev,
3943 "mbx service task is scheduled after %ums on cpu%u!\n",
3944 jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled),
3945 smp_processor_id());
3946
3947 hclge_mbx_handler(hdev);
3948
3949 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3950 }
3951
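/* Before asserting a PF or FLR reset, poll the firmware until all VFs report
 * they are ready, servicing the mailbox in each iteration so the VFs can
 * finish bringing their netdevs down. Old firmware without this query gets a
 * fixed 100 ms grace period instead.
 */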
3952 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3953 {
3954 struct hclge_pf_rst_sync_cmd *req;
3955 struct hclge_desc desc;
3956 int cnt = 0;
3957 int ret;
3958
3959 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3960 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3961
3962 do {
3963 /* VF needs to down its netdev by mbx during PF or FLR reset */
3964 hclge_mailbox_service_task(hdev);
3965
3966 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3967 /* for compatibility with old firmware, wait
3968 * 100 ms for the VF to stop IO
3969 */
3970 if (ret == -EOPNOTSUPP) {
3971 msleep(HCLGE_RESET_SYNC_TIME);
3972 return;
3973 } else if (ret) {
3974 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3975 ret);
3976 return;
3977 } else if (req->all_vf_ready) {
3978 return;
3979 }
3980 msleep(HCLGE_PF_RESET_SYNC_TIME);
3981 hclge_comm_cmd_reuse_desc(&desc, true);
3982 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3983
3984 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3985 }
3986
3987 void hclge_report_hw_error(struct hclge_dev *hdev,
3988 enum hnae3_hw_error_type type)
3989 {
3990 struct hnae3_client *client = hdev->nic_client;
3991
3992 if (!client || !client->ops->process_hw_error ||
3993 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3994 return;
3995
3996 client->ops->process_hw_error(&hdev->vport[0].nic, type);
3997 }
3998
3999 static void hclge_handle_imp_error(struct hclge_dev *hdev)
4000 {
4001 u32 reg_val;
4002
4003 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
4004 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
4005 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
4006 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
4007 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
4008 }
4009
4010 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
4011 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
4012 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
4013 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
4014 }
4015 }
4016
4017 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
4018 {
4019 struct hclge_desc desc;
4020 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
4021 int ret;
4022
4023 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
4024 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
4025 req->fun_reset_vfid = func_id;
4026
4027 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4028 if (ret)
4029 dev_err(&hdev->pdev->dev,
4030 "send function reset cmd fail, status =%d\n", ret);
4031
4032 return ret;
4033 }
4034
4035 static void hclge_do_reset(struct hclge_dev *hdev)
4036 {
4037 struct hnae3_handle *handle = &hdev->vport[0].nic;
4038 struct pci_dev *pdev = hdev->pdev;
4039 u32 val;
4040
4041 if (hclge_get_hw_reset_stat(handle)) {
4042 dev_info(&pdev->dev, "hardware reset not finish\n");
4043 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
4044 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
4045 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
4046 return;
4047 }
4048
4049 switch (hdev->reset_type) {
4050 case HNAE3_IMP_RESET:
4051 dev_info(&pdev->dev, "IMP reset requested\n");
4052 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
4053 hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
4054 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
4055 break;
4056 case HNAE3_GLOBAL_RESET:
4057 dev_info(&pdev->dev, "global reset requested\n");
4058 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
4059 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
4060 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
4061 break;
4062 case HNAE3_FUNC_RESET:
4063 dev_info(&pdev->dev, "PF reset requested\n");
4064 /* schedule again to check later */
4065 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
4066 hclge_reset_task_schedule(hdev);
4067 break;
4068 default:
4069 dev_warn(&pdev->dev,
4070 "unsupported reset type: %d\n", hdev->reset_type);
4071 break;
4072 }
4073 }
4074
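/* Return the highest-priority reset pending in *addr (IMP > global > func >
 * FLR) and clear that request together with the lower-priority ones it
 * supersedes. A level lower than the reset currently being handled is
 * reported as HNAE3_NONE_RESET.
 */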
4075 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
4076 unsigned long *addr)
4077 {
4078 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
4079 struct hclge_dev *hdev = ae_dev->priv;
4080
4081 /* return the highest priority reset level amongst all */
4082 if (test_bit(HNAE3_IMP_RESET, addr)) {
4083 rst_level = HNAE3_IMP_RESET;
4084 clear_bit(HNAE3_IMP_RESET, addr);
4085 clear_bit(HNAE3_GLOBAL_RESET, addr);
4086 clear_bit(HNAE3_FUNC_RESET, addr);
4087 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
4088 rst_level = HNAE3_GLOBAL_RESET;
4089 clear_bit(HNAE3_GLOBAL_RESET, addr);
4090 clear_bit(HNAE3_FUNC_RESET, addr);
4091 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
4092 rst_level = HNAE3_FUNC_RESET;
4093 clear_bit(HNAE3_FUNC_RESET, addr);
4094 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
4095 rst_level = HNAE3_FLR_RESET;
4096 clear_bit(HNAE3_FLR_RESET, addr);
4097 }
4098
4099 if (hdev->reset_type != HNAE3_NONE_RESET &&
4100 rst_level < hdev->reset_type)
4101 return HNAE3_NONE_RESET;
4102
4103 return rst_level;
4104 }
4105
4106 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
4107 {
4108 u32 clearval = 0;
4109
4110 switch (hdev->reset_type) {
4111 case HNAE3_IMP_RESET:
4112 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
4113 break;
4114 case HNAE3_GLOBAL_RESET:
4115 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
4116 break;
4117 default:
4118 break;
4119 }
4120
4121 if (!clearval)
4122 return;
4123
4124 /* For revision 0x20, the reset interrupt source
4125 * can only be cleared after the hardware reset is done
4126 */
4127 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4128 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
4129 clearval);
4130
4131 hclge_enable_vector(&hdev->misc_vector, true);
4132 }
4133
4134 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
4135 {
4136 u32 reg_val;
4137
4138 reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
4139 if (enable)
4140 reg_val |= HCLGE_COMM_NIC_SW_RST_RDY;
4141 else
4142 reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY;
4143
4144 hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
4145 }
4146
4147 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
4148 {
4149 int ret;
4150
4151 ret = hclge_set_all_vf_rst(hdev, true);
4152 if (ret)
4153 return ret;
4154
4155 hclge_func_reset_sync_vf(hdev);
4156
4157 return 0;
4158 }
4159
4160 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
4161 {
4162 u32 reg_val;
4163 int ret = 0;
4164
4165 switch (hdev->reset_type) {
4166 case HNAE3_FUNC_RESET:
4167 ret = hclge_func_reset_notify_vf(hdev);
4168 if (ret)
4169 return ret;
4170
4171 ret = hclge_func_reset_cmd(hdev, 0);
4172 if (ret) {
4173 dev_err(&hdev->pdev->dev,
4174 "asserting function reset fail %d!\n", ret);
4175 return ret;
4176 }
4177
4178 /* After performing PF reset, it is not necessary to do the
4179 * mailbox handling or send any command to firmware, because
4180 * any mailbox handling or command to firmware is only valid
4181 * after hclge_comm_cmd_init is called.
4182 */
4183 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
4184 hdev->rst_stats.pf_rst_cnt++;
4185 break;
4186 case HNAE3_FLR_RESET:
4187 ret = hclge_func_reset_notify_vf(hdev);
4188 if (ret)
4189 return ret;
4190 break;
4191 case HNAE3_IMP_RESET:
4192 hclge_handle_imp_error(hdev);
4193 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
4194 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
4195 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
4196 break;
4197 default:
4198 break;
4199 }
4200
4201 /* inform hardware that preparatory work is done */
4202 msleep(HCLGE_RESET_SYNC_TIME);
4203 hclge_reset_handshake(hdev, true);
4204 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
4205
4206 return ret;
4207 }
4208
4209 static void hclge_show_rst_info(struct hclge_dev *hdev)
4210 {
4211 char *buf;
4212
4213 buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
4214 if (!buf)
4215 return;
4216
4217 hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
4218
4219 dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
4220
4221 kfree(buf);
4222 }
4223
4224 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
4225 {
4226 #define MAX_RESET_FAIL_CNT 5
4227
4228 if (hdev->reset_pending) {
4229 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
4230 hdev->reset_pending);
4231 return true;
4232 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
4233 HCLGE_RESET_INT_M) {
4234 dev_info(&hdev->pdev->dev,
4235 "reset failed because new reset interrupt\n");
4236 hclge_clear_reset_cause(hdev);
4237 return false;
4238 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
4239 hdev->rst_stats.reset_fail_cnt++;
4240 set_bit(hdev->reset_type, &hdev->reset_pending);
4241 dev_info(&hdev->pdev->dev,
4242 "re-schedule reset task(%u)\n",
4243 hdev->rst_stats.reset_fail_cnt);
4244 return true;
4245 }
4246
4247 hclge_clear_reset_cause(hdev);
4248
4249 /* recover the handshake status when reset fail */
4250 hclge_reset_handshake(hdev, true);
4251
4252 dev_err(&hdev->pdev->dev, "Reset fail!\n");
4253
4254 hclge_show_rst_info(hdev);
4255
4256 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4257
4258 return false;
4259 }
4260
4261 static void hclge_update_reset_level(struct hclge_dev *hdev)
4262 {
4263 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4264 enum hnae3_reset_type reset_level;
4265
4266 /* reset request will not be set during reset, so clear
4267 * pending reset request to avoid unnecessary reset
4268 * caused by the same reason.
4269 */
4270 hclge_get_reset_level(ae_dev, &hdev->reset_request);
4271
4272 /* if default_reset_request has a higher level reset request,
4273 * it should be handled as soon as possible, since some errors
4274 * need this kind of reset to be fixed.
4275 */
4276 reset_level = hclge_get_reset_level(ae_dev,
4277 &hdev->default_reset_request);
4278 if (reset_level != HNAE3_NONE_RESET)
4279 set_bit(reset_level, &hdev->reset_request);
4280 }
4281
4282 static int hclge_set_rst_done(struct hclge_dev *hdev)
4283 {
4284 struct hclge_pf_rst_done_cmd *req;
4285 struct hclge_desc desc;
4286 int ret;
4287
4288 req = (struct hclge_pf_rst_done_cmd *)desc.data;
4289 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4290 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4291
4292 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4293 /* To be compatible with the old firmware, which does not support
4294 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4295 * return success
4296 */
4297 if (ret == -EOPNOTSUPP) {
4298 dev_warn(&hdev->pdev->dev,
4299 "current firmware does not support command(0x%x)!\n",
4300 HCLGE_OPC_PF_RST_DONE);
4301 return 0;
4302 } else if (ret) {
4303 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4304 ret);
4305 }
4306
4307 return ret;
4308 }
4309
4310 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4311 {
4312 int ret = 0;
4313
4314 switch (hdev->reset_type) {
4315 case HNAE3_FUNC_RESET:
4316 case HNAE3_FLR_RESET:
4317 ret = hclge_set_all_vf_rst(hdev, false);
4318 break;
4319 case HNAE3_GLOBAL_RESET:
4320 case HNAE3_IMP_RESET:
4321 ret = hclge_set_rst_done(hdev);
4322 break;
4323 default:
4324 break;
4325 }
4326
4327 /* clear the handshake status after re-initialization is done */
4328 hclge_reset_handshake(hdev, false);
4329
4330 return ret;
4331 }
4332
4333 static int hclge_reset_stack(struct hclge_dev *hdev)
4334 {
4335 int ret;
4336
4337 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4338 if (ret)
4339 return ret;
4340
4341 ret = hclge_reset_ae_dev(hdev->ae_dev);
4342 if (ret)
4343 return ret;
4344
4345 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4346 }
4347
4348 static int hclge_reset_prepare(struct hclge_dev *hdev)
4349 {
4350 int ret;
4351
4352 hdev->rst_stats.reset_cnt++;
4353 /* perform reset of the stack & ae device for a client */
4354 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4355 if (ret)
4356 return ret;
4357
4358 rtnl_lock();
4359 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4360 rtnl_unlock();
4361 if (ret)
4362 return ret;
4363
4364 return hclge_reset_prepare_wait(hdev);
4365 }
4366
4367 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4368 {
4369 int ret;
4370
4371 hdev->rst_stats.hw_reset_done_cnt++;
4372
4373 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4374 if (ret)
4375 return ret;
4376
4377 rtnl_lock();
4378 ret = hclge_reset_stack(hdev);
4379 rtnl_unlock();
4380 if (ret)
4381 return ret;
4382
4383 hclge_clear_reset_cause(hdev);
4384
4385 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4386 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
4387 * times
4388 */
4389 if (ret &&
4390 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4391 return ret;
4392
4393 ret = hclge_reset_prepare_up(hdev);
4394 if (ret)
4395 return ret;
4396
4397 rtnl_lock();
4398 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4399 rtnl_unlock();
4400 if (ret)
4401 return ret;
4402
4403 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4404 if (ret)
4405 return ret;
4406
4407 hdev->last_reset_time = jiffies;
4408 hdev->rst_stats.reset_fail_cnt = 0;
4409 hdev->rst_stats.reset_done_cnt++;
4410 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4411
4412 hclge_update_reset_level(hdev);
4413
4414 return 0;
4415 }
4416
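/* Top-level reset flow: prepare (notify clients down and assert the reset),
 * wait for the hardware to finish, then rebuild (re-initialize the ae_dev and
 * bring clients back up). If any step fails, the error handler decides
 * whether the reset task should be rescheduled.
 */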
4417 static void hclge_reset(struct hclge_dev *hdev)
4418 {
4419 if (hclge_reset_prepare(hdev))
4420 goto err_reset;
4421
4422 if (hclge_reset_wait(hdev))
4423 goto err_reset;
4424
4425 if (hclge_reset_rebuild(hdev))
4426 goto err_reset;
4427
4428 return;
4429
4430 err_reset:
4431 if (hclge_reset_err_handle(hdev))
4432 hclge_reset_task_schedule(hdev);
4433 }
4434
4435 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4436 {
4437 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4438 struct hclge_dev *hdev = ae_dev->priv;
4439
4440 /* We might end up getting called broadly because of the 2 cases below:
4441 * 1. A recoverable error was conveyed through APEI and the only way to
4442 *    bring back normalcy is to reset.
4443 * 2. A new reset request from the stack due to timeout
4444 *
4445 * Check if this is a new reset request and we are not here just because
4446 * the last reset attempt did not succeed and the watchdog hit us again.
4447 * We know this if the last reset request did not occur very recently
4448 * (watchdog timer = 5*HZ, let us check after a sufficiently large time,
4449 * say 4*5*HZ). In case of a new request we reset the "reset level" to
4450 * PF reset. And if it is a repeat reset request of the most recent one,
4451 * then we want to make sure we throttle the reset request. Therefore,
4452 * we will not allow it again before 3*HZ has elapsed.
4453 */
4454
4455 if (time_before(jiffies, (hdev->last_reset_time +
4456 HCLGE_RESET_INTERVAL))) {
4457 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4458 return;
4459 }
4460
4461 if (hdev->default_reset_request) {
4462 hdev->reset_level =
4463 hclge_get_reset_level(ae_dev,
4464 &hdev->default_reset_request);
4465 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4466 hdev->reset_level = HNAE3_FUNC_RESET;
4467 }
4468
4469 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4470 hdev->reset_level);
4471
4472 /* request reset & schedule reset task */
4473 set_bit(hdev->reset_level, &hdev->reset_request);
4474 hclge_reset_task_schedule(hdev);
4475
4476 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4477 hdev->reset_level++;
4478 }
4479
4480 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4481 enum hnae3_reset_type rst_type)
4482 {
4483 struct hclge_dev *hdev = ae_dev->priv;
4484
4485 set_bit(rst_type, &hdev->default_reset_request);
4486 }
4487
4488 static void hclge_reset_timer(struct timer_list *t)
4489 {
4490 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4491
4492 /* if default_reset_request has no value, it means that this reset
4493 * request has already been handled, so just return here
4494 */
4495 if (!hdev->default_reset_request)
4496 return;
4497
4498 dev_info(&hdev->pdev->dev,
4499 "triggering reset in reset timer\n");
4500 hclge_reset_event(hdev->pdev, NULL);
4501 }
4502
4503 static void hclge_reset_subtask(struct hclge_dev *hdev)
4504 {
4505 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4506
4507 /* check if there is any ongoing reset in the hardware. This status can
4508 * be checked from reset_pending. If there is, then we need to wait for
4509 * the hardware to complete the reset.
4510 *    a. If we are able to figure out in reasonable time that the hardware
4511 *       has fully reset, then we can proceed with the driver and client
4512 * reset.
4513 * b. else, we can come back later to check this status so re-sched
4514 * now.
4515 */
4516 hdev->last_reset_time = jiffies;
4517 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4518 if (hdev->reset_type != HNAE3_NONE_RESET)
4519 hclge_reset(hdev);
4520
4521 /* check if we got any *new* reset requests to be honored */
4522 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4523 if (hdev->reset_type != HNAE3_NONE_RESET)
4524 hclge_do_reset(hdev);
4525
4526 hdev->reset_type = HNAE3_NONE_RESET;
4527 }
4528
4529 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4530 {
4531 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4532 enum hnae3_reset_type reset_type;
4533
4534 if (ae_dev->hw_err_reset_req) {
4535 reset_type = hclge_get_reset_level(ae_dev,
4536 &ae_dev->hw_err_reset_req);
4537 hclge_set_def_reset_request(ae_dev, reset_type);
4538 }
4539
4540 if (hdev->default_reset_request && ae_dev->ops->reset_event)
4541 ae_dev->ops->reset_event(hdev->pdev, NULL);
4542
4543 /* enable interrupt after error handling complete */
4544 hclge_enable_vector(&hdev->misc_vector, true);
4545 }
4546
4547 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4548 {
4549 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4550
4551 ae_dev->hw_err_reset_req = 0;
4552
4553 if (hclge_find_error_source(hdev)) {
4554 hclge_handle_error_info_log(ae_dev);
4555 hclge_handle_mac_tnl(hdev);
4556 hclge_handle_vf_queue_err_ras(hdev);
4557 }
4558
4559 hclge_handle_err_reset_request(hdev);
4560 }
4561
4562 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4563 {
4564 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4565 struct device *dev = &hdev->pdev->dev;
4566 u32 msix_sts_reg;
4567
4568 msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4569 if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4570 if (hclge_handle_hw_msix_error
4571 (hdev, &hdev->default_reset_request))
4572 dev_info(dev, "received msix interrupt 0x%x\n",
4573 msix_sts_reg);
4574 }
4575
4576 hclge_handle_hw_ras_error(ae_dev);
4577
4578 hclge_handle_err_reset_request(hdev);
4579 }
4580
4581 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4582 {
4583 if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4584 return;
4585
4586 if (hnae3_dev_ras_imp_supported(hdev))
4587 hclge_handle_err_recovery(hdev);
4588 else
4589 hclge_misc_err_recovery(hdev);
4590 }
4591
4592 static void hclge_reset_service_task(struct hclge_dev *hdev)
4593 {
4594 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4595 return;
4596
4597 if (time_is_before_jiffies(hdev->last_rst_scheduled +
4598 HCLGE_RESET_SCHED_TIMEOUT))
4599 dev_warn(&hdev->pdev->dev,
4600 "reset service task is scheduled after %ums on cpu%u!\n",
4601 jiffies_to_msecs(jiffies - hdev->last_rst_scheduled),
4602 smp_processor_id());
4603
4604 down(&hdev->reset_sem);
4605 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4606
4607 hclge_reset_subtask(hdev);
4608
4609 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4610 up(&hdev->reset_sem);
4611 }
4612
4613 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4614 {
4615 #define HCLGE_ALIVE_SECONDS_NORMAL 8
4616
4617 unsigned long alive_time = HCLGE_ALIVE_SECONDS_NORMAL * HZ;
4618 int i;
4619
4620 /* start from vport 1 because the PF is always alive */
4621 for (i = 1; i < hdev->num_alloc_vport; i++) {
4622 struct hclge_vport *vport = &hdev->vport[i];
4623
4624 if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) ||
4625 !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4626 continue;
4627 if (time_after(jiffies, vport->last_active_jiffies +
4628 alive_time)) {
4629 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4630 dev_warn(&hdev->pdev->dev,
4631 "VF %u heartbeat timeout\n",
4632 i - HCLGE_VF_VPORT_START_NUM);
4633 }
4634 }
4635 }
4636
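/* Periodic housekeeping: sync link state, MAC/promisc/FD tables, VF
 * aliveness, statistics, port info, VLAN filter and ARFS expiry. The work is
 * throttled to roughly once per second by tracking last_serv_processed and
 * rescheduling with the remaining delay.
 */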
4637 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4638 {
4639 unsigned long delta = round_jiffies_relative(HZ);
4640
4641 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4642 return;
4643
4644 /* Always handle the link updating to make sure link state is
4645 * updated when it is triggered by mbx.
4646 */
4647 hclge_update_link_status(hdev);
4648 hclge_sync_mac_table(hdev);
4649 hclge_sync_promisc_mode(hdev);
4650 hclge_sync_fd_table(hdev);
4651
4652 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4653 delta = jiffies - hdev->last_serv_processed;
4654
4655 if (delta < round_jiffies_relative(HZ)) {
4656 delta = round_jiffies_relative(HZ) - delta;
4657 goto out;
4658 }
4659 }
4660
4661 hdev->serv_processed_cnt++;
4662 hclge_update_vport_alive(hdev);
4663
4664 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4665 hdev->last_serv_processed = jiffies;
4666 goto out;
4667 }
4668
4669 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4670 hclge_update_stats_for_all(hdev);
4671
4672 hclge_update_port_info(hdev);
4673 hclge_sync_vlan_filter(hdev);
4674
4675 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4676 hclge_rfs_filter_expire(hdev);
4677
4678 hdev->last_serv_processed = jiffies;
4679
4680 out:
4681 hclge_task_schedule(hdev, delta);
4682 }
4683
4684 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4685 {
4686 unsigned long flags;
4687
4688 if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4689 !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4690 !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4691 return;
4692
4693 /* to avoid racing with the irq handler */
4694 spin_lock_irqsave(&hdev->ptp->lock, flags);
4695
4696 /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4697 * handler may handle it just before spin_lock_irqsave().
4698 */
4699 if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4700 hclge_ptp_clean_tx_hwts(hdev);
4701
4702 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4703 }
4704
4705 static void hclge_service_task(struct work_struct *work)
4706 {
4707 struct hclge_dev *hdev =
4708 container_of(work, struct hclge_dev, service_task.work);
4709
4710 hclge_errhand_service_task(hdev);
4711 hclge_reset_service_task(hdev);
4712 hclge_ptp_service_task(hdev);
4713 hclge_mailbox_service_task(hdev);
4714 hclge_periodic_service_task(hdev);
4715
4716 /* Handle error recovery, reset and mbx again in case periodical task
4717 * delays the handling by calling hclge_task_schedule() in
4718 * hclge_periodic_service_task().
4719 */
4720 hclge_errhand_service_task(hdev);
4721 hclge_reset_service_task(hdev);
4722 hclge_mailbox_service_task(hdev);
4723 }
4724
4725 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4726 {
4727 /* VF handle has no client */
4728 if (!handle->client)
4729 return container_of(handle, struct hclge_vport, nic);
4730 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4731 return container_of(handle, struct hclge_vport, roce);
4732 else
4733 return container_of(handle, struct hclge_vport, nic);
4734 }
4735
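/* Fill in the IRQ number and the interrupt control register address for ring
 * vector @idx: the first 64 ring vectors live in the base vector register
 * region, later ones in the extended register region.
 */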
4736 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4737 struct hnae3_vector_info *vector_info)
4738 {
4739 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
4740
4741 vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4742
4743 /* need an extended offset to configure vectors >= 64 */
4744 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4745 vector_info->io_addr = hdev->hw.hw.io_base +
4746 HCLGE_VECTOR_REG_BASE +
4747 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4748 else
4749 vector_info->io_addr = hdev->hw.hw.io_base +
4750 HCLGE_VECTOR_EXT_REG_BASE +
4751 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4752 HCLGE_VECTOR_REG_OFFSET_H +
4753 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4754 HCLGE_VECTOR_REG_OFFSET;
4755
4756 hdev->vector_status[idx] = hdev->vport[0].vport_id;
4757 hdev->vector_irq[idx] = vector_info->vector;
4758 }
4759
4760 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4761 struct hnae3_vector_info *vector_info)
4762 {
4763 struct hclge_vport *vport = hclge_get_vport(handle);
4764 struct hnae3_vector_info *vector = vector_info;
4765 struct hclge_dev *hdev = vport->back;
4766 int alloc = 0;
4767 u16 i = 0;
4768 u16 j;
4769
4770 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4771 vector_num = min(hdev->num_msi_left, vector_num);
4772
4773 for (j = 0; j < vector_num; j++) {
4774 while (++i < hdev->num_nic_msi) {
4775 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4776 hclge_get_vector_info(hdev, i, vector);
4777 vector++;
4778 alloc++;
4779
4780 break;
4781 }
4782 }
4783 }
4784 hdev->num_msi_left -= alloc;
4785 hdev->num_msi_used += alloc;
4786
4787 return alloc;
4788 }
4789
4790 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4791 {
4792 int i;
4793
4794 for (i = 0; i < hdev->num_msi; i++)
4795 if (vector == hdev->vector_irq[i])
4796 return i;
4797
4798 return -EINVAL;
4799 }
4800
4801 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4802 {
4803 struct hclge_vport *vport = hclge_get_vport(handle);
4804 struct hclge_dev *hdev = vport->back;
4805 int vector_id;
4806
4807 vector_id = hclge_get_vector_index(hdev, vector);
4808 if (vector_id < 0) {
4809 dev_err(&hdev->pdev->dev,
4810 "Get vector index fail. vector = %d\n", vector);
4811 return vector_id;
4812 }
4813
4814 hclge_free_vector(hdev, vector_id);
4815
4816 return 0;
4817 }
4818
4819 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4820 u8 *key, u8 *hfunc)
4821 {
4822 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4823 struct hclge_vport *vport = hclge_get_vport(handle);
4824 struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg;
4825
4826 hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
4827
4828 hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
4829 ae_dev->dev_specs.rss_ind_tbl_size);
4830
4831 return 0;
4832 }
4833
4834 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4835 const u8 *key, const u8 hfunc)
4836 {
4837 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4838 struct hclge_vport *vport = hclge_get_vport(handle);
4839 struct hclge_dev *hdev = vport->back;
4840 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
4841 int ret, i;
4842
4843 ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc);
4844 if (ret) {
4845 dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4846 return ret;
4847 }
4848
4849 /* Update the shadow RSS table with user specified qids */
4850 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4851 rss_cfg->rss_indirection_tbl[i] = indir[i];
4852
4853 /* Update the hardware */
4854 return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw,
4855 rss_cfg->rss_indirection_tbl);
4856 }
4857
4858 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4859 struct ethtool_rxnfc *nfc)
4860 {
4861 struct hclge_vport *vport = hclge_get_vport(handle);
4862 struct hclge_dev *hdev = vport->back;
4863 int ret;
4864
4865 ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
4866 &hdev->rss_cfg, nfc);
4867 if (ret) {
4868 dev_err(&hdev->pdev->dev,
4869 "failed to set rss tuple, ret = %d.\n", ret);
4870 return ret;
4871 }
4872
4873 return 0;
4874 }
4875
4876 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4877 struct ethtool_rxnfc *nfc)
4878 {
4879 struct hclge_vport *vport = hclge_get_vport(handle);
4880 u8 tuple_sets;
4881 int ret;
4882
4883 nfc->data = 0;
4884
4885 ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type,
4886 &tuple_sets);
4887 if (ret || !tuple_sets)
4888 return ret;
4889
4890 nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);
4891
4892 return 0;
4893 }
4894
4895 static int hclge_get_tc_size(struct hnae3_handle *handle)
4896 {
4897 struct hclge_vport *vport = hclge_get_vport(handle);
4898 struct hclge_dev *hdev = vport->back;
4899
4900 return hdev->pf_rss_size_max;
4901 }
4902
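/* Program the per-TC RSS queue regions: for each TC enabled in hw_tc_map,
 * tc_size is the log2 of the queue count rounded up to a power of two and
 * tc_offset is the first queue of that TC.
 */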
4903 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4904 {
4905 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4906 struct hclge_vport *vport = hdev->vport;
4907 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4908 u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4909 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4910 struct hnae3_tc_info *tc_info;
4911 u16 roundup_size;
4912 u16 rss_size;
4913 int i;
4914
4915 tc_info = &vport->nic.kinfo.tc_info;
4916 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4917 rss_size = tc_info->tqp_count[i];
4918 tc_valid[i] = 0;
4919
4920 if (!(hdev->hw_tc_map & BIT(i)))
4921 continue;
4922
4923 /* tc_size set to hardware is the log2 of roundup power of two
4924 * of rss_size; the actual queue size is limited by the indirection
4925 * table.
4926 */
4927 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4928 rss_size == 0) {
4929 dev_err(&hdev->pdev->dev,
4930 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4931 rss_size);
4932 return -EINVAL;
4933 }
4934
4935 roundup_size = roundup_pow_of_two(rss_size);
4936 roundup_size = ilog2(roundup_size);
4937
4938 tc_valid[i] = 1;
4939 tc_size[i] = roundup_size;
4940 tc_offset[i] = tc_info->tqp_offset[i];
4941 }
4942
4943 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
4944 tc_size);
4945 }
4946
4947 int hclge_rss_init_hw(struct hclge_dev *hdev)
4948 {
4949 u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl;
4950 u8 *key = hdev->rss_cfg.rss_hash_key;
4951 u8 hfunc = hdev->rss_cfg.rss_algo;
4952 int ret;
4953
4954 ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
4955 rss_indir);
4956 if (ret)
4957 return ret;
4958
4959 ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key);
4960 if (ret)
4961 return ret;
4962
4963 ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, &hdev->rss_cfg);
4964 if (ret)
4965 return ret;
4966
4967 return hclge_init_rss_tc_mode(hdev);
4968 }
4969
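/* Map (en == true) or unmap the rings in @ring_chain to the given vector,
 * packing up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries into each firmware
 * command and sending a final command for any remainder.
 */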
4970 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4971 int vector_id, bool en,
4972 struct hnae3_ring_chain_node *ring_chain)
4973 {
4974 struct hclge_dev *hdev = vport->back;
4975 struct hnae3_ring_chain_node *node;
4976 struct hclge_desc desc;
4977 struct hclge_ctrl_vector_chain_cmd *req =
4978 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4979 enum hclge_comm_cmd_status status;
4980 enum hclge_opcode_type op;
4981 u16 tqp_type_and_id;
4982 int i;
4983
4984 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4985 hclge_cmd_setup_basic_desc(&desc, op, false);
4986 req->int_vector_id_l = hnae3_get_field(vector_id,
4987 HCLGE_VECTOR_ID_L_M,
4988 HCLGE_VECTOR_ID_L_S);
4989 req->int_vector_id_h = hnae3_get_field(vector_id,
4990 HCLGE_VECTOR_ID_H_M,
4991 HCLGE_VECTOR_ID_H_S);
4992
4993 i = 0;
4994 for (node = ring_chain; node; node = node->next) {
4995 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4996 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4997 HCLGE_INT_TYPE_S,
4998 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4999 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5000 HCLGE_TQP_ID_S, node->tqp_index);
5001 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5002 HCLGE_INT_GL_IDX_S,
5003 hnae3_get_field(node->int_gl_idx,
5004 HNAE3_RING_GL_IDX_M,
5005 HNAE3_RING_GL_IDX_S));
5006 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5007 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5008 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5009 req->vfid = vport->vport_id;
5010
5011 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5012 if (status) {
5013 dev_err(&hdev->pdev->dev,
5014 "Map TQP fail, status is %d.\n",
5015 status);
5016 return -EIO;
5017 }
5018 i = 0;
5019
5020 hclge_cmd_setup_basic_desc(&desc,
5021 op,
5022 false);
5023 req->int_vector_id_l =
5024 hnae3_get_field(vector_id,
5025 HCLGE_VECTOR_ID_L_M,
5026 HCLGE_VECTOR_ID_L_S);
5027 req->int_vector_id_h =
5028 hnae3_get_field(vector_id,
5029 HCLGE_VECTOR_ID_H_M,
5030 HCLGE_VECTOR_ID_H_S);
5031 }
5032 }
5033
5034 if (i > 0) {
5035 req->int_cause_num = i;
5036 req->vfid = vport->vport_id;
5037 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5038 if (status) {
5039 dev_err(&hdev->pdev->dev,
5040 "Map TQP fail, status is %d.\n", status);
5041 return -EIO;
5042 }
5043 }
5044
5045 return 0;
5046 }
5047
5048 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5049 struct hnae3_ring_chain_node *ring_chain)
5050 {
5051 struct hclge_vport *vport = hclge_get_vport(handle);
5052 struct hclge_dev *hdev = vport->back;
5053 int vector_id;
5054
5055 vector_id = hclge_get_vector_index(hdev, vector);
5056 if (vector_id < 0) {
5057 dev_err(&hdev->pdev->dev,
5058 "failed to get vector index. vector=%d\n", vector);
5059 return vector_id;
5060 }
5061
5062 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5063 }
5064
5065 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5066 struct hnae3_ring_chain_node *ring_chain)
5067 {
5068 struct hclge_vport *vport = hclge_get_vport(handle);
5069 struct hclge_dev *hdev = vport->back;
5070 int vector_id, ret;
5071
5072 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5073 return 0;
5074
5075 vector_id = hclge_get_vector_index(hdev, vector);
5076 if (vector_id < 0) {
5077 dev_err(&handle->pdev->dev,
5078 "Get vector index fail. ret =%d\n", vector_id);
5079 return vector_id;
5080 }
5081
5082 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5083 if (ret)
5084 dev_err(&handle->pdev->dev,
5085 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5086 vector_id, ret);
5087
5088 return ret;
5089 }
5090
5091 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5092 bool en_uc, bool en_mc, bool en_bc)
5093 {
5094 struct hclge_vport *vport = &hdev->vport[vf_id];
5095 struct hnae3_handle *handle = &vport->nic;
5096 struct hclge_promisc_cfg_cmd *req;
5097 struct hclge_desc desc;
5098 bool uc_tx_en = en_uc;
5099 u8 promisc_cfg = 0;
5100 int ret;
5101
5102 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5103
5104 req = (struct hclge_promisc_cfg_cmd *)desc.data;
5105 req->vf_id = vf_id;
5106
5107 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5108 uc_tx_en = false;
5109
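/* When the limit-promisc private flag is set, unicast promisc is only
 * applied in the Rx direction; the Tx unicast promisc bit stays disabled.
 */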
5110 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5111 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5112 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5113 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5114 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5115 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5116 req->extend_promisc = promisc_cfg;
5117
5118 /* to be compatible with DEVICE_VERSION_V1/2 */
5119 promisc_cfg = 0;
5120 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5121 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5122 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5123 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5124 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5125 req->promisc = promisc_cfg;
5126
5127 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5128 if (ret)
5129 dev_err(&hdev->pdev->dev,
5130 "failed to set vport %u promisc mode, ret = %d.\n",
5131 vf_id, ret);
5132
5133 return ret;
5134 }
5135
5136 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5137 bool en_mc_pmc, bool en_bc_pmc)
5138 {
5139 return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5140 en_uc_pmc, en_mc_pmc, en_bc_pmc);
5141 }
5142
5143 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5144 bool en_mc_pmc)
5145 {
5146 struct hclge_vport *vport = hclge_get_vport(handle);
5147 struct hclge_dev *hdev = vport->back;
5148 bool en_bc_pmc = true;
5149
5150 /* For devices whose version is below V2, if broadcast promisc is
5151 * enabled, the vlan filter is always bypassed. So broadcast promisc
5152 * should be disabled until the user enables promisc mode
5153 */
5154 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5155 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5156
5157 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5158 en_bc_pmc);
5159 }
5160
5161 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5162 {
5163 struct hclge_vport *vport = hclge_get_vport(handle);
5164
5165 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5166 }
5167
5168 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5169 {
5170 if (hlist_empty(&hdev->fd_rule_list))
5171 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5172 }
5173
5174 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5175 {
5176 if (!test_bit(location, hdev->fd_bmap)) {
5177 set_bit(location, hdev->fd_bmap);
5178 hdev->hclge_fd_rule_num++;
5179 }
5180 }
5181
5182 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5183 {
5184 if (test_bit(location, hdev->fd_bmap)) {
5185 clear_bit(location, hdev->fd_bmap);
5186 hdev->hclge_fd_rule_num--;
5187 }
5188 }
5189
5190 static void hclge_fd_free_node(struct hclge_dev *hdev,
5191 struct hclge_fd_rule *rule)
5192 {
5193 hlist_del(&rule->rule_node);
5194 kfree(rule);
5195 hclge_sync_fd_state(hdev);
5196 }
5197
5198 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5199 struct hclge_fd_rule *old_rule,
5200 struct hclge_fd_rule *new_rule,
5201 enum HCLGE_FD_NODE_STATE state)
5202 {
5203 switch (state) {
5204 case HCLGE_FD_TO_ADD:
5205 case HCLGE_FD_ACTIVE:
5206 /* 1) if the new state is TO_ADD, just replace the old rule
5207 * with the same location, no matter its state, because the
5208 * new rule will be configured to the hardware.
5209 * 2) if the new state is ACTIVE, it means the new rule
5210 * has been configured to the hardware, so just replace
5211 * the old rule node with the same location.
5212 * 3) since this doesn't add a new node to the list, it's
5213 * unnecessary to update the rule number and fd_bmap.
5214 */
5215 new_rule->rule_node.next = old_rule->rule_node.next;
5216 new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5217 memcpy(old_rule, new_rule, sizeof(*old_rule));
5218 kfree(new_rule);
5219 break;
5220 case HCLGE_FD_DELETED:
5221 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5222 hclge_fd_free_node(hdev, old_rule);
5223 break;
5224 case HCLGE_FD_TO_DEL:
5225 /* if the new request is TO_DEL and the old rule exists:
5226 * 1) if the state of the old rule is TO_DEL, we need do nothing,
5227 * because rules are deleted by location, so the rest of the rule
5228 * content is unnecessary.
5229 * 2) if the state of the old rule is ACTIVE, we need to change its
5230 * state to TO_DEL, so the rule will be deleted when the periodic
5231 * task is scheduled.
5232 * 3) if the state of the old rule is TO_ADD, the rule hasn't been
5233 * added to hardware yet, so we just delete the rule node from
5234 * fd_rule_list directly.
5235 */
5236 if (old_rule->state == HCLGE_FD_TO_ADD) {
5237 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5238 hclge_fd_free_node(hdev, old_rule);
5239 return;
5240 }
5241 old_rule->state = HCLGE_FD_TO_DEL;
5242 break;
5243 }
5244 }
5245
5246 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5247 u16 location,
5248 struct hclge_fd_rule **parent)
5249 {
5250 struct hclge_fd_rule *rule;
5251 struct hlist_node *node;
5252
5253 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5254 if (rule->location == location)
5255 return rule;
5256 else if (rule->location > location)
5257 return NULL;
5258 /* record the parent node, used to keep the nodes in fd_rule_list
5259 * in ascending order.
5260 */
5261 *parent = rule;
5262 }
5263
5264 return NULL;
5265 }
5266
5267 /* insert fd rule node in ascending order according to rule->location */
5268 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5269 struct hclge_fd_rule *rule,
5270 struct hclge_fd_rule *parent)
5271 {
5272 INIT_HLIST_NODE(&rule->rule_node);
5273
5274 if (parent)
5275 hlist_add_behind(&rule->rule_node, &parent->rule_node);
5276 else
5277 hlist_add_head(&rule->rule_node, hlist);
5278 }
5279
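/* Program the user-def offsets for the OL2/OL3/OL4 layers; a layer's
 * user-def matching is enabled only while its ref_cnt is non-zero.
 */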
5280 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5281 struct hclge_fd_user_def_cfg *cfg)
5282 {
5283 struct hclge_fd_user_def_cfg_cmd *req;
5284 struct hclge_desc desc;
5285 u16 data = 0;
5286 int ret;
5287
5288 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5289
5290 req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5291
5292 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5293 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5294 HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5295 req->ol2_cfg = cpu_to_le16(data);
5296
5297 data = 0;
5298 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5299 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5300 HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5301 req->ol3_cfg = cpu_to_le16(data);
5302
5303 data = 0;
5304 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5305 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5306 HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5307 req->ol4_cfg = cpu_to_le16(data);
5308
5309 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5310 if (ret)
5311 dev_err(&hdev->pdev->dev,
5312 "failed to set fd user def data, ret= %d\n", ret);
5313 return ret;
5314 }
5315
5316 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5317 {
5318 int ret;
5319
5320 if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5321 return;
5322
5323 if (!locked)
5324 spin_lock_bh(&hdev->fd_rule_lock);
5325
5326 ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5327 if (ret)
5328 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5329
5330 if (!locked)
5331 spin_unlock_bh(&hdev->fd_rule_lock);
5332 }
5333
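/* Each layer supports only one user-def offset: a new rule must either
 * reuse the offset already configured for the layer, or be replacing the
 * layer's only existing user-def rule at the same location.
 */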
5334 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5335 struct hclge_fd_rule *rule)
5336 {
5337 struct hlist_head *hlist = &hdev->fd_rule_list;
5338 struct hclge_fd_rule *fd_rule, *parent = NULL;
5339 struct hclge_fd_user_def_info *info, *old_info;
5340 struct hclge_fd_user_def_cfg *cfg;
5341
5342 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5343 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5344 return 0;
5345
5346 /* the valid layer starts from 1, so minus 1 to get the cfg index */
5347 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5348 info = &rule->ep.user_def;
5349
5350 if (!cfg->ref_cnt || cfg->offset == info->offset)
5351 return 0;
5352
5353 if (cfg->ref_cnt > 1)
5354 goto error;
5355
5356 fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5357 if (fd_rule) {
5358 old_info = &fd_rule->ep.user_def;
5359 if (info->layer == old_info->layer)
5360 return 0;
5361 }
5362
5363 error:
5364 dev_err(&hdev->pdev->dev,
5365 "No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5366 info->layer + 1);
5367 return -ENOSPC;
5368 }
5369
5370 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5371 struct hclge_fd_rule *rule)
5372 {
5373 struct hclge_fd_user_def_cfg *cfg;
5374
5375 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5376 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5377 return;
5378
5379 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5380 if (!cfg->ref_cnt) {
5381 cfg->offset = rule->ep.user_def.offset;
5382 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5383 }
5384 cfg->ref_cnt++;
5385 }
5386
5387 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5388 struct hclge_fd_rule *rule)
5389 {
5390 struct hclge_fd_user_def_cfg *cfg;
5391
5392 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5393 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5394 return;
5395
5396 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5397 if (!cfg->ref_cnt)
5398 return;
5399
5400 cfg->ref_cnt--;
5401 if (!cfg->ref_cnt) {
5402 cfg->offset = 0;
5403 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5404 }
5405 }
5406
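/* Update the fd rule list for the given location: adjust the user-def
 * refcounts, then replace, delete or insert the rule node. A rule inserted
 * in TO_ADD state is flushed to hardware later by the periodic task.
 */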
5407 static void hclge_update_fd_list(struct hclge_dev *hdev,
5408 enum HCLGE_FD_NODE_STATE state, u16 location,
5409 struct hclge_fd_rule *new_rule)
5410 {
5411 struct hlist_head *hlist = &hdev->fd_rule_list;
5412 struct hclge_fd_rule *fd_rule, *parent = NULL;
5413
5414 fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5415 if (fd_rule) {
5416 hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5417 if (state == HCLGE_FD_ACTIVE)
5418 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5419 hclge_sync_fd_user_def_cfg(hdev, true);
5420
5421 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5422 return;
5423 }
5424
5425 /* it's unlikely to fail here, because we have already checked that
5426 * the rule exists.
5427 */
5428 if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5429 dev_warn(&hdev->pdev->dev,
5430 "failed to delete fd rule %u, it's inexistent\n",
5431 location);
5432 return;
5433 }
5434
5435 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5436 hclge_sync_fd_user_def_cfg(hdev, true);
5437
5438 hclge_fd_insert_rule_node(hlist, new_rule, parent);
5439 hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5440
5441 if (state == HCLGE_FD_TO_ADD) {
5442 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5443 hclge_task_schedule(hdev, 0);
5444 }
5445 }
5446
5447 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5448 {
5449 struct hclge_get_fd_mode_cmd *req;
5450 struct hclge_desc desc;
5451 int ret;
5452
5453 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5454
5455 req = (struct hclge_get_fd_mode_cmd *)desc.data;
5456
5457 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5458 if (ret) {
5459 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5460 return ret;
5461 }
5462
5463 *fd_mode = req->mode;
5464
5465 return ret;
5466 }
5467
5468 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5469 u32 *stage1_entry_num,
5470 u32 *stage2_entry_num,
5471 u16 *stage1_counter_num,
5472 u16 *stage2_counter_num)
5473 {
5474 struct hclge_get_fd_allocation_cmd *req;
5475 struct hclge_desc desc;
5476 int ret;
5477
5478 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5479
5480 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5481
5482 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5483 if (ret) {
5484 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5485 ret);
5486 return ret;
5487 }
5488
5489 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5490 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5491 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5492 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5493
5494 return ret;
5495 }
5496
5497 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5498 enum HCLGE_FD_STAGE stage_num)
5499 {
5500 struct hclge_set_fd_key_config_cmd *req;
5501 struct hclge_fd_key_cfg *stage;
5502 struct hclge_desc desc;
5503 int ret;
5504
5505 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5506
5507 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5508 stage = &hdev->fd_cfg.key_cfg[stage_num];
5509 req->stage = stage_num;
5510 req->key_select = stage->key_sel;
5511 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5512 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5513 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5514 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5515 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5516 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5517
5518 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5519 if (ret)
5520 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5521
5522 return ret;
5523 }
5524
5525 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5526 {
5527 struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5528
5529 spin_lock_bh(&hdev->fd_rule_lock);
5530 memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5531 spin_unlock_bh(&hdev->fd_rule_lock);
5532
5533 hclge_fd_set_user_def_cmd(hdev, cfg);
5534 }
5535
5536 static int hclge_init_fd_config(struct hclge_dev *hdev)
5537 {
5538 #define LOW_2_WORDS 0x03
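/* LOW_2_WORDS appears to select the lowest two 32-bit words of the IPv6
 * address for the key (one enable bit per word).
 */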
5539 struct hclge_fd_key_cfg *key_cfg;
5540 int ret;
5541
5542 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
5543 return 0;
5544
5545 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5546 if (ret)
5547 return ret;
5548
5549 switch (hdev->fd_cfg.fd_mode) {
5550 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5551 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5552 break;
5553 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5554 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5555 break;
5556 default:
5557 dev_err(&hdev->pdev->dev,
5558 "Unsupported flow director mode %u\n",
5559 hdev->fd_cfg.fd_mode);
5560 return -EOPNOTSUPP;
5561 }
5562
5563 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5564 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5565 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5566 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5567 key_cfg->outer_sipv6_word_en = 0;
5568 key_cfg->outer_dipv6_word_en = 0;
5569
5570 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5571 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5572 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5573 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5574
5575 /* If the max 400-bit key is used, we can also support ether type tuples */
5576 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5577 key_cfg->tuple_active |=
5578 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5579 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5580 key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5581 }
5582
5583 /* roce_type is used to filter roce frames
5584 * dst_vport is used to specify the rule
5585 */
5586 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5587
5588 ret = hclge_get_fd_allocation(hdev,
5589 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5590 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5591 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5592 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5593 if (ret)
5594 return ret;
5595
5596 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5597 }
5598
5599 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5600 int loc, u8 *key, bool is_add)
5601 {
5602 struct hclge_fd_tcam_config_1_cmd *req1;
5603 struct hclge_fd_tcam_config_2_cmd *req2;
5604 struct hclge_fd_tcam_config_3_cmd *req3;
5605 struct hclge_desc desc[3];
5606 int ret;
5607
5608 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5609 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
5610 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5611 desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
5612 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5613
5614 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5615 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5616 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5617
5618 req1->stage = stage;
5619 req1->xy_sel = sel_x ? 1 : 0;
5620 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5621 req1->index = cpu_to_le32(loc);
5622 req1->entry_vld = sel_x ? is_add : 0;
5623
5624 if (key) {
5625 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5626 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5627 sizeof(req2->tcam_data));
5628 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5629 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5630 }
5631
5632 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5633 if (ret)
5634 dev_err(&hdev->pdev->dev,
5635 "config tcam key fail, ret=%d\n",
5636 ret);
5637
5638 return ret;
5639 }
5640
5641 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5642 struct hclge_fd_ad_data *action)
5643 {
5644 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5645 struct hclge_fd_ad_config_cmd *req;
5646 struct hclge_desc desc;
5647 u64 ad_data = 0;
5648 int ret;
5649
5650 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5651
5652 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5653 req->index = cpu_to_le32(loc);
5654 req->stage = stage;
5655
5656 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5657 action->write_rule_id_to_bd);
5658 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5659 action->rule_id);
5660 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5661 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5662 action->override_tc);
5663 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5664 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5665 }
5666 ad_data <<= 32;
5667 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5668 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5669 action->forward_to_direct_queue);
5670 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5671 action->queue_id);
5672 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5673 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5674 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5675 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5676 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5677 action->counter_id);
5678
5679 req->ad_data = cpu_to_le64(ad_data);
5680 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5681 if (ret)
5682 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5683
5684 return ret;
5685 }
5686
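/* Convert one tuple of the rule into its TCAM key_x/key_y representation.
 * The calc_x()/calc_y() helpers (defined elsewhere in the driver) derive
 * the x and y key parts from the tuple value and its mask; a tuple listed
 * in rule->unused_tuple is left as all-zero key bytes (wildcard).
 */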
5687 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5688 struct hclge_fd_rule *rule)
5689 {
5690 int offset, moffset, ip_offset;
5691 enum HCLGE_FD_KEY_OPT key_opt;
5692 u16 tmp_x_s, tmp_y_s;
5693 u32 tmp_x_l, tmp_y_l;
5694 u8 *p = (u8 *)rule;
5695 int i;
5696
5697 if (rule->unused_tuple & BIT(tuple_bit))
5698 return true;
5699
5700 key_opt = tuple_key_info[tuple_bit].key_opt;
5701 offset = tuple_key_info[tuple_bit].offset;
5702 moffset = tuple_key_info[tuple_bit].moffset;
5703
5704 switch (key_opt) {
5705 case KEY_OPT_U8:
5706 calc_x(*key_x, p[offset], p[moffset]);
5707 calc_y(*key_y, p[offset], p[moffset]);
5708
5709 return true;
5710 case KEY_OPT_LE16:
5711 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5712 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5713 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5714 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5715
5716 return true;
5717 case KEY_OPT_LE32:
5718 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5719 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5720 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5721 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5722
5723 return true;
5724 case KEY_OPT_MAC:
5725 for (i = 0; i < ETH_ALEN; i++) {
5726 calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5727 p[moffset + i]);
5728 calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5729 p[moffset + i]);
5730 }
5731
5732 return true;
5733 case KEY_OPT_IP:
5734 ip_offset = IPV4_INDEX * sizeof(u32);
5735 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5736 *(u32 *)(&p[moffset + ip_offset]));
5737 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5738 *(u32 *)(&p[moffset + ip_offset]));
5739 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5740 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5741
5742 return true;
5743 default:
5744 return false;
5745 }
5746 }
5747
5748 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5749 u8 vf_id, u8 network_port_id)
5750 {
5751 u32 port_number = 0;
5752
5753 if (port_type == HOST_PORT) {
5754 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5755 pf_id);
5756 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5757 vf_id);
5758 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5759 } else {
5760 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5761 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5762 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5763 }
5764
5765 return port_number;
5766 }
5767
5768 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5769 __le32 *key_x, __le32 *key_y,
5770 struct hclge_fd_rule *rule)
5771 {
5772 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5773 u8 cur_pos = 0, tuple_size, shift_bits;
5774 unsigned int i;
5775
5776 for (i = 0; i < MAX_META_DATA; i++) {
5777 tuple_size = meta_data_key_info[i].key_length;
5778 tuple_bit = key_cfg->meta_data_active & BIT(i);
5779
5780 switch (tuple_bit) {
5781 case BIT(ROCE_TYPE):
5782 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5783 cur_pos += tuple_size;
5784 break;
5785 case BIT(DST_VPORT):
5786 port_number = hclge_get_port_number(HOST_PORT, 0,
5787 rule->vf_id, 0);
5788 hnae3_set_field(meta_data,
5789 GENMASK(cur_pos + tuple_size, cur_pos),
5790 cur_pos, port_number);
5791 cur_pos += tuple_size;
5792 break;
5793 default:
5794 break;
5795 }
5796 }
5797
5798 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5799 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5800 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5801
5802 *key_x = cpu_to_le32(tmp_x << shift_bits);
5803 *key_y = cpu_to_le32(tmp_y << shift_bits);
5804 }
5805
5806 /* A complete key consists of a meta data key and a tuple key.
5807 * The meta data key is stored in the MSB region, and the tuple key is
5808 * stored in the LSB region; unused bits are filled with 0.
5809 */
5810 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5811 struct hclge_fd_rule *rule)
5812 {
5813 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5814 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5815 u8 *cur_key_x, *cur_key_y;
5816 u8 meta_data_region;
5817 u8 tuple_size;
5818 int ret;
5819 u32 i;
5820
5821 memset(key_x, 0, sizeof(key_x));
5822 memset(key_y, 0, sizeof(key_y));
5823 cur_key_x = key_x;
5824 cur_key_y = key_y;
5825
5826 for (i = 0; i < MAX_TUPLE; i++) {
5827 bool tuple_valid;
5828
5829 tuple_size = tuple_key_info[i].key_length / 8;
5830 if (!(key_cfg->tuple_active & BIT(i)))
5831 continue;
5832
5833 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5834 cur_key_y, rule);
5835 if (tuple_valid) {
5836 cur_key_x += tuple_size;
5837 cur_key_y += tuple_size;
5838 }
5839 }
5840
5841 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5842 MAX_META_DATA_LENGTH / 8;
5843
5844 hclge_fd_convert_meta_data(key_cfg,
5845 (__le32 *)(key_x + meta_data_region),
5846 (__le32 *)(key_y + meta_data_region),
5847 rule);
5848
5849 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5850 true);
5851 if (ret) {
5852 dev_err(&hdev->pdev->dev,
5853 "fd key_y config fail, loc=%u, ret=%d\n",
5854 rule->queue_id, ret);
5855 return ret;
5856 }
5857
5858 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5859 true);
5860 if (ret)
5861 dev_err(&hdev->pdev->dev,
5862 "fd key_x config fail, loc=%u, ret=%d\n",
5863 rule->queue_id, ret);
5864 return ret;
5865 }
5866
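/* Build the action data for a rule: drop, redirect to a TC's queue range
 * (SELECT_TC), or forward to a specific queue. When flow counters are
 * available, the counter id is derived from the rule's vf_id.
 */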
5867 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5868 struct hclge_fd_rule *rule)
5869 {
5870 struct hclge_vport *vport = hdev->vport;
5871 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5872 struct hclge_fd_ad_data ad_data;
5873
5874 memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5875 ad_data.ad_id = rule->location;
5876
5877 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5878 ad_data.drop_packet = true;
5879 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5880 ad_data.override_tc = true;
5881 ad_data.queue_id =
5882 kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5883 ad_data.tc_size =
5884 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5885 } else {
5886 ad_data.forward_to_direct_queue = true;
5887 ad_data.queue_id = rule->queue_id;
5888 }
5889
5890 if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
5891 ad_data.use_counter = true;
5892 ad_data.counter_id = rule->vf_id %
5893 hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
5894 } else {
5895 ad_data.use_counter = false;
5896 ad_data.counter_id = 0;
5897 }
5898
5899 ad_data.use_next_stage = false;
5900 ad_data.next_input_key = 0;
5901
5902 ad_data.write_rule_id_to_bd = true;
5903 ad_data.rule_id = rule->location;
5904
5905 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5906 }
5907
5908 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5909 u32 *unused_tuple)
5910 {
5911 if (!spec || !unused_tuple)
5912 return -EINVAL;
5913
5914 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5915
5916 if (!spec->ip4src)
5917 *unused_tuple |= BIT(INNER_SRC_IP);
5918
5919 if (!spec->ip4dst)
5920 *unused_tuple |= BIT(INNER_DST_IP);
5921
5922 if (!spec->psrc)
5923 *unused_tuple |= BIT(INNER_SRC_PORT);
5924
5925 if (!spec->pdst)
5926 *unused_tuple |= BIT(INNER_DST_PORT);
5927
5928 if (!spec->tos)
5929 *unused_tuple |= BIT(INNER_IP_TOS);
5930
5931 return 0;
5932 }
5933
5934 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5935 u32 *unused_tuple)
5936 {
5937 if (!spec || !unused_tuple)
5938 return -EINVAL;
5939
5940 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5941 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5942
5943 if (!spec->ip4src)
5944 *unused_tuple |= BIT(INNER_SRC_IP);
5945
5946 if (!spec->ip4dst)
5947 *unused_tuple |= BIT(INNER_DST_IP);
5948
5949 if (!spec->tos)
5950 *unused_tuple |= BIT(INNER_IP_TOS);
5951
5952 if (!spec->proto)
5953 *unused_tuple |= BIT(INNER_IP_PROTO);
5954
5955 if (spec->l4_4_bytes)
5956 return -EOPNOTSUPP;
5957
5958 if (spec->ip_ver != ETH_RX_NFC_IP4)
5959 return -EOPNOTSUPP;
5960
5961 return 0;
5962 }
5963
5964 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5965 u32 *unused_tuple)
5966 {
5967 if (!spec || !unused_tuple)
5968 return -EINVAL;
5969
5970 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5971
5972 /* check whether the src/dst ip address is used */
5973 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5974 *unused_tuple |= BIT(INNER_SRC_IP);
5975
5976 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5977 *unused_tuple |= BIT(INNER_DST_IP);
5978
5979 if (!spec->psrc)
5980 *unused_tuple |= BIT(INNER_SRC_PORT);
5981
5982 if (!spec->pdst)
5983 *unused_tuple |= BIT(INNER_DST_PORT);
5984
5985 if (!spec->tclass)
5986 *unused_tuple |= BIT(INNER_IP_TOS);
5987
5988 return 0;
5989 }
5990
5991 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5992 u32 *unused_tuple)
5993 {
5994 if (!spec || !unused_tuple)
5995 return -EINVAL;
5996
5997 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5998 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5999
6000 /* check whether the src/dst ip address is used */
6001 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6002 *unused_tuple |= BIT(INNER_SRC_IP);
6003
6004 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6005 *unused_tuple |= BIT(INNER_DST_IP);
6006
6007 if (!spec->l4_proto)
6008 *unused_tuple |= BIT(INNER_IP_PROTO);
6009
6010 if (!spec->tclass)
6011 *unused_tuple |= BIT(INNER_IP_TOS);
6012
6013 if (spec->l4_4_bytes)
6014 return -EOPNOTSUPP;
6015
6016 return 0;
6017 }
6018
6019 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6020 {
6021 if (!spec || !unused_tuple)
6022 return -EINVAL;
6023
6024 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6025 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6026 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6027
6028 if (is_zero_ether_addr(spec->h_source))
6029 *unused_tuple |= BIT(INNER_SRC_MAC);
6030
6031 if (is_zero_ether_addr(spec->h_dest))
6032 *unused_tuple |= BIT(INNER_DST_MAC);
6033
6034 if (!spec->h_proto)
6035 *unused_tuple |= BIT(INNER_ETH_TYPE);
6036
6037 return 0;
6038 }
6039
6040 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6041 struct ethtool_rx_flow_spec *fs,
6042 u32 *unused_tuple)
6043 {
6044 if (fs->flow_type & FLOW_EXT) {
6045 if (fs->h_ext.vlan_etype) {
6046 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6047 return -EOPNOTSUPP;
6048 }
6049
6050 if (!fs->h_ext.vlan_tci)
6051 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6052
6053 if (fs->m_ext.vlan_tci &&
6054 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6055 dev_err(&hdev->pdev->dev,
6056 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6057 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6058 return -EINVAL;
6059 }
6060 } else {
6061 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6062 }
6063
6064 if (fs->flow_type & FLOW_MAC_EXT) {
6065 if (hdev->fd_cfg.fd_mode !=
6066 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6067 dev_err(&hdev->pdev->dev,
6068 "FLOW_MAC_EXT is not supported in current fd mode!\n");
6069 return -EOPNOTSUPP;
6070 }
6071
6072 if (is_zero_ether_addr(fs->h_ext.h_dest))
6073 *unused_tuple |= BIT(INNER_DST_MAC);
6074 else
6075 *unused_tuple &= ~BIT(INNER_DST_MAC);
6076 }
6077
6078 return 0;
6079 }
6080
6081 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6082 struct hclge_fd_user_def_info *info)
6083 {
6084 switch (flow_type) {
6085 case ETHER_FLOW:
6086 info->layer = HCLGE_FD_USER_DEF_L2;
6087 *unused_tuple &= ~BIT(INNER_L2_RSV);
6088 break;
6089 case IP_USER_FLOW:
6090 case IPV6_USER_FLOW:
6091 info->layer = HCLGE_FD_USER_DEF_L3;
6092 *unused_tuple &= ~BIT(INNER_L3_RSV);
6093 break;
6094 case TCP_V4_FLOW:
6095 case UDP_V4_FLOW:
6096 case TCP_V6_FLOW:
6097 case UDP_V6_FLOW:
6098 info->layer = HCLGE_FD_USER_DEF_L4;
6099 *unused_tuple &= ~BIT(INNER_L4_RSV);
6100 break;
6101 default:
6102 return -EOPNOTSUPP;
6103 }
6104
6105 return 0;
6106 }
6107
6108 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6109 {
6110 return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6111 }
6112
6113 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6114 struct ethtool_rx_flow_spec *fs,
6115 u32 *unused_tuple,
6116 struct hclge_fd_user_def_info *info)
6117 {
6118 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6119 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6120 u16 data, offset, data_mask, offset_mask;
6121 int ret;
6122
6123 info->layer = HCLGE_FD_USER_DEF_NONE;
6124 *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6125
6126 if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6127 return 0;
6128
6129 /* user-def data from ethtool is a 64 bit value; bits 0~15 are used
6130 * for data, and bits 32~47 are used for the offset.
6131 */
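/* For example, assuming the layout described above, a user-def value of
 * 0x120000abcd would give offset 0x12 and data 0xabcd.
 */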
6132 data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6133 data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6134 offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6135 offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6136
6137 if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6138 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6139 return -EOPNOTSUPP;
6140 }
6141
6142 if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6143 dev_err(&hdev->pdev->dev,
6144 "user-def offset[%u] should be no more than %u\n",
6145 offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6146 return -EINVAL;
6147 }
6148
6149 if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6150 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6151 return -EINVAL;
6152 }
6153
6154 ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6155 if (ret) {
6156 dev_err(&hdev->pdev->dev,
6157 "unsupported flow type for user-def bytes, ret = %d\n",
6158 ret);
6159 return ret;
6160 }
6161
6162 info->data = data;
6163 info->data_mask = data_mask;
6164 info->offset = offset;
6165
6166 return 0;
6167 }
6168
6169 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6170 struct ethtool_rx_flow_spec *fs,
6171 u32 *unused_tuple,
6172 struct hclge_fd_user_def_info *info)
6173 {
6174 u32 flow_type;
6175 int ret;
6176
6177 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6178 dev_err(&hdev->pdev->dev,
6179 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
6180 fs->location,
6181 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6182 return -EINVAL;
6183 }
6184
6185 ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6186 if (ret)
6187 return ret;
6188
6189 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6190 switch (flow_type) {
6191 case SCTP_V4_FLOW:
6192 case TCP_V4_FLOW:
6193 case UDP_V4_FLOW:
6194 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6195 unused_tuple);
6196 break;
6197 case IP_USER_FLOW:
6198 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6199 unused_tuple);
6200 break;
6201 case SCTP_V6_FLOW:
6202 case TCP_V6_FLOW:
6203 case UDP_V6_FLOW:
6204 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6205 unused_tuple);
6206 break;
6207 case IPV6_USER_FLOW:
6208 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6209 unused_tuple);
6210 break;
6211 case ETHER_FLOW:
6212 if (hdev->fd_cfg.fd_mode !=
6213 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6214 dev_err(&hdev->pdev->dev,
6215 "ETHER_FLOW is not supported in current fd mode!\n");
6216 return -EOPNOTSUPP;
6217 }
6218
6219 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6220 unused_tuple);
6221 break;
6222 default:
6223 dev_err(&hdev->pdev->dev,
6224 "unsupported protocol type, protocol type = %#x\n",
6225 flow_type);
6226 return -EOPNOTSUPP;
6227 }
6228
6229 if (ret) {
6230 dev_err(&hdev->pdev->dev,
6231 "failed to check flow union tuple, ret = %d\n",
6232 ret);
6233 return ret;
6234 }
6235
6236 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6237 }
6238
6239 static void hclge_fd_get_tcpip4_tuple(struct ethtool_rx_flow_spec *fs,
6240 struct hclge_fd_rule *rule, u8 ip_proto)
6241 {
6242 rule->tuples.src_ip[IPV4_INDEX] =
6243 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6244 rule->tuples_mask.src_ip[IPV4_INDEX] =
6245 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6246
6247 rule->tuples.dst_ip[IPV4_INDEX] =
6248 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6249 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6250 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6251
6252 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6253 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6254
6255 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6256 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6257
6258 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6259 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6260
6261 rule->tuples.ether_proto = ETH_P_IP;
6262 rule->tuples_mask.ether_proto = 0xFFFF;
6263
6264 rule->tuples.ip_proto = ip_proto;
6265 rule->tuples_mask.ip_proto = 0xFF;
6266 }
6267
6268 static void hclge_fd_get_ip4_tuple(struct ethtool_rx_flow_spec *fs,
6269 struct hclge_fd_rule *rule)
6270 {
6271 rule->tuples.src_ip[IPV4_INDEX] =
6272 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6273 rule->tuples_mask.src_ip[IPV4_INDEX] =
6274 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6275
6276 rule->tuples.dst_ip[IPV4_INDEX] =
6277 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6278 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6279 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6280
6281 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6282 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6283
6284 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6285 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6286
6287 rule->tuples.ether_proto = ETH_P_IP;
6288 rule->tuples_mask.ether_proto = 0xFFFF;
6289 }
6290
6291 static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs,
6292 struct hclge_fd_rule *rule, u8 ip_proto)
6293 {
6294 ipv6_addr_be32_to_cpu(rule->tuples.src_ip,
6295 fs->h_u.tcp_ip6_spec.ip6src);
6296 ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip,
6297 fs->m_u.tcp_ip6_spec.ip6src);
6298
6299 ipv6_addr_be32_to_cpu(rule->tuples.dst_ip,
6300 fs->h_u.tcp_ip6_spec.ip6dst);
6301 ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip,
6302 fs->m_u.tcp_ip6_spec.ip6dst);
6303
6304 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6305 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6306
6307 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6308 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6309
6310 rule->tuples.ether_proto = ETH_P_IPV6;
6311 rule->tuples_mask.ether_proto = 0xFFFF;
6312
6313 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6314 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6315
6316 rule->tuples.ip_proto = ip_proto;
6317 rule->tuples_mask.ip_proto = 0xFF;
6318 }
6319
6320 static void hclge_fd_get_ip6_tuple(struct ethtool_rx_flow_spec *fs,
6321 struct hclge_fd_rule *rule)
6322 {
6323 ipv6_addr_be32_to_cpu(rule->tuples.src_ip,
6324 fs->h_u.usr_ip6_spec.ip6src);
6325 ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip,
6326 fs->m_u.usr_ip6_spec.ip6src);
6327
6328 ipv6_addr_be32_to_cpu(rule->tuples.dst_ip,
6329 fs->h_u.usr_ip6_spec.ip6dst);
6330 ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip,
6331 fs->m_u.usr_ip6_spec.ip6dst);
6332
6333 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6334 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6335
6336 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6337 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6338
6339 rule->tuples.ether_proto = ETH_P_IPV6;
6340 rule->tuples_mask.ether_proto = 0xFFFF;
6341 }
6342
6343 static void hclge_fd_get_ether_tuple(struct ethtool_rx_flow_spec *fs,
6344 struct hclge_fd_rule *rule)
6345 {
6346 ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6347 ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6348
6349 ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6350 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6351
6352 rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6353 rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6354 }
6355
6356 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6357 struct hclge_fd_rule *rule)
6358 {
6359 switch (info->layer) {
6360 case HCLGE_FD_USER_DEF_L2:
6361 rule->tuples.l2_user_def = info->data;
6362 rule->tuples_mask.l2_user_def = info->data_mask;
6363 break;
6364 case HCLGE_FD_USER_DEF_L3:
6365 rule->tuples.l3_user_def = info->data;
6366 rule->tuples_mask.l3_user_def = info->data_mask;
6367 break;
6368 case HCLGE_FD_USER_DEF_L4:
6369 rule->tuples.l4_user_def = (u32)info->data << 16;
6370 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6371 break;
6372 default:
6373 break;
6374 }
6375
6376 rule->ep.user_def = *info;
6377 }
6378
6379 static int hclge_fd_get_tuple(struct ethtool_rx_flow_spec *fs,
6380 struct hclge_fd_rule *rule,
6381 struct hclge_fd_user_def_info *info)
6382 {
6383 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6384
6385 switch (flow_type) {
6386 case SCTP_V4_FLOW:
6387 hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_SCTP);
6388 break;
6389 case TCP_V4_FLOW:
6390 hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_TCP);
6391 break;
6392 case UDP_V4_FLOW:
6393 hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_UDP);
6394 break;
6395 case IP_USER_FLOW:
6396 hclge_fd_get_ip4_tuple(fs, rule);
6397 break;
6398 case SCTP_V6_FLOW:
6399 hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_SCTP);
6400 break;
6401 case TCP_V6_FLOW:
6402 hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_TCP);
6403 break;
6404 case UDP_V6_FLOW:
6405 hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_UDP);
6406 break;
6407 case IPV6_USER_FLOW:
6408 hclge_fd_get_ip6_tuple(fs, rule);
6409 break;
6410 case ETHER_FLOW:
6411 hclge_fd_get_ether_tuple(fs, rule);
6412 break;
6413 default:
6414 return -EOPNOTSUPP;
6415 }
6416
6417 if (fs->flow_type & FLOW_EXT) {
6418 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6419 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6420 hclge_fd_get_user_def_tuple(info, rule);
6421 }
6422
6423 if (fs->flow_type & FLOW_MAC_EXT) {
6424 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6425 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6426 }
6427
6428 return 0;
6429 }
6430
6431 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6432 struct hclge_fd_rule *rule)
6433 {
6434 int ret;
6435
6436 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6437 if (ret)
6438 return ret;
6439
6440 return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6441 }
6442
6443 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6444 struct hclge_fd_rule *rule)
6445 {
6446 int ret;
6447
6448 spin_lock_bh(&hdev->fd_rule_lock);
6449
6450 if (hdev->fd_active_type != rule->rule_type &&
6451 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6452 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6453 dev_err(&hdev->pdev->dev,
6454 "mode conflict(new type %d, active type %d), please delete existent rules first\n",
6455 rule->rule_type, hdev->fd_active_type);
6456 spin_unlock_bh(&hdev->fd_rule_lock);
6457 return -EINVAL;
6458 }
6459
6460 ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6461 if (ret)
6462 goto out;
6463
6464 ret = hclge_clear_arfs_rules(hdev);
6465 if (ret)
6466 goto out;
6467
6468 ret = hclge_fd_config_rule(hdev, rule);
6469 if (ret)
6470 goto out;
6471
6472 rule->state = HCLGE_FD_ACTIVE;
6473 hdev->fd_active_type = rule->rule_type;
6474 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6475
6476 out:
6477 spin_unlock_bh(&hdev->fd_rule_lock);
6478 return ret;
6479 }
6480
6481 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6482 {
6483 struct hclge_vport *vport = hclge_get_vport(handle);
6484 struct hclge_dev *hdev = vport->back;
6485
6486 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6487 }
6488
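/* Decode the ethtool ring_cookie: RX_CLS_FLOW_DISC means drop the packet,
 * otherwise the cookie encodes a VF id (0 means the PF itself) and a queue
 * index within that function's TQPs.
 */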
6489 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6490 u16 *vport_id, u8 *action, u16 *queue_id)
6491 {
6492 struct hclge_vport *vport = hdev->vport;
6493
6494 if (ring_cookie == RX_CLS_FLOW_DISC) {
6495 *action = HCLGE_FD_ACTION_DROP_PACKET;
6496 } else {
6497 u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6498 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6499 u16 tqps;
6500
6501 /* To keep consistent with the user's configuration, subtract 1 when
6502 * printing 'vf', because the vf id from ethtool is increased by 1.
6503 */
6504 if (vf > hdev->num_req_vfs) {
6505 dev_err(&hdev->pdev->dev,
6506 "Error: vf id (%u) should be less than %u\n",
6507 vf - 1U, hdev->num_req_vfs);
6508 return -EINVAL;
6509 }
6510
6511 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6512 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6513
6514 if (ring >= tqps) {
6515 dev_err(&hdev->pdev->dev,
6516 "Error: queue id (%u) > max tqp num (%u)\n",
6517 ring, tqps - 1U);
6518 return -EINVAL;
6519 }
6520
6521 *action = HCLGE_FD_ACTION_SELECT_QUEUE;
6522 *queue_id = ring;
6523 }
6524
6525 return 0;
6526 }
6527
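/* ethtool -N (ntuple) add path: validate the flow spec, decode the ring
 * cookie, convert the spec into a hclge_fd_rule and program it to hardware
 * via hclge_add_fd_entry_common().
 */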
6528 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6529 struct ethtool_rxnfc *cmd)
6530 {
6531 struct hclge_vport *vport = hclge_get_vport(handle);
6532 struct hclge_dev *hdev = vport->back;
6533 struct hclge_fd_user_def_info info;
6534 u16 dst_vport_id = 0, q_index = 0;
6535 struct ethtool_rx_flow_spec *fs;
6536 struct hclge_fd_rule *rule;
6537 u32 unused = 0;
6538 u8 action;
6539 int ret;
6540
6541 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
6542 dev_err(&hdev->pdev->dev,
6543 "flow table director is not supported\n");
6544 return -EOPNOTSUPP;
6545 }
6546
6547 if (!hdev->fd_en) {
6548 dev_err(&hdev->pdev->dev,
6549 "please enable flow director first\n");
6550 return -EOPNOTSUPP;
6551 }
6552
6553 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6554
6555 ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6556 if (ret)
6557 return ret;
6558
6559 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6560 &action, &q_index);
6561 if (ret)
6562 return ret;
6563
6564 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6565 if (!rule)
6566 return -ENOMEM;
6567
6568 ret = hclge_fd_get_tuple(fs, rule, &info);
6569 if (ret) {
6570 kfree(rule);
6571 return ret;
6572 }
6573
6574 rule->flow_type = fs->flow_type;
6575 rule->location = fs->location;
6576 rule->unused_tuple = unused;
6577 rule->vf_id = dst_vport_id;
6578 rule->queue_id = q_index;
6579 rule->action = action;
6580 rule->rule_type = HCLGE_FD_EP_ACTIVE;
6581
6582 ret = hclge_add_fd_entry_common(hdev, rule);
6583 if (ret)
6584 kfree(rule);
6585
6586 return ret;
6587 }
6588
6589 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6590 struct ethtool_rxnfc *cmd)
6591 {
6592 struct hclge_vport *vport = hclge_get_vport(handle);
6593 struct hclge_dev *hdev = vport->back;
6594 struct ethtool_rx_flow_spec *fs;
6595 int ret;
6596
6597 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6598 return -EOPNOTSUPP;
6599
6600 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6601
6602 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6603 return -EINVAL;
6604
6605 spin_lock_bh(&hdev->fd_rule_lock);
6606 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6607 !test_bit(fs->location, hdev->fd_bmap)) {
6608 dev_err(&hdev->pdev->dev,
6609 "Delete fail, rule %u is inexistent\n", fs->location);
6610 spin_unlock_bh(&hdev->fd_rule_lock);
6611 return -ENOENT;
6612 }
6613
6614 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6615 NULL, false);
6616 if (ret)
6617 goto out;
6618
6619 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6620
6621 out:
6622 spin_unlock_bh(&hdev->fd_rule_lock);
6623 return ret;
6624 }
6625
6626 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6627 bool clear_list)
6628 {
6629 struct hclge_fd_rule *rule;
6630 struct hlist_node *node;
6631 u16 location;
6632
6633 spin_lock_bh(&hdev->fd_rule_lock);
6634
6635 for_each_set_bit(location, hdev->fd_bmap,
6636 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6637 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6638 NULL, false);
6639
6640 if (clear_list) {
6641 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6642 rule_node) {
6643 hlist_del(&rule->rule_node);
6644 kfree(rule);
6645 }
6646 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6647 hdev->hclge_fd_rule_num = 0;
6648 bitmap_zero(hdev->fd_bmap,
6649 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6650 }
6651
6652 spin_unlock_bh(&hdev->fd_rule_lock);
6653 }
6654
6655 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6656 {
6657 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6658 return;
6659
6660 hclge_clear_fd_rules_in_list(hdev, true);
6661 hclge_fd_disable_user_def(hdev);
6662 }
6663
6664 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6665 {
6666 struct hclge_vport *vport = hclge_get_vport(handle);
6667 struct hclge_dev *hdev = vport->back;
6668 struct hclge_fd_rule *rule;
6669 struct hlist_node *node;
6670
6671 /* Return ok here, because reset error handling will check this
6672 * return value. If error is returned here, the reset process will
6673 * fail.
6674 */
6675 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6676 return 0;
6677
6678 /* if fd is disabled, the rules should not be restored during reset */
6679 if (!hdev->fd_en)
6680 return 0;
6681
6682 spin_lock_bh(&hdev->fd_rule_lock);
6683 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6684 if (rule->state == HCLGE_FD_ACTIVE)
6685 rule->state = HCLGE_FD_TO_ADD;
6686 }
6687 spin_unlock_bh(&hdev->fd_rule_lock);
6688 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6689
6690 return 0;
6691 }
6692
6693 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6694 struct ethtool_rxnfc *cmd)
6695 {
6696 struct hclge_vport *vport = hclge_get_vport(handle);
6697 struct hclge_dev *hdev = vport->back;
6698
6699 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle))
6700 return -EOPNOTSUPP;
6701
6702 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6703 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6704
6705 return 0;
6706 }
6707
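/* The hclge_fd_get_*_info() helpers below convert the tuples stored in a
 * hclge_fd_rule back into the matching ethtool flow spec fields; tuples
 * flagged in rule->unused_tuple are reported with an all-zero mask.
 */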
6708 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6709 struct ethtool_tcpip4_spec *spec,
6710 struct ethtool_tcpip4_spec *spec_mask)
6711 {
6712 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6713 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6714 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6715
6716 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6717 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6718 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6719
6720 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6721 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6722 0 : cpu_to_be16(rule->tuples_mask.src_port);
6723
6724 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6725 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6726 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6727
6728 spec->tos = rule->tuples.ip_tos;
6729 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6730 0 : rule->tuples_mask.ip_tos;
6731 }
6732
6733 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6734 struct ethtool_usrip4_spec *spec,
6735 struct ethtool_usrip4_spec *spec_mask)
6736 {
6737 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6738 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6739 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6740
6741 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6742 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6743 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6744
6745 spec->tos = rule->tuples.ip_tos;
6746 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6747 0 : rule->tuples_mask.ip_tos;
6748
6749 spec->proto = rule->tuples.ip_proto;
6750 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6751 0 : rule->tuples_mask.ip_proto;
6752
6753 spec->ip_ver = ETH_RX_NFC_IP4;
6754 }
6755
6756 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6757 struct ethtool_tcpip6_spec *spec,
6758 struct ethtool_tcpip6_spec *spec_mask)
6759 {
6760 ipv6_addr_cpu_to_be32(spec->ip6src, rule->tuples.src_ip);
6761 ipv6_addr_cpu_to_be32(spec->ip6dst, rule->tuples.dst_ip);
6762 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6763 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6764 else
6765 ipv6_addr_cpu_to_be32(spec_mask->ip6src,
6766 rule->tuples_mask.src_ip);
6767
6768 if (rule->unused_tuple & BIT(INNER_DST_IP))
6769 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6770 else
6771 ipv6_addr_cpu_to_be32(spec_mask->ip6dst,
6772 rule->tuples_mask.dst_ip);
6773
6774 spec->tclass = rule->tuples.ip_tos;
6775 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6776 0 : rule->tuples_mask.ip_tos;
6777
6778 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6779 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6780 0 : cpu_to_be16(rule->tuples_mask.src_port);
6781
6782 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6783 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6784 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6785 }
6786
6787 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6788 struct ethtool_usrip6_spec *spec,
6789 struct ethtool_usrip6_spec *spec_mask)
6790 {
6791 ipv6_addr_cpu_to_be32(spec->ip6src, rule->tuples.src_ip);
6792 ipv6_addr_cpu_to_be32(spec->ip6dst, rule->tuples.dst_ip);
6793 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6794 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6795 else
6796 ipv6_addr_cpu_to_be32(spec_mask->ip6src,
6797 rule->tuples_mask.src_ip);
6798
6799 if (rule->unused_tuple & BIT(INNER_DST_IP))
6800 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6801 else
6802 ipv6_addr_cpu_to_be32(spec_mask->ip6dst,
6803 rule->tuples_mask.dst_ip);
6804
6805 spec->tclass = rule->tuples.ip_tos;
6806 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6807 0 : rule->tuples_mask.ip_tos;
6808
6809 spec->l4_proto = rule->tuples.ip_proto;
6810 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6811 0 : rule->tuples_mask.ip_proto;
6812 }
6813
6814 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6815 struct ethhdr *spec,
6816 struct ethhdr *spec_mask)
6817 {
6818 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6819 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6820
6821 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6822 eth_zero_addr(spec_mask->h_source);
6823 else
6824 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6825
6826 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6827 eth_zero_addr(spec_mask->h_dest);
6828 else
6829 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6830
6831 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6832 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6833 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6834 }
6835
6836 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6837 struct hclge_fd_rule *rule)
6838 {
6839 if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6840 HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6841 fs->h_ext.data[0] = 0;
6842 fs->h_ext.data[1] = 0;
6843 fs->m_ext.data[0] = 0;
6844 fs->m_ext.data[1] = 0;
6845 } else {
6846 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6847 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6848 fs->m_ext.data[0] =
6849 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6850 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6851 }
6852 }
6853
6854 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6855 struct hclge_fd_rule *rule)
6856 {
6857 if (fs->flow_type & FLOW_EXT) {
6858 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6859 fs->m_ext.vlan_tci =
6860 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6861 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6862
6863 hclge_fd_get_user_def_info(fs, rule);
6864 }
6865
6866 if (fs->flow_type & FLOW_MAC_EXT) {
6867 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6868 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6869 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6870 else
6871 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6872 rule->tuples_mask.dst_mac);
6873 }
6874 }
6875
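/* The fd rule list is kept sorted by location in ascending order, so the
 * lookup below can stop as soon as it sees a rule with a larger location.
 */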
6876 static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev,
6877 u16 location)
6878 {
6879 struct hclge_fd_rule *rule = NULL;
6880 struct hlist_node *node2;
6881
6882 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6883 if (rule->location == location)
6884 return rule;
6885 else if (rule->location > location)
6886 return NULL;
6887 }
6888
6889 return NULL;
6890 }
6891
6892 static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs,
6893 struct hclge_fd_rule *rule)
6894 {
6895 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6896 fs->ring_cookie = RX_CLS_FLOW_DISC;
6897 } else {
6898 u64 vf_id;
6899
6900 fs->ring_cookie = rule->queue_id;
6901 vf_id = rule->vf_id;
6902 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6903 fs->ring_cookie |= vf_id;
6904 }
6905 }
6906
6907 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6908 struct ethtool_rxnfc *cmd)
6909 {
6910 struct hclge_vport *vport = hclge_get_vport(handle);
6911 struct hclge_fd_rule *rule = NULL;
6912 struct hclge_dev *hdev = vport->back;
6913 struct ethtool_rx_flow_spec *fs;
6914
6915 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6916 return -EOPNOTSUPP;
6917
6918 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6919
6920 spin_lock_bh(&hdev->fd_rule_lock);
6921
6922 rule = hclge_get_fd_rule(hdev, fs->location);
6923 if (!rule) {
6924 spin_unlock_bh(&hdev->fd_rule_lock);
6925 return -ENOENT;
6926 }
6927
6928 fs->flow_type = rule->flow_type;
6929 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6930 case SCTP_V4_FLOW:
6931 case TCP_V4_FLOW:
6932 case UDP_V4_FLOW:
6933 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6934 &fs->m_u.tcp_ip4_spec);
6935 break;
6936 case IP_USER_FLOW:
6937 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6938 &fs->m_u.usr_ip4_spec);
6939 break;
6940 case SCTP_V6_FLOW:
6941 case TCP_V6_FLOW:
6942 case UDP_V6_FLOW:
6943 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6944 &fs->m_u.tcp_ip6_spec);
6945 break;
6946 case IPV6_USER_FLOW:
6947 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6948 &fs->m_u.usr_ip6_spec);
6949 break;
6950 /* The flow type of the fd rule has been checked before it was added to
6951 * the rule list. All other flow types have been handled above, so the
6952 * default case must be ETHER_FLOW.
6953 */
6954 default:
6955 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6956 &fs->m_u.ether_spec);
6957 break;
6958 }
6959
6960 hclge_fd_get_ext_info(fs, rule);
6961
6962 hclge_fd_get_ring_cookie(fs, rule);
6963
6964 spin_unlock_bh(&hdev->fd_rule_lock);
6965
6966 return 0;
6967 }
6968
6969 static int hclge_get_all_rules(struct hnae3_handle *handle,
6970 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6971 {
6972 struct hclge_vport *vport = hclge_get_vport(handle);
6973 struct hclge_dev *hdev = vport->back;
6974 struct hclge_fd_rule *rule;
6975 struct hlist_node *node2;
6976 int cnt = 0;
6977
6978 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6979 return -EOPNOTSUPP;
6980
6981 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6982
6983 spin_lock_bh(&hdev->fd_rule_lock);
6984 hlist_for_each_entry_safe(rule, node2,
6985 &hdev->fd_rule_list, rule_node) {
6986 if (cnt == cmd->rule_cnt) {
6987 spin_unlock_bh(&hdev->fd_rule_lock);
6988 return -EMSGSIZE;
6989 }
6990
6991 if (rule->state == HCLGE_FD_TO_DEL)
6992 continue;
6993
6994 rule_locs[cnt] = rule->location;
6995 cnt++;
6996 }
6997
6998 spin_unlock_bh(&hdev->fd_rule_lock);
6999
7000 cmd->rule_cnt = cnt;
7001
7002 return 0;
7003 }
7004
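/* Extract the tuples used by aRFS rules from the dissected flow keys:
 * ethertype, L4 protocol, source/destination addresses and the
 * destination port.
 */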
7005 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7006 struct hclge_fd_rule_tuples *tuples)
7007 {
7008 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7009 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7010
7011 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7012 tuples->ip_proto = fkeys->basic.ip_proto;
7013 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7014
7015 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7016 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7017 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7018 } else {
7019 int i;
7020
7021 for (i = 0; i < IPV6_ADDR_WORDS; i++) {
7022 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7023 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7024 }
7025 }
7026 }
7027
7028 /* traverse all rules and check whether an existing rule has the same tuples */
7029 static struct hclge_fd_rule *
7030 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7031 const struct hclge_fd_rule_tuples *tuples)
7032 {
7033 struct hclge_fd_rule *rule = NULL;
7034 struct hlist_node *node;
7035
7036 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7037 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7038 return rule;
7039 }
7040
7041 return NULL;
7042 }
7043
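/* Build an aRFS rule from the extracted tuples: the ethertype, IP protocol,
 * addresses and destination port are matched, while the MAC, VLAN, TOS and
 * source port tuples are marked as unused.
 */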
7044 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7045 struct hclge_fd_rule *rule)
7046 {
7047 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7048 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7049 BIT(INNER_SRC_PORT);
7050 rule->action = 0;
7051 rule->vf_id = 0;
7052 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7053 rule->state = HCLGE_FD_TO_ADD;
7054 if (tuples->ether_proto == ETH_P_IP) {
7055 if (tuples->ip_proto == IPPROTO_TCP)
7056 rule->flow_type = TCP_V4_FLOW;
7057 else
7058 rule->flow_type = UDP_V4_FLOW;
7059 } else {
7060 if (tuples->ip_proto == IPPROTO_TCP)
7061 rule->flow_type = TCP_V6_FLOW;
7062 else
7063 rule->flow_type = UDP_V6_FLOW;
7064 }
7065 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7066 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7067 }
7068
7069 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7070 u16 flow_id, struct flow_keys *fkeys)
7071 {
7072 struct hclge_vport *vport = hclge_get_vport(handle);
7073 struct hclge_fd_rule_tuples new_tuples = {};
7074 struct hclge_dev *hdev = vport->back;
7075 struct hclge_fd_rule *rule;
7076 u16 bit_id;
7077
7078 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
7079 return -EOPNOTSUPP;
7080
7081 /* when an fd rule added by the user already exists,
7082 * arfs should not take effect
7083 */
7084 spin_lock_bh(&hdev->fd_rule_lock);
7085 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7086 hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7087 spin_unlock_bh(&hdev->fd_rule_lock);
7088 return -EOPNOTSUPP;
7089 }
7090
7091 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7092
7093 /* check whether a flow director filter already exists for this flow:
7094 * if not, create a new filter for it;
7095 * if a filter exists with a different queue id, modify the filter;
7096 * if a filter exists with the same queue id, do nothing
7097 */
7098 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7099 if (!rule) {
7100 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7101 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7102 spin_unlock_bh(&hdev->fd_rule_lock);
7103 return -ENOSPC;
7104 }
7105
7106 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7107 if (!rule) {
7108 spin_unlock_bh(&hdev->fd_rule_lock);
7109 return -ENOMEM;
7110 }
7111
7112 rule->location = bit_id;
7113 rule->arfs.flow_id = flow_id;
7114 rule->queue_id = queue_id;
7115 hclge_fd_build_arfs_rule(&new_tuples, rule);
7116 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7117 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7118 } else if (rule->queue_id != queue_id) {
7119 rule->queue_id = queue_id;
7120 rule->state = HCLGE_FD_TO_ADD;
7121 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7122 hclge_task_schedule(hdev, 0);
7123 }
7124 spin_unlock_bh(&hdev->fd_rule_lock);
7125 return rule->location;
7126 }
7127
7128 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7129 {
7130 #ifdef CONFIG_RFS_ACCEL
7131 struct hnae3_handle *handle = &hdev->vport[0].nic;
7132 struct hclge_fd_rule *rule;
7133 struct hlist_node *node;
7134
7135 spin_lock_bh(&hdev->fd_rule_lock);
7136 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7137 spin_unlock_bh(&hdev->fd_rule_lock);
7138 return;
7139 }
7140 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7141 if (rule->state != HCLGE_FD_ACTIVE)
7142 continue;
7143 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7144 rule->arfs.flow_id, rule->location)) {
7145 rule->state = HCLGE_FD_TO_DEL;
7146 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7147 }
7148 }
7149 spin_unlock_bh(&hdev->fd_rule_lock);
7150 #endif
7151 }
7152
7153 /* the caller must hold fd_rule_lock when calling this function */
7154 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7155 {
7156 #ifdef CONFIG_RFS_ACCEL
7157 struct hclge_fd_rule *rule;
7158 struct hlist_node *node;
7159 int ret;
7160
7161 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7162 return 0;
7163
7164 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7165 switch (rule->state) {
7166 case HCLGE_FD_TO_DEL:
7167 case HCLGE_FD_ACTIVE:
7168 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7169 rule->location, NULL, false);
7170 if (ret)
7171 return ret;
7172 fallthrough;
7173 case HCLGE_FD_TO_ADD:
7174 hclge_fd_dec_rule_cnt(hdev, rule->location);
7175 hlist_del(&rule->rule_node);
7176 kfree(rule);
7177 break;
7178 default:
7179 break;
7180 }
7181 }
7182 hclge_sync_fd_state(hdev);
7183
7184 #endif
7185 return 0;
7186 }
7187
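/* The hclge_get_cls_key_*() helpers below translate tc flower match keys
 * into fd rule tuples; keys that are absent from the match are recorded in
 * rule->unused_tuple so they are skipped when the rule is programmed.
 */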
7188 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7189 struct hclge_fd_rule *rule)
7190 {
7191 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7192 struct flow_match_basic match;
7193 u16 ethtype_key, ethtype_mask;
7194
7195 flow_rule_match_basic(flow, &match);
7196 ethtype_key = ntohs(match.key->n_proto);
7197 ethtype_mask = ntohs(match.mask->n_proto);
7198
7199 if (ethtype_key == ETH_P_ALL) {
7200 ethtype_key = 0;
7201 ethtype_mask = 0;
7202 }
7203 rule->tuples.ether_proto = ethtype_key;
7204 rule->tuples_mask.ether_proto = ethtype_mask;
7205 rule->tuples.ip_proto = match.key->ip_proto;
7206 rule->tuples_mask.ip_proto = match.mask->ip_proto;
7207 } else {
7208 rule->unused_tuple |= BIT(INNER_IP_PROTO);
7209 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7210 }
7211 }
7212
7213 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7214 struct hclge_fd_rule *rule)
7215 {
7216 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7217 struct flow_match_eth_addrs match;
7218
7219 flow_rule_match_eth_addrs(flow, &match);
7220 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7221 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7222 ether_addr_copy(rule->tuples.src_mac, match.key->src);
7223 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7224 } else {
7225 rule->unused_tuple |= BIT(INNER_DST_MAC);
7226 rule->unused_tuple |= BIT(INNER_SRC_MAC);
7227 }
7228 }
7229
7230 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7231 struct hclge_fd_rule *rule)
7232 {
7233 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7234 struct flow_match_vlan match;
7235
7236 flow_rule_match_vlan(flow, &match);
7237 rule->tuples.vlan_tag1 = match.key->vlan_id |
7238 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
7239 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7240 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7241 } else {
7242 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7243 }
7244 }
7245
7246 static int hclge_get_cls_key_ip(const struct flow_rule *flow,
7247 struct hclge_fd_rule *rule,
7248 struct netlink_ext_ack *extack)
7249 {
7250 u16 addr_type = 0;
7251
7252 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7253 struct flow_match_control match;
7254
7255 flow_rule_match_control(flow, &match);
7256 addr_type = match.key->addr_type;
7257
7258 if (flow_rule_has_control_flags(match.mask->flags, extack))
7259 return -EOPNOTSUPP;
7260 }
7261
7262 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7263 struct flow_match_ipv4_addrs match;
7264
7265 flow_rule_match_ipv4_addrs(flow, &match);
7266 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7267 rule->tuples_mask.src_ip[IPV4_INDEX] =
7268 be32_to_cpu(match.mask->src);
7269 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7270 rule->tuples_mask.dst_ip[IPV4_INDEX] =
7271 be32_to_cpu(match.mask->dst);
7272 } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7273 struct flow_match_ipv6_addrs match;
7274
7275 flow_rule_match_ipv6_addrs(flow, &match);
7276 ipv6_addr_be32_to_cpu(rule->tuples.src_ip,
7277 match.key->src.s6_addr32);
7278 ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip,
7279 match.mask->src.s6_addr32);
7280 ipv6_addr_be32_to_cpu(rule->tuples.dst_ip,
7281 match.key->dst.s6_addr32);
7282 ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip,
7283 match.mask->dst.s6_addr32);
7284 } else {
7285 rule->unused_tuple |= BIT(INNER_SRC_IP);
7286 rule->unused_tuple |= BIT(INNER_DST_IP);
7287 }
7288
7289 return 0;
7290 }
7291
7292 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7293 struct hclge_fd_rule *rule)
7294 {
7295 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7296 struct flow_match_ports match;
7297
7298 flow_rule_match_ports(flow, &match);
7299
7300 rule->tuples.src_port = be16_to_cpu(match.key->src);
7301 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7302 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7303 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7304 } else {
7305 rule->unused_tuple |= BIT(INNER_SRC_PORT);
7306 rule->unused_tuple |= BIT(INNER_DST_PORT);
7307 }
7308 }
7309
7310 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7311 struct flow_cls_offload *cls_flower,
7312 struct hclge_fd_rule *rule)
7313 {
7314 struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7315 struct netlink_ext_ack *extack = cls_flower->common.extack;
7316 struct flow_dissector *dissector = flow->match.dissector;
7317 int ret;
7318
7319 if (dissector->used_keys &
7320 ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
7321 BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
7322 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7323 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
7324 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7325 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7326 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) {
7327 dev_err(&hdev->pdev->dev, "unsupported key set: %#llx\n",
7328 dissector->used_keys);
7329 return -EOPNOTSUPP;
7330 }
7331
7332 hclge_get_cls_key_basic(flow, rule);
7333 hclge_get_cls_key_mac(flow, rule);
7334 hclge_get_cls_key_vlan(flow, rule);
7335
7336 ret = hclge_get_cls_key_ip(flow, rule, extack);
7337 if (ret)
7338 return ret;
7339
7340 hclge_get_cls_key_port(flow, rule);
7341
7342 return 0;
7343 }
7344
7345 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7346 struct flow_cls_offload *cls_flower, int tc)
7347 {
7348 u32 prio = cls_flower->common.prio;
7349
7350 if (tc < 0 || tc > hdev->tc_max) {
7351 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7352 return -EINVAL;
7353 }
7354
7355 if (prio == 0 ||
7356 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7357 dev_err(&hdev->pdev->dev,
7358 "prio %u should be in range[1, %u]\n",
7359 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7360 return -EINVAL;
7361 }
7362
7363 if (test_bit(prio - 1, hdev->fd_bmap)) {
7364 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7365 return -EINVAL;
7366 }
7367 return 0;
7368 }
7369
7370 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7371 struct flow_cls_offload *cls_flower,
7372 int tc)
7373 {
7374 struct hclge_vport *vport = hclge_get_vport(handle);
7375 struct hclge_dev *hdev = vport->back;
7376 struct hclge_fd_rule *rule;
7377 int ret;
7378
7379 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
7380 dev_err(&hdev->pdev->dev,
7381 "cls flower is not supported\n");
7382 return -EOPNOTSUPP;
7383 }
7384
7385 ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7386 if (ret) {
7387 dev_err(&hdev->pdev->dev,
7388 "failed to check cls flower params, ret = %d\n", ret);
7389 return ret;
7390 }
7391
7392 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7393 if (!rule)
7394 return -ENOMEM;
7395
7396 ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7397 if (ret) {
7398 kfree(rule);
7399 return ret;
7400 }
7401
7402 rule->action = HCLGE_FD_ACTION_SELECT_TC;
7403 rule->cls_flower.tc = tc;
7404 rule->location = cls_flower->common.prio - 1;
7405 rule->vf_id = 0;
7406 rule->cls_flower.cookie = cls_flower->cookie;
7407 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7408
7409 ret = hclge_add_fd_entry_common(hdev, rule);
7410 if (ret)
7411 kfree(rule);
7412
7413 return ret;
7414 }
7415
7416 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7417 unsigned long cookie)
7418 {
7419 struct hclge_fd_rule *rule;
7420 struct hlist_node *node;
7421
7422 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7423 if (rule->cls_flower.cookie == cookie)
7424 return rule;
7425 }
7426
7427 return NULL;
7428 }
7429
7430 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7431 struct flow_cls_offload *cls_flower)
7432 {
7433 struct hclge_vport *vport = hclge_get_vport(handle);
7434 struct hclge_dev *hdev = vport->back;
7435 struct hclge_fd_rule *rule;
7436 int ret;
7437
7438 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
7439 return -EOPNOTSUPP;
7440
7441 spin_lock_bh(&hdev->fd_rule_lock);
7442
7443 rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7444 if (!rule) {
7445 spin_unlock_bh(&hdev->fd_rule_lock);
7446 return -EINVAL;
7447 }
7448
7449 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7450 NULL, false);
7451 if (ret) {
7452 /* if the tcam config fails, set the rule state to TO_DEL,
7453 * so the rule will be deleted when the periodic
7454 * task is scheduled.
7455 */
7456 hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL);
7457 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7458 spin_unlock_bh(&hdev->fd_rule_lock);
7459 return ret;
7460 }
7461
7462 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7463 spin_unlock_bh(&hdev->fd_rule_lock);
7464
7465 return 0;
7466 }
7467
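/* Push pending rule changes to hardware: rules in TO_ADD state are written
 * to the TCAM, rules in TO_DEL state are removed and freed. On failure the
 * FD_TBL_CHANGED flag is set again so the periodic task retries later.
 */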
7468 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7469 {
7470 struct hclge_fd_rule *rule;
7471 struct hlist_node *node;
7472 int ret = 0;
7473
7474 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7475 return;
7476
7477 spin_lock_bh(&hdev->fd_rule_lock);
7478
7479 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7480 switch (rule->state) {
7481 case HCLGE_FD_TO_ADD:
7482 ret = hclge_fd_config_rule(hdev, rule);
7483 if (ret)
7484 goto out;
7485 rule->state = HCLGE_FD_ACTIVE;
7486 break;
7487 case HCLGE_FD_TO_DEL:
7488 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7489 rule->location, NULL, false);
7490 if (ret)
7491 goto out;
7492 hclge_fd_dec_rule_cnt(hdev, rule->location);
7493 hclge_fd_free_node(hdev, rule);
7494 break;
7495 default:
7496 break;
7497 }
7498 }
7499
7500 out:
7501 if (ret)
7502 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7503
7504 spin_unlock_bh(&hdev->fd_rule_lock);
7505 }
7506
7507 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7508 {
7509 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
7510 return;
7511
7512 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7513 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7514
7515 hclge_clear_fd_rules_in_list(hdev, clear_list);
7516 }
7517
7518 hclge_sync_fd_user_def_cfg(hdev, false);
7519
7520 hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7521 }
7522
7523 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7524 {
7525 struct hclge_vport *vport = hclge_get_vport(handle);
7526 struct hclge_dev *hdev = vport->back;
7527
7528 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7529 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7530 }
7531
7532 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7533 {
7534 struct hclge_vport *vport = hclge_get_vport(handle);
7535 struct hclge_dev *hdev = vport->back;
7536
7537 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
7538 }
7539
7540 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7541 {
7542 struct hclge_vport *vport = hclge_get_vport(handle);
7543 struct hclge_dev *hdev = vport->back;
7544
7545 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7546 }
7547
7548 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7549 {
7550 struct hclge_vport *vport = hclge_get_vport(handle);
7551 struct hclge_dev *hdev = vport->back;
7552
7553 return hdev->rst_stats.hw_reset_done_cnt;
7554 }
7555
7556 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7557 {
7558 struct hclge_vport *vport = hclge_get_vport(handle);
7559 struct hclge_dev *hdev = vport->back;
7560
7561 hdev->fd_en = enable;
7562
7563 if (!enable)
7564 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7565 else
7566 hclge_restore_fd_entries(handle);
7567
7568 hclge_task_schedule(hdev, 0);
7569 }
7570
7571 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7572 {
7573 #define HCLGE_LINK_STATUS_WAIT_CNT 3
7574
7575 struct hclge_desc desc;
7576 struct hclge_config_mac_mode_cmd *req =
7577 (struct hclge_config_mac_mode_cmd *)desc.data;
7578 u32 loop_en = 0;
7579 int ret;
7580
7581 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7582
7583 if (enable) {
7584 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7585 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7586 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7587 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7588 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7589 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7590 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7591 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7592 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7593 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7594 }
7595
7596 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7597
7598 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7599 if (ret) {
7600 dev_err(&hdev->pdev->dev,
7601 "mac enable fail, ret =%d.\n", ret);
7602 return;
7603 }
7604
7605 if (!enable)
7606 hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN,
7607 HCLGE_LINK_STATUS_WAIT_CNT);
7608 }
7609
7610 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7611 u8 switch_param, u8 param_mask)
7612 {
7613 struct hclge_mac_vlan_switch_cmd *req;
7614 struct hclge_desc desc;
7615 u32 func_id;
7616 int ret;
7617
7618 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7619 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7620
7621 /* read current config parameter */
7622 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7623 true);
7624 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7625 req->func_id = cpu_to_le32(func_id);
7626
7627 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7628 if (ret) {
7629 dev_err(&hdev->pdev->dev,
7630 "read mac vlan switch parameter fail, ret = %d\n", ret);
7631 return ret;
7632 }
7633
7634 /* modify and write new config parameter */
7635 hclge_comm_cmd_reuse_desc(&desc, false);
7636 req->switch_param = (req->switch_param & param_mask) | switch_param;
7637 req->param_mask = param_mask;
7638
7639 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7640 if (ret)
7641 dev_err(&hdev->pdev->dev,
7642 "set mac vlan switch parameter fail, ret = %d\n", ret);
7643 return ret;
7644 }
7645
7646 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7647 int link_ret)
7648 {
7649 #define HCLGE_PHY_LINK_STATUS_NUM 200
7650
7651 struct phy_device *phydev = hdev->hw.mac.phydev;
7652 int i = 0;
7653 int ret;
7654
7655 do {
7656 ret = phy_read_status(phydev);
7657 if (ret) {
7658 dev_err(&hdev->pdev->dev,
7659 "phy update link status fail, ret = %d\n", ret);
7660 return;
7661 }
7662
7663 if (phydev->link == link_ret)
7664 break;
7665
7666 msleep(HCLGE_LINK_STATUS_MS);
7667 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7668 }
7669
7670 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
7671 int wait_cnt)
7672 {
7673 int link_status;
7674 int i = 0;
7675 int ret;
7676
7677 do {
7678 ret = hclge_get_mac_link_status(hdev, &link_status);
7679 if (ret)
7680 return ret;
7681 if (link_status == link_ret)
7682 return 0;
7683
7684 msleep(HCLGE_LINK_STATUS_MS);
7685 } while (++i < wait_cnt);
7686 return -EBUSY;
7687 }
7688
7689 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7690 bool is_phy)
7691 {
7692 #define HCLGE_MAC_LINK_STATUS_NUM 100
7693
7694 int link_ret;
7695
7696 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7697
7698 if (is_phy)
7699 hclge_phy_link_status_wait(hdev, link_ret);
7700
7701 return hclge_mac_link_status_wait(hdev, link_ret,
7702 HCLGE_MAC_LINK_STATUS_NUM);
7703 }
7704
7705 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7706 {
7707 struct hclge_config_mac_mode_cmd *req;
7708 struct hclge_desc desc;
7709 u32 loop_en;
7710 int ret;
7711
7712 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7713 /* 1 Read out the MAC mode config first */
7714 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7715 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7716 if (ret) {
7717 dev_err(&hdev->pdev->dev,
7718 "mac loopback get fail, ret =%d.\n", ret);
7719 return ret;
7720 }
7721
7722 /* 2 Then setup the loopback flag */
7723 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7724 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7725
7726 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7727
7728 /* 3 Config the mac work mode with the loopback flag
7729 * and its original configuration parameters
7730 */
7731 hclge_comm_cmd_reuse_desc(&desc, false);
7732 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7733 if (ret)
7734 dev_err(&hdev->pdev->dev,
7735 "mac loopback set fail, ret =%d.\n", ret);
7736 return ret;
7737 }
7738
7739 static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en,
7740 enum hnae3_loop loop_mode)
7741 {
7742 struct hclge_common_lb_cmd *req;
7743 struct hclge_desc desc;
7744 u8 loop_mode_b;
7745 int ret;
7746
7747 req = (struct hclge_common_lb_cmd *)desc.data;
7748 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7749
7750 switch (loop_mode) {
7751 case HNAE3_LOOP_SERIAL_SERDES:
7752 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7753 break;
7754 case HNAE3_LOOP_PARALLEL_SERDES:
7755 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7756 break;
7757 case HNAE3_LOOP_PHY:
7758 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7759 break;
7760 default:
7761 dev_err(&hdev->pdev->dev,
7762 "unsupported loopback mode %d\n", loop_mode);
7763 return -ENOTSUPP;
7764 }
7765
7766 req->mask = loop_mode_b;
7767 if (en)
7768 req->enable = loop_mode_b;
7769
7770 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7771 if (ret)
7772 dev_err(&hdev->pdev->dev,
7773 "failed to send loopback cmd, loop_mode = %d, ret = %d\n",
7774 loop_mode, ret);
7775
7776 return ret;
7777 }
7778
7779 static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev)
7780 {
7781 #define HCLGE_COMMON_LB_RETRY_MS 10
7782 #define HCLGE_COMMON_LB_RETRY_NUM 100
7783
7784 struct hclge_common_lb_cmd *req;
7785 struct hclge_desc desc;
7786 u32 i = 0;
7787 int ret;
7788
7789 req = (struct hclge_common_lb_cmd *)desc.data;
7790
7791 do {
7792 msleep(HCLGE_COMMON_LB_RETRY_MS);
7793 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7794 true);
7795 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7796 if (ret) {
7797 dev_err(&hdev->pdev->dev,
7798 "failed to get loopback done status, ret = %d\n",
7799 ret);
7800 return ret;
7801 }
7802 } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7803 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7804
7805 if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7806 dev_err(&hdev->pdev->dev, "wait loopback timeout\n");
7807 return -EBUSY;
7808 } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7809 dev_err(&hdev->pdev->dev, "failed to do loopback test\n");
7810 return -EIO;
7811 }
7812
7813 return 0;
7814 }
7815
7816 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7817 enum hnae3_loop loop_mode)
7818 {
7819 int ret;
7820
7821 ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode);
7822 if (ret)
7823 return ret;
7824
7825 return hclge_cfg_common_loopback_wait(hdev);
7826 }
7827
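/* Enable or disable serdes (common) loopback, update the MAC mode to match
 * and wait for the MAC link status to reflect the new state.
 */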
7828 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7829 enum hnae3_loop loop_mode)
7830 {
7831 int ret;
7832
7833 ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7834 if (ret)
7835 return ret;
7836
7837 hclge_cfg_mac_mode(hdev, en);
7838
7839 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7840 if (ret)
7841 dev_err(&hdev->pdev->dev,
7842 "serdes loopback config mac mode timeout\n");
7843
7844 return ret;
7845 }
7846
7847 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7848 struct phy_device *phydev)
7849 {
7850 int ret;
7851
7852 if (!phydev->suspended) {
7853 ret = phy_suspend(phydev);
7854 if (ret)
7855 return ret;
7856 }
7857
7858 ret = phy_resume(phydev);
7859 if (ret)
7860 return ret;
7861
7862 return phy_loopback(phydev, true);
7863 }
7864
7865 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7866 struct phy_device *phydev)
7867 {
7868 int ret;
7869
7870 ret = phy_loopback(phydev, false);
7871 if (ret)
7872 return ret;
7873
7874 return phy_suspend(phydev);
7875 }
7876
7877 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7878 {
7879 struct phy_device *phydev = hdev->hw.mac.phydev;
7880 int ret;
7881
7882 if (!phydev) {
7883 if (hnae3_dev_phy_imp_supported(hdev))
7884 return hclge_set_common_loopback(hdev, en,
7885 HNAE3_LOOP_PHY);
7886 return -ENOTSUPP;
7887 }
7888
7889 if (en)
7890 ret = hclge_enable_phy_loopback(hdev, phydev);
7891 else
7892 ret = hclge_disable_phy_loopback(hdev, phydev);
7893 if (ret) {
7894 dev_err(&hdev->pdev->dev,
7895 "set phy loopback fail, ret = %d\n", ret);
7896 return ret;
7897 }
7898
7899 hclge_cfg_mac_mode(hdev, en);
7900
7901 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7902 if (ret)
7903 dev_err(&hdev->pdev->dev,
7904 "phy loopback config mac mode timeout\n");
7905
7906 return ret;
7907 }
7908
7909 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7910 u16 stream_id, bool enable)
7911 {
7912 struct hclge_desc desc;
7913 struct hclge_cfg_com_tqp_queue_cmd *req =
7914 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7915
7916 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7917 req->tqp_id = cpu_to_le16(tqp_id);
7918 req->stream_id = cpu_to_le16(stream_id);
7919 if (enable)
7920 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7921
7922 return hclge_cmd_send(&hdev->hw, &desc, 1);
7923 }
7924
7925 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7926 {
7927 struct hclge_vport *vport = hclge_get_vport(handle);
7928 struct hclge_dev *hdev = vport->back;
7929 int ret;
7930 u16 i;
7931
7932 for (i = 0; i < handle->kinfo.num_tqps; i++) {
7933 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7934 if (ret)
7935 return ret;
7936 }
7937 return 0;
7938 }
7939
7940 static int hclge_set_loopback(struct hnae3_handle *handle,
7941 enum hnae3_loop loop_mode, bool en)
7942 {
7943 struct hclge_vport *vport = hclge_get_vport(handle);
7944 struct hclge_dev *hdev = vport->back;
7945 int ret = 0;
7946
7947 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7948 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7949 * the same, the packets are looped back in the SSU. If SSU loopback
7950 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7951 */
7952 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7953 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7954
7955 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7956 HCLGE_SWITCH_ALW_LPBK_MASK);
7957 if (ret)
7958 return ret;
7959 }
7960
7961 switch (loop_mode) {
7962 case HNAE3_LOOP_APP:
7963 ret = hclge_set_app_loopback(hdev, en);
7964 break;
7965 case HNAE3_LOOP_SERIAL_SERDES:
7966 case HNAE3_LOOP_PARALLEL_SERDES:
7967 ret = hclge_set_common_loopback(hdev, en, loop_mode);
7968 break;
7969 case HNAE3_LOOP_PHY:
7970 ret = hclge_set_phy_loopback(hdev, en);
7971 break;
7972 case HNAE3_LOOP_EXTERNAL:
7973 break;
7974 default:
7975 ret = -ENOTSUPP;
7976 dev_err(&hdev->pdev->dev,
7977 "loop_mode %d is not supported\n", loop_mode);
7978 break;
7979 }
7980
7981 if (ret)
7982 return ret;
7983
7984 ret = hclge_tqp_enable(handle, en);
7985 if (ret)
7986 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
7987 en ? "enable" : "disable", ret);
7988
7989 return ret;
7990 }
7991
7992 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7993 {
7994 int ret;
7995
7996 ret = hclge_set_app_loopback(hdev, false);
7997 if (ret)
7998 return ret;
7999
8000 ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
8001 if (ret)
8002 return ret;
8003
8004 return hclge_cfg_common_loopback(hdev, false,
8005 HNAE3_LOOP_PARALLEL_SERDES);
8006 }
8007
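/* Wait, bounded by HCLGE_FLUSH_LINK_TIMEOUT iterations, for an in-flight
 * link update in the service task to finish before proceeding.
 */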
8008 static void hclge_flush_link_update(struct hclge_dev *hdev)
8009 {
8010 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
8011
8012 unsigned long last = hdev->serv_processed_cnt;
8013 int i = 0;
8014
8015 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8016 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8017 last == hdev->serv_processed_cnt)
8018 usleep_range(1, 1);
8019 }
8020
8021 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8022 {
8023 struct hclge_vport *vport = hclge_get_vport(handle);
8024 struct hclge_dev *hdev = vport->back;
8025
8026 if (enable) {
8027 hclge_task_schedule(hdev, 0);
8028 } else {
8029 /* Set the DOWN flag here to disable link updating */
8030 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8031
8032 smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */
8033 hclge_flush_link_update(hdev);
8034 }
8035 }
8036
8037 static int hclge_ae_start(struct hnae3_handle *handle)
8038 {
8039 struct hclge_vport *vport = hclge_get_vport(handle);
8040 struct hclge_dev *hdev = vport->back;
8041
8042 /* mac enable */
8043 hclge_cfg_mac_mode(hdev, true);
8044 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8045 hdev->hw.mac.link = 0;
8046
8047 /* reset tqp stats */
8048 hclge_comm_reset_tqp_stats(handle);
8049
8050 hclge_mac_start_phy(hdev);
8051
8052 return 0;
8053 }
8054
8055 static void hclge_ae_stop(struct hnae3_handle *handle)
8056 {
8057 struct hclge_vport *vport = hclge_get_vport(handle);
8058 struct hclge_dev *hdev = vport->back;
8059
8060 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8061 spin_lock_bh(&hdev->fd_rule_lock);
8062 hclge_clear_arfs_rules(hdev);
8063 spin_unlock_bh(&hdev->fd_rule_lock);
8064
8065 /* If it is not PF reset or FLR, the firmware will disable the MAC,
8066 * so we only need to stop the phy here.
8067 */
8068 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
8069 hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE,
8070 HCLGE_PFC_DISABLE);
8071 if (hdev->reset_type != HNAE3_FUNC_RESET &&
8072 hdev->reset_type != HNAE3_FLR_RESET) {
8073 hclge_mac_stop_phy(hdev);
8074 hclge_update_link_status(hdev);
8075 return;
8076 }
8077 }
8078
8079 hclge_reset_tqp(handle);
8080
8081 hclge_config_mac_tnl_int(hdev, false);
8082
8083 /* Mac disable */
8084 hclge_cfg_mac_mode(hdev, false);
8085
8086 hclge_mac_stop_phy(hdev);
8087
8088 /* reset tqp stats */
8089 hclge_comm_reset_tqp_stats(handle);
8090 hclge_update_link_status(hdev);
8091 }
8092
8093 int hclge_vport_start(struct hclge_vport *vport)
8094 {
8095 struct hclge_dev *hdev = vport->back;
8096
8097 set_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
8098 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8099 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8100 vport->last_active_jiffies = jiffies;
8101 vport->need_notify = 0;
8102
8103 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8104 if (vport->vport_id) {
8105 hclge_restore_mac_table_common(vport);
8106 hclge_restore_vport_vlan_table(vport);
8107 } else {
8108 hclge_restore_hw_table(hdev);
8109 }
8110 }
8111
8112 clear_bit(vport->vport_id, hdev->vport_config_block);
8113
8114 return 0;
8115 }
8116
8117 void hclge_vport_stop(struct hclge_vport *vport)
8118 {
8119 clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
8120 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8121 vport->need_notify = 0;
8122 }
8123
8124 static int hclge_client_start(struct hnae3_handle *handle)
8125 {
8126 struct hclge_vport *vport = hclge_get_vport(handle);
8127
8128 return hclge_vport_start(vport);
8129 }
8130
8131 static void hclge_client_stop(struct hnae3_handle *handle)
8132 {
8133 struct hclge_vport *vport = hclge_get_vport(handle);
8134
8135 hclge_vport_stop(vport);
8136 }
8137
8138 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8139 u16 cmdq_resp, u8 resp_code,
8140 enum hclge_mac_vlan_tbl_opcode op)
8141 {
8142 struct hclge_dev *hdev = vport->back;
8143
8144 if (cmdq_resp) {
8145 dev_err(&hdev->pdev->dev,
8146 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8147 cmdq_resp);
8148 return -EIO;
8149 }
8150
8151 if (op == HCLGE_MAC_VLAN_ADD) {
8152 if (!resp_code || resp_code == 1)
8153 return 0;
8154 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8155 resp_code == HCLGE_ADD_MC_OVERFLOW)
8156 return -ENOSPC;
8157
8158 dev_err(&hdev->pdev->dev,
8159 "add mac addr failed for undefined, code=%u.\n",
8160 resp_code);
8161 return -EIO;
8162 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
8163 if (!resp_code) {
8164 return 0;
8165 } else if (resp_code == 1) {
8166 dev_dbg(&hdev->pdev->dev,
8167 "remove mac addr failed for miss.\n");
8168 return -ENOENT;
8169 }
8170
8171 dev_err(&hdev->pdev->dev,
8172 "remove mac addr failed for undefined, code=%u.\n",
8173 resp_code);
8174 return -EIO;
8175 } else if (op == HCLGE_MAC_VLAN_LKUP) {
8176 if (!resp_code) {
8177 return 0;
8178 } else if (resp_code == 1) {
8179 dev_dbg(&hdev->pdev->dev,
8180 "lookup mac addr failed for miss.\n");
8181 return -ENOENT;
8182 }
8183
8184 dev_err(&hdev->pdev->dev,
8185 "lookup mac addr failed for undefined, code=%u.\n",
8186 resp_code);
8187 return -EIO;
8188 }
8189
8190 dev_err(&hdev->pdev->dev,
8191 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8192
8193 return -EINVAL;
8194 }
8195
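/* Set or clear the bit for the given vfid in the function bitmap carried
 * by a multi-descriptor MAC/VLAN command: the first 192 function ids live
 * in desc[1], the remaining ones in desc[2].
 */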
8196 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8197 {
8198 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8199
8200 unsigned int word_num;
8201 unsigned int bit_num;
8202
8203 if (vfid > 255 || vfid < 0)
8204 return -EIO;
8205
8206 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8207 word_num = vfid / 32;
8208 bit_num = vfid % 32;
8209 if (clr)
8210 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8211 else
8212 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8213 } else {
8214 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8215 bit_num = vfid % 32;
8216 if (clr)
8217 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8218 else
8219 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8220 }
8221
8222 return 0;
8223 }
8224
8225 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8226 {
8227 #define HCLGE_DESC_NUMBER 3
8228 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8229 int i, j;
8230
8231 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8232 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8233 if (desc[i].data[j])
8234 return false;
8235
8236 return true;
8237 }
8238
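/* Pack a MAC address into the mac_addr_hi32/mac_addr_lo16 fields of a
 * MAC-VLAN table entry and set the entry type bits for multicast entries.
 */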
8239 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8240 const u8 *addr, bool is_mc)
8241 {
8242 const unsigned char *mac_addr = addr;
8243 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8244 (mac_addr[0]) | (mac_addr[1] << 8);
8245 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
8246
8247 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8248 if (is_mc) {
8249 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8250 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8251 }
8252
8253 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8254 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8255 }
8256
8257 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8258 struct hclge_mac_vlan_tbl_entry_cmd *req)
8259 {
8260 struct hclge_dev *hdev = vport->back;
8261 struct hclge_desc desc;
8262 u8 resp_code;
8263 u16 retval;
8264 int ret;
8265
8266 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8267
8268 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8269
8270 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8271 if (ret) {
8272 dev_err(&hdev->pdev->dev,
8273 "del mac addr failed for cmd_send, ret =%d.\n",
8274 ret);
8275 return ret;
8276 }
8277 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8278 retval = le16_to_cpu(desc.retval);
8279
8280 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8281 HCLGE_MAC_VLAN_REMOVE);
8282 }
8283
8284 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8285 struct hclge_mac_vlan_tbl_entry_cmd *req,
8286 struct hclge_desc *desc,
8287 bool is_mc)
8288 {
8289 struct hclge_dev *hdev = vport->back;
8290 u8 resp_code;
8291 u16 retval;
8292 int ret;
8293
8294 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8295 if (is_mc) {
8296 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8297 memcpy(desc[0].data,
8298 req,
8299 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8300 hclge_cmd_setup_basic_desc(&desc[1],
8301 HCLGE_OPC_MAC_VLAN_ADD,
8302 true);
8303 desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8304 hclge_cmd_setup_basic_desc(&desc[2],
8305 HCLGE_OPC_MAC_VLAN_ADD,
8306 true);
8307 ret = hclge_cmd_send(&hdev->hw, desc, 3);
8308 } else {
8309 memcpy(desc[0].data,
8310 req,
8311 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8312 ret = hclge_cmd_send(&hdev->hw, desc, 1);
8313 }
8314 if (ret) {
8315 dev_err(&hdev->pdev->dev,
8316 "lookup mac addr failed for cmd_send, ret =%d.\n",
8317 ret);
8318 return ret;
8319 }
8320 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8321 retval = le16_to_cpu(desc[0].retval);
8322
8323 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8324 HCLGE_MAC_VLAN_LKUP);
8325 }
8326
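/* Add a MAC-VLAN table entry. Unicast entries fit in a single descriptor;
 * multicast entries use the three chained descriptors passed in mc_desc.
 */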
8327 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8328 struct hclge_mac_vlan_tbl_entry_cmd *req,
8329 struct hclge_desc *mc_desc)
8330 {
8331 struct hclge_dev *hdev = vport->back;
8332 int cfg_status;
8333 u8 resp_code;
8334 u16 retval;
8335 int ret;
8336
8337 if (!mc_desc) {
8338 struct hclge_desc desc;
8339
8340 hclge_cmd_setup_basic_desc(&desc,
8341 HCLGE_OPC_MAC_VLAN_ADD,
8342 false);
8343 memcpy(desc.data, req,
8344 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8345 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8346 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8347 retval = le16_to_cpu(desc.retval);
8348
8349 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8350 resp_code,
8351 HCLGE_MAC_VLAN_ADD);
8352 } else {
8353 hclge_comm_cmd_reuse_desc(&mc_desc[0], false);
8354 mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8355 hclge_comm_cmd_reuse_desc(&mc_desc[1], false);
8356 mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8357 hclge_comm_cmd_reuse_desc(&mc_desc[2], false);
8358 mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT);
8359 memcpy(mc_desc[0].data, req,
8360 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8361 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8362 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8363 retval = le16_to_cpu(mc_desc[0].retval);
8364
8365 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8366 resp_code,
8367 HCLGE_MAC_VLAN_ADD);
8368 }
8369
8370 if (ret) {
8371 dev_err(&hdev->pdev->dev,
8372 "add mac addr failed for cmd_send, ret =%d.\n",
8373 ret);
8374 return ret;
8375 }
8376
8377 return cfg_status;
8378 }
8379
8380 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8381 u16 *allocated_size)
8382 {
8383 struct hclge_umv_spc_alc_cmd *req;
8384 struct hclge_desc desc;
8385 int ret;
8386
8387 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8388 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8389
8390 req->space_size = cpu_to_le32(space_size);
8391
8392 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8393 if (ret) {
8394 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8395 ret);
8396 return ret;
8397 }
8398
8399 *allocated_size = le32_to_cpu(desc.data[1]);
8400
8401 return 0;
8402 }
8403
8404 static int hclge_init_umv_space(struct hclge_dev *hdev)
8405 {
8406 u16 allocated_size = 0;
8407 int ret;
8408
8409 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8410 if (ret)
8411 return ret;
8412
8413 if (allocated_size < hdev->wanted_umv_size)
8414 dev_warn(&hdev->pdev->dev,
8415 "failed to alloc umv space, want %u, get %u\n",
8416 hdev->wanted_umv_size, allocated_size);
8417
8418 hdev->max_umv_size = allocated_size;
8419 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8420 hdev->share_umv_size = hdev->priv_umv_size +
8421 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8422
8423 if (hdev->ae_dev->dev_specs.mc_mac_size)
8424 set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
8425
8426 return 0;
8427 }
8428
8429 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8430 {
8431 struct hclge_vport *vport;
8432 int i;
8433
8434 for (i = 0; i < hdev->num_alloc_vport; i++) {
8435 vport = &hdev->vport[i];
8436 vport->used_umv_num = 0;
8437 }
8438
8439 mutex_lock(&hdev->vport_lock);
8440 hdev->share_umv_size = hdev->priv_umv_size +
8441 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8442 mutex_unlock(&hdev->vport_lock);
8443
8444 hdev->used_mc_mac_num = 0;
8445 }
8446
8447 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8448 {
8449 struct hclge_dev *hdev = vport->back;
8450 bool is_full;
8451
8452 if (need_lock)
8453 mutex_lock(&hdev->vport_lock);
8454
8455 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8456 hdev->share_umv_size == 0);
8457
8458 if (need_lock)
8459 mutex_unlock(&hdev->vport_lock);
8460
8461 return is_full;
8462 }
8463
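/* hclge_update_umv_space(): UMV space bookkeeping only, no hardware access.
 * When an entry is freed, return it to the shared pool if the vport had
 * exceeded its private quota; when an entry is consumed, draw from the
 * shared pool once the private quota is used up. Called with vport_lock
 * held.
 */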
8464 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8465 {
8466 struct hclge_dev *hdev = vport->back;
8467
8468 if (is_free) {
8469 if (vport->used_umv_num > hdev->priv_umv_size)
8470 hdev->share_umv_size++;
8471
8472 if (vport->used_umv_num > 0)
8473 vport->used_umv_num--;
8474 } else {
8475 if (vport->used_umv_num >= hdev->priv_umv_size &&
8476 hdev->share_umv_size > 0)
8477 hdev->share_umv_size--;
8478 vport->used_umv_num++;
8479 }
8480 }
8481
8482 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8483 const u8 *mac_addr)
8484 {
8485 struct hclge_mac_node *mac_node, *tmp;
8486
8487 list_for_each_entry_safe(mac_node, tmp, list, node)
8488 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8489 return mac_node;
8490
8491 return NULL;
8492 }
8493
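/* hclge_update_mac_node(): apply a newly requested state to an existing mac
 * node. TO_ADD on top of TO_DEL marks the node ACTIVE again; TO_DEL on top
 * of TO_ADD frees the node (it never reached hardware), otherwise the node
 * is marked TO_DEL; ACTIVE on top of TO_ADD marks the node ACTIVE.
 */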
8494 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8495 enum HCLGE_MAC_NODE_STATE state)
8496 {
8497 switch (state) {
8498 /* from set_rx_mode or tmp_add_list */
8499 case HCLGE_MAC_TO_ADD:
8500 if (mac_node->state == HCLGE_MAC_TO_DEL)
8501 mac_node->state = HCLGE_MAC_ACTIVE;
8502 break;
8503 /* only from set_rx_mode */
8504 case HCLGE_MAC_TO_DEL:
8505 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8506 list_del(&mac_node->node);
8507 kfree(mac_node);
8508 } else {
8509 mac_node->state = HCLGE_MAC_TO_DEL;
8510 }
8511 break;
8512 /* only from tmp_add_list, the mac_node->state won't be
8513 * ACTIVE.
8514 */
8515 case HCLGE_MAC_ACTIVE:
8516 if (mac_node->state == HCLGE_MAC_TO_ADD)
8517 mac_node->state = HCLGE_MAC_ACTIVE;
8518
8519 break;
8520 }
8521 }
8522
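/* hclge_update_mac_list(): record an add/delete request for a unicast or
 * multicast address in the vport's software mac list. The hardware table
 * itself is updated later by the periodic sync task (hclge_sync_mac_table),
 * which is triggered via the MAC_TBL_CHANGE vport state bit.
 */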
8523 int hclge_update_mac_list(struct hclge_vport *vport,
8524 enum HCLGE_MAC_NODE_STATE state,
8525 enum HCLGE_MAC_ADDR_TYPE mac_type,
8526 const unsigned char *addr)
8527 {
8528 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8529 struct hclge_dev *hdev = vport->back;
8530 struct hclge_mac_node *mac_node;
8531 struct list_head *list;
8532
8533 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8534 &vport->uc_mac_list : &vport->mc_mac_list;
8535
8536 spin_lock_bh(&vport->mac_list_lock);
8537
8538 /* if the mac addr is already in the mac list, there is no need to add
8539 * a new node; just update the existing node's state: convert it to a
8540 * new state, remove it, or leave it unchanged.
8541 */
8542 mac_node = hclge_find_mac_node(list, addr);
8543 if (mac_node) {
8544 hclge_update_mac_node(mac_node, state);
8545 spin_unlock_bh(&vport->mac_list_lock);
8546 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8547 return 0;
8548 }
8549
8550 /* if this address was never added, there is nothing to delete */
8551 if (state == HCLGE_MAC_TO_DEL) {
8552 spin_unlock_bh(&vport->mac_list_lock);
8553 hnae3_format_mac_addr(format_mac_addr, addr);
8554 dev_err(&hdev->pdev->dev,
8555 "failed to delete address %s from mac list\n",
8556 format_mac_addr);
8557 return -ENOENT;
8558 }
8559
8560 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8561 if (!mac_node) {
8562 spin_unlock_bh(&vport->mac_list_lock);
8563 return -ENOMEM;
8564 }
8565
8566 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8567
8568 mac_node->state = state;
8569 ether_addr_copy(mac_node->mac_addr, addr);
8570 list_add_tail(&mac_node->node, list);
8571
8572 spin_unlock_bh(&vport->mac_list_lock);
8573
8574 return 0;
8575 }
8576
8577 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8578 const unsigned char *addr)
8579 {
8580 struct hclge_vport *vport = hclge_get_vport(handle);
8581
8582 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8583 addr);
8584 }
8585
8586 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8587 const unsigned char *addr)
8588 {
8589 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8590 struct hclge_dev *hdev = vport->back;
8591 struct hclge_mac_vlan_tbl_entry_cmd req;
8592 struct hclge_desc desc;
8593 u16 egress_port = 0;
8594 int ret;
8595
8596 /* mac addr check */
8597 if (is_zero_ether_addr(addr) ||
8598 is_broadcast_ether_addr(addr) ||
8599 is_multicast_ether_addr(addr)) {
8600 hnae3_format_mac_addr(format_mac_addr, addr);
8601 dev_err(&hdev->pdev->dev,
8602 "Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n",
8603 format_mac_addr, is_zero_ether_addr(addr),
8604 is_broadcast_ether_addr(addr),
8605 is_multicast_ether_addr(addr));
8606 return -EINVAL;
8607 }
8608
8609 memset(&req, 0, sizeof(req));
8610
8611 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8612 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8613
8614 req.egress_port = cpu_to_le16(egress_port);
8615
8616 hclge_prepare_mac_addr(&req, addr, false);
8617
8618 /* Lookup the mac address in the mac_vlan table, and add
8619 * it if the entry does not exist. Duplicate unicast entries
8620 * are not allowed in the mac vlan table.
8621 */
8622 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8623 if (ret == -ENOENT) {
8624 mutex_lock(&hdev->vport_lock);
8625 if (!hclge_is_umv_space_full(vport, false)) {
8626 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8627 if (!ret)
8628 hclge_update_umv_space(vport, false);
8629 mutex_unlock(&hdev->vport_lock);
8630 return ret;
8631 }
8632 mutex_unlock(&hdev->vport_lock);
8633
8634 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8635 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8636 hdev->priv_umv_size);
8637
8638 return -ENOSPC;
8639 }
8640
8641 /* check if we just hit the duplicate */
8642 if (!ret)
8643 return -EEXIST;
8644
8645 return ret;
8646 }
8647
8648 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8649 const unsigned char *addr)
8650 {
8651 struct hclge_vport *vport = hclge_get_vport(handle);
8652
8653 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8654 addr);
8655 }
8656
8657 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8658 const unsigned char *addr)
8659 {
8660 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8661 struct hclge_dev *hdev = vport->back;
8662 struct hclge_mac_vlan_tbl_entry_cmd req;
8663 int ret;
8664
8665 /* mac addr check */
8666 if (is_zero_ether_addr(addr) ||
8667 is_broadcast_ether_addr(addr) ||
8668 is_multicast_ether_addr(addr)) {
8669 hnae3_format_mac_addr(format_mac_addr, addr);
8670 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n",
8671 format_mac_addr);
8672 return -EINVAL;
8673 }
8674
8675 memset(&req, 0, sizeof(req));
8676 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8677 hclge_prepare_mac_addr(&req, addr, false);
8678 ret = hclge_remove_mac_vlan_tbl(vport, &req);
8679 if (!ret || ret == -ENOENT) {
8680 mutex_lock(&hdev->vport_lock);
8681 hclge_update_umv_space(vport, true);
8682 mutex_unlock(&hdev->vport_lock);
8683 return 0;
8684 }
8685
8686 return ret;
8687 }
8688
8689 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8690 const unsigned char *addr)
8691 {
8692 struct hclge_vport *vport = hclge_get_vport(handle);
8693
8694 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8695 addr);
8696 }
8697
8698 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8699 const unsigned char *addr)
8700 {
8701 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8702 struct hclge_dev *hdev = vport->back;
8703 struct hclge_mac_vlan_tbl_entry_cmd req;
8704 struct hclge_desc desc[3];
8705 bool is_new_addr = false;
8706 int status;
8707
8708 /* mac addr check */
8709 if (!is_multicast_ether_addr(addr)) {
8710 hnae3_format_mac_addr(format_mac_addr, addr);
8711 dev_err(&hdev->pdev->dev,
8712 "Add mc mac err! invalid mac:%s.\n",
8713 format_mac_addr);
8714 return -EINVAL;
8715 }
8716 memset(&req, 0, sizeof(req));
8717 hclge_prepare_mac_addr(&req, addr, true);
8718 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8719 if (status) {
8720 if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
8721 hdev->used_mc_mac_num >=
8722 hdev->ae_dev->dev_specs.mc_mac_size)
8723 goto err_no_space;
8724
8725 is_new_addr = true;
8726
8727 /* This mac addr does not exist, add a new entry for it */
8728 memset(desc[0].data, 0, sizeof(desc[0].data));
8729 memset(desc[1].data, 0, sizeof(desc[0].data));
8730 memset(desc[2].data, 0, sizeof(desc[0].data));
8731 }
8732 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8733 if (status)
8734 return status;
8735 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8736 if (status == -ENOSPC)
8737 goto err_no_space;
8738 else if (!status && is_new_addr)
8739 hdev->used_mc_mac_num++;
8740
8741 return status;
8742
8743 err_no_space:
8744 /* if already in overflow state, do not print the error every time */
8745 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) {
8746 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8747 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8748 }
8749
8750 return -ENOSPC;
8751 }
8752
8753 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8754 const unsigned char *addr)
8755 {
8756 struct hclge_vport *vport = hclge_get_vport(handle);
8757
8758 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8759 addr);
8760 }
8761
8762 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8763 const unsigned char *addr)
8764 {
8765 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8766 struct hclge_dev *hdev = vport->back;
8767 struct hclge_mac_vlan_tbl_entry_cmd req;
8768 enum hclge_comm_cmd_status status;
8769 struct hclge_desc desc[3];
8770
8771 /* mac addr check */
8772 if (!is_multicast_ether_addr(addr)) {
8773 hnae3_format_mac_addr(format_mac_addr, addr);
8774 dev_dbg(&hdev->pdev->dev,
8775 "Remove mc mac err! invalid mac:%s.\n",
8776 format_mac_addr);
8777 return -EINVAL;
8778 }
8779
8780 memset(&req, 0, sizeof(req));
8781 hclge_prepare_mac_addr(&req, addr, true);
8782 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8783 if (!status) {
8784 /* This mac addr exists, remove this handle's VFID for it */
8785 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8786 if (status)
8787 return status;
8788
8789 if (hclge_is_all_function_id_zero(desc)) {
8790 /* All the vfids are zero, so this entry needs to be deleted */
8791 status = hclge_remove_mac_vlan_tbl(vport, &req);
8792 if (!status)
8793 hdev->used_mc_mac_num--;
8794 } else {
8795 /* Not all the vfids are zero, so just update the vfid bitmap */
8796 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8797 }
8798 } else if (status == -ENOENT) {
8799 status = 0;
8800 }
8801
8802 return status;
8803 }
8804
8805 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8806 struct list_head *list,
8807 enum HCLGE_MAC_ADDR_TYPE mac_type)
8808 {
8809 int (*sync)(struct hclge_vport *vport, const unsigned char *addr);
8810 struct hclge_mac_node *mac_node, *tmp;
8811 int ret;
8812
8813 if (mac_type == HCLGE_MAC_ADDR_UC)
8814 sync = hclge_add_uc_addr_common;
8815 else
8816 sync = hclge_add_mc_addr_common;
8817
8818 list_for_each_entry_safe(mac_node, tmp, list, node) {
8819 ret = sync(vport, mac_node->mac_addr);
8820 if (!ret) {
8821 mac_node->state = HCLGE_MAC_ACTIVE;
8822 } else {
8823 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8824 &vport->state);
8825
8826 /* If one unicast mac address already exists in hardware,
8827 * keep trying the remaining unicast mac addresses, since
8828 * they may be new addresses that can still be added.
8829 * Multicast mac addresses are reusable: even when there is
8830 * no space to add a new multicast mac address, keep checking
8831 * whether the remaining addresses already exist in hardware
8832 * and can be reused.
8833 */
8834 if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) ||
8835 (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC))
8836 break;
8837 }
8838 }
8839 }
8840
8841 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8842 struct list_head *list,
8843 enum HCLGE_MAC_ADDR_TYPE mac_type)
8844 {
8845 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8846 struct hclge_mac_node *mac_node, *tmp;
8847 int ret;
8848
8849 if (mac_type == HCLGE_MAC_ADDR_UC)
8850 unsync = hclge_rm_uc_addr_common;
8851 else
8852 unsync = hclge_rm_mc_addr_common;
8853
8854 list_for_each_entry_safe(mac_node, tmp, list, node) {
8855 ret = unsync(vport, mac_node->mac_addr);
8856 if (!ret || ret == -ENOENT) {
8857 list_del(&mac_node->node);
8858 kfree(mac_node);
8859 } else {
8860 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8861 &vport->state);
8862 break;
8863 }
8864 }
8865 }
8866
8867 static bool hclge_sync_from_add_list(struct list_head *add_list,
8868 struct list_head *mac_list)
8869 {
8870 struct hclge_mac_node *mac_node, *tmp, *new_node;
8871 bool all_added = true;
8872
8873 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8874 if (mac_node->state == HCLGE_MAC_TO_ADD)
8875 all_added = false;
8876
8877 /* if the mac address from tmp_add_list is not in the
8878 * uc/mc_mac_list, a TO_DEL request was received while the
8879 * address was being added to the mac table. If the mac_node
8880 * state is ACTIVE, change it to TO_DEL so it is removed on the
8881 * next sync; otherwise it must be TO_ADD, meaning the address
8882 * was never added to the mac table, so just remove the mac
8883 * node.
8884 */
8885 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8886 if (new_node) {
8887 hclge_update_mac_node(new_node, mac_node->state);
8888 list_del(&mac_node->node);
8889 kfree(mac_node);
8890 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8891 mac_node->state = HCLGE_MAC_TO_DEL;
8892 list_move_tail(&mac_node->node, mac_list);
8893 } else {
8894 list_del(&mac_node->node);
8895 kfree(mac_node);
8896 }
8897 }
8898
8899 return all_added;
8900 }
8901
8902 static void hclge_sync_from_del_list(struct list_head *del_list,
8903 struct list_head *mac_list)
8904 {
8905 struct hclge_mac_node *mac_node, *tmp, *new_node;
8906
8907 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8908 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8909 if (new_node) {
8910 /* If the mac addr exists in the mac list, a new TO_ADD
8911 * request was received during the time window of
8912 * configuring the mac address. The mac node state is
8913 * TO_ADD and the address is still in the hardware
8914 * (because the delete failed), so we just need to change
8915 * the mac node state to ACTIVE.
8916 */
8917 new_node->state = HCLGE_MAC_ACTIVE;
8918 list_del(&mac_node->node);
8919 kfree(mac_node);
8920 } else {
8921 list_move_tail(&mac_node->node, mac_list);
8922 }
8923 }
8924 }
8925
8926 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8927 enum HCLGE_MAC_ADDR_TYPE mac_type,
8928 bool is_all_added)
8929 {
8930 if (mac_type == HCLGE_MAC_ADDR_UC) {
8931 if (is_all_added)
8932 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8933 else if (hclge_is_umv_space_full(vport, true))
8934 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8935 } else {
8936 if (is_all_added)
8937 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8938 else
8939 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8940 }
8941 }
8942
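/* hclge_sync_vport_mac_table(): under mac_list_lock, move TO_DEL nodes to a
 * temporary delete list and copy TO_ADD nodes to a temporary add list, then
 * program the hardware outside the lock. The results are merged back
 * afterwards, leaving failed entries in the list so they are retried on the
 * next sync.
 */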
8943 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8944 enum HCLGE_MAC_ADDR_TYPE mac_type)
8945 {
8946 struct hclge_mac_node *mac_node, *tmp, *new_node;
8947 struct list_head tmp_add_list, tmp_del_list;
8948 struct list_head *list;
8949 bool all_added;
8950
8951 INIT_LIST_HEAD(&tmp_add_list);
8952 INIT_LIST_HEAD(&tmp_del_list);
8953
8954 /* move the mac addrs to tmp_add_list and tmp_del_list, so that
8955 * we can add/delete them in hardware outside the spin lock
8956 */
8957 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8958 &vport->uc_mac_list : &vport->mc_mac_list;
8959
8960 spin_lock_bh(&vport->mac_list_lock);
8961
8962 list_for_each_entry_safe(mac_node, tmp, list, node) {
8963 switch (mac_node->state) {
8964 case HCLGE_MAC_TO_DEL:
8965 list_move_tail(&mac_node->node, &tmp_del_list);
8966 break;
8967 case HCLGE_MAC_TO_ADD:
8968 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8969 if (!new_node)
8970 goto stop_traverse;
8971 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8972 new_node->state = mac_node->state;
8973 list_add_tail(&new_node->node, &tmp_add_list);
8974 break;
8975 default:
8976 break;
8977 }
8978 }
8979
8980 stop_traverse:
8981 spin_unlock_bh(&vport->mac_list_lock);
8982
8983 /* delete first, in order to get max mac table space for adding */
8984 hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
8985 hclge_sync_vport_mac_list(vport, &tmp_add_list, mac_type);
8986
8987 /* if adding/deleting some mac addresses failed, move them back to
8988 * the mac_list and retry on the next sync.
8989 */
8990 spin_lock_bh(&vport->mac_list_lock);
8991
8992 hclge_sync_from_del_list(&tmp_del_list, list);
8993 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8994
8995 spin_unlock_bh(&vport->mac_list_lock);
8996
8997 hclge_update_overflow_flags(vport, mac_type, all_added);
8998 }
8999
9000 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
9001 {
9002 struct hclge_dev *hdev = vport->back;
9003
9004 if (test_bit(vport->vport_id, hdev->vport_config_block))
9005 return false;
9006
9007 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
9008 return true;
9009
9010 return false;
9011 }
9012
9013 static void hclge_sync_mac_table(struct hclge_dev *hdev)
9014 {
9015 int i;
9016
9017 for (i = 0; i < hdev->num_alloc_vport; i++) {
9018 struct hclge_vport *vport = &hdev->vport[i];
9019
9020 if (!hclge_need_sync_mac_table(vport))
9021 continue;
9022
9023 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
9024 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
9025 }
9026 }
9027
9028 static void hclge_build_del_list(struct list_head *list,
9029 bool is_del_list,
9030 struct list_head *tmp_del_list)
9031 {
9032 struct hclge_mac_node *mac_cfg, *tmp;
9033
9034 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
9035 switch (mac_cfg->state) {
9036 case HCLGE_MAC_TO_DEL:
9037 case HCLGE_MAC_ACTIVE:
9038 list_move_tail(&mac_cfg->node, tmp_del_list);
9039 break;
9040 case HCLGE_MAC_TO_ADD:
9041 if (is_del_list) {
9042 list_del(&mac_cfg->node);
9043 kfree(mac_cfg);
9044 }
9045 break;
9046 }
9047 }
9048 }
9049
9050 static void hclge_unsync_del_list(struct hclge_vport *vport,
9051 int (*unsync)(struct hclge_vport *vport,
9052 const unsigned char *addr),
9053 bool is_del_list,
9054 struct list_head *tmp_del_list)
9055 {
9056 struct hclge_mac_node *mac_cfg, *tmp;
9057 int ret;
9058
9059 list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9060 ret = unsync(vport, mac_cfg->mac_addr);
9061 if (!ret || ret == -ENOENT) {
9062 /* clear all mac addrs from hardware, but keep them in
9063 * the mac list so they can be restored after the vf
9064 * reset finishes.
9065 */
9066 if (!is_del_list &&
9067 mac_cfg->state == HCLGE_MAC_ACTIVE) {
9068 mac_cfg->state = HCLGE_MAC_TO_ADD;
9069 } else {
9070 list_del(&mac_cfg->node);
9071 kfree(mac_cfg);
9072 }
9073 } else if (is_del_list) {
9074 mac_cfg->state = HCLGE_MAC_TO_DEL;
9075 }
9076 }
9077 }
9078
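/* hclge_rm_vport_all_mac_table(): remove all of a vport's unicast or
 * multicast addresses from the hardware table. When is_del_list is false
 * (e.g. during VF reset), the entries are kept in the software list marked
 * TO_ADD, and the vport is flagged in vport_config_block so the addresses
 * can be restored later instead of being dropped.
 */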
9079 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9080 enum HCLGE_MAC_ADDR_TYPE mac_type)
9081 {
9082 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9083 struct hclge_dev *hdev = vport->back;
9084 struct list_head tmp_del_list, *list;
9085
9086 if (mac_type == HCLGE_MAC_ADDR_UC) {
9087 list = &vport->uc_mac_list;
9088 unsync = hclge_rm_uc_addr_common;
9089 } else {
9090 list = &vport->mc_mac_list;
9091 unsync = hclge_rm_mc_addr_common;
9092 }
9093
9094 INIT_LIST_HEAD(&tmp_del_list);
9095
9096 if (!is_del_list)
9097 set_bit(vport->vport_id, hdev->vport_config_block);
9098
9099 spin_lock_bh(&vport->mac_list_lock);
9100
9101 hclge_build_del_list(list, is_del_list, &tmp_del_list);
9102
9103 spin_unlock_bh(&vport->mac_list_lock);
9104
9105 hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9106
9107 spin_lock_bh(&vport->mac_list_lock);
9108
9109 hclge_sync_from_del_list(&tmp_del_list, list);
9110
9111 spin_unlock_bh(&vport->mac_list_lock);
9112 }
9113
9114 /* remove all mac addresses when uninitializing */
9115 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9116 enum HCLGE_MAC_ADDR_TYPE mac_type)
9117 {
9118 struct hclge_mac_node *mac_node, *tmp;
9119 struct hclge_dev *hdev = vport->back;
9120 struct list_head tmp_del_list, *list;
9121
9122 INIT_LIST_HEAD(&tmp_del_list);
9123
9124 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9125 &vport->uc_mac_list : &vport->mc_mac_list;
9126
9127 spin_lock_bh(&vport->mac_list_lock);
9128
9129 list_for_each_entry_safe(mac_node, tmp, list, node) {
9130 switch (mac_node->state) {
9131 case HCLGE_MAC_TO_DEL:
9132 case HCLGE_MAC_ACTIVE:
9133 list_move_tail(&mac_node->node, &tmp_del_list);
9134 break;
9135 case HCLGE_MAC_TO_ADD:
9136 list_del(&mac_node->node);
9137 kfree(mac_node);
9138 break;
9139 }
9140 }
9141
9142 spin_unlock_bh(&vport->mac_list_lock);
9143
9144 hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
9145
9146 if (!list_empty(&tmp_del_list))
9147 dev_warn(&hdev->pdev->dev,
9148 "uninit %s mac list for vport %u not completely.\n",
9149 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9150 vport->vport_id);
9151
9152 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9153 list_del(&mac_node->node);
9154 kfree(mac_node);
9155 }
9156 }
9157
9158 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9159 {
9160 struct hclge_vport *vport;
9161 int i;
9162
9163 for (i = 0; i < hdev->num_alloc_vport; i++) {
9164 vport = &hdev->vport[i];
9165 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9166 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9167 }
9168 }
9169
9170 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9171 u16 cmdq_resp, u8 resp_code)
9172 {
9173 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
9174 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
9175 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
9176 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
9177
9178 int return_status;
9179
9180 if (cmdq_resp) {
9181 dev_err(&hdev->pdev->dev,
9182 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9183 cmdq_resp);
9184 return -EIO;
9185 }
9186
9187 switch (resp_code) {
9188 case HCLGE_ETHERTYPE_SUCCESS_ADD:
9189 case HCLGE_ETHERTYPE_ALREADY_ADD:
9190 return_status = 0;
9191 break;
9192 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9193 dev_err(&hdev->pdev->dev,
9194 "add mac ethertype failed for manager table overflow.\n");
9195 return_status = -EIO;
9196 break;
9197 case HCLGE_ETHERTYPE_KEY_CONFLICT:
9198 dev_err(&hdev->pdev->dev,
9199 "add mac ethertype failed for key conflict.\n");
9200 return_status = -EIO;
9201 break;
9202 default:
9203 dev_err(&hdev->pdev->dev,
9204 "add mac ethertype failed for undefined, code=%u.\n",
9205 resp_code);
9206 return_status = -EIO;
9207 }
9208
9209 return return_status;
9210 }
9211
9212 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9213 u8 *mac_addr)
9214 {
9215 struct hclge_vport *vport = hclge_get_vport(handle);
9216 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
9217 struct hclge_dev *hdev = vport->back;
9218
9219 vport = hclge_get_vf_vport(hdev, vf);
9220 if (!vport)
9221 return -EINVAL;
9222
9223 hnae3_format_mac_addr(format_mac_addr, mac_addr);
9224 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9225 dev_info(&hdev->pdev->dev,
9226 "Specified MAC(=%s) is same as before, no change committed!\n",
9227 format_mac_addr);
9228 return 0;
9229 }
9230
9231 ether_addr_copy(vport->vf_info.mac, mac_addr);
9232
9233 /* there is a time window before the PF knows the VF is not
9234 * alive, which may cause the mailbox send to fail; that does
9235 * not matter, as the VF will query the MAC when it reinits.
9236 */
9237 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9238 dev_info(&hdev->pdev->dev,
9239 "MAC of VF %d has been set to %s, and it will be reinitialized!\n",
9240 vf, format_mac_addr);
9241 (void)hclge_inform_reset_assert_to_vf(vport);
9242 return 0;
9243 }
9244
9245 dev_info(&hdev->pdev->dev,
9246 "MAC of VF %d has been set to %s, will be active after VF reset\n",
9247 vf, format_mac_addr);
9248 return 0;
9249 }
9250
9251 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9252 const struct hclge_mac_mgr_tbl_entry_cmd *req)
9253 {
9254 struct hclge_desc desc;
9255 u8 resp_code;
9256 u16 retval;
9257 int ret;
9258
9259 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9260 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9261
9262 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9263 if (ret) {
9264 dev_err(&hdev->pdev->dev,
9265 "add mac ethertype failed for cmd_send, ret =%d.\n",
9266 ret);
9267 return ret;
9268 }
9269
9270 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9271 retval = le16_to_cpu(desc.retval);
9272
9273 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9274 }
9275
9276 static int init_mgr_tbl(struct hclge_dev *hdev)
9277 {
9278 int ret;
9279 int i;
9280
9281 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9282 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9283 if (ret) {
9284 dev_err(&hdev->pdev->dev,
9285 "add mac ethertype failed, ret =%d.\n",
9286 ret);
9287 return ret;
9288 }
9289 }
9290
9291 return 0;
9292 }
9293
9294 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9295 {
9296 struct hclge_vport *vport = hclge_get_vport(handle);
9297 struct hclge_dev *hdev = vport->back;
9298
9299 ether_addr_copy(p, hdev->hw.mac.mac_addr);
9300 }
9301
9302 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9303 const u8 *old_addr, const u8 *new_addr)
9304 {
9305 struct list_head *list = &vport->uc_mac_list;
9306 struct hclge_mac_node *old_node, *new_node;
9307
9308 new_node = hclge_find_mac_node(list, new_addr);
9309 if (!new_node) {
9310 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9311 if (!new_node)
9312 return -ENOMEM;
9313
9314 new_node->state = HCLGE_MAC_TO_ADD;
9315 ether_addr_copy(new_node->mac_addr, new_addr);
9316 list_add(&new_node->node, list);
9317 } else {
9318 if (new_node->state == HCLGE_MAC_TO_DEL)
9319 new_node->state = HCLGE_MAC_ACTIVE;
9320
9321 /* make sure the new addr is at the list head, to avoid the
9322 * dev addr not being re-added into the mac table due to the
9323 * umv space limitation after a global/imp reset, which clears
9324 * the mac table in hardware.
9325 */
9326 list_move(&new_node->node, list);
9327 }
9328
9329 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9330 old_node = hclge_find_mac_node(list, old_addr);
9331 if (old_node) {
9332 if (old_node->state == HCLGE_MAC_TO_ADD) {
9333 list_del(&old_node->node);
9334 kfree(old_node);
9335 } else {
9336 old_node->state = HCLGE_MAC_TO_DEL;
9337 }
9338 }
9339 }
9340
9341 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9342
9343 return 0;
9344 }
9345
9346 static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
9347 bool is_first)
9348 {
9349 const unsigned char *new_addr = (const unsigned char *)p;
9350 struct hclge_vport *vport = hclge_get_vport(handle);
9351 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
9352 struct hclge_dev *hdev = vport->back;
9353 unsigned char *old_addr = NULL;
9354 int ret;
9355
9356 /* mac addr check */
9357 if (is_zero_ether_addr(new_addr) ||
9358 is_broadcast_ether_addr(new_addr) ||
9359 is_multicast_ether_addr(new_addr)) {
9360 hnae3_format_mac_addr(format_mac_addr, new_addr);
9361 dev_err(&hdev->pdev->dev,
9362 "change uc mac err! invalid mac: %s.\n",
9363 format_mac_addr);
9364 return -EINVAL;
9365 }
9366
9367 ret = hclge_pause_addr_cfg(hdev, new_addr);
9368 if (ret) {
9369 dev_err(&hdev->pdev->dev,
9370 "failed to configure mac pause address, ret = %d\n",
9371 ret);
9372 return ret;
9373 }
9374
9375 if (!is_first)
9376 old_addr = hdev->hw.mac.mac_addr;
9377
9378 spin_lock_bh(&vport->mac_list_lock);
9379 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9380 if (ret) {
9381 hnae3_format_mac_addr(format_mac_addr, new_addr);
9382 dev_err(&hdev->pdev->dev,
9383 "failed to change the mac addr:%s, ret = %d\n",
9384 format_mac_addr, ret);
9385 spin_unlock_bh(&vport->mac_list_lock);
9386
9387 if (!is_first)
9388 hclge_pause_addr_cfg(hdev, old_addr);
9389
9390 return ret;
9391 }
9392 /* we must update the dev addr under the spin lock to prevent it from
9393 * being removed by the set_rx_mode path.
9394 */
9395 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9396 spin_unlock_bh(&vport->mac_list_lock);
9397
9398 hclge_task_schedule(hdev, 0);
9399
9400 return 0;
9401 }
9402
9403 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9404 {
9405 struct mii_ioctl_data *data = if_mii(ifr);
9406
9407 if (!hnae3_dev_phy_imp_supported(hdev))
9408 return -EOPNOTSUPP;
9409
9410 switch (cmd) {
9411 case SIOCGMIIPHY:
9412 data->phy_id = hdev->hw.mac.phy_addr;
9413 /* this command reads phy id and register at the same time */
9414 fallthrough;
9415 case SIOCGMIIREG:
9416 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9417 return 0;
9418
9419 case SIOCSMIIREG:
9420 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9421 default:
9422 return -EOPNOTSUPP;
9423 }
9424 }
9425
9426 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9427 int cmd)
9428 {
9429 struct hclge_vport *vport = hclge_get_vport(handle);
9430 struct hclge_dev *hdev = vport->back;
9431
9432 switch (cmd) {
9433 case SIOCGHWTSTAMP:
9434 return hclge_ptp_get_cfg(hdev, ifr);
9435 case SIOCSHWTSTAMP:
9436 return hclge_ptp_set_cfg(hdev, ifr);
9437 default:
9438 if (!hdev->hw.mac.phydev)
9439 return hclge_mii_ioctl(hdev, ifr, cmd);
9440 }
9441
9442 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9443 }
9444
9445 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9446 bool bypass_en)
9447 {
9448 struct hclge_port_vlan_filter_bypass_cmd *req;
9449 struct hclge_desc desc;
9450 int ret;
9451
9452 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9453 req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9454 req->vf_id = vf_id;
9455 hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9456 bypass_en ? 1 : 0);
9457
9458 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9459 if (ret)
9460 dev_err(&hdev->pdev->dev,
9461 "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9462 vf_id, ret);
9463
9464 return ret;
9465 }
9466
9467 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9468 u8 fe_type, bool filter_en, u8 vf_id)
9469 {
9470 struct hclge_vlan_filter_ctrl_cmd *req;
9471 struct hclge_desc desc;
9472 int ret;
9473
9474 /* read current vlan filter parameter */
9475 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9476 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9477 req->vlan_type = vlan_type;
9478 req->vf_id = vf_id;
9479
9480 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9481 if (ret) {
9482 dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n",
9483 vf_id, ret);
9484 return ret;
9485 }
9486
9487 /* modify and write new config parameter */
9488 hclge_comm_cmd_reuse_desc(&desc, false);
9489 req->vlan_fe = filter_en ?
9490 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9491
9492 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9493 if (ret)
9494 dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n",
9495 vf_id, ret);
9496
9497 return ret;
9498 }
9499
9500 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9501 {
9502 struct hclge_dev *hdev = vport->back;
9503 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9504 int ret;
9505
9506 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9507 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9508 HCLGE_FILTER_FE_EGRESS_V1_B,
9509 enable, vport->vport_id);
9510
9511 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9512 HCLGE_FILTER_FE_EGRESS, enable,
9513 vport->vport_id);
9514 if (ret)
9515 return ret;
9516
9517 if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9518 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9519 !enable);
9520 } else if (!vport->vport_id) {
9521 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9522 enable = false;
9523
9524 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9525 HCLGE_FILTER_FE_INGRESS,
9526 enable, 0);
9527 }
9528
9529 return ret;
9530 }
9531
9532 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9533 {
9534 struct hnae3_handle *handle = &vport->nic;
9535 struct hclge_vport_vlan_cfg *vlan, *tmp;
9536 struct hclge_dev *hdev = vport->back;
9537
9538 if (vport->vport_id) {
9539 if (vport->port_base_vlan_cfg.state !=
9540 HNAE3_PORT_BASE_VLAN_DISABLE)
9541 return true;
9542
9543 if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9544 return false;
9545 } else if (handle->netdev_flags & HNAE3_USER_UPE) {
9546 return false;
9547 }
9548
9549 if (!vport->req_vlan_fltr_en)
9550 return false;
9551
9552 /* for compatibility with older devices, always enable the vlan filter */
9553 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9554 return true;
9555
9556 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9557 if (vlan->vlan_id != 0)
9558 return true;
9559
9560 return false;
9561 }
9562
9563 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9564 {
9565 struct hclge_dev *hdev = vport->back;
9566 bool need_en;
9567 int ret;
9568
9569 mutex_lock(&hdev->vport_lock);
9570
9571 vport->req_vlan_fltr_en = request_en;
9572
9573 need_en = hclge_need_enable_vport_vlan_filter(vport);
9574 if (need_en == vport->cur_vlan_fltr_en) {
9575 mutex_unlock(&hdev->vport_lock);
9576 return 0;
9577 }
9578
9579 ret = hclge_set_vport_vlan_filter(vport, need_en);
9580 if (ret) {
9581 mutex_unlock(&hdev->vport_lock);
9582 return ret;
9583 }
9584
9585 vport->cur_vlan_fltr_en = need_en;
9586
9587 mutex_unlock(&hdev->vport_lock);
9588
9589 return 0;
9590 }
9591
9592 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9593 {
9594 struct hclge_vport *vport = hclge_get_vport(handle);
9595
9596 return hclge_enable_vport_vlan_filter(vport, enable);
9597 }
9598
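/* hclge_set_vf_vlan_filter_cmd(): build two chained descriptors whose
 * combined vf_bitmap covers all functions, set the bit for the given vfid,
 * and send the add (is_kill == false) or kill request for the vlan id.
 */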
9599 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9600 bool is_kill, u16 vlan,
9601 struct hclge_desc *desc)
9602 {
9603 struct hclge_vlan_filter_vf_cfg_cmd *req0;
9604 struct hclge_vlan_filter_vf_cfg_cmd *req1;
9605 u8 vf_byte_val;
9606 u8 vf_byte_off;
9607 int ret;
9608
9609 hclge_cmd_setup_basic_desc(&desc[0],
9610 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9611 hclge_cmd_setup_basic_desc(&desc[1],
9612 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9613
9614 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
9615
9616 vf_byte_off = vfid / 8;
9617 vf_byte_val = 1 << (vfid % 8);
9618
9619 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9620 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9621
9622 req0->vlan_id = cpu_to_le16(vlan);
9623 req0->vlan_cfg = is_kill;
9624
9625 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9626 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9627 else
9628 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9629
9630 ret = hclge_cmd_send(&hdev->hw, desc, 2);
9631 if (ret) {
9632 dev_err(&hdev->pdev->dev,
9633 "Send vf vlan command fail, ret =%d.\n",
9634 ret);
9635 return ret;
9636 }
9637
9638 return 0;
9639 }
9640
9641 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9642 bool is_kill, struct hclge_desc *desc)
9643 {
9644 struct hclge_vlan_filter_vf_cfg_cmd *req;
9645
9646 req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9647
9648 if (!is_kill) {
9649 #define HCLGE_VF_VLAN_NO_ENTRY 2
9650 if (!req->resp_code || req->resp_code == 1)
9651 return 0;
9652
9653 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9654 set_bit(vfid, hdev->vf_vlan_full);
9655 dev_warn(&hdev->pdev->dev,
9656 "vf vlan table is full, vf vlan filter is disabled\n");
9657 return 0;
9658 }
9659
9660 dev_err(&hdev->pdev->dev,
9661 "Add vf vlan filter fail, ret =%u.\n",
9662 req->resp_code);
9663 } else {
9664 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
9665 if (!req->resp_code)
9666 return 0;
9667
9668 /* the vf vlan filter is disabled when the vf vlan table is
9669 * full, so new vlan ids are not added into the vf vlan table.
9670 * Just return 0 without a warning, to avoid massive verbose
9671 * logs on unload.
9672 */
9673 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9674 return 0;
9675
9676 dev_err(&hdev->pdev->dev,
9677 "Kill vf vlan filter fail, ret =%u.\n",
9678 req->resp_code);
9679 }
9680
9681 return -EIO;
9682 }
9683
9684 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9685 bool is_kill, u16 vlan)
9686 {
9687 struct hclge_vport *vport = &hdev->vport[vfid];
9688 struct hclge_desc desc[2];
9689 int ret;
9690
9691 /* if the vf vlan table is full, firmware closes the vf vlan filter,
9692 * so it is impossible and unnecessary to add a new vlan id to it.
9693 * If spoof check is enabled and the vf vlan table is full, a new
9694 * vlan must not be added, since tx packets with that vlan id would be dropped.
9695 */
9696 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9697 if (vport->vf_info.spoofchk && vlan) {
9698 dev_err(&hdev->pdev->dev,
9699 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9700 return -EPERM;
9701 }
9702 return 0;
9703 }
9704
9705 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9706 if (ret)
9707 return ret;
9708
9709 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9710 }
9711
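/* hclge_set_port_vlan_filter(): program the port vlan table. Each command
 * addresses a block of HCLGE_VLAN_ID_OFFSET_STEP vlan ids; the vlan id is
 * encoded as a block offset plus a single bit in the offset bitmap.
 */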
9712 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9713 u16 vlan_id, bool is_kill)
9714 {
9715 struct hclge_vlan_filter_pf_cfg_cmd *req;
9716 struct hclge_desc desc;
9717 u8 vlan_offset_byte_val;
9718 u8 vlan_offset_byte;
9719 u8 vlan_offset_160;
9720 int ret;
9721
9722 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9723
9724 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9725 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9726 HCLGE_VLAN_BYTE_SIZE;
9727 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9728
9729 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9730 req->vlan_offset = vlan_offset_160;
9731 req->vlan_cfg = is_kill;
9732 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9733
9734 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9735 if (ret)
9736 dev_err(&hdev->pdev->dev,
9737 "port vlan command, send fail, ret =%d.\n", ret);
9738 return ret;
9739 }
9740
9741 static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id,
9742 u16 vlan_id, bool is_kill)
9743 {
9744 /* vlan 0 may be added twice when 8021q module is enabled */
9745 if (!is_kill && !vlan_id &&
9746 test_bit(vport_id, hdev->vlan_table[vlan_id]))
9747 return false;
9748
9749 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9750 dev_warn(&hdev->pdev->dev,
9751 "Add port vlan failed, vport %u is already in vlan %u\n",
9752 vport_id, vlan_id);
9753 return false;
9754 }
9755
9756 if (is_kill &&
9757 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9758 dev_warn(&hdev->pdev->dev,
9759 "Delete port vlan failed, vport %u is not in vlan %u\n",
9760 vport_id, vlan_id);
9761 return false;
9762 }
9763
9764 return true;
9765 }
9766
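/* hclge_set_vlan_filter_hw(): always update the per-VF vlan table, then
 * update the shared port vlan table only when the first vport joins the
 * vlan or the last vport leaves it.
 */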
9767 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9768 u16 vport_id, u16 vlan_id,
9769 bool is_kill)
9770 {
9771 u16 vport_idx, vport_num = 0;
9772 int ret;
9773
9774 if (is_kill && !vlan_id)
9775 return 0;
9776
9777 if (vlan_id >= VLAN_N_VID)
9778 return -EINVAL;
9779
9780 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9781 if (ret) {
9782 dev_err(&hdev->pdev->dev,
9783 "Set %u vport vlan filter config fail, ret =%d.\n",
9784 vport_id, ret);
9785 return ret;
9786 }
9787
9788 if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill))
9789 return 0;
9790
9791 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9792 vport_num++;
9793
9794 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9795 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9796 is_kill);
9797
9798 return ret;
9799 }
9800
9801 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9802 {
9803 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9804 struct hclge_vport_vtag_tx_cfg_cmd *req;
9805 struct hclge_dev *hdev = vport->back;
9806 struct hclge_desc desc;
9807 u16 bmap_index;
9808 int status;
9809
9810 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9811
9812 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9813 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9814 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9815 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9816 vcfg->accept_tag1 ? 1 : 0);
9817 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9818 vcfg->accept_untag1 ? 1 : 0);
9819 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9820 vcfg->accept_tag2 ? 1 : 0);
9821 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9822 vcfg->accept_untag2 ? 1 : 0);
9823 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9824 vcfg->insert_tag1_en ? 1 : 0);
9825 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9826 vcfg->insert_tag2_en ? 1 : 0);
9827 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9828 vcfg->tag_shift_mode_en ? 1 : 0);
9829 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9830
9831 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9832 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9833 HCLGE_VF_NUM_PER_BYTE;
9834 req->vf_bitmap[bmap_index] =
9835 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9836
9837 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9838 if (status)
9839 dev_err(&hdev->pdev->dev,
9840 "Send port txvlan cfg command fail, ret =%d\n",
9841 status);
9842
9843 return status;
9844 }
9845
9846 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9847 {
9848 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9849 struct hclge_vport_vtag_rx_cfg_cmd *req;
9850 struct hclge_dev *hdev = vport->back;
9851 struct hclge_desc desc;
9852 u16 bmap_index;
9853 int status;
9854
9855 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9856
9857 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9858 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9859 vcfg->strip_tag1_en ? 1 : 0);
9860 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9861 vcfg->strip_tag2_en ? 1 : 0);
9862 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9863 vcfg->vlan1_vlan_prionly ? 1 : 0);
9864 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9865 vcfg->vlan2_vlan_prionly ? 1 : 0);
9866 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9867 vcfg->strip_tag1_discard_en ? 1 : 0);
9868 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9869 vcfg->strip_tag2_discard_en ? 1 : 0);
9870
9871 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9872 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9873 HCLGE_VF_NUM_PER_BYTE;
9874 req->vf_bitmap[bmap_index] =
9875 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9876
9877 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9878 if (status)
9879 dev_err(&hdev->pdev->dev,
9880 "Send port rxvlan cfg command fail, ret =%d\n",
9881 status);
9882
9883 return status;
9884 }
9885
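/* hclge_vlan_offload_cfg(): set up TX tag insertion and RX tag stripping
 * for a vport according to its port-based vlan state. With a port-based
 * vlan active, the given vlan tag/qos is inserted as tag1 on TX, and the
 * RX strip/discard configuration is adjusted accordingly.
 */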
9886 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9887 u16 port_base_vlan_state,
9888 u16 vlan_tag, u8 qos)
9889 {
9890 int ret;
9891
9892 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9893 vport->txvlan_cfg.accept_tag1 = true;
9894 vport->txvlan_cfg.insert_tag1_en = false;
9895 vport->txvlan_cfg.default_tag1 = 0;
9896 } else {
9897 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9898
9899 vport->txvlan_cfg.accept_tag1 =
9900 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9901 vport->txvlan_cfg.insert_tag1_en = true;
9902 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9903 vlan_tag;
9904 }
9905
9906 vport->txvlan_cfg.accept_untag1 = true;
9907
9908 /* accept_tag2 and accept_untag2 are not supported on
9909 * pdev revision 0x20; newer revisions support them, but
9910 * these two fields cannot be configured by the user.
9911 */
9912 vport->txvlan_cfg.accept_tag2 = true;
9913 vport->txvlan_cfg.accept_untag2 = true;
9914 vport->txvlan_cfg.insert_tag2_en = false;
9915 vport->txvlan_cfg.default_tag2 = 0;
9916 vport->txvlan_cfg.tag_shift_mode_en = true;
9917
9918 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9919 vport->rxvlan_cfg.strip_tag1_en = false;
9920 vport->rxvlan_cfg.strip_tag2_en =
9921 vport->rxvlan_cfg.rx_vlan_offload_en;
9922 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9923 } else {
9924 vport->rxvlan_cfg.strip_tag1_en =
9925 vport->rxvlan_cfg.rx_vlan_offload_en;
9926 vport->rxvlan_cfg.strip_tag2_en = true;
9927 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9928 }
9929
9930 vport->rxvlan_cfg.strip_tag1_discard_en = false;
9931 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9932 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9933
9934 ret = hclge_set_vlan_tx_offload_cfg(vport);
9935 if (ret)
9936 return ret;
9937
9938 return hclge_set_vlan_rx_offload_cfg(vport);
9939 }
9940
9941 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9942 {
9943 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9944 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9945 struct hclge_desc desc;
9946 int status;
9947
9948 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9949 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9950 rx_req->ot_fst_vlan_type =
9951 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9952 rx_req->ot_sec_vlan_type =
9953 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9954 rx_req->in_fst_vlan_type =
9955 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9956 rx_req->in_sec_vlan_type =
9957 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9958
9959 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9960 if (status) {
9961 dev_err(&hdev->pdev->dev,
9962 "Send rxvlan protocol type command fail, ret =%d\n",
9963 status);
9964 return status;
9965 }
9966
9967 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9968
9969 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9970 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9971 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9972
9973 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9974 if (status)
9975 dev_err(&hdev->pdev->dev,
9976 "Send txvlan protocol type command fail, ret =%d\n",
9977 status);
9978
9979 return status;
9980 }
9981
9982 static int hclge_init_vlan_filter(struct hclge_dev *hdev)
9983 {
9984 struct hclge_vport *vport;
9985 bool enable = true;
9986 int ret;
9987 int i;
9988
9989 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9990 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9991 HCLGE_FILTER_FE_EGRESS_V1_B,
9992 true, 0);
9993
9994 /* for revision 0x21, vf vlan filter is per function */
9995 for (i = 0; i < hdev->num_alloc_vport; i++) {
9996 vport = &hdev->vport[i];
9997 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9998 HCLGE_FILTER_FE_EGRESS, true,
9999 vport->vport_id);
10000 if (ret)
10001 return ret;
10002 vport->cur_vlan_fltr_en = true;
10003 }
10004
10005 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps) &&
10006 !test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
10007 enable = false;
10008
10009 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
10010 HCLGE_FILTER_FE_INGRESS, enable, 0);
10011 }
10012
10013 static int hclge_init_vlan_type(struct hclge_dev *hdev)
10014 {
10015 hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q;
10016 hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q;
10017 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q;
10018 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q;
10019 hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q;
10020 hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q;
10021
10022 return hclge_set_vlan_protocol_type(hdev);
10023 }
10024
10025 static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev)
10026 {
10027 struct hclge_port_base_vlan_config *cfg;
10028 struct hclge_vport *vport;
10029 int ret;
10030 int i;
10031
10032 for (i = 0; i < hdev->num_alloc_vport; i++) {
10033 vport = &hdev->vport[i];
10034 cfg = &vport->port_base_vlan_cfg;
10035
10036 ret = hclge_vlan_offload_cfg(vport, cfg->state,
10037 cfg->vlan_info.vlan_tag,
10038 cfg->vlan_info.qos);
10039 if (ret)
10040 return ret;
10041 }
10042 return 0;
10043 }
10044
10045 static int hclge_init_vlan_config(struct hclge_dev *hdev)
10046 {
10047 struct hnae3_handle *handle = &hdev->vport[0].nic;
10048 int ret;
10049
10050 ret = hclge_init_vlan_filter(hdev);
10051 if (ret)
10052 return ret;
10053
10054 ret = hclge_init_vlan_type(hdev);
10055 if (ret)
10056 return ret;
10057
10058 ret = hclge_init_vport_vlan_offload(hdev);
10059 if (ret)
10060 return ret;
10061
10062 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
10063 }
10064
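/* hclge_add_vport_vlan_table(): track a vlan id in the vport's software
 * vlan list (recording whether it has been written to the hardware table)
 * so that the filter can be restored after a reset.
 */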
10065 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10066 bool writen_to_tbl)
10067 {
10068 struct hclge_vport_vlan_cfg *vlan, *tmp;
10069 struct hclge_dev *hdev = vport->back;
10070
10071 mutex_lock(&hdev->vport_lock);
10072
10073 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10074 if (vlan->vlan_id == vlan_id) {
10075 mutex_unlock(&hdev->vport_lock);
10076 return;
10077 }
10078 }
10079
10080 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10081 if (!vlan) {
10082 mutex_unlock(&hdev->vport_lock);
10083 return;
10084 }
10085
10086 vlan->hd_tbl_status = writen_to_tbl;
10087 vlan->vlan_id = vlan_id;
10088
10089 list_add_tail(&vlan->node, &vport->vlan_list);
10090 mutex_unlock(&hdev->vport_lock);
10091 }
10092
10093 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10094 {
10095 struct hclge_vport_vlan_cfg *vlan, *tmp;
10096 struct hclge_dev *hdev = vport->back;
10097 int ret;
10098
10099 mutex_lock(&hdev->vport_lock);
10100
10101 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10102 if (!vlan->hd_tbl_status) {
10103 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10104 vport->vport_id,
10105 vlan->vlan_id, false);
10106 if (ret) {
10107 dev_err(&hdev->pdev->dev,
10108 "restore vport vlan list failed, ret=%d\n",
10109 ret);
10110
10111 mutex_unlock(&hdev->vport_lock);
10112 return ret;
10113 }
10114 }
10115 vlan->hd_tbl_status = true;
10116 }
10117
10118 mutex_unlock(&hdev->vport_lock);
10119
10120 return 0;
10121 }
10122
static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				      bool is_write_tbl)
10125 {
10126 struct hclge_vport_vlan_cfg *vlan, *tmp;
10127 struct hclge_dev *hdev = vport->back;
10128
10129 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10130 if (vlan->vlan_id == vlan_id) {
10131 if (is_write_tbl && vlan->hd_tbl_status)
10132 hclge_set_vlan_filter_hw(hdev,
10133 htons(ETH_P_8021Q),
10134 vport->vport_id,
10135 vlan_id,
10136 true);
10137
10138 list_del(&vlan->node);
10139 kfree(vlan);
10140 break;
10141 }
10142 }
10143 }
10144
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10146 {
10147 struct hclge_vport_vlan_cfg *vlan, *tmp;
10148 struct hclge_dev *hdev = vport->back;
10149
10150 mutex_lock(&hdev->vport_lock);
10151
10152 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10153 if (vlan->hd_tbl_status)
10154 hclge_set_vlan_filter_hw(hdev,
10155 htons(ETH_P_8021Q),
10156 vport->vport_id,
10157 vlan->vlan_id,
10158 true);
10159
10160 vlan->hd_tbl_status = false;
10161 if (is_del_list) {
10162 list_del(&vlan->node);
10163 kfree(vlan);
10164 }
10165 }
10166 clear_bit(vport->vport_id, hdev->vf_vlan_full);
10167 mutex_unlock(&hdev->vport_lock);
10168 }
10169
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10171 {
10172 struct hclge_vport_vlan_cfg *vlan, *tmp;
10173 struct hclge_vport *vport;
10174 int i;
10175
10176 mutex_lock(&hdev->vport_lock);
10177
10178 for (i = 0; i < hdev->num_alloc_vport; i++) {
10179 vport = &hdev->vport[i];
10180 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10181 list_del(&vlan->node);
10182 kfree(vlan);
10183 }
10184 }
10185
10186 mutex_unlock(&hdev->vport_lock);
10187 }
10188
void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev)
10190 {
10191 struct hclge_vlan_info *vlan_info;
10192 struct hclge_vport *vport;
10193 u16 vlan_proto;
10194 u16 vlan_id;
10195 u16 state;
10196 int vf_id;
10197 int ret;
10198
	/* PF should restore the port based vlan of all VFs */
10200 for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) {
10201 vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM];
10202 vlan_info = vport->port_base_vlan_cfg.tbl_sta ?
10203 &vport->port_base_vlan_cfg.vlan_info :
10204 &vport->port_base_vlan_cfg.old_vlan_info;
10205
10206 vlan_id = vlan_info->vlan_tag;
10207 vlan_proto = vlan_info->vlan_proto;
10208 state = vport->port_base_vlan_cfg.state;
10209
10210 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10211 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10212 ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10213 vport->vport_id,
10214 vlan_id, false);
10215 vport->port_base_vlan_cfg.tbl_sta = ret == 0;
10216 }
10217 }
10218 }
10219
void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10221 {
10222 struct hclge_vport_vlan_cfg *vlan, *tmp;
10223 struct hclge_dev *hdev = vport->back;
10224 int ret;
10225
10226 mutex_lock(&hdev->vport_lock);
10227
10228 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10229 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10230 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10231 vport->vport_id,
10232 vlan->vlan_id, false);
10233 if (ret)
10234 break;
10235 vlan->hd_tbl_status = true;
10236 }
10237 }
10238
10239 mutex_unlock(&hdev->vport_lock);
10240 }
10241
/* For global reset and imp reset, hardware will clear the mac table,
 * so we change the mac address state from ACTIVE to TO_ADD, then they
 * can be restored in the service task after the reset completes.
 * Furthermore, the mac addresses with state TO_DEL or DEL_FAIL do not
 * need to be restored after reset, so just remove these mac nodes from
 * mac_list.
 */
static void hclge_mac_node_convert_for_reset(struct list_head *list)
10249 {
10250 struct hclge_mac_node *mac_node, *tmp;
10251
10252 list_for_each_entry_safe(mac_node, tmp, list, node) {
10253 if (mac_node->state == HCLGE_MAC_ACTIVE) {
10254 mac_node->state = HCLGE_MAC_TO_ADD;
10255 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10256 list_del(&mac_node->node);
10257 kfree(mac_node);
10258 }
10259 }
10260 }
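
/* In short, hclge_mac_node_convert_for_reset() leaves the list in a state
 * where every previously programmed (ACTIVE) address is marked TO_ADD so the
 * service task can re-program it after the reset, TO_DEL nodes are simply
 * freed, and nodes in any other state are left untouched.
 */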
10261
void hclge_restore_mac_table_common(struct hclge_vport *vport)
10263 {
10264 spin_lock_bh(&vport->mac_list_lock);
10265
10266 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10267 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10268 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10269
10270 spin_unlock_bh(&vport->mac_list_lock);
10271 }
10272
static void hclge_restore_hw_table(struct hclge_dev *hdev)
10274 {
10275 struct hclge_vport *vport = &hdev->vport[0];
10276 struct hnae3_handle *handle = &vport->nic;
10277
10278 hclge_restore_mac_table_common(vport);
10279 hclge_restore_vport_port_base_vlan_config(hdev);
10280 hclge_restore_vport_vlan_table(vport);
10281 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10282 hclge_restore_fd_entries(handle);
10283 }
10284
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10286 {
10287 struct hclge_vport *vport = hclge_get_vport(handle);
10288
10289 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10290 vport->rxvlan_cfg.strip_tag1_en = false;
10291 vport->rxvlan_cfg.strip_tag2_en = enable;
10292 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10293 } else {
10294 vport->rxvlan_cfg.strip_tag1_en = enable;
10295 vport->rxvlan_cfg.strip_tag2_en = true;
10296 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10297 }
10298
10299 vport->rxvlan_cfg.strip_tag1_discard_en = false;
10300 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10301 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10302 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10303
10304 return hclge_set_vlan_rx_offload_cfg(vport);
10305 }
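
/* Note on the rx VLAN strip config above: when no port based VLAN is in use,
 * only tag2 follows the user's rx-vlan-offload setting and nothing is
 * discarded; when a port based VLAN is active, tag1 follows the user setting
 * while tag2 (presumably carrying the port based VLAN itself) is always
 * stripped and discarded so it never reaches the stack.
 */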
10306
static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10308 {
10309 struct hclge_dev *hdev = vport->back;
10310
10311 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10312 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10313 }
10314
static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
					    u16 port_base_vlan_state,
					    struct hclge_vlan_info *new_info,
					    struct hclge_vlan_info *old_info)
10319 {
10320 struct hclge_dev *hdev = vport->back;
10321 int ret;
10322
10323 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10324 hclge_rm_vport_all_vlan_table(vport, false);
10325 /* force clear VLAN 0 */
10326 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10327 if (ret)
10328 return ret;
10329 return hclge_set_vlan_filter_hw(hdev,
10330 htons(new_info->vlan_proto),
10331 vport->vport_id,
10332 new_info->vlan_tag,
10333 false);
10334 }
10335
10336 vport->port_base_vlan_cfg.tbl_sta = false;
10337
10338 /* force add VLAN 0 */
10339 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10340 if (ret)
10341 return ret;
10342
10343 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10344 vport->vport_id, old_info->vlan_tag,
10345 true);
10346 if (ret)
10347 return ret;
10348
10349 return hclge_add_vport_all_vlan_table(vport);
10350 }
10351
static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
					  const struct hclge_vlan_info *old_cfg)
10354 {
10355 if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10356 return true;
10357
10358 if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10359 return true;
10360
10361 return false;
10362 }
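
/* The check above treats a change of the VLAN id itself as requiring a
 * filter update, plus the VLAN 0 case where one side of the qos pair is 0
 * (i.e. switching between "no port VLAN" and a qos-only port VLAN); a
 * qos-only change on a non-zero VLAN keeps the same filter entry.
 */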
10363
static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
					   struct hclge_vlan_info *new_info,
					   struct hclge_vlan_info *old_info)
10367 {
10368 struct hclge_dev *hdev = vport->back;
10369 int ret;
10370
10371 /* add new VLAN tag */
10372 ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto),
10373 vport->vport_id, new_info->vlan_tag,
10374 false);
10375 if (ret)
10376 return ret;
10377
10378 vport->port_base_vlan_cfg.tbl_sta = false;
10379 /* remove old VLAN tag */
10380 if (old_info->vlan_tag == 0)
10381 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10382 true, 0);
10383 else
10384 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10385 vport->vport_id,
10386 old_info->vlan_tag, true);
10387 if (ret)
10388 dev_err(&hdev->pdev->dev,
10389 "failed to clear vport%u port base vlan %u, ret = %d.\n",
10390 vport->vport_id, old_info->vlan_tag, ret);
10391
10392 return ret;
10393 }
10394
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
				    struct hclge_vlan_info *vlan_info)
10397 {
10398 struct hnae3_handle *nic = &vport->nic;
10399 struct hclge_vlan_info *old_vlan_info;
10400 int ret;
10401
10402 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10403
10404 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10405 vlan_info->qos);
10406 if (ret)
10407 return ret;
10408
10409 if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10410 goto out;
10411
10412 if (state == HNAE3_PORT_BASE_VLAN_MODIFY)
10413 ret = hclge_modify_port_base_vlan_tag(vport, vlan_info,
10414 old_vlan_info);
10415 else
10416 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10417 old_vlan_info);
10418 if (ret)
10419 return ret;
10420
10421 out:
10422 vport->port_base_vlan_cfg.state = state;
10423 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10424 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10425 else
10426 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10427
10428 vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info;
10429 vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10430 vport->port_base_vlan_cfg.tbl_sta = true;
10431 hclge_set_vport_vlan_fltr_change(vport);
10432
10433 return 0;
10434 }
10435
static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
					  enum hnae3_port_base_vlan_state state,
					  u16 vlan, u8 qos)
10439 {
10440 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10441 if (!vlan && !qos)
10442 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10443
10444 return HNAE3_PORT_BASE_VLAN_ENABLE;
10445 }
10446
10447 if (!vlan && !qos)
10448 return HNAE3_PORT_BASE_VLAN_DISABLE;
10449
10450 if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10451 vport->port_base_vlan_cfg.vlan_info.qos == qos)
10452 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10453
10454 return HNAE3_PORT_BASE_VLAN_MODIFY;
10455 }
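
/* For reference, the decision above resolves as follows:
 *
 *   current state  requested (vlan, qos)   result
 *   DISABLE        (0, 0)                  NOCHANGE
 *   DISABLE        anything else           ENABLE
 *   other          (0, 0)                  DISABLE
 *   other          unchanged vlan and qos  NOCHANGE
 *   other          changed vlan or qos     MODIFY
 */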
10456
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
10459 {
10460 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10461 struct hclge_vport *vport = hclge_get_vport(handle);
10462 struct hclge_dev *hdev = vport->back;
10463 struct hclge_vlan_info vlan_info;
10464 u16 state;
10465 int ret;
10466
10467 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10468 return -EOPNOTSUPP;
10469
10470 vport = hclge_get_vf_vport(hdev, vfid);
10471 if (!vport)
10472 return -EINVAL;
10473
	/* qos is a 3-bit value, so it cannot be bigger than 7 */
10475 if (vlan > VLAN_N_VID - 1 || qos > 7)
10476 return -EINVAL;
10477 if (proto != htons(ETH_P_8021Q))
10478 return -EPROTONOSUPPORT;
10479
10480 state = hclge_get_port_base_vlan_state(vport,
10481 vport->port_base_vlan_cfg.state,
10482 vlan, qos);
10483 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10484 return 0;
10485
10486 vlan_info.vlan_tag = vlan;
10487 vlan_info.qos = qos;
10488 vlan_info.vlan_proto = ntohs(proto);
10489
10490 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10491 if (ret) {
10492 dev_err(&hdev->pdev->dev,
10493 "failed to update port base vlan for vf %d, ret = %d\n",
10494 vfid, ret);
10495 return ret;
10496 }
10497
	/* There is a time window before the PF knows the VF is not alive,
	 * which may cause the mailbox send to fail. This does not matter,
	 * as the VF will query the state again when it reinitializes.
	 * For DEVICE_VERSION_V3, the VF does not need to know about the
	 * port based VLAN state.
	 */
10504 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
10505 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10506 (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10507 vport->vport_id,
10508 state,
10509 &vlan_info);
10510 else
10511 set_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN,
10512 &vport->need_notify);
10513 }
10514 return 0;
10515 }
10516
static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10518 {
10519 struct hclge_vlan_info *vlan_info;
10520 struct hclge_vport *vport;
10521 int ret;
10522 int vf;
10523
	/* clear the port based vlan for all VFs */
10525 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10526 vport = &hdev->vport[vf];
10527 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10528
10529 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10530 vport->vport_id,
10531 vlan_info->vlan_tag, true);
10532 if (ret)
10533 dev_err(&hdev->pdev->dev,
10534 "failed to clear vf vlan for vf%d, ret = %d\n",
10535 vf - HCLGE_VF_VPORT_START_NUM, ret);
10536 }
10537 }
10538
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
10541 {
10542 struct hclge_vport *vport = hclge_get_vport(handle);
10543 struct hclge_dev *hdev = vport->back;
10544 bool writen_to_tbl = false;
10545 int ret = 0;
10546
	/* When the device is resetting or the reset has failed, the firmware
	 * is unable to handle the mailbox. Just record the vlan id and remove
	 * it after the reset finishes.
	 */
10551 mutex_lock(&hdev->vport_lock);
10552 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10553 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10554 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10555 mutex_unlock(&hdev->vport_lock);
10556 return -EBUSY;
10557 } else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) {
10558 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10559 }
10560 mutex_unlock(&hdev->vport_lock);
10561
	/* When the port based vlan is enabled, it is used as the vlan filter
	 * entry. In this case, the vlan filter table is not updated when the
	 * user adds a new vlan or removes an existing one; only the vport
	 * vlan list is updated. The vlan ids in the vlan list are written to
	 * the vlan filter table once the port based vlan is disabled.
	 */
10568 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10569 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10570 vlan_id, is_kill);
10571 writen_to_tbl = true;
10572 }
10573
10574 if (!ret) {
10575 if (!is_kill) {
10576 hclge_add_vport_vlan_table(vport, vlan_id,
10577 writen_to_tbl);
10578 } else if (is_kill && vlan_id != 0) {
10579 mutex_lock(&hdev->vport_lock);
10580 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10581 mutex_unlock(&hdev->vport_lock);
10582 }
10583 } else if (is_kill) {
		/* when removing the hw vlan filter failed, record the vlan id
		 * and try to remove it from hw later, to stay consistent with
		 * the stack
		 */
10588 mutex_lock(&hdev->vport_lock);
10589 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10590 mutex_unlock(&hdev->vport_lock);
10591 }
10592
10593 hclge_set_vport_vlan_fltr_change(vport);
10594
10595 return ret;
10596 }
10597
static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10599 {
10600 struct hclge_vport *vport;
10601 int ret;
10602 u16 i;
10603
10604 for (i = 0; i < hdev->num_alloc_vport; i++) {
10605 vport = &hdev->vport[i];
10606 if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10607 &vport->state))
10608 continue;
10609
10610 ret = hclge_enable_vport_vlan_filter(vport,
10611 vport->req_vlan_fltr_en);
10612 if (ret) {
10613 dev_err(&hdev->pdev->dev,
10614 "failed to sync vlan filter state for vport%u, ret = %d\n",
10615 vport->vport_id, ret);
10616 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10617 &vport->state);
10618 return;
10619 }
10620 }
10621 }
10622
static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10624 {
10625 #define HCLGE_MAX_SYNC_COUNT 60
10626
10627 int i, ret, sync_cnt = 0;
10628 u16 vlan_id;
10629
10630 mutex_lock(&hdev->vport_lock);
10631 /* start from vport 1 for PF is always alive */
10632 for (i = 0; i < hdev->num_alloc_vport; i++) {
10633 struct hclge_vport *vport = &hdev->vport[i];
10634
10635 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10636 VLAN_N_VID);
10637 while (vlan_id != VLAN_N_VID) {
10638 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10639 vport->vport_id, vlan_id,
10640 true);
10641 if (ret && ret != -EINVAL) {
10642 mutex_unlock(&hdev->vport_lock);
10643 return;
10644 }
10645
10646 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10647 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10648 hclge_set_vport_vlan_fltr_change(vport);
10649
10650 sync_cnt++;
10651 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) {
10652 mutex_unlock(&hdev->vport_lock);
10653 return;
10654 }
10655
10656 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10657 VLAN_N_VID);
10658 }
10659 }
10660 mutex_unlock(&hdev->vport_lock);
10661
10662 hclge_sync_vlan_fltr_state(hdev);
10663 }
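
/* hclge_sync_vlan_filter() is the periodic retry path: VLAN ids recorded in
 * vlan_del_fail_bmap are removed from hardware again, at most
 * HCLGE_MAX_SYNC_COUNT entries per invocation, and any pending filter enable
 * changes are then pushed via hclge_sync_vlan_fltr_state().
 */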
10664
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10666 {
10667 struct hclge_config_max_frm_size_cmd *req;
10668 struct hclge_desc desc;
10669
10670 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10671
10672 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10673 req->max_frm_size = cpu_to_le16(new_mps);
10674 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10675
10676 return hclge_cmd_send(&hdev->hw, &desc, 1);
10677 }
10678
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10680 {
10681 struct hclge_vport *vport = hclge_get_vport(handle);
10682
10683 return hclge_set_vport_mtu(vport, new_mtu);
10684 }
10685
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10687 {
10688 struct hclge_dev *hdev = vport->back;
10689 int i, max_frm_size, ret;
10690
	/* HW supports 2 layers of vlan */
	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
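	/* For example, with the default MTU of 1500 this gives
	 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 2 * 4 (VLAN_HLEN) = 1526
	 * bytes as the maximum frame size.
	 */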
10693 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10694 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10695 return -EINVAL;
10696
10697 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10698 mutex_lock(&hdev->vport_lock);
10699 /* VF's mps must fit within hdev->mps */
10700 if (vport->vport_id && max_frm_size > hdev->mps) {
10701 mutex_unlock(&hdev->vport_lock);
10702 return -EINVAL;
10703 } else if (vport->vport_id) {
10704 vport->mps = max_frm_size;
10705 mutex_unlock(&hdev->vport_lock);
10706 return 0;
10707 }
10708
	/* PF's mps must be greater than VF's mps */
10710 for (i = 1; i < hdev->num_alloc_vport; i++)
10711 if (max_frm_size < hdev->vport[i].mps) {
10712 dev_err(&hdev->pdev->dev,
10713 "failed to set pf mtu for less than vport %d, mps = %u.\n",
10714 i, hdev->vport[i].mps);
10715 mutex_unlock(&hdev->vport_lock);
10716 return -EINVAL;
10717 }
10718
10719 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10720
10721 ret = hclge_set_mac_mtu(hdev, max_frm_size);
10722 if (ret) {
10723 dev_err(&hdev->pdev->dev,
10724 "Change mtu fail, ret =%d\n", ret);
10725 goto out;
10726 }
10727
10728 hdev->mps = max_frm_size;
10729 vport->mps = max_frm_size;
10730
10731 ret = hclge_buffer_alloc(hdev);
10732 if (ret)
10733 dev_err(&hdev->pdev->dev,
10734 "Allocate buffer fail, ret =%d\n", ret);
10735
10736 out:
10737 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10738 mutex_unlock(&hdev->vport_lock);
10739 return ret;
10740 }
10741
static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
10744 {
10745 struct hclge_reset_tqp_queue_cmd *req;
10746 struct hclge_desc desc;
10747 int ret;
10748
10749 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10750
10751 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10752 req->tqp_id = cpu_to_le16(queue_id);
10753 if (enable)
10754 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10755
10756 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10757 if (ret) {
10758 dev_err(&hdev->pdev->dev,
10759 "Send tqp reset cmd error, status =%d\n", ret);
10760 return ret;
10761 }
10762
10763 return 0;
10764 }
10765
static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
				  u8 *reset_status)
10768 {
10769 struct hclge_reset_tqp_queue_cmd *req;
10770 struct hclge_desc desc;
10771 int ret;
10772
10773 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10774
10775 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10776 req->tqp_id = cpu_to_le16(queue_id);
10777
10778 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10779 if (ret) {
10780 dev_err(&hdev->pdev->dev,
10781 "Get reset status error, status =%d\n", ret);
10782 return ret;
10783 }
10784
10785 *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10786
10787 return 0;
10788 }
10789
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10791 {
10792 struct hclge_comm_tqp *tqp;
10793 struct hnae3_queue *queue;
10794
10795 queue = handle->kinfo.tqp[queue_id];
10796 tqp = container_of(queue, struct hclge_comm_tqp, q);
10797
10798 return tqp->index;
10799 }
10800
static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10802 {
10803 struct hclge_vport *vport = hclge_get_vport(handle);
10804 struct hclge_dev *hdev = vport->back;
10805 u16 reset_try_times = 0;
10806 u8 reset_status;
10807 u16 queue_gid;
10808 int ret;
10809 u16 i;
10810
10811 for (i = 0; i < handle->kinfo.num_tqps; i++) {
10812 queue_gid = hclge_covert_handle_qid_global(handle, i);
10813 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10814 if (ret) {
10815 dev_err(&hdev->pdev->dev,
10816 "failed to send reset tqp cmd, ret = %d\n",
10817 ret);
10818 return ret;
10819 }
10820
10821 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10822 ret = hclge_get_reset_status(hdev, queue_gid,
10823 &reset_status);
10824 if (ret)
10825 return ret;
10826
10827 if (reset_status)
10828 break;
10829
10830 /* Wait for tqp hw reset */
10831 usleep_range(1000, 1200);
10832 }
10833
10834 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10835 dev_err(&hdev->pdev->dev,
10836 "wait for tqp hw reset timeout\n");
10837 return -ETIME;
10838 }
10839
10840 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10841 if (ret) {
10842 dev_err(&hdev->pdev->dev,
10843 "failed to deassert soft reset, ret = %d\n",
10844 ret);
10845 return ret;
10846 }
10847 reset_try_times = 0;
10848 }
10849 return 0;
10850 }
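
/* The per-queue handshake above is: assert the reset request for each queue,
 * poll the ready bit up to HCLGE_TQP_RESET_TRY_TIMES times (sleeping roughly
 * 1ms per try), then deassert the request; a queue that never reports ready
 * fails the whole operation with -ETIME.
 */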
10851
static int hclge_reset_rcb(struct hnae3_handle *handle)
10853 {
10854 #define HCLGE_RESET_RCB_NOT_SUPPORT 0U
10855 #define HCLGE_RESET_RCB_SUCCESS 1U
10856
10857 struct hclge_vport *vport = hclge_get_vport(handle);
10858 struct hclge_dev *hdev = vport->back;
10859 struct hclge_reset_cmd *req;
10860 struct hclge_desc desc;
10861 u8 return_status;
10862 u16 queue_gid;
10863 int ret;
10864
10865 queue_gid = hclge_covert_handle_qid_global(handle, 0);
10866
10867 req = (struct hclge_reset_cmd *)desc.data;
10868 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10869 hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10870 req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10871 req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10872
10873 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10874 if (ret) {
10875 dev_err(&hdev->pdev->dev,
10876 "failed to send rcb reset cmd, ret = %d\n", ret);
10877 return ret;
10878 }
10879
10880 return_status = req->fun_reset_rcb_return_status;
10881 if (return_status == HCLGE_RESET_RCB_SUCCESS)
10882 return 0;
10883
10884 if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10885 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10886 return_status);
10887 return -EIO;
10888 }
10889
10890 /* if reset rcb cmd is unsupported, we need to send reset tqp cmd
10891 * again to reset all tqps
10892 */
10893 return hclge_reset_tqp_cmd(handle);
10894 }
10895
int hclge_reset_tqp(struct hnae3_handle *handle)
10897 {
10898 struct hclge_vport *vport = hclge_get_vport(handle);
10899 struct hclge_dev *hdev = vport->back;
10900 int ret;
10901
10902 /* only need to disable PF's tqp */
10903 if (!vport->vport_id) {
10904 ret = hclge_tqp_enable(handle, false);
10905 if (ret) {
10906 dev_err(&hdev->pdev->dev,
10907 "failed to disable tqp, ret = %d\n", ret);
10908 return ret;
10909 }
10910 }
10911
10912 return hclge_reset_rcb(handle);
10913 }
10914
static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10916 {
10917 struct hclge_vport *vport = hclge_get_vport(handle);
10918 struct hclge_dev *hdev = vport->back;
10919
10920 return hdev->fw_version;
10921 }
10922
int hclge_query_scc_version(struct hclge_dev *hdev, u32 *scc_version)
10924 {
10925 struct hclge_comm_query_scc_cmd *resp;
10926 struct hclge_desc desc;
10927 int ret;
10928
10929 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_SCC_VER, 1);
10930 resp = (struct hclge_comm_query_scc_cmd *)desc.data;
10931
10932 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10933 if (ret)
10934 return ret;
10935
10936 *scc_version = le32_to_cpu(resp->scc_version);
10937
10938 return 0;
10939 }
10940
static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10942 {
10943 struct phy_device *phydev = hdev->hw.mac.phydev;
10944
10945 if (!phydev)
10946 return;
10947
10948 phy_set_asym_pause(phydev, rx_en, tx_en);
10949 }
10950
static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10952 {
10953 int ret;
10954
10955 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10956 return 0;
10957
10958 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10959 if (ret)
10960 dev_err(&hdev->pdev->dev,
10961 "configure pauseparam error, ret = %d.\n", ret);
10962
10963 return ret;
10964 }
10965
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10967 {
10968 struct phy_device *phydev = hdev->hw.mac.phydev;
10969 u16 remote_advertising = 0;
10970 u16 local_advertising;
10971 u32 rx_pause, tx_pause;
10972 u8 flowctl;
10973
10974 if (!phydev->link)
10975 return 0;
10976
10977 if (!phydev->autoneg)
10978 return hclge_mac_pause_setup_hw(hdev);
10979
10980 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10981
10982 if (phydev->pause)
10983 remote_advertising = LPA_PAUSE_CAP;
10984
10985 if (phydev->asym_pause)
10986 remote_advertising |= LPA_PAUSE_ASYM;
10987
10988 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10989 remote_advertising);
10990 tx_pause = flowctl & FLOW_CTRL_TX;
10991 rx_pause = flowctl & FLOW_CTRL_RX;
10992
10993 if (phydev->duplex == HCLGE_MAC_HALF) {
10994 tx_pause = 0;
10995 rx_pause = 0;
10996 }
10997
10998 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10999 }
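
/* Flow control resolution above: with autoneg disabled the previously
 * configured pause mode is simply re-applied; with autoneg enabled the local
 * and link-partner pause advertisements are resolved with
 * mii_resolve_flowctrl_fdx(), and pause is forced off on half duplex links.
 */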
11000
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
11003 {
11004 struct hclge_vport *vport = hclge_get_vport(handle);
11005 struct hclge_dev *hdev = vport->back;
11006 u8 media_type = hdev->hw.mac.media_type;
11007
11008 *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
11009 hclge_get_autoneg(handle) : 0;
11010
11011 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
11012 *rx_en = 0;
11013 *tx_en = 0;
11014 return;
11015 }
11016
11017 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
11018 *rx_en = 1;
11019 *tx_en = 0;
11020 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
11021 *tx_en = 1;
11022 *rx_en = 0;
11023 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
11024 *rx_en = 1;
11025 *tx_en = 1;
11026 } else {
11027 *rx_en = 0;
11028 *tx_en = 0;
11029 }
11030 }
11031
static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
					 u32 rx_en, u32 tx_en)
11034 {
11035 if (rx_en && tx_en)
11036 hdev->fc_mode_last_time = HCLGE_FC_FULL;
11037 else if (rx_en && !tx_en)
11038 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
11039 else if (!rx_en && tx_en)
11040 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
11041 else
11042 hdev->fc_mode_last_time = HCLGE_FC_NONE;
11043
11044 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
11045 }
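
/* Mapping used above: (rx_en, tx_en) -> fc_mode
 *   (1, 1) -> HCLGE_FC_FULL
 *   (1, 0) -> HCLGE_FC_RX_PAUSE
 *   (0, 1) -> HCLGE_FC_TX_PAUSE
 *   (0, 0) -> HCLGE_FC_NONE
 */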
11046
static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
11049 {
11050 struct hclge_vport *vport = hclge_get_vport(handle);
11051 struct hclge_dev *hdev = vport->back;
11052 struct phy_device *phydev = hdev->hw.mac.phydev;
11053 u32 fc_autoneg;
11054
11055 if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
11056 fc_autoneg = hclge_get_autoneg(handle);
11057 if (auto_neg != fc_autoneg) {
11058 dev_info(&hdev->pdev->dev,
11059 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
11060 return -EOPNOTSUPP;
11061 }
11062 }
11063
11064 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
11065 dev_info(&hdev->pdev->dev,
11066 "Priority flow control enabled. Cannot set link flow control.\n");
11067 return -EOPNOTSUPP;
11068 }
11069
11070 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
11071
11072 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
11073
11074 if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
11075 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
11076
11077 if (phydev)
11078 return phy_start_aneg(phydev);
11079
11080 return -EOPNOTSUPP;
11081 }
11082
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex, u32 *lane_num)
11085 {
11086 struct hclge_vport *vport = hclge_get_vport(handle);
11087 struct hclge_dev *hdev = vport->back;
11088
11089 if (speed)
11090 *speed = hdev->hw.mac.speed;
11091 if (duplex)
11092 *duplex = hdev->hw.mac.duplex;
11093 if (auto_neg)
11094 *auto_neg = hdev->hw.mac.autoneg;
11095 if (lane_num)
11096 *lane_num = hdev->hw.mac.lane_num;
11097 }
11098
static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				 u8 *module_type)
11101 {
11102 struct hclge_vport *vport = hclge_get_vport(handle);
11103 struct hclge_dev *hdev = vport->back;
11104
	/* When the nic is down, the service task is not running and does not
	 * update the port information every second. Query the port
	 * information before returning the media type to ensure the media
	 * information is correct.
	 */
11109 hclge_update_port_info(hdev);
11110
11111 if (media_type)
11112 *media_type = hdev->hw.mac.media_type;
11113
11114 if (module_type)
11115 *module_type = hdev->hw.mac.module_type;
11116 }
11117
static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
11120 {
11121 struct hclge_vport *vport = hclge_get_vport(handle);
11122 struct hclge_dev *hdev = vport->back;
11123 struct phy_device *phydev = hdev->hw.mac.phydev;
11124 int mdix_ctrl, mdix, is_resolved;
11125 unsigned int retval;
11126
11127 if (!phydev) {
11128 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11129 *tp_mdix = ETH_TP_MDI_INVALID;
11130 return;
11131 }
11132
11133 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11134
11135 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11136 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11137 HCLGE_PHY_MDIX_CTRL_S);
11138
11139 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11140 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11141 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11142
11143 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11144
11145 switch (mdix_ctrl) {
11146 case 0x0:
11147 *tp_mdix_ctrl = ETH_TP_MDI;
11148 break;
11149 case 0x1:
11150 *tp_mdix_ctrl = ETH_TP_MDI_X;
11151 break;
11152 case 0x3:
11153 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11154 break;
11155 default:
11156 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11157 break;
11158 }
11159
11160 if (!is_resolved)
11161 *tp_mdix = ETH_TP_MDI_INVALID;
11162 else if (mdix)
11163 *tp_mdix = ETH_TP_MDI_X;
11164 else
11165 *tp_mdix = ETH_TP_MDI;
11166 }
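
/* The MDI-X query above temporarily switches the PHY to its MDIX register
 * page, reads the control field (0x0 forced MDI, 0x1 forced MDI-X, 0x3 auto)
 * and the status/resolve bits, then restores the copper page; the reported
 * status is only meaningful once speed/duplex resolution has completed.
 */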
11167
static void hclge_info_show(struct hclge_dev *hdev)
11169 {
11170 struct hnae3_handle *handle = &hdev->vport->nic;
11171 struct device *dev = &hdev->pdev->dev;
11172
11173 dev_info(dev, "PF info begin:\n");
11174
11175 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
11176 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11177 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11178 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
11179 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
11180 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11181 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11182 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11183 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11184 dev_info(dev, "This is %s PF\n",
11185 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11186 dev_info(dev, "DCB %s\n",
11187 handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable");
11188 dev_info(dev, "MQPRIO %s\n",
11189 handle->kinfo.tc_info.mqprio_active ? "enable" : "disable");
11190 dev_info(dev, "Default tx spare buffer size: %u\n",
11191 hdev->tx_spare_buf_size);
11192
11193 dev_info(dev, "PF info end.\n");
11194 }
11195
static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					  struct hclge_vport *vport)
11198 {
11199 struct hnae3_client *client = vport->nic.client;
11200 struct hclge_dev *hdev = ae_dev->priv;
11201 int rst_cnt = hdev->rst_stats.reset_cnt;
11202 int ret;
11203
11204 ret = client->ops->init_instance(&vport->nic);
11205 if (ret)
11206 return ret;
11207
11208 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11209 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11210 rst_cnt != hdev->rst_stats.reset_cnt) {
11211 ret = -EBUSY;
11212 goto init_nic_err;
11213 }
11214
11215 /* Enable nic hw error interrupts */
11216 ret = hclge_config_nic_hw_error(hdev, true);
11217 if (ret) {
11218 dev_err(&ae_dev->pdev->dev,
11219 "fail(%d) to enable hw error interrupts\n", ret);
11220 goto init_nic_err;
11221 }
11222
11223 hnae3_set_client_init_flag(client, ae_dev, 1);
11224
11225 if (netif_msg_drv(&hdev->vport->nic))
11226 hclge_info_show(hdev);
11227
11228 return ret;
11229
11230 init_nic_err:
11231 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11232 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11233 msleep(HCLGE_WAIT_RESET_DONE);
11234
11235 client->ops->uninit_instance(&vport->nic, 0);
11236
11237 return ret;
11238 }
11239
static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					   struct hclge_vport *vport)
11242 {
11243 struct hclge_dev *hdev = ae_dev->priv;
11244 struct hnae3_client *client;
11245 int rst_cnt;
11246 int ret;
11247
11248 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11249 !hdev->nic_client)
11250 return 0;
11251
11252 client = hdev->roce_client;
11253 ret = hclge_init_roce_base_info(vport);
11254 if (ret)
11255 return ret;
11256
11257 rst_cnt = hdev->rst_stats.reset_cnt;
11258 ret = client->ops->init_instance(&vport->roce);
11259 if (ret)
11260 return ret;
11261
11262 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11263 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11264 rst_cnt != hdev->rst_stats.reset_cnt) {
11265 ret = -EBUSY;
11266 goto init_roce_err;
11267 }
11268
11269 /* Enable roce ras interrupts */
11270 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11271 if (ret) {
11272 dev_err(&ae_dev->pdev->dev,
11273 "fail(%d) to enable roce ras interrupts\n", ret);
11274 goto init_roce_err;
11275 }
11276
11277 hnae3_set_client_init_flag(client, ae_dev, 1);
11278
11279 return 0;
11280
11281 init_roce_err:
11282 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11283 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11284 msleep(HCLGE_WAIT_RESET_DONE);
11285
11286 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11287
11288 return ret;
11289 }
11290
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
11293 {
11294 struct hclge_dev *hdev = ae_dev->priv;
11295 struct hclge_vport *vport = &hdev->vport[0];
11296 int ret;
11297
11298 switch (client->type) {
11299 case HNAE3_CLIENT_KNIC:
11300 hdev->nic_client = client;
11301 vport->nic.client = client;
11302 ret = hclge_init_nic_client_instance(ae_dev, vport);
11303 if (ret)
11304 goto clear_nic;
11305
11306 ret = hclge_init_roce_client_instance(ae_dev, vport);
11307 if (ret)
11308 goto clear_roce;
11309
11310 break;
11311 case HNAE3_CLIENT_ROCE:
11312 if (hnae3_dev_roce_supported(hdev)) {
11313 hdev->roce_client = client;
11314 vport->roce.client = client;
11315 }
11316
11317 ret = hclge_init_roce_client_instance(ae_dev, vport);
11318 if (ret)
11319 goto clear_roce;
11320
11321 break;
11322 default:
11323 return -EINVAL;
11324 }
11325
11326 return 0;
11327
11328 clear_nic:
11329 hdev->nic_client = NULL;
11330 vport->nic.client = NULL;
11331 return ret;
11332 clear_roce:
11333 hdev->roce_client = NULL;
11334 vport->roce.client = NULL;
11335 return ret;
11336 }
11337
static bool hclge_uninit_need_wait(struct hclge_dev *hdev)
11339 {
11340 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11341 test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
11342 }
11343
static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
11346 {
11347 struct hclge_dev *hdev = ae_dev->priv;
11348 struct hclge_vport *vport = &hdev->vport[0];
11349
11350 if (hdev->roce_client) {
11351 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11352 while (hclge_uninit_need_wait(hdev))
11353 msleep(HCLGE_WAIT_RESET_DONE);
11354
11355 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11356 hdev->roce_client = NULL;
11357 vport->roce.client = NULL;
11358 }
11359 if (client->type == HNAE3_CLIENT_ROCE)
11360 return;
11361 if (hdev->nic_client && client->ops->uninit_instance) {
11362 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11363 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11364 msleep(HCLGE_WAIT_RESET_DONE);
11365
11366 client->ops->uninit_instance(&vport->nic, 0);
11367 hdev->nic_client = NULL;
11368 vport->nic.client = NULL;
11369 }
11370 }
11371
static int hclge_dev_mem_map(struct hclge_dev *hdev)
11373 {
11374 struct pci_dev *pdev = hdev->pdev;
11375 struct hclge_hw *hw = &hdev->hw;
11376
	/* if the device does not have device memory, return directly */
11378 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11379 return 0;
11380
11381 hw->hw.mem_base =
11382 devm_ioremap_wc(&pdev->dev,
11383 pci_resource_start(pdev, HCLGE_MEM_BAR),
11384 pci_resource_len(pdev, HCLGE_MEM_BAR));
11385 if (!hw->hw.mem_base) {
11386 dev_err(&pdev->dev, "failed to map device memory\n");
11387 return -EFAULT;
11388 }
11389
11390 return 0;
11391 }
11392
static int hclge_pci_init(struct hclge_dev *hdev)
11394 {
11395 struct pci_dev *pdev = hdev->pdev;
11396 struct hclge_hw *hw;
11397 int ret;
11398
11399 ret = pci_enable_device(pdev);
11400 if (ret) {
11401 dev_err(&pdev->dev, "failed to enable PCI device\n");
11402 return ret;
11403 }
11404
11405 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11406 if (ret) {
11407 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11408 if (ret) {
11409 dev_err(&pdev->dev,
11410 "can't set consistent PCI DMA");
11411 goto err_disable_device;
11412 }
11413 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11414 }
11415
11416 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11417 if (ret) {
11418 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11419 goto err_disable_device;
11420 }
11421
11422 pci_set_master(pdev);
11423 hw = &hdev->hw;
11424 hw->hw.io_base = pcim_iomap(pdev, 2, 0);
11425 if (!hw->hw.io_base) {
11426 dev_err(&pdev->dev, "Can't map configuration register space\n");
11427 ret = -ENOMEM;
11428 goto err_release_regions;
11429 }
11430
11431 ret = hclge_dev_mem_map(hdev);
11432 if (ret)
11433 goto err_unmap_io_base;
11434
11435 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11436
11437 return 0;
11438
11439 err_unmap_io_base:
11440 pcim_iounmap(pdev, hdev->hw.hw.io_base);
11441 err_release_regions:
11442 pci_release_regions(pdev);
11443 err_disable_device:
11444 pci_disable_device(pdev);
11445
11446 return ret;
11447 }
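
/* PCI bring-up order used above: enable the device, prefer a 64-bit DMA mask
 * with a 32-bit fallback, request the regions, map BAR 2 as the register
 * space (io_base), map the optional device-memory BAR write-combined, and
 * finally read the number of SR-IOV VFs the function can support.
 */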
11448
static void hclge_pci_uninit(struct hclge_dev *hdev)
11450 {
11451 struct pci_dev *pdev = hdev->pdev;
11452
11453 if (hdev->hw.hw.mem_base)
11454 devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
11455
11456 pcim_iounmap(pdev, hdev->hw.hw.io_base);
11457 pci_free_irq_vectors(pdev);
11458 pci_release_regions(pdev);
11459 pci_disable_device(pdev);
11460 }
11461
static void hclge_state_init(struct hclge_dev *hdev)
11463 {
11464 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11465 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11466 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11467 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11468 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11469 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11470 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11471 }
11472
static void hclge_state_uninit(struct hclge_dev *hdev)
11474 {
11475 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11476 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11477
11478 if (hdev->reset_timer.function)
11479 del_timer_sync(&hdev->reset_timer);
11480 if (hdev->service_task.work.func)
11481 cancel_delayed_work_sync(&hdev->service_task);
11482 }
11483
static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
11486 {
11487 #define HCLGE_RESET_RETRY_WAIT_MS 500
11488 #define HCLGE_RESET_RETRY_CNT 5
11489
11490 struct hclge_dev *hdev = ae_dev->priv;
11491 int retry_cnt = 0;
11492 int ret;
11493
11494 while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11495 down(&hdev->reset_sem);
11496 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11497 hdev->reset_type = rst_type;
11498 ret = hclge_reset_prepare(hdev);
11499 if (!ret && !hdev->reset_pending)
11500 break;
11501
11502 dev_err(&hdev->pdev->dev,
11503 "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
11504 ret, hdev->reset_pending, retry_cnt);
11505 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11506 up(&hdev->reset_sem);
11507 msleep(HCLGE_RESET_RETRY_WAIT_MS);
11508 }
11509
11510 /* disable misc vector before reset done */
11511 hclge_enable_vector(&hdev->misc_vector, false);
11512 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
11513
11514 if (hdev->reset_type == HNAE3_FLR_RESET)
11515 hdev->rst_stats.flr_rst_cnt++;
11516 }
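
/* The prepare step above is retried up to HCLGE_RESET_RETRY_CNT times with
 * HCLGE_RESET_RETRY_WAIT_MS between attempts; once the loop exits, the misc
 * vector and the command queue are disabled so the hardware reset can
 * proceed, and FLR resets are counted in the reset statistics.
 */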
11517
static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11519 {
11520 struct hclge_dev *hdev = ae_dev->priv;
11521 int ret;
11522
11523 hclge_enable_vector(&hdev->misc_vector, true);
11524
11525 ret = hclge_reset_rebuild(hdev);
11526 if (ret)
11527 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11528
11529 hdev->reset_type = HNAE3_NONE_RESET;
11530 if (test_and_clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11531 up(&hdev->reset_sem);
11532 }
11533
static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11535 {
11536 u16 i;
11537
11538 for (i = 0; i < hdev->num_alloc_vport; i++) {
11539 struct hclge_vport *vport = &hdev->vport[i];
11540 int ret;
11541
11542 /* Send cmd to clear vport's FUNC_RST_ING */
11543 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11544 if (ret)
11545 dev_warn(&hdev->pdev->dev,
11546 "clear vport(%u) rst failed %d!\n",
11547 vport->vport_id, ret);
11548 }
11549 }
11550
static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11552 {
11553 struct hclge_desc desc;
11554 int ret;
11555
11556 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
11557
11558 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	/* This new command is only supported by new firmware; it will fail
	 * with older firmware. The error value -EOPNOTSUPP can only be
	 * returned by older firmware running this command, so to keep the
	 * code backward compatible we override this value and return
	 * success.
	 */
11565 if (ret && ret != -EOPNOTSUPP) {
11566 dev_err(&hdev->pdev->dev,
11567 "failed to clear hw resource, ret = %d\n", ret);
11568 return ret;
11569 }
11570 return 0;
11571 }
11572
static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11574 {
11575 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11576 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11577 }
11578
static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11580 {
11581 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11582 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11583 }
11584
static struct hclge_wol_info *hclge_get_wol_info(struct hnae3_handle *handle)
11586 {
11587 struct hclge_vport *vport = hclge_get_vport(handle);
11588
11589 return &vport->back->hw.mac.wol;
11590 }
11591
static int hclge_get_wol_supported_mode(struct hclge_dev *hdev,
					u32 *wol_supported)
11594 {
11595 struct hclge_query_wol_supported_cmd *wol_supported_cmd;
11596 struct hclge_desc desc;
11597 int ret;
11598
11599 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_GET_SUPPORTED_MODE,
11600 true);
11601 wol_supported_cmd = (struct hclge_query_wol_supported_cmd *)desc.data;
11602
11603 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11604 if (ret) {
11605 dev_err(&hdev->pdev->dev,
11606 "failed to query wol supported, ret = %d\n", ret);
11607 return ret;
11608 }
11609
11610 *wol_supported = le32_to_cpu(wol_supported_cmd->supported_wake_mode);
11611
11612 return 0;
11613 }
11614
static int hclge_set_wol_cfg(struct hclge_dev *hdev,
			     struct hclge_wol_info *wol_info)
11617 {
11618 struct hclge_wol_cfg_cmd *wol_cfg_cmd;
11619 struct hclge_desc desc;
11620 int ret;
11621
11622 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_CFG, false);
11623 wol_cfg_cmd = (struct hclge_wol_cfg_cmd *)desc.data;
11624 wol_cfg_cmd->wake_on_lan_mode = cpu_to_le32(wol_info->wol_current_mode);
11625 wol_cfg_cmd->sopass_size = wol_info->wol_sopass_size;
11626 memcpy(wol_cfg_cmd->sopass, wol_info->wol_sopass, SOPASS_MAX);
11627
11628 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11629 if (ret)
11630 dev_err(&hdev->pdev->dev,
11631 "failed to set wol config, ret = %d\n", ret);
11632
11633 return ret;
11634 }
11635
static int hclge_update_wol(struct hclge_dev *hdev)
11637 {
11638 struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;
11639
11640 if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
11641 return 0;
11642
11643 return hclge_set_wol_cfg(hdev, wol_info);
11644 }
11645
static int hclge_init_wol(struct hclge_dev *hdev)
11647 {
11648 struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;
11649 int ret;
11650
11651 if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
11652 return 0;
11653
11654 memset(wol_info, 0, sizeof(struct hclge_wol_info));
11655 ret = hclge_get_wol_supported_mode(hdev,
11656 &wol_info->wol_support_mode);
11657 if (ret) {
11658 wol_info->wol_support_mode = 0;
11659 return ret;
11660 }
11661
11662 return hclge_update_wol(hdev);
11663 }
11664
static void hclge_get_wol(struct hnae3_handle *handle,
			  struct ethtool_wolinfo *wol)
11667 {
11668 struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);
11669
11670 wol->supported = wol_info->wol_support_mode;
11671 wol->wolopts = wol_info->wol_current_mode;
11672 if (wol_info->wol_current_mode & WAKE_MAGICSECURE)
11673 memcpy(wol->sopass, wol_info->wol_sopass, SOPASS_MAX);
11674 }
11675
static int hclge_set_wol(struct hnae3_handle *handle,
			 struct ethtool_wolinfo *wol)
11678 {
11679 struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);
11680 struct hclge_vport *vport = hclge_get_vport(handle);
11681 u32 wol_mode;
11682 int ret;
11683
11684 wol_mode = wol->wolopts;
11685 if (wol_mode & ~wol_info->wol_support_mode)
11686 return -EINVAL;
11687
11688 wol_info->wol_current_mode = wol_mode;
11689 if (wol_mode & WAKE_MAGICSECURE) {
11690 memcpy(wol_info->wol_sopass, wol->sopass, SOPASS_MAX);
11691 wol_info->wol_sopass_size = SOPASS_MAX;
11692 } else {
11693 wol_info->wol_sopass_size = 0;
11694 }
11695
11696 ret = hclge_set_wol_cfg(vport->back, wol_info);
11697 if (ret)
11698 wol_info->wol_current_mode = 0;
11699
11700 return ret;
11701 }
11702
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11704 {
11705 struct pci_dev *pdev = ae_dev->pdev;
11706 struct hclge_dev *hdev;
11707 int ret;
11708
11709 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11710 if (!hdev)
11711 return -ENOMEM;
11712
11713 hdev->pdev = pdev;
11714 hdev->ae_dev = ae_dev;
11715 hdev->reset_type = HNAE3_NONE_RESET;
11716 hdev->reset_level = HNAE3_FUNC_RESET;
11717 ae_dev->priv = hdev;
11718
	/* HW supports 2 layers of vlan */
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11721
11722 mutex_init(&hdev->vport_lock);
11723 spin_lock_init(&hdev->fd_rule_lock);
11724 sema_init(&hdev->reset_sem, 1);
11725
11726 ret = hclge_pci_init(hdev);
11727 if (ret)
11728 goto out;
11729
11730 /* Firmware command queue initialize */
11731 ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
11732 if (ret)
11733 goto err_pci_uninit;
11734
11735 /* Firmware command initialize */
11736 hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclge_cmq_ops);
11737 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
11738 true, hdev->reset_pending);
11739 if (ret)
11740 goto err_cmd_uninit;
11741
11742 ret = hclge_clear_hw_resource(hdev);
11743 if (ret)
11744 goto err_cmd_uninit;
11745
11746 ret = hclge_get_cap(hdev);
11747 if (ret)
11748 goto err_cmd_uninit;
11749
11750 ret = hclge_query_dev_specs(hdev);
11751 if (ret) {
11752 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11753 ret);
11754 goto err_cmd_uninit;
11755 }
11756
11757 ret = hclge_configure(hdev);
11758 if (ret) {
11759 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11760 goto err_cmd_uninit;
11761 }
11762
11763 ret = hclge_init_msi(hdev);
11764 if (ret) {
11765 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11766 goto err_cmd_uninit;
11767 }
11768
11769 ret = hclge_misc_irq_init(hdev);
11770 if (ret)
11771 goto err_msi_uninit;
11772
11773 ret = hclge_alloc_tqps(hdev);
11774 if (ret) {
11775 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11776 goto err_msi_irq_uninit;
11777 }
11778
11779 ret = hclge_alloc_vport(hdev);
11780 if (ret)
11781 goto err_msi_irq_uninit;
11782
11783 ret = hclge_map_tqp(hdev);
11784 if (ret)
11785 goto err_msi_irq_uninit;
11786
11787 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
11788 clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
11789 if (hnae3_dev_phy_imp_supported(hdev))
11790 ret = hclge_update_tp_port_info(hdev);
11791 else
11792 ret = hclge_mac_mdio_config(hdev);
11793
11794 if (ret)
11795 goto err_msi_irq_uninit;
11796 }
11797
11798 ret = hclge_init_umv_space(hdev);
11799 if (ret)
11800 goto err_mdiobus_unreg;
11801
11802 ret = hclge_mac_init(hdev);
11803 if (ret) {
11804 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11805 goto err_mdiobus_unreg;
11806 }
11807
11808 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11809 if (ret) {
11810 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11811 goto err_mdiobus_unreg;
11812 }
11813
11814 ret = hclge_config_gro(hdev);
11815 if (ret)
11816 goto err_mdiobus_unreg;
11817
11818 ret = hclge_init_vlan_config(hdev);
11819 if (ret) {
11820 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11821 goto err_mdiobus_unreg;
11822 }
11823
11824 ret = hclge_tm_schd_init(hdev);
11825 if (ret) {
11826 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11827 goto err_mdiobus_unreg;
11828 }
11829
11830 ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev,
11831 &hdev->rss_cfg);
11832 if (ret) {
11833 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11834 goto err_mdiobus_unreg;
11835 }
11836
11837 ret = hclge_rss_init_hw(hdev);
11838 if (ret) {
11839 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11840 goto err_mdiobus_unreg;
11841 }
11842
11843 ret = init_mgr_tbl(hdev);
11844 if (ret) {
11845 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11846 goto err_mdiobus_unreg;
11847 }
11848
11849 ret = hclge_init_fd_config(hdev);
11850 if (ret) {
11851 dev_err(&pdev->dev,
11852 "fd table init fail, ret=%d\n", ret);
11853 goto err_mdiobus_unreg;
11854 }
11855
11856 ret = hclge_ptp_init(hdev);
11857 if (ret)
11858 goto err_mdiobus_unreg;
11859
11860 ret = hclge_update_port_info(hdev);
11861 if (ret)
11862 goto err_ptp_uninit;
11863
11864 INIT_KFIFO(hdev->mac_tnl_log);
11865
11866 hclge_dcb_ops_set(hdev);
11867
11868 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11869 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11870
11871 hclge_clear_all_event_cause(hdev);
11872 hclge_clear_resetting_state(hdev);
11873
11874 	/* Log and clear the hw errors that have already occurred */
11875 if (hnae3_dev_ras_imp_supported(hdev))
11876 hclge_handle_occurred_error(hdev);
11877 else
11878 hclge_handle_all_hns_hw_errors(ae_dev);
11879
11880 	/* Request a delayed reset for error recovery, because an immediate
11881 	 * global reset on a PF may affect the pending initialization of other PFs
11882 	 */
11883 if (ae_dev->hw_err_reset_req) {
11884 enum hnae3_reset_type reset_level;
11885
11886 reset_level = hclge_get_reset_level(ae_dev,
11887 &ae_dev->hw_err_reset_req);
11888 hclge_set_def_reset_request(ae_dev, reset_level);
11889 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11890 }
11891
11892 hclge_init_rxd_adv_layout(hdev);
11893
11894 /* Enable MISC vector(vector0) */
11895 hclge_enable_vector(&hdev->misc_vector, true);
11896
11897 ret = hclge_init_wol(hdev);
11898 if (ret)
11899 		dev_warn(&pdev->dev,
11900 			 "failed to init wake on lan, ret = %d\n", ret);
11901
11902 ret = hclge_devlink_init(hdev);
11903 if (ret)
11904 goto err_ptp_uninit;
11905
11906 hclge_state_init(hdev);
11907 hdev->last_reset_time = jiffies;
11908
11909 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11910 HCLGE_DRIVER_NAME);
11911
11912 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11913 return 0;
11914
11915 err_ptp_uninit:
11916 hclge_ptp_uninit(hdev);
11917 err_mdiobus_unreg:
11918 if (hdev->hw.mac.phydev)
11919 mdiobus_unregister(hdev->hw.mac.mdio_bus);
11920 err_msi_irq_uninit:
11921 hclge_misc_irq_uninit(hdev);
11922 err_msi_uninit:
11923 pci_free_irq_vectors(pdev);
11924 err_cmd_uninit:
11925 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
11926 err_pci_uninit:
11927 pcim_iounmap(pdev, hdev->hw.hw.io_base);
11928 pci_release_regions(pdev);
11929 pci_disable_device(pdev);
11930 out:
11931 mutex_destroy(&hdev->vport_lock);
11932 return ret;
11933 }
11934
11935 static void hclge_stats_clear(struct hclge_dev *hdev)
11936 {
11937 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11938 memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats));
11939 }
11940
11941 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11942 {
11943 return hclge_config_switch_param(hdev, vf, enable,
11944 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11945 }
11946
11947 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11948 {
11949 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11950 HCLGE_FILTER_FE_NIC_INGRESS_B,
11951 enable, vf);
11952 }
11953
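/* Enable or disable both the MAC and VLAN anti-spoof checks for the
 * given VF in hardware.
 */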
11954 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11955 {
11956 int ret;
11957
11958 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11959 if (ret) {
11960 dev_err(&hdev->pdev->dev,
11961 "Set vf %d mac spoof check %s failed, ret=%d\n",
11962 vf, enable ? "on" : "off", ret);
11963 return ret;
11964 }
11965
11966 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11967 if (ret)
11968 dev_err(&hdev->pdev->dev,
11969 "Set vf %d vlan spoof check %s failed, ret=%d\n",
11970 vf, enable ? "on" : "off", ret);
11971
11972 return ret;
11973 }
11974
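/* Backs the .set_vf_spoofchk op: update a VF's spoof-check state, warning
 * if the VF's VLAN or MAC table is already full, since enabling the check
 * may then cause the VF's transmitted packets to fail.
 */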
11975 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11976 bool enable)
11977 {
11978 struct hclge_vport *vport = hclge_get_vport(handle);
11979 struct hclge_dev *hdev = vport->back;
11980 u32 new_spoofchk = enable ? 1 : 0;
11981 int ret;
11982
11983 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11984 return -EOPNOTSUPP;
11985
11986 vport = hclge_get_vf_vport(hdev, vf);
11987 if (!vport)
11988 return -EINVAL;
11989
11990 if (vport->vf_info.spoofchk == new_spoofchk)
11991 return 0;
11992
11993 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11994 dev_warn(&hdev->pdev->dev,
11995 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11996 vf);
11997 else if (enable && hclge_is_umv_space_full(vport, true))
11998 dev_warn(&hdev->pdev->dev,
11999 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
12000 vf);
12001
12002 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
12003 if (ret)
12004 return ret;
12005
12006 vport->vf_info.spoofchk = new_spoofchk;
12007 return 0;
12008 }
12009
12010 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
12011 {
12012 struct hclge_vport *vport = hdev->vport;
12013 int ret;
12014 int i;
12015
12016 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
12017 return 0;
12018
12019 /* resume the vf spoof check state after reset */
12020 for (i = 0; i < hdev->num_alloc_vport; i++) {
12021 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
12022 vport->vf_info.spoofchk);
12023 if (ret)
12024 return ret;
12025
12026 vport++;
12027 }
12028
12029 return 0;
12030 }
12031
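/* Backs the .set_vf_trust op: record the VF's trusted state and schedule
 * the service task to re-sync its promiscuous mode, since only trusted
 * VFs may keep unicast/multicast promiscuous settings.
 */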
12032 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
12033 {
12034 struct hclge_vport *vport = hclge_get_vport(handle);
12035 struct hclge_dev *hdev = vport->back;
12036 u32 new_trusted = enable ? 1 : 0;
12037
12038 vport = hclge_get_vf_vport(hdev, vf);
12039 if (!vport)
12040 return -EINVAL;
12041
12042 if (vport->vf_info.trusted == new_trusted)
12043 return 0;
12044
12045 vport->vf_info.trusted = new_trusted;
12046 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12047 hclge_task_schedule(hdev, 0);
12048
12049 return 0;
12050 }
12051
12052 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
12053 {
12054 int ret;
12055 int vf;
12056
12057 /* reset vf rate to default value */
12058 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
12059 struct hclge_vport *vport = &hdev->vport[vf];
12060
12061 vport->vf_info.max_tx_rate = 0;
12062 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
12063 if (ret)
12064 dev_err(&hdev->pdev->dev,
12065 "vf%d failed to reset to default, ret=%d\n",
12066 vf - HCLGE_VF_VPORT_START_NUM, ret);
12067 }
12068 }
12069
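/* min_tx_rate is not supported and must be 0; max_tx_rate must lie within
 * [0, max MAC speed].
 */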
12070 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
12071 int min_tx_rate, int max_tx_rate)
12072 {
12073 if (min_tx_rate != 0 ||
12074 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
12075 dev_err(&hdev->pdev->dev,
12076 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
12077 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
12078 return -EINVAL;
12079 }
12080
12081 return 0;
12082 }
12083
12084 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
12085 int min_tx_rate, int max_tx_rate, bool force)
12086 {
12087 struct hclge_vport *vport = hclge_get_vport(handle);
12088 struct hclge_dev *hdev = vport->back;
12089 int ret;
12090
12091 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
12092 if (ret)
12093 return ret;
12094
12095 vport = hclge_get_vf_vport(hdev, vf);
12096 if (!vport)
12097 return -EINVAL;
12098
12099 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
12100 return 0;
12101
12102 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
12103 if (ret)
12104 return ret;
12105
12106 vport->vf_info.max_tx_rate = max_tx_rate;
12107
12108 return 0;
12109 }
12110
12111 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
12112 {
12113 struct hnae3_handle *handle = &hdev->vport->nic;
12114 struct hclge_vport *vport;
12115 int ret;
12116 int vf;
12117
12118 /* resume the vf max_tx_rate after reset */
12119 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
12120 vport = hclge_get_vf_vport(hdev, vf);
12121 if (!vport)
12122 return -EINVAL;
12123
12124 		/* zero means max rate; after reset, the firmware has already set it
12125 		 * to max rate, so just continue.
12126 		 */
12127 if (!vport->vf_info.max_tx_rate)
12128 continue;
12129
12130 ret = hclge_set_vf_rate(handle, vf, 0,
12131 vport->vf_info.max_tx_rate, true);
12132 if (ret) {
12133 dev_err(&hdev->pdev->dev,
12134 "vf%d failed to resume tx_rate:%u, ret=%d\n",
12135 vf, vport->vf_info.max_tx_rate, ret);
12136 return ret;
12137 }
12138 }
12139
12140 return 0;
12141 }
12142
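/* Clear the ALIVE state of every vport after reset. */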
12143 static void hclge_reset_vport_state(struct hclge_dev *hdev)
12144 {
12145 struct hclge_vport *vport = hdev->vport;
12146 int i;
12147
12148 for (i = 0; i < hdev->num_alloc_vport; i++) {
12149 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
12150 vport++;
12151 }
12152 }
12153
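/* Re-initialize the device after a reset. For IMP and global resets the
 * hardware tables are lost, so the driver's shadow VLAN and UMV state is
 * cleared and the vports are marked for re-configuration; PF resets keep
 * the existing table entries (see the note below).
 */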
12154 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
12155 {
12156 struct hclge_dev *hdev = ae_dev->priv;
12157 struct pci_dev *pdev = ae_dev->pdev;
12158 int ret;
12159
12160 set_bit(HCLGE_STATE_DOWN, &hdev->state);
12161
12162 hclge_stats_clear(hdev);
12163 	/* NOTE: a PF reset does not need to clear or restore the PF and VF
12164 	 * table entries, so the tables in memory should not be cleaned here.
12165 	 */
12166 if (hdev->reset_type == HNAE3_IMP_RESET ||
12167 hdev->reset_type == HNAE3_GLOBAL_RESET) {
12168 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
12169 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
12170 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
12171 hclge_reset_umv_space(hdev);
12172 }
12173
12174 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
12175 true, hdev->reset_pending);
12176 if (ret) {
12177 dev_err(&pdev->dev, "Cmd queue init failed\n");
12178 return ret;
12179 }
12180
12181 ret = hclge_map_tqp(hdev);
12182 if (ret) {
12183 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
12184 return ret;
12185 }
12186
12187 ret = hclge_mac_init(hdev);
12188 if (ret) {
12189 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
12190 return ret;
12191 }
12192
12193 ret = hclge_tp_port_init(hdev);
12194 if (ret) {
12195 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
12196 ret);
12197 return ret;
12198 }
12199
12200 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
12201 if (ret) {
12202 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
12203 return ret;
12204 }
12205
12206 ret = hclge_config_gro(hdev);
12207 if (ret)
12208 return ret;
12209
12210 ret = hclge_init_vlan_config(hdev);
12211 if (ret) {
12212 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
12213 return ret;
12214 }
12215
12216 hclge_reset_tc_config(hdev);
12217
12218 ret = hclge_tm_init_hw(hdev, true);
12219 if (ret) {
12220 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
12221 return ret;
12222 }
12223
12224 ret = hclge_rss_init_hw(hdev);
12225 if (ret) {
12226 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
12227 return ret;
12228 }
12229
12230 ret = init_mgr_tbl(hdev);
12231 if (ret) {
12232 dev_err(&pdev->dev,
12233 "failed to reinit manager table, ret = %d\n", ret);
12234 return ret;
12235 }
12236
12237 ret = hclge_init_fd_config(hdev);
12238 if (ret) {
12239 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
12240 return ret;
12241 }
12242
12243 ret = hclge_ptp_init(hdev);
12244 if (ret)
12245 return ret;
12246
12247 	/* Log and clear the hw errors that have already occurred */
12248 if (hnae3_dev_ras_imp_supported(hdev))
12249 hclge_handle_occurred_error(hdev);
12250 else
12251 hclge_handle_all_hns_hw_errors(ae_dev);
12252
12253 /* Re-enable the hw error interrupts because
12254 * the interrupts get disabled on global reset.
12255 */
12256 ret = hclge_config_nic_hw_error(hdev, true);
12257 if (ret) {
12258 dev_err(&pdev->dev,
12259 "fail(%d) to re-enable NIC hw error interrupts\n",
12260 ret);
12261 return ret;
12262 }
12263
12264 if (hdev->roce_client) {
12265 ret = hclge_config_rocee_ras_interrupt(hdev, true);
12266 if (ret) {
12267 dev_err(&pdev->dev,
12268 "fail(%d) to re-enable roce ras interrupts\n",
12269 ret);
12270 return ret;
12271 }
12272 }
12273
12274 hclge_reset_vport_state(hdev);
12275 ret = hclge_reset_vport_spoofchk(hdev);
12276 if (ret)
12277 return ret;
12278
12279 ret = hclge_resume_vf_rate(hdev);
12280 if (ret)
12281 return ret;
12282
12283 hclge_init_rxd_adv_layout(hdev);
12284
12285 ret = hclge_update_wol(hdev);
12286 if (ret)
12287 dev_warn(&pdev->dev,
12288 "failed to update wol config, ret = %d\n", ret);
12289
12290 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
12291 HCLGE_DRIVER_NAME);
12292
12293 return 0;
12294 }
12295
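/* Undo hclge_init_ae_dev(): clear VF rate and VLAN configuration,
 * unregister the MDIO bus, disable the misc vector and hardware error
 * interrupts, and release the command queue, devlink and PCI resources.
 */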
12296 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
12297 {
12298 struct hclge_dev *hdev = ae_dev->priv;
12299 struct hclge_mac *mac = &hdev->hw.mac;
12300
12301 hclge_reset_vf_rate(hdev);
12302 hclge_clear_vf_vlan(hdev);
12303 hclge_state_uninit(hdev);
12304 hclge_ptp_uninit(hdev);
12305 hclge_uninit_rxd_adv_layout(hdev);
12306 hclge_uninit_mac_table(hdev);
12307 hclge_del_all_fd_entries(hdev);
12308
12309 if (mac->phydev)
12310 mdiobus_unregister(mac->mdio_bus);
12311
12312 /* Disable MISC vector(vector0) */
12313 hclge_enable_vector(&hdev->misc_vector, false);
12314 synchronize_irq(hdev->misc_vector.vector_irq);
12315
12316 /* Disable all hw interrupts */
12317 hclge_config_mac_tnl_int(hdev, false);
12318 hclge_config_nic_hw_error(hdev, false);
12319 hclge_config_rocee_ras_interrupt(hdev, false);
12320
12321 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
12322 hclge_misc_irq_uninit(hdev);
12323 hclge_devlink_uninit(hdev);
12324 hclge_pci_uninit(hdev);
12325 hclge_uninit_vport_vlan_table(hdev);
12326 mutex_destroy(&hdev->vport_lock);
12327 ae_dev->priv = NULL;
12328 }
12329
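/* Maximum number of combined channels the PF can offer, limited by both
 * the PF RSS size and the TQPs allocated to the vport.
 */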
12330 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
12331 {
12332 struct hclge_vport *vport = hclge_get_vport(handle);
12333 struct hclge_dev *hdev = vport->back;
12334
12335 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12336 }
12337
12338 static void hclge_get_channels(struct hnae3_handle *handle,
12339 struct ethtool_channels *ch)
12340 {
12341 ch->max_combined = hclge_get_max_channels(handle);
12342 ch->other_count = 1;
12343 ch->max_other = 1;
12344 ch->combined_count = handle->kinfo.rss_size;
12345 }
12346
12347 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12348 u16 *alloc_tqps, u16 *max_rss_size)
12349 {
12350 struct hclge_vport *vport = hclge_get_vport(handle);
12351 struct hclge_dev *hdev = vport->back;
12352
12353 *alloc_tqps = vport->alloc_tqps;
12354 *max_rss_size = hdev->pf_rss_size_max;
12355 }
12356
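/* Program the per-TC RSS offset and size in hardware based on the current
 * rss_size; each enabled TC gets a power-of-two sized queue region.
 */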
12357 static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle)
12358 {
12359 struct hclge_vport *vport = hclge_get_vport(handle);
12360 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12361 struct hclge_dev *hdev = vport->back;
12362 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12363 u16 tc_valid[HCLGE_MAX_TC_NUM];
12364 u16 roundup_size;
12365 unsigned int i;
12366
12367 roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size);
12368 roundup_size = ilog2(roundup_size);
12369 /* Set the RSS TC mode according to the new RSS size */
12370 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12371 tc_valid[i] = 0;
12372
12373 if (!(hdev->hw_tc_map & BIT(i)))
12374 continue;
12375
12376 tc_valid[i] = 1;
12377 tc_size[i] = roundup_size;
12378 tc_offset[i] = vport->nic.kinfo.rss_size * i;
12379 }
12380
12381 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
12382 tc_size);
12383 }
12384
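/* Backs the .set_channels op: apply the requested TQP number through the
 * TM module, update the RSS TC mode and, unless the user has configured
 * the RSS indirection table, rebuild it for the new rss_size.
 */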
12385 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12386 bool rxfh_configured)
12387 {
12388 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12389 struct hclge_vport *vport = hclge_get_vport(handle);
12390 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12391 struct hclge_dev *hdev = vport->back;
12392 u16 cur_rss_size = kinfo->rss_size;
12393 u16 cur_tqps = kinfo->num_tqps;
12394 u32 *rss_indir;
12395 unsigned int i;
12396 int ret;
12397
12398 kinfo->req_rss_size = new_tqps_num;
12399
12400 ret = hclge_tm_vport_map_update(hdev);
12401 if (ret) {
12402 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12403 return ret;
12404 }
12405
12406 ret = hclge_set_rss_tc_mode_cfg(handle);
12407 if (ret)
12408 return ret;
12409
12410 /* RSS indirection table has been configured by user */
12411 if (rxfh_configured)
12412 goto out;
12413
12414 	/* Reinitialize the RSS indirection table according to the new RSS size */
12415 rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12416 GFP_KERNEL);
12417 if (!rss_indir)
12418 return -ENOMEM;
12419
12420 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12421 rss_indir[i] = i % kinfo->rss_size;
12422
12423 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12424 if (ret)
12425 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12426 ret);
12427
12428 kfree(rss_indir);
12429
12430 out:
12431 if (!ret)
12432 dev_info(&hdev->pdev->dev,
12433 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
12434 cur_rss_size, kinfo->rss_size,
12435 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12436
12437 return ret;
12438 }
12439
12440 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12441 {
12442 struct hclge_set_led_state_cmd *req;
12443 struct hclge_desc desc;
12444 int ret;
12445
12446 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12447
12448 req = (struct hclge_set_led_state_cmd *)desc.data;
12449 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12450 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12451
12452 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12453 if (ret)
12454 dev_err(&hdev->pdev->dev,
12455 "Send set led state cmd error, ret =%d\n", ret);
12456
12457 return ret;
12458 }
12459
12460 enum hclge_led_status {
12461 HCLGE_LED_OFF,
12462 HCLGE_LED_ON,
12463 HCLGE_LED_NO_CHANGE = 0xFF,
12464 };
12465
12466 static int hclge_set_led_id(struct hnae3_handle *handle,
12467 enum ethtool_phys_id_state status)
12468 {
12469 struct hclge_vport *vport = hclge_get_vport(handle);
12470 struct hclge_dev *hdev = vport->back;
12471
12472 switch (status) {
12473 case ETHTOOL_ID_ACTIVE:
12474 return hclge_set_led_status(hdev, HCLGE_LED_ON);
12475 case ETHTOOL_ID_INACTIVE:
12476 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12477 default:
12478 return -EINVAL;
12479 }
12480 }
12481
12482 static void hclge_get_link_mode(struct hnae3_handle *handle,
12483 unsigned long *supported,
12484 unsigned long *advertising)
12485 {
12486 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12487 struct hclge_vport *vport = hclge_get_vport(handle);
12488 struct hclge_dev *hdev = vport->back;
12489 unsigned int idx = 0;
12490
12491 for (; idx < size; idx++) {
12492 supported[idx] = hdev->hw.mac.supported[idx];
12493 advertising[idx] = hdev->hw.mac.advertising[idx];
12494 }
12495 }
12496
12497 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12498 {
12499 struct hclge_vport *vport = hclge_get_vport(handle);
12500 struct hclge_dev *hdev = vport->back;
12501 bool gro_en_old = hdev->gro_en;
12502 int ret;
12503
12504 hdev->gro_en = enable;
12505 ret = hclge_config_gro(hdev);
12506 if (ret)
12507 hdev->gro_en = gro_en_old;
12508
12509 return ret;
12510 }
12511
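/* Apply any pending promiscuous mode change for a vport. The PF uses its
 * netdev flags directly; a VF's requested unicast/multicast promiscuous
 * modes are only honoured when the VF is trusted.
 */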
12512 static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport)
12513 {
12514 struct hnae3_handle *handle = &vport->nic;
12515 struct hclge_dev *hdev = vport->back;
12516 bool uc_en = false;
12517 bool mc_en = false;
12518 u8 tmp_flags;
12519 bool bc_en;
12520 int ret;
12521
12522 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12523 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12524 vport->last_promisc_flags = vport->overflow_promisc_flags;
12525 }
12526
12527 if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12528 &vport->state))
12529 return 0;
12530
12531 /* for PF */
12532 if (!vport->vport_id) {
12533 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12534 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12535 tmp_flags & HNAE3_MPE);
12536 if (!ret)
12537 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12538 &vport->state);
12539 else
12540 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12541 &vport->state);
12542 return ret;
12543 }
12544
12545 /* for VF */
12546 if (vport->vf_info.trusted) {
12547 uc_en = vport->vf_info.request_uc_en > 0 ||
12548 vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE;
12549 mc_en = vport->vf_info.request_mc_en > 0 ||
12550 vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE;
12551 }
12552 bc_en = vport->vf_info.request_bc_en > 0;
12553
12554 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12555 mc_en, bc_en);
12556 if (ret) {
12557 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12558 return ret;
12559 }
12560 hclge_set_vport_vlan_fltr_change(vport);
12561
12562 return 0;
12563 }
12564
12565 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12566 {
12567 struct hclge_vport *vport;
12568 int ret;
12569 u16 i;
12570
12571 for (i = 0; i < hdev->num_alloc_vport; i++) {
12572 vport = &hdev->vport[i];
12573
12574 ret = hclge_sync_vport_promisc_mode(vport);
12575 if (ret)
12576 return;
12577 }
12578 }
12579
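/* Ask the firmware whether an SFP module is currently present. */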
12580 static bool hclge_module_existed(struct hclge_dev *hdev)
12581 {
12582 struct hclge_desc desc;
12583 u32 existed;
12584 int ret;
12585
12586 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12587 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12588 if (ret) {
12589 dev_err(&hdev->pdev->dev,
12590 "failed to get SFP exist state, ret = %d\n", ret);
12591 return false;
12592 }
12593
12594 existed = le32_to_cpu(desc.data[0]);
12595
12596 return existed != 0;
12597 }
12598
12599 /* need 6 BDs (140 bytes in total) for one read;
12600  * return the number of bytes actually read, 0 means the read failed.
12601  */
12602 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12603 u32 len, u8 *data)
12604 {
12605 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12606 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12607 u16 read_len;
12608 u16 copy_len;
12609 int ret;
12610 int i;
12611
12612 	/* set up all 6 BDs to read the module eeprom info. */
12613 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12614 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12615 true);
12616
12617 /* bd0~bd4 need next flag */
12618 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12619 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
12620 }
12621
12622 	/* set up bd0; this bd contains the offset and read length. */
12623 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12624 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12625 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12626 sfp_info_bd0->read_len = cpu_to_le16(read_len);
12627
12628 ret = hclge_cmd_send(&hdev->hw, desc, i);
12629 if (ret) {
12630 dev_err(&hdev->pdev->dev,
12631 "failed to get SFP eeprom info, ret = %d\n", ret);
12632 return 0;
12633 }
12634
12635 /* copy sfp info from bd0 to out buffer. */
12636 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12637 memcpy(data, sfp_info_bd0->data, copy_len);
12638 read_len = copy_len;
12639
12640 /* copy sfp info from bd1~bd5 to out buffer if needed. */
12641 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12642 if (read_len >= len)
12643 return read_len;
12644
12645 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12646 memcpy(data + read_len, desc[i].data, copy_len);
12647 read_len += copy_len;
12648 }
12649
12650 return read_len;
12651 }
12652
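/* Backs the .get_module_eeprom op: read the module EEPROM in chunks until
 * the requested length is filled; only supported on fiber ports with a
 * module plugged in.
 */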
12653 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12654 u32 len, u8 *data)
12655 {
12656 struct hclge_vport *vport = hclge_get_vport(handle);
12657 struct hclge_dev *hdev = vport->back;
12658 u32 read_len = 0;
12659 u16 data_len;
12660
12661 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12662 return -EOPNOTSUPP;
12663
12664 if (!hclge_module_existed(hdev))
12665 return -ENXIO;
12666
12667 while (read_len < len) {
12668 data_len = hclge_get_sfp_eeprom_info(hdev,
12669 offset + read_len,
12670 len - read_len,
12671 data + read_len);
12672 if (!data_len)
12673 return -EIO;
12674
12675 read_len += data_len;
12676 }
12677
12678 return 0;
12679 }
12680
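/* Query the firmware for the link diagnosis status code; only supported
 * on devices newer than version V2.
 */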
12681 static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
12682 u32 *status_code)
12683 {
12684 struct hclge_vport *vport = hclge_get_vport(handle);
12685 struct hclge_dev *hdev = vport->back;
12686 struct hclge_desc desc;
12687 int ret;
12688
12689 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
12690 return -EOPNOTSUPP;
12691
12692 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
12693 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12694 if (ret) {
12695 dev_err(&hdev->pdev->dev,
12696 "failed to query link diagnosis info, ret = %d\n", ret);
12697 return ret;
12698 }
12699
12700 *status_code = le32_to_cpu(desc.data[0]);
12701 return 0;
12702 }
12703
12704 /* After SRIOV is disabled, the VF still has some configuration and
12705  * info, configured by the PF, that needs to be cleaned.
12706  */
12707 static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid)
12708 {
12709 struct hclge_dev *hdev = vport->back;
12710 struct hclge_vlan_info vlan_info;
12711 int ret;
12712
12713 clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
12714 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
12715 vport->need_notify = 0;
12716 vport->mps = 0;
12717
12718 	/* after disabling SRIOV, clean the VF rate configured by the PF */
12719 ret = hclge_tm_qs_shaper_cfg(vport, 0);
12720 if (ret)
12721 dev_err(&hdev->pdev->dev,
12722 "failed to clean vf%d rate config, ret = %d\n",
12723 vfid, ret);
12724
12725 vlan_info.vlan_tag = 0;
12726 vlan_info.qos = 0;
12727 vlan_info.vlan_proto = ETH_P_8021Q;
12728 ret = hclge_update_port_base_vlan_cfg(vport,
12729 HNAE3_PORT_BASE_VLAN_DISABLE,
12730 &vlan_info);
12731 if (ret)
12732 dev_err(&hdev->pdev->dev,
12733 "failed to clean vf%d port base vlan, ret = %d\n",
12734 vfid, ret);
12735
12736 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false);
12737 if (ret)
12738 dev_err(&hdev->pdev->dev,
12739 "failed to clean vf%d spoof config, ret = %d\n",
12740 vfid, ret);
12741
12742 memset(&vport->vf_info, 0, sizeof(vport->vf_info));
12743 }
12744
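/* Backs the .clean_vf_config op: clear the per-VF configuration of the
 * first num_vfs VF vports once SRIOV has been disabled.
 */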
12745 static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
12746 {
12747 struct hclge_dev *hdev = ae_dev->priv;
12748 struct hclge_vport *vport;
12749 int i;
12750
12751 for (i = 0; i < num_vfs; i++) {
12752 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
12753
12754 hclge_clear_vport_vf_info(vport, i);
12755 }
12756 }
12757
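/* Report the TC map mode and the priority mapped to the given DSCP value
 * (0 when the DSCP value is unmapped).
 */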
12758 static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode,
12759 u8 *priority)
12760 {
12761 struct hclge_vport *vport = hclge_get_vport(h);
12762
12763 if (dscp >= HNAE3_MAX_DSCP)
12764 return -EINVAL;
12765
12766 if (tc_mode)
12767 *tc_mode = vport->nic.kinfo.tc_map_mode;
12768 if (priority)
12769 *priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 :
12770 vport->nic.kinfo.dscp_prio[dscp];
12771
12772 return 0;
12773 }
12774
12775 static const struct hnae3_ae_ops hclge_ops = {
12776 .init_ae_dev = hclge_init_ae_dev,
12777 .uninit_ae_dev = hclge_uninit_ae_dev,
12778 .reset_prepare = hclge_reset_prepare_general,
12779 .reset_done = hclge_reset_done,
12780 .init_client_instance = hclge_init_client_instance,
12781 .uninit_client_instance = hclge_uninit_client_instance,
12782 .map_ring_to_vector = hclge_map_ring_to_vector,
12783 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12784 .get_vector = hclge_get_vector,
12785 .put_vector = hclge_put_vector,
12786 .set_promisc_mode = hclge_set_promisc_mode,
12787 .request_update_promisc_mode = hclge_request_update_promisc_mode,
12788 .set_loopback = hclge_set_loopback,
12789 .start = hclge_ae_start,
12790 .stop = hclge_ae_stop,
12791 .client_start = hclge_client_start,
12792 .client_stop = hclge_client_stop,
12793 .get_status = hclge_get_status,
12794 .get_ksettings_an_result = hclge_get_ksettings_an_result,
12795 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12796 .get_media_type = hclge_get_media_type,
12797 .check_port_speed = hclge_check_port_speed,
12798 .get_fec_stats = hclge_get_fec_stats,
12799 .get_fec = hclge_get_fec,
12800 .set_fec = hclge_set_fec,
12801 .get_rss_key_size = hclge_comm_get_rss_key_size,
12802 .get_rss = hclge_get_rss,
12803 .set_rss = hclge_set_rss,
12804 .set_rss_tuple = hclge_set_rss_tuple,
12805 .get_rss_tuple = hclge_get_rss_tuple,
12806 .get_tc_size = hclge_get_tc_size,
12807 .get_mac_addr = hclge_get_mac_addr,
12808 .set_mac_addr = hclge_set_mac_addr,
12809 .do_ioctl = hclge_do_ioctl,
12810 .add_uc_addr = hclge_add_uc_addr,
12811 .rm_uc_addr = hclge_rm_uc_addr,
12812 .add_mc_addr = hclge_add_mc_addr,
12813 .rm_mc_addr = hclge_rm_mc_addr,
12814 .set_autoneg = hclge_set_autoneg,
12815 .get_autoneg = hclge_get_autoneg,
12816 .restart_autoneg = hclge_restart_autoneg,
12817 .halt_autoneg = hclge_halt_autoneg,
12818 .get_pauseparam = hclge_get_pauseparam,
12819 .set_pauseparam = hclge_set_pauseparam,
12820 .set_mtu = hclge_set_mtu,
12821 .reset_queue = hclge_reset_tqp,
12822 .get_stats = hclge_get_stats,
12823 .get_mac_stats = hclge_get_mac_stat,
12824 .update_stats = hclge_update_stats,
12825 .get_strings = hclge_get_strings,
12826 .get_sset_count = hclge_get_sset_count,
12827 .get_fw_version = hclge_get_fw_version,
12828 .get_mdix_mode = hclge_get_mdix_mode,
12829 .enable_vlan_filter = hclge_enable_vlan_filter,
12830 .set_vlan_filter = hclge_set_vlan_filter,
12831 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12832 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12833 .reset_event = hclge_reset_event,
12834 .get_reset_level = hclge_get_reset_level,
12835 .set_default_reset_request = hclge_set_def_reset_request,
12836 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12837 .set_channels = hclge_set_channels,
12838 .get_channels = hclge_get_channels,
12839 .get_regs_len = hclge_get_regs_len,
12840 .get_regs = hclge_get_regs,
12841 .set_led_id = hclge_set_led_id,
12842 .get_link_mode = hclge_get_link_mode,
12843 .add_fd_entry = hclge_add_fd_entry,
12844 .del_fd_entry = hclge_del_fd_entry,
12845 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12846 .get_fd_rule_info = hclge_get_fd_rule_info,
12847 .get_fd_all_rules = hclge_get_all_rules,
12848 .enable_fd = hclge_enable_fd,
12849 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
12850 .dbg_read_cmd = hclge_dbg_read_cmd,
12851 .handle_hw_ras_error = hclge_handle_hw_ras_error,
12852 .get_hw_reset_stat = hclge_get_hw_reset_stat,
12853 .ae_dev_resetting = hclge_ae_dev_resetting,
12854 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12855 .set_gro_en = hclge_gro_en,
12856 .get_global_queue_id = hclge_covert_handle_qid_global,
12857 .set_timer_task = hclge_set_timer_task,
12858 .mac_connect_phy = hclge_mac_connect_phy,
12859 .mac_disconnect_phy = hclge_mac_disconnect_phy,
12860 .get_vf_config = hclge_get_vf_config,
12861 .set_vf_link_state = hclge_set_vf_link_state,
12862 .set_vf_spoofchk = hclge_set_vf_spoofchk,
12863 .set_vf_trust = hclge_set_vf_trust,
12864 .set_vf_rate = hclge_set_vf_rate,
12865 .set_vf_mac = hclge_set_vf_mac,
12866 .get_module_eeprom = hclge_get_module_eeprom,
12867 .get_cmdq_stat = hclge_get_cmdq_stat,
12868 .add_cls_flower = hclge_add_cls_flower,
12869 .del_cls_flower = hclge_del_cls_flower,
12870 .cls_flower_active = hclge_is_cls_flower_active,
12871 .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
12872 .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
12873 .set_tx_hwts_info = hclge_ptp_set_tx_info,
12874 .get_rx_hwts = hclge_ptp_get_rx_hwts,
12875 .get_ts_info = hclge_ptp_get_ts_info,
12876 .get_link_diagnosis_info = hclge_get_link_diagnosis_info,
12877 .clean_vf_config = hclge_clean_vport_config,
12878 .get_dscp_prio = hclge_get_dscp_prio,
12879 .get_wol = hclge_get_wol,
12880 .set_wol = hclge_set_wol,
12881 };
12882
12883 static struct hnae3_ae_algo ae_algo = {
12884 .ops = &hclge_ops,
12885 .pdev_id_table = ae_algo_pci_tbl,
12886 };
12887
12888 static int __init hclge_init(void)
12889 {
12890 pr_info("%s is initializing\n", HCLGE_NAME);
12891
12892 hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
12893 if (!hclge_wq) {
12894 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
12895 return -ENOMEM;
12896 }
12897
12898 hnae3_register_ae_algo(&ae_algo);
12899
12900 return 0;
12901 }
12902
12903 static void __exit hclge_exit(void)
12904 {
12905 hnae3_unregister_ae_algo_prepare(&ae_algo);
12906 hnae3_unregister_ae_algo(&ae_algo);
12907 destroy_workqueue(hclge_wq);
12908 }
12909 module_init(hclge_init);
12910 module_exit(hclge_exit);
12911
12912 MODULE_LICENSE("GPL");
12913 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12914 MODULE_DESCRIPTION("HCLGE Driver");
12915 MODULE_VERSION(HCLGE_MOD_VERSION);
12916