// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclgevf_regs.h"
#include "hclge_mbx.h"
#include "hnae3.h"
#include "hclgevf_devlink.h"
#include "hclge_comm_rss.h"
#include "hclgevf_trace.h"

#define HCLGEVF_NAME	"hclgevf"

#define HCLGEVF_RESET_MAX_FAIL_CNT	5

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay);

static struct hnae3_ae_algo ae_algovf;

static struct workqueue_struct *hclgevf_wq;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

/* hclgevf_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send routine for the command queue; it posts the
 * descriptors to the queue, cleans completed entries, etc.
 */
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num)
{
	return hclge_comm_cmd_send(&hw->hw, desc, num);
}

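/* Illustrative sketch (not part of the driver): a minimal single-descriptor
 * firmware command issued through hclgevf_cmd_send(), mirroring the pattern
 * hclgevf_tqp_enable_cmd_send() uses later in this file. The opcode and
 * payload layout are whatever the specific command requires:
 *
 *	struct hclge_desc desc;
 *	int ret;
 *
 *	hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
 *	... fill desc.data[] with the command payload ...
 *	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
 */
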
static void hclgevf_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
				   int num, bool is_special)
{
	int i;

	trace_hclge_vf_cmd_send(hw, desc, 0, num);

	if (is_special)
		return;

	for (i = 1; i < num; i++)
		trace_hclge_vf_cmd_send(hw, &desc[i], i, num);
}

static void hclgevf_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc,
				  int num, bool is_special)
{
	int i;

	if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
		return;

	trace_hclge_vf_cmd_get(hw, desc, 0, num);

	if (is_special)
		return;

	for (i = 1; i < num; i++)
		trace_hclge_vf_cmd_get(hw, &desc[i], i, num);
}

static const struct hclge_comm_cmq_ops hclgevf_cmq_ops = {
	.trace_cmd_send = hclgevf_trace_cmd_send,
	.trace_cmd_get = hclgevf_trace_cmd_get,
};

void hclgevf_arq_init(struct hclgevf_dev *hdev)
{
	struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq;

	spin_lock(&cmdq->crq.lock);
	/* initialize the pointers of async rx queue of mailbox */
	hdev->arq.hdev = hdev;
	hdev->arq.head = 0;
	hdev->arq.tail = 0;
	atomic_set(&hdev->arq.count, 0);
	spin_unlock(&cmdq->crq.lock);
}

struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}

static void hclgevf_update_stats(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclge_comm_tqps_get_sset_count(handle);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 **data)
{
	if (strset == ETH_SS_STATS)
		hclge_comm_tqps_get_strings(handle, data);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclge_comm_tqps_get_stats(handle, data);
}

static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
				   u8 subcode)
{
	if (msg) {
		memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
		msg->code = code;
		msg->subcode = subcode;
	}
}

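/* Illustrative sketch (not part of the driver): the VF-to-PF mailbox pattern
 * used by almost every helper below. hclgevf_build_send_msg() zeroes the
 * message and fills code/subcode; hclgevf_send_mbx_msg() transmits it and,
 * when the third argument is true, waits synchronously for the PF response:
 *
 *	struct hclge_vf_to_pf_msg send_msg;
 *	u8 resp[HCLGE_MBX_MAX_RESP_DATA_SIZE];
 *	int ret;
 *
 *	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
 *	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp, sizeof(resp));
 */
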
static int hclgevf_get_basic_info(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE];
	struct hclge_basic_info *basic_info;
	struct hclge_vf_to_pf_msg send_msg;
	unsigned long caps;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      sizeof(resp_msg));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"failed to get basic info from pf, ret = %d", status);
		return status;
	}

	basic_info = (struct hclge_basic_info *)resp_msg;

	hdev->hw_tc_map = basic_info->hw_tc_map;
	hdev->mbx_api_version = le16_to_cpu(basic_info->mbx_api_version);
	caps = le32_to_cpu(basic_info->pf_caps);
	if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps))
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	return 0;
}

static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				   sizeof(u8));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get port based vlan state failed %d",
			ret);
		return ret;
	}

	nic->port_base_vlan_state = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	6

	struct hclge_mbx_vf_queue_info *queue_info;
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg;
	hdev->num_tqps = le16_to_cpu(queue_info->num_tqps);
	hdev->rss_size_max = le16_to_cpu(queue_info->rss_size);
	hdev->rx_buf_len = le16_to_cpu(queue_info->rx_buf_len);

	return 0;
}

static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4

	struct hclge_mbx_vf_queue_depth *queue_depth;
	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp depth info from PF failed %d",
			ret);
		return ret;
	}

	queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg;
	hdev->num_tx_desc = le16_to_cpu(queue_depth->num_tx_desc);
	hdev->num_rx_desc = le16_to_cpu(queue_depth->num_rx_desc);

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u16 qid_in_pf = 0;
	u8 resp_data[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
	*(__le16 *)send_msg.data = cpu_to_le16(queue_id);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
				   sizeof(resp_data));
	if (!ret)
		qid_in_pf = le16_to_cpu(*(__le16 *)resp_data);

	return qid_in_pf;
}

static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get the pf port media type failed %d",
			ret);
		return ret;
	}

	hdev->hw.mac.media_type = resp_msg[0];
	hdev->hw.mac.module_type = resp_msg[1];

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_comm_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_comm_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGEVF_TQP_MAX_SIZE_DEV_V2.
		 */
		if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 i * HCLGEVF_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 HCLGEVF_TQP_EXT_REG_OFFSET +
					 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
					 HCLGEVF_TQP_REG_SIZE;

		/* when device supports tx push and has device memory,
		 * the queue can execute push mode or doorbell mode on
		 * device memory.
		 */
		if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
			tqp->q.mem_base = hdev->hw.hw.mem_base +
					  HCLGEVF_TQP_MEM_OFFSET(hdev, i);

		tqp++;
	}

	return 0;
}

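/* Offset sketch for hclgevf_alloc_tqps() above (symbolic, no hardware values
 * assumed): queue i < HCLGEVF_TQP_MAX_SIZE_DEV_V2 maps to
 *
 *	io_base + HCLGEVF_TQP_REG_OFFSET + i * HCLGEVF_TQP_REG_SIZE
 *
 * while queues at or beyond the limit restart counting inside the extended
 * window, e.g. queue (HCLGEVF_TQP_MAX_SIZE_DEV_V2 + 3) maps to
 *
 *	io_base + HCLGEVF_TQP_REG_OFFSET + HCLGEVF_TQP_EXT_REG_OFFSET +
 *	3 * HCLGEVF_TQP_REG_SIZE
 */
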
static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	unsigned int i;
	u8 num_tc = 0;

	kinfo = &nic->kinfo;
	kinfo->num_tx_desc = hdev->num_tx_desc;
	kinfo->num_rx_desc = hdev->num_rx_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			num_tc++;

	num_tc = num_tc ? num_tc : 1;
	kinfo->tc_info.num_tc = num_tc;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc);
	new_tqps = kinfo->rss_size * num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	/* after initializing the max rss_size and tqps, adjust the default
	 * tqp number and rss size to match the actual vector count
	 */
	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
	kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc,
				kinfo->rss_size);

	return 0;
}

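/* Worked example for hclgevf_knic_setup() above, with made-up numbers:
 * num_tqps = 16, hw_tc_map = 0x3 (so num_tc = 2), rss_size_max = 8 and
 * num_nic_msix = 9. First pass: rss_size = min(8, 16 / 2) = 8 and
 * num_tqps = min(8 * 2, 16) = 16. After capping by the vector budget:
 * num_tqps = min(9 - 1, 16) = 8 and rss_size = min(8 / 2, 8) = 4.
 */
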
static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
		return;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
	if (link_state != hdev->hw.mac.link) {
		hdev->hw.mac.link = link_state;
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
	}

	clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
}

static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING	0
#define HCLGEVF_SUPPORTED	1

	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
	send_msg.data[0] = HCLGEVF_ADVERTISING;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	send_msg.data[0] = HCLGEVF_SUPPORTED;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
		    MAX_NUMNODES);
	nic->flags |= HNAE3_SUPPORT_VF;
	nic->kinfo.io_base = hdev->hw.hw.io_base;

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

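/* Note on hclgevf_get_vector() above: the scan starts at
 * HCLGEVF_MISC_VECTOR_NUM + 1 because, as the loop bounds suggest, the
 * first MSI-X entry is reserved for the misc interrupt. The doorbell of
 * data vector i then sits at
 * io_base + HCLGEVF_VECTOR_REG_BASE + (i - 1) * HCLGEVF_VECTOR_REG_OFFSET,
 * so the first data vector maps to HCLGEVF_VECTOR_REG_BASE itself.
 */
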
static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

/* for revision 0x20, the VF shares the same rss config with the PF */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN	8
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	u16 msg_num, hash_key_index;
	u8 index;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
	msg_num = (HCLGE_COMM_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
			HCLGEVF_RSS_MBX_RESP_LEN;
	for (index = 0; index < msg_num; index++) {
		send_msg.data[0] = index;
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
					   HCLGEVF_RSS_MBX_RESP_LEN);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"VF get rss hash key from PF failed, ret=%d",
				ret);
			return ret;
		}

		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
		if (index == msg_num - 1)
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0],
			       HCLGE_COMM_RSS_KEY_SIZE - hash_key_index);
		else
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
	}

	return 0;
}

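/* Worked example for hclgevf_get_rss_hash_key() above, assuming the usual
 * 40-byte hash key (HCLGE_COMM_RSS_KEY_SIZE): the key arrives in
 * msg_num = DIV_ROUND_UP(40, 8) = 5 mailbox chunks of
 * HCLGEVF_RSS_MBX_RESP_LEN = 8 bytes each. The final-chunk memcpy copies
 * 40 - 4 * 8 = 8 bytes here; it only differs from the others when the key
 * size is not a multiple of the chunk length.
 */
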
static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
	} else {
		if (hfunc)
			*hfunc = ETH_RSS_HASH_TOP;
		if (key) {
			ret = hclgevf_get_rss_hash_key(hdev);
			if (ret)
				return ret;
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGE_COMM_RSS_KEY_SIZE);
		}
	}

	hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
				     hdev->ae_dev->dev_specs.rss_ind_tbl_size);

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key,
						  hfunc);
		if (ret)
			return ret;
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
					      rss_cfg->rss_indirection_tbl);
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
				       &hdev->rss_cfg, nfc);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set rss tuple, ret = %d.\n", ret);

	return ret;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 tuple_sets;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	nfc->data = 0;

	ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type,
				       &tuple_sets);
	if (ret || !tuple_sets)
		return ret;

	nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);

	return 0;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	struct hnae3_ring_chain_node *node;
	int status;
	int i = 0;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
		HCLGE_MBX_UNMAP_RING_TO_VECTOR;
	send_msg.vector_id = vector_id;

	for (node = ring_chain; node; node = node->next) {
		send_msg.param[i].ring_type =
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);

		send_msg.param[i].tqp_index = node->tqp_index;
		send_msg.param[i].int_gl_index =
					hnae3_get_field(node->int_gl_idx,
							HNAE3_RING_GL_IDX_M,
							HNAE3_RING_GL_IDX_S);

		i++;
		if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
			send_msg.ring_num = i;

			status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
						      NULL, 0);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
		}
	}

	return 0;
}

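/* Illustrative batching for hclgevf_bind_ring_to_vector() above: the ring
 * chain is flushed to the PF in groups of HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM
 * entries. Assuming that limit is 4, a 10-ring chain goes out as three
 * mailbox messages carrying 4, 4 and 2 ring parameters, the last message
 * triggered by !node->next rather than by a full batch.
 */
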
static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret =%d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc,
					bool en_bc_pmc)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
	send_msg.en_bc = en_bc_pmc ? 1 : 0;
	send_msg.en_uc = en_uc_pmc ? 1 : 0;
	send_msg.en_mc = en_mc_pmc ? 1 : 0;
	send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC,
					     &handle->priv_flags) ? 1 : 0;

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				    bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	bool en_bc_pmc;

	en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	hclgevf_task_schedule(hdev, 0);
}

static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->nic;
	bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
	bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
	int ret;

	if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
		ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
		if (!ret)
			clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	}
}

static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id,
				       u16 stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclge_desc desc;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;

	return hclgevf_cmd_send(&hdev->hw, &desc, 1);
}

static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;
	u16 i;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 host_mac[ETH_ALEN];
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
				      ETH_ALEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"fail to get VF MAC from host %d", status);
		return status;
	}

	ether_addr_copy(p, host_mac);

	return 0;
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 host_mac_addr[ETH_ALEN];

	if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
		return;

	hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
	if (hdev->has_pf_mac)
		ether_addr_copy(p, host_mac_addr);
	else
		ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	struct hclge_vf_to_pf_msg send_msg;
	u8 *new_mac_addr = (u8 *)p;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
	send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
	ether_addr_copy(send_msg.data, new_mac_addr);
	if (is_first && !hdev->has_pf_mac)
		eth_zero_addr(&send_msg.data[ETH_ALEN]);
	else
		ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static struct hclgevf_mac_addr_node *
hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node)
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
			return mac_node;

	return NULL;
}

static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGEVF_MAC_TO_ADD:
		if (mac_node->state == HCLGEVF_MAC_TO_DEL)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGEVF_MAC_TO_DEL:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * HCLGEVF_MAC_ACTIVE
	 */
	case HCLGEVF_MAC_ACTIVE:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	}
}

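/* Summary of the transitions in hclgevf_update_mac_node() above:
 *
 *	request    current state   result
 *	TO_ADD     TO_DEL          ACTIVE (add cancels the pending delete)
 *	TO_DEL     TO_ADD          node freed (delete before ever sent to PF)
 *	TO_DEL     ACTIVE/TO_DEL   TO_DEL
 *	ACTIVE     TO_ADD          ACTIVE (add confirmed from tmp_add_list)
 *
 * Any combination not listed leaves the node unchanged.
 */
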
static int hclgevf_update_mac_list(struct hnae3_handle *handle,
				   enum HCLGEVF_MAC_NODE_STATE state,
				   enum HCLGEVF_MAC_ADDR_TYPE mac_type,
				   const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_mac_addr_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	/* if the mac addr is already in the mac list, there is no need to
	 * add a new entry; just check the mac addr state and convert it to
	 * a new state, remove it, or do nothing.
	 */
	mac_node = hclgevf_find_mac_node(list, addr);
	if (mac_node) {
		hclgevf_update_mac_node(mac_node, state);
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return 0;
	}
	/* if this address was never added, there is nothing to delete */
	if (state == HCLGEVF_MAC_TO_DEL) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOMEM;
	}

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
	return 0;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
				    struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 code, subcode;

	if (mac_type == HCLGEVF_MAC_ADDR_UC) {
		code = HCLGE_MBX_SET_UNICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
	} else {
		code = HCLGE_MBX_SET_MULTICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
	}

	hclgevf_build_send_msg(&send_msg, code, subcode);
	ether_addr_copy(send_msg.data, mac_node->mac_addr);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
				    struct list_head *list,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclgevf_mac_addr_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
		if (ret) {
			hnae3_format_mac_addr(format_mac_addr,
					      mac_node->mac_addr);
			dev_err(&hdev->pdev->dev,
				"failed to configure mac %s, state = %d, ret = %d\n",
				format_mac_addr, mac_node->state, ret);
			return;
		}
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

static void hclgevf_sync_from_add_list(struct list_head *add_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means a TO_DEL request was received
		 * during the time window of sending the mac config request
		 * to the PF. If the mac_node state is ACTIVE, change it to
		 * TO_DEL so the address is removed next time. If it is
		 * TO_ADD, the TO_ADD request failed, so just remove the
		 * mac node.
		 */
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclgevf_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
			list_move_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

static void hclgevf_sync_from_del_list(struct list_head *del_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr exists in the mac list, it means a
			 * new TO_ADD request was received during the time
			 * window of sending the mac addr config request to
			 * the PF, so just change the mac state to ACTIVE.
			 */
			new_node->state = HCLGEVF_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_move_tail(&mac_node->node, mac_list);
		}
	}
}

static void hclgevf_clear_list(struct list_head *list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}

static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
				  enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addrs outside the spin lock
	 */
	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
		&hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGEVF_MAC_TO_DEL:
			list_move_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGEVF_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;

			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&hdev->mac_table.mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
	hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);

	/* if adding/deleting some mac addresses failed, move them back to
	 * the mac_list and retry next time.
	 */
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_sync_from_del_list(&tmp_del_list, list);
	hclgevf_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

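/* Flow summary for hclgevf_sync_mac_list() above: entries are staged onto
 * tmp_del_list/tmp_add_list under mac_list_lock, the mailbox requests are
 * then issued without the lock held (deletes first, to free table space),
 * and whatever failed or raced with a concurrent set_rx_mode update is
 * merged back into the main list by hclgevf_sync_from_del_list() and
 * hclgevf_sync_from_add_list() for a retry on a later pass.
 */
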
static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
{
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
}

static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
{
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
	hclgevf_clear_list(&hdev->mac_table.mc_mac_list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	struct hclge_vf_to_pf_msg send_msg;

	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
		return -EOPNOTSUPP;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_ENABLE_VLAN_FILTER);
	send_msg.data[0] = enable ? 1 : 0;

	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_mbx_vlan_filter *vlan_filter;
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* When the device is resetting or the reset has failed, the firmware
	 * is unable to handle mailbox messages. Just record the vlan id, and
	 * remove it after the reset has finished.
	 */
	if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	     test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
		return -EBUSY;
	} else if (!is_kill && test_bit(vlan_id, hdev->vlan_del_fail_bmap)) {
		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_FILTER);
	vlan_filter = (struct hclge_mbx_vlan_filter *)send_msg.data;
	vlan_filter->is_kill = is_kill;
	vlan_filter->vlan_id = cpu_to_le16(vlan_id);
	vlan_filter->proto = cpu_to_le16(be16_to_cpu(proto));

	/* when removing the hw vlan filter fails, record the vlan id, and
	 * try to remove it from hw later, to stay consistent with the stack.
	 */
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (is_kill && ret)
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);

	return ret;
}

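/* Illustrative flow for the deferred-delete bitmap above: if, say, vlan 100
 * is killed while a reset is in progress, hclgevf_set_vlan_filter() returns
 * -EBUSY and sets bit 100 in vlan_del_fail_bmap. Once the device is usable
 * again, hclgevf_sync_vlan_filter() below walks the bitmap, retries each
 * delete (up to HCLGEVF_MAX_SYNC_COUNT ids per pass) and clears the bit on
 * success.
 */
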
static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_SYNC_COUNT	60
	struct hnae3_handle *handle = &hdev->nic;
	int ret, sync_cnt = 0;
	u16 vlan_id;

	if (bitmap_empty(hdev->vlan_del_fail_bmap, VLAN_N_VID))
		return;

	rtnl_lock();
	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	while (vlan_id != VLAN_N_VID) {
		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
					      vlan_id, true);
		if (ret)
			break;

		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
		sync_cnt++;
		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
			break;

		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	}
	rtnl_unlock();
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_RX_OFF_CFG);
	send_msg.data[0] = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle)
{
#define HCLGEVF_RESET_ALL_QUEUE_DONE	1U
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u8 return_status = 0;
	int ret;
	u16 i;

	/* disable the vf queues before sending the queue reset msg to PF */
	ret = hclgevf_tqp_enable(handle, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n",
			ret);
		return ret;
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status,
				   sizeof(return_status));
	if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE)
		return ret;

	for (i = 1; i < handle->kinfo.num_tqps; i++) {
		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
		*(__le16 *)send_msg.data = cpu_to_le16(i);
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_mbx_mtu_info *mtu_info;
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
	mtu_info = (struct hclge_mbx_mtu_info *)send_msg.data;
	mtu_info->mtu = cpu_to_le32(new_mtu);

	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
	    !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
				      enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->roce_client;
	struct hnae3_handle *handle = &hdev->roce;
	int ret;

	if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
			type, ret);
	return ret;
}

static void hclgevf_set_reset_pending(struct hclgevf_dev *hdev,
				      enum hnae3_reset_type reset_type)
{
	/* When an incorrect reset type is requested, the get_reset_level
	 * function generates the HNAE3_NONE_RESET flag. As a result, this
	 * type does not need to be set pending.
	 */
	if (reset_type != HNAE3_NONE_RESET)
		set_bit(reset_type, &hdev->reset_pending);
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	if (hdev->reset_type == HNAE3_VF_RESET)
		ret = readl_poll_timeout(hdev->hw.hw.io_base +
					 HCLGEVF_VF_RST_ING, val,
					 !(val & HCLGEVF_VF_RST_ING_BIT),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
	else
		ret = readl_poll_timeout(hdev->hw.hw.io_base +
					 HCLGEVF_RST_ING, val,
					 !(val & HCLGEVF_RST_ING_BITS),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* we will wait a bit longer to let the reset of the stack complete.
	 * This might happen when the reset assertion was made by the PF.
	 * Yes, this also means we may end up waiting a bit longer even for
	 * a VF reset.
	 */
	if (hdev->reset_type == HNAE3_VF_FULL_RESET)
		msleep(5000);
	else
		msleep(500);

	return 0;
}

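/* Timing note for hclgevf_reset_wait() above: the status register is polled
 * every HCLGEVF_RESET_WAIT_US (20000 us, i.e. 20 ms) for up to
 * HCLGEVF_RESET_WAIT_TIMEOUT_US = 20000 * 2000 us, i.e. 40 seconds, before
 * the hardware reset is declared stuck.
 */
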
hclgevf_reset_handshake(struct hclgevf_dev * hdev,bool enable)1449 static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
1450 {
1451 	u32 reg_val;
1452 
1453 	reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
1454 	if (enable)
1455 		reg_val |= HCLGEVF_NIC_SW_RST_RDY;
1456 	else
1457 		reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;
1458 
1459 	hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG,
1460 			  reg_val);
1461 }
1462 
hclgevf_reset_stack(struct hclgevf_dev * hdev)1463 static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
1464 {
1465 	int ret;
1466 
1467 	/* uninitialize the nic client */
1468 	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
1469 	if (ret)
1470 		return ret;
1471 
1472 	/* re-initialize the hclge device */
1473 	ret = hclgevf_reset_hdev(hdev);
1474 	if (ret) {
1475 		dev_err(&hdev->pdev->dev,
1476 			"hclge device re-init failed, VF is disabled!\n");
1477 		return ret;
1478 	}
1479 
1480 	/* bring up the nic client again */
1481 	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
1482 	if (ret)
1483 		return ret;
1484 
1485 	/* clear handshake status with IMP */
1486 	hclgevf_reset_handshake(hdev, false);
1487 
1488 	/* bring up the nic to enable TX/RX again */
1489 	return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
1490 }
1491 
hclgevf_reset_prepare_wait(struct hclgevf_dev * hdev)1492 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
1493 {
1494 #define HCLGEVF_RESET_SYNC_TIME 100
1495 
1496 	if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
1497 		struct hclge_vf_to_pf_msg send_msg;
1498 		int ret;
1499 
1500 		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
1501 		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
1502 		if (ret) {
1503 			dev_err(&hdev->pdev->dev,
1504 				"failed to assert VF reset, ret = %d\n", ret);
1505 			return ret;
1506 		}
1507 		hdev->rst_stats.vf_func_rst_cnt++;
1508 	}
1509 
1510 	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
1511 	/* inform hardware that preparatory work is done */
1512 	msleep(HCLGEVF_RESET_SYNC_TIME);
1513 	hclgevf_reset_handshake(hdev, true);
1514 	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n",
1515 		 hdev->reset_type);
1516 
1517 	return 0;
1518 }
1519 
hclgevf_dump_rst_info(struct hclgevf_dev * hdev)1520 static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
1521 {
1522 	dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
1523 		 hdev->rst_stats.vf_func_rst_cnt);
1524 	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
1525 		 hdev->rst_stats.flr_rst_cnt);
1526 	dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
1527 		 hdev->rst_stats.vf_rst_cnt);
1528 	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
1529 		 hdev->rst_stats.rst_done_cnt);
1530 	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
1531 		 hdev->rst_stats.hw_rst_done_cnt);
1532 	dev_info(&hdev->pdev->dev, "reset count: %u\n",
1533 		 hdev->rst_stats.rst_cnt);
1534 	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
1535 		 hdev->rst_stats.rst_fail_cnt);
1536 	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
1537 		 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
1538 	dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
1539 		 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG));
1540 	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
1541 		 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG));
1542 	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
1543 		 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
1544 	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
1545 }
1546 
hclgevf_reset_err_handle(struct hclgevf_dev * hdev)1547 static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
1548 {
1549 	/* recover handshake status with IMP when reset fail */
1550 	hclgevf_reset_handshake(hdev, true);
1551 	hdev->rst_stats.rst_fail_cnt++;
1552 	dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
1553 		hdev->rst_stats.rst_fail_cnt);
1554 
1555 	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
1556 		hclgevf_set_reset_pending(hdev, hdev->reset_type);
1557 
1558 	if (hclgevf_is_reset_pending(hdev)) {
1559 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1560 		hclgevf_reset_task_schedule(hdev);
1561 	} else {
1562 		set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
1563 		hclgevf_dump_rst_info(hdev);
1564 	}
1565 }
1566 
hclgevf_reset_prepare(struct hclgevf_dev * hdev)1567 static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
1568 {
1569 	int ret;
1570 
1571 	hdev->rst_stats.rst_cnt++;
1572 
1573 	/* perform reset of the stack & ae device for a client */
1574 	ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
1575 	if (ret)
1576 		return ret;
1577 
1578 	rtnl_lock();
1579 	/* bring down the nic to stop any ongoing TX/RX */
1580 	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
1581 	rtnl_unlock();
1582 	if (ret)
1583 		return ret;
1584 
1585 	return hclgevf_reset_prepare_wait(hdev);
1586 }
1587 
hclgevf_reset_rebuild(struct hclgevf_dev * hdev)1588 static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
1589 {
1590 	int ret;
1591 
1592 	hdev->rst_stats.hw_rst_done_cnt++;
1593 	ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
1594 	if (ret)
1595 		return ret;
1596 
1597 	rtnl_lock();
1598 	/* now, re-initialize the nic client and ae device */
1599 	ret = hclgevf_reset_stack(hdev);
1600 	rtnl_unlock();
1601 	if (ret) {
1602 		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
1603 		return ret;
1604 	}
1605 
1606 	ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
1607 	/* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1
1608 	 * times
1609 	 */
1610 	if (ret &&
1611 	    hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1)
1612 		return ret;
1613 
1614 	ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT);
1615 	if (ret)
1616 		return ret;
1617 
1618 	hdev->last_reset_time = jiffies;
1619 	hdev->rst_stats.rst_done_cnt++;
1620 	hdev->rst_stats.rst_fail_cnt = 0;
1621 	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
1622 
1623 	return 0;
1624 }
1625 
hclgevf_reset(struct hclgevf_dev * hdev)1626 static void hclgevf_reset(struct hclgevf_dev *hdev)
1627 {
1628 	if (hclgevf_reset_prepare(hdev))
1629 		goto err_reset;
1630 
1631 	/* check whether the VF can successfully fetch the hardware reset
1632 	 * completion status
1633 	 */
1634 	if (hclgevf_reset_wait(hdev)) {
1635 		/* can't do much in this situation, will disable VF */
1636 		dev_err(&hdev->pdev->dev,
1637 			"failed to fetch H/W reset completion status\n");
1638 		goto err_reset;
1639 	}
1640 
1641 	if (hclgevf_reset_rebuild(hdev))
1642 		goto err_reset;
1643 
1644 	return;
1645 
1646 err_reset:
1647 	hclgevf_reset_err_handle(hdev);
1648 }
1649 
hclgevf_get_reset_level(unsigned long * addr)1650 static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr)
1651 {
1652 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
1653 
1654 	/* return the highest priority reset level amongst all */
1655 	if (test_bit(HNAE3_VF_RESET, addr)) {
1656 		rst_level = HNAE3_VF_RESET;
1657 		clear_bit(HNAE3_VF_RESET, addr);
1658 		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
1659 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
1660 	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
1661 		rst_level = HNAE3_VF_FULL_RESET;
1662 		clear_bit(HNAE3_VF_FULL_RESET, addr);
1663 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
1664 	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
1665 		rst_level = HNAE3_VF_PF_FUNC_RESET;
1666 		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
1667 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
1668 	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
1669 		rst_level = HNAE3_VF_FUNC_RESET;
1670 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
1671 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
1672 		rst_level = HNAE3_FLR_RESET;
1673 		clear_bit(HNAE3_FLR_RESET, addr);
1674 	}
1675 
1676 	clear_bit(HNAE3_NONE_RESET, addr);
1677 
1678 	return rst_level;
1679 }
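
/* Illustrative usage sketch (not part of the driver): since
 * hclgevf_get_reset_level() clears the bits it folds into the returned
 * level, a caller could drain every pending reset in priority order with
 * a simple loop; handle_one_reset() below is a hypothetical helper name.
 *
 *	enum hnae3_reset_type level;
 *
 *	while ((level = hclgevf_get_reset_level(&hdev->reset_pending)) !=
 *	       HNAE3_NONE_RESET)
 *		handle_one_reset(level);
 */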
1680 
hclgevf_reset_event(struct pci_dev * pdev,struct hnae3_handle * handle)1681 static void hclgevf_reset_event(struct pci_dev *pdev,
1682 				struct hnae3_handle *handle)
1683 {
1684 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1685 	struct hclgevf_dev *hdev = ae_dev->priv;
1686 
1687 	if (hdev->default_reset_request)
1688 		hdev->reset_level =
1689 			hclgevf_get_reset_level(&hdev->default_reset_request);
1690 	else
1691 		hdev->reset_level = HNAE3_VF_FUNC_RESET;
1692 
1693 	dev_info(&hdev->pdev->dev, "received reset request from VF enet, reset level is %d\n",
1694 		 hdev->reset_level);
1695 
1696 	/* reset of this VF requested */
1697 	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
1698 	hclgevf_reset_task_schedule(hdev);
1699 
1700 	hdev->last_reset_time = jiffies;
1701 }
1702 
hclgevf_set_def_reset_request(struct hnae3_ae_dev * ae_dev,enum hnae3_reset_type rst_type)1703 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
1704 					  enum hnae3_reset_type rst_type)
1705 {
1706 #define HCLGEVF_SUPPORT_RESET_TYPE \
1707 	(BIT(HNAE3_VF_RESET) | BIT(HNAE3_VF_FUNC_RESET) | \
1708 	BIT(HNAE3_VF_PF_FUNC_RESET) | BIT(HNAE3_VF_FULL_RESET) | \
1709 	BIT(HNAE3_FLR_RESET) | BIT(HNAE3_VF_EXP_RESET))
1710 
1711 	struct hclgevf_dev *hdev = ae_dev->priv;
1712 
1713 	if (!(BIT(rst_type) & HCLGEVF_SUPPORT_RESET_TYPE)) {
1714 		/* To prevent reset triggered by hclgevf_reset_event */
1715 		set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
1716 		dev_info(&hdev->pdev->dev, "unsupported reset type %d\n",
1717 			 rst_type);
1718 		return;
1719 	}
1720 	set_bit(rst_type, &hdev->default_reset_request);
1721 }
1722 
hclgevf_enable_vector(struct hclgevf_misc_vector * vector,bool en)1723 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
1724 {
1725 	writel(en ? 1 : 0, vector->addr);
1726 }
1727 
hclgevf_reset_prepare_general(struct hnae3_ae_dev * ae_dev,enum hnae3_reset_type rst_type)1728 static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
1729 					  enum hnae3_reset_type rst_type)
1730 {
1731 #define HCLGEVF_RESET_RETRY_WAIT_MS	500
1732 #define HCLGEVF_RESET_RETRY_CNT		5
1733 
1734 	struct hclgevf_dev *hdev = ae_dev->priv;
1735 	int retry_cnt = 0;
1736 	int ret;
1737 
1738 	while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
1739 		down(&hdev->reset_sem);
1740 		set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1741 		hdev->reset_type = rst_type;
1742 		ret = hclgevf_reset_prepare(hdev);
1743 		if (!ret && !hdev->reset_pending)
1744 			break;
1745 
1746 		dev_err(&hdev->pdev->dev,
1747 			"failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
1748 			ret, hdev->reset_pending, retry_cnt);
1749 		clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1750 		up(&hdev->reset_sem);
1751 		msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
1752 	}
1753 
1754 	/* disable misc vector before reset is done */
1755 	hclgevf_enable_vector(&hdev->misc_vector, false);
1756 
1757 	if (hdev->reset_type == HNAE3_FLR_RESET)
1758 		hdev->rst_stats.flr_rst_cnt++;
1759 }
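
/* A rough upper bound for the retry loop above, using the constants
 * defined in it: HCLGEVF_RESET_RETRY_CNT (5) attempts separated by
 * HCLGEVF_RESET_RETRY_WAIT_MS (500 ms) sleeps add up to at most about
 * 5 * 500 ms = 2.5 s of retry waiting, on top of whatever time each
 * hclgevf_reset_prepare() attempt itself takes.
 */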
1760 
hclgevf_reset_done(struct hnae3_ae_dev * ae_dev)1761 static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev)
1762 {
1763 	struct hclgevf_dev *hdev = ae_dev->priv;
1764 	int ret;
1765 
1766 	hclgevf_enable_vector(&hdev->misc_vector, true);
1767 
1768 	ret = hclgevf_reset_rebuild(hdev);
1769 	if (ret)
1770 		dev_warn(&hdev->pdev->dev, "failed to rebuild, ret=%d\n",
1771 			 ret);
1772 
1773 	hdev->reset_type = HNAE3_NONE_RESET;
1774 	if (test_and_clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
1775 		up(&hdev->reset_sem);
1776 }
1777 
hclgevf_get_fw_version(struct hnae3_handle * handle)1778 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
1779 {
1780 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1781 
1782 	return hdev->fw_version;
1783 }
1784 
hclgevf_get_misc_vector(struct hclgevf_dev * hdev)1785 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
1786 {
1787 	struct hclgevf_misc_vector *vector = &hdev->misc_vector;
1788 
1789 	vector->vector_irq = pci_irq_vector(hdev->pdev,
1790 					    HCLGEVF_MISC_VECTOR_NUM);
1791 	vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
1792 	/* vector status always valid for Vector 0 */
1793 	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
1794 	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
1795 
1796 	hdev->num_msi_left -= 1;
1797 	hdev->num_msi_used += 1;
1798 }
1799 
hclgevf_reset_task_schedule(struct hclgevf_dev * hdev)1800 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
1801 {
1802 	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
1803 	    test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&
1804 	    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
1805 			      &hdev->state))
1806 		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
1807 }
1808 
hclgevf_mbx_task_schedule(struct hclgevf_dev * hdev)1809 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
1810 {
1811 	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
1812 	    !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
1813 			      &hdev->state))
1814 		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
1815 }
1816 
hclgevf_task_schedule(struct hclgevf_dev * hdev,unsigned long delay)1817 static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
1818 				  unsigned long delay)
1819 {
1820 	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
1821 	    !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
1822 		mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
1823 }
1824 
hclgevf_reset_service_task(struct hclgevf_dev * hdev)1825 static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
1826 {
1827 #define	HCLGEVF_MAX_RESET_ATTEMPTS_CNT	3
1828 
1829 	if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
1830 		return;
1831 
1832 	down(&hdev->reset_sem);
1833 	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1834 
1835 	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
1836 			       &hdev->reset_state)) {
1837 		/* PF has intimated that it is about to reset the hardware.
1838 		 * We now have to poll & check if hardware has actually
1839 		 * completed the reset sequence. On hardware reset completion,
1840 		 * VF needs to reset the client and ae device.
1841 		 */
1842 		hdev->reset_attempts = 0;
1843 
1844 		hdev->last_reset_time = jiffies;
1845 		hdev->reset_type =
1846 			hclgevf_get_reset_level(&hdev->reset_pending);
1847 		if (hdev->reset_type != HNAE3_NONE_RESET)
1848 			hclgevf_reset(hdev);
1849 	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
1850 				      &hdev->reset_state)) {
1851 		/* we could be here when either of the below happens:
1852 		 * 1. reset was initiated due to a watchdog timeout caused by
1853 		 *    a. the IMP was reset earlier and our TX got choked,
1854 		 *       which made the watchdog react and induce a VF reset.
1855 		 *       This also means our cmdq would be unreliable.
1856 		 *    b. a TX problem in some other lower layer (for example,
1857 		 *       the link layer not functioning properly).
1858 		 * 2. VF reset might have been initiated due to some config
1859 		 *    change.
1860 		 *
1861 		 * NOTE: There's no way to detect the above cases other than
1862 		 * reacting to the PF's response to this reset request. The PF
1863 		 * will ack cases 1b and 2, but we will get no indication of 1a
1864 		 * from the PF as the cmdq would be in an unreliable state, i.e.
1865 		 * mailbox communication between PF and VF would be broken.
1866 		 *
1867 		 * If we never get into the pending state, it means either:
1868 		 * 1. the PF is not receiving our request, possibly due to an
1869 		 *    IMP reset
1870 		 * 2. the PF is in a bad state
1871 		 * We cannot do much for case 2, but as a first check we can try
1872 		 * resetting our PCIe + stack to see if that helps.
1873 		 */
1874 		if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
1875 			/* prepare for full reset of stack + pcie interface */
1876 			hclgevf_set_reset_pending(hdev, HNAE3_VF_FULL_RESET);
1877 
1878 			/* "defer" the reset by scheduling the task again */
1879 			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1880 		} else {
1881 			hdev->reset_attempts++;
1882 
1883 			hclgevf_set_reset_pending(hdev, hdev->reset_level);
1884 			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1885 		}
1886 		hclgevf_reset_task_schedule(hdev);
1887 	}
1888 
1889 	hdev->reset_type = HNAE3_NONE_RESET;
1890 	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1891 	up(&hdev->reset_sem);
1892 }
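
/* Worked example of the escalation above: reset_attempts starts at 0 and
 * is incremented on each requested reset, so with
 * HCLGEVF_MAX_RESET_ATTEMPTS_CNT == 3 the first four requests retry at
 * hdev->reset_level; on the fifth, reset_attempts (now 4) exceeds the
 * limit and the request is escalated to HNAE3_VF_FULL_RESET through the
 * pending path.
 */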
1893 
hclgevf_mailbox_service_task(struct hclgevf_dev * hdev)1894 static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
1895 {
1896 	if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
1897 		return;
1898 
1899 	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
1900 		return;
1901 
1902 	hclgevf_mbx_async_handler(hdev);
1903 
1904 	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
1905 }
1906 
hclgevf_keep_alive(struct hclgevf_dev * hdev)1907 static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
1908 {
1909 	struct hclge_vf_to_pf_msg send_msg;
1910 	int ret;
1911 
1912 	if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
1913 		return;
1914 
1915 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
1916 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
1917 	if (ret)
1918 		dev_err(&hdev->pdev->dev,
1919 			"VF failed to send keep alive cmd, ret = %d\n", ret);
1920 }
1921 
hclgevf_periodic_service_task(struct hclgevf_dev * hdev)1922 static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
1923 {
1924 	unsigned long delta = round_jiffies_relative(HZ);
1925 	struct hnae3_handle *handle = &hdev->nic;
1926 
1927 	if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state) ||
1928 	    test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
1929 		return;
1930 
1931 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
1932 		delta = jiffies - hdev->last_serv_processed;
1933 
1934 		if (delta < round_jiffies_relative(HZ)) {
1935 			delta = round_jiffies_relative(HZ) - delta;
1936 			goto out;
1937 		}
1938 	}
1939 
1940 	hdev->serv_processed_cnt++;
1941 	if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
1942 		hclgevf_keep_alive(hdev);
1943 
1944 	if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
1945 		hdev->last_serv_processed = jiffies;
1946 		goto out;
1947 	}
1948 
1949 	if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
1950 		hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
1951 
1952 	/* VF does not need to request link status when this bit is set, because
1953 	 * the PF will push its link status to the VFs whenever it changes.
1954 	 */
1955 	if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state))
1956 		hclgevf_request_link_info(hdev);
1957 
1958 	hclgevf_update_link_mode(hdev);
1959 
1960 	hclgevf_sync_vlan_filter(hdev);
1961 
1962 	hclgevf_sync_mac_table(hdev);
1963 
1964 	hclgevf_sync_promisc_mode(hdev);
1965 
1966 	hdev->last_serv_processed = jiffies;
1967 
1968 out:
1969 	hclgevf_task_schedule(hdev, delta);
1970 }
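
/* Timing sketch for the throttling above: the periodic task normally
 * re-arms itself every round_jiffies_relative(HZ), i.e. roughly once per
 * second. If the shared work item wakes it early (say 300 ms after the
 * last run, e.g. via the reset or mailbox path), the delta computation
 * re-arms it for the remaining ~700 ms instead of re-running the periodic
 * work. Keep-alive and TQP statistics then run once every
 * HCLGEVF_KEEP_ALIVE_TASK_INTERVAL and HCLGEVF_STATS_TIMER_INTERVAL
 * invocations of this function, respectively.
 */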
1971 
hclgevf_service_task(struct work_struct * work)1972 static void hclgevf_service_task(struct work_struct *work)
1973 {
1974 	struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
1975 						service_task.work);
1976 
1977 	hclgevf_reset_service_task(hdev);
1978 	hclgevf_mailbox_service_task(hdev);
1979 	hclgevf_periodic_service_task(hdev);
1980 
1981 	/* Handle reset and mbx again in case periodical task delays the
1982 	 * handling by calling hclgevf_task_schedule() in
1983 	 * hclgevf_periodic_service_task()
1984 	 */
1985 	hclgevf_reset_service_task(hdev);
1986 	hclgevf_mailbox_service_task(hdev);
1987 }
1988 
hclgevf_clear_event_cause(struct hclgevf_dev * hdev,u32 regclr)1989 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
1990 {
1991 	hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr);
1992 }
1993 
hclgevf_check_evt_cause(struct hclgevf_dev * hdev,u32 * clearval)1994 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
1995 						      u32 *clearval)
1996 {
1997 	u32 val, cmdq_stat_reg, rst_ing_reg;
1998 
1999 	/* fetch the events from their corresponding regs */
2000 	cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
2001 					 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG);
2002 	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
2003 		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
2004 		dev_info(&hdev->pdev->dev,
2005 			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
2006 		hclgevf_set_reset_pending(hdev, HNAE3_VF_RESET);
2007 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
2008 		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
2009 		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
2010 		hdev->rst_stats.vf_rst_cnt++;
2011 		/* set up the VF hardware reset status; the PF will clear
2012 		 * it once the PF itself has finished initializing.
2013 		 */
2014 		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
2015 		hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
2016 				  val | HCLGEVF_VF_RST_ING_BIT);
2017 		return HCLGEVF_VECTOR0_EVENT_RST;
2018 	}
2019 
2020 	/* check for vector0 mailbox(=CMDQ RX) event source */
2021 	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
2022 		/* for revision 0x21, writing 0 to a bit of the clear
2023 		 * register clears that interrupt, while writing 1 keeps
2024 		 * the old value.
2025 		 * for revision 0x20, the clear register is a read & write
2026 		 * register, so we should write 0 only to the bit we are
2027 		 * handling, and keep the other bits as in cmdq_stat_reg.
2028 		 */
2029 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
2030 			*clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
2031 		else
2032 			*clearval = cmdq_stat_reg &
2033 				    ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
2034 
2035 		return HCLGEVF_VECTOR0_EVENT_MBX;
2036 	}
2037 
2038 	/* print other vector0 event source */
2039 	dev_info(&hdev->pdev->dev,
2040 		 "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
2041 		 cmdq_stat_reg);
2042 
2043 	return HCLGEVF_VECTOR0_EVENT_OTHER;
2044 }
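
/* Worked example of the clear-value computation above, assuming (for
 * illustration) HCLGEVF_VECTOR0_RX_CMDQ_INT_B == 1 and
 * cmdq_stat_reg == 0x6, i.e. the RX CMDQ bit plus one unrelated bit set:
 *   - V2 and later: *clearval = ~BIT(1) = 0xfffffffd, so 1 is written to
 *     every bit whose old value should be kept and 0 to the handled bit.
 *   - revision 0x20: *clearval = 0x6 & ~BIT(1) = 0x4, so the other
 *     pending bits are written back unchanged and the handled bit is
 *     written as 0.
 */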
2045 
hclgevf_reset_timer(struct timer_list * t)2046 static void hclgevf_reset_timer(struct timer_list *t)
2047 {
2048 	struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer);
2049 
2050 	hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST);
2051 	hclgevf_reset_task_schedule(hdev);
2052 }
2053 
hclgevf_misc_irq_handle(int irq,void * data)2054 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
2055 {
2056 #define HCLGEVF_RESET_DELAY	5
2057 
2058 	enum hclgevf_evt_cause event_cause;
2059 	struct hclgevf_dev *hdev = data;
2060 	u32 clearval;
2061 
2062 	hclgevf_enable_vector(&hdev->misc_vector, false);
2063 	event_cause = hclgevf_check_evt_cause(hdev, &clearval);
2064 	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
2065 		hclgevf_clear_event_cause(hdev, clearval);
2066 
2067 	switch (event_cause) {
2068 	case HCLGEVF_VECTOR0_EVENT_RST:
2069 		mod_timer(&hdev->reset_timer,
2070 			  jiffies + msecs_to_jiffies(HCLGEVF_RESET_DELAY));
2071 		break;
2072 	case HCLGEVF_VECTOR0_EVENT_MBX:
2073 		hclgevf_mbx_handler(hdev);
2074 		break;
2075 	default:
2076 		break;
2077 	}
2078 
2079 	hclgevf_enable_vector(&hdev->misc_vector, true);
2080 
2081 	return IRQ_HANDLED;
2082 }
2083 
hclgevf_configure(struct hclgevf_dev * hdev)2084 static int hclgevf_configure(struct hclgevf_dev *hdev)
2085 {
2086 	int ret;
2087 
2088 	hdev->gro_en = true;
2089 
2090 	ret = hclgevf_get_basic_info(hdev);
2091 	if (ret)
2092 		return ret;
2093 
2094 	/* get current port based vlan state from PF */
2095 	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
2096 	if (ret)
2097 		return ret;
2098 
2099 	/* get queue configuration from PF */
2100 	ret = hclgevf_get_queue_info(hdev);
2101 	if (ret)
2102 		return ret;
2103 
2104 	/* get queue depth info from PF */
2105 	ret = hclgevf_get_queue_depth(hdev);
2106 	if (ret)
2107 		return ret;
2108 
2109 	return hclgevf_get_pf_media_type(hdev);
2110 }
2111 
hclgevf_alloc_hdev(struct hnae3_ae_dev * ae_dev)2112 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
2113 {
2114 	struct pci_dev *pdev = ae_dev->pdev;
2115 	struct hclgevf_dev *hdev;
2116 
2117 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
2118 	if (!hdev)
2119 		return -ENOMEM;
2120 
2121 	hdev->pdev = pdev;
2122 	hdev->ae_dev = ae_dev;
2123 	ae_dev->priv = hdev;
2124 
2125 	return 0;
2126 }
2127 
hclgevf_init_roce_base_info(struct hclgevf_dev * hdev)2128 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
2129 {
2130 	struct hnae3_handle *roce = &hdev->roce;
2131 	struct hnae3_handle *nic = &hdev->nic;
2132 
2133 	roce->rinfo.num_vectors = hdev->num_roce_msix;
2134 
2135 	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
2136 	    hdev->num_msi_left == 0)
2137 		return -EINVAL;
2138 
2139 	roce->rinfo.base_vector = hdev->roce_base_msix_offset;
2140 
2141 	roce->rinfo.netdev = nic->kinfo.netdev;
2142 	roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
2143 	roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
2144 
2145 	roce->pdev = nic->pdev;
2146 	roce->ae_algo = nic->ae_algo;
2147 	bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits,
2148 		    MAX_NUMNODES);
2149 	return 0;
2150 }
2151 
hclgevf_config_gro(struct hclgevf_dev * hdev)2152 static int hclgevf_config_gro(struct hclgevf_dev *hdev)
2153 {
2154 	struct hclgevf_cfg_gro_status_cmd *req;
2155 	struct hclge_desc desc;
2156 	int ret;
2157 
2158 	if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
2159 		return 0;
2160 
2161 	hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG,
2162 				     false);
2163 	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
2164 
2165 	req->gro_en = hdev->gro_en ? 1 : 0;
2166 
2167 	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
2168 	if (ret)
2169 		dev_err(&hdev->pdev->dev,
2170 			"VF GRO hardware config cmd failed, ret = %d.\n", ret);
2171 
2172 	return ret;
2173 }
2174 
hclgevf_rss_init_hw(struct hclgevf_dev * hdev)2175 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
2176 {
2177 	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
2178 	u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
2179 	u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
2180 	u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
2181 	int ret;
2182 
2183 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
2184 		ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw,
2185 						  rss_cfg->rss_algo,
2186 						  rss_cfg->rss_hash_key);
2187 		if (ret)
2188 			return ret;
2189 
2190 		ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, rss_cfg);
2191 		if (ret)
2192 			return ret;
2193 	}
2194 
2195 	ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
2196 					     rss_cfg->rss_indirection_tbl);
2197 	if (ret)
2198 		return ret;
2199 
2200 	hclge_comm_get_rss_tc_info(rss_cfg->rss_size, hdev->hw_tc_map,
2201 				   tc_offset, tc_valid, tc_size);
2202 
2203 	return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
2204 					  tc_valid, tc_size);
2205 }
2206 
hclgevf_init_vlan_config(struct hclgevf_dev * hdev)2207 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
2208 {
2209 	struct hnae3_handle *nic = &hdev->nic;
2210 	int ret;
2211 
2212 	ret = hclgevf_en_hw_strip_rxvtag(nic, true);
2213 	if (ret) {
2214 		dev_err(&hdev->pdev->dev,
2215 			"failed to enable rx vlan offload, ret = %d\n", ret);
2216 		return ret;
2217 	}
2218 
2219 	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
2220 				       false);
2221 }
2222 
hclgevf_flush_link_update(struct hclgevf_dev * hdev)2223 static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
2224 {
2225 #define HCLGEVF_FLUSH_LINK_TIMEOUT	100000
2226 
2227 	unsigned long last = hdev->serv_processed_cnt;
2228 	int i = 0;
2229 
2230 	while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
2231 	       i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
2232 	       last == hdev->serv_processed_cnt)
2233 		usleep_range(1, 1);
2234 }
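
/* A rough bound on the busy-wait above: HCLGEVF_FLUSH_LINK_TIMEOUT
 * (100000) iterations of usleep_range(1, 1) give a worst case on the
 * order of 100 ms (plus scheduling overhead) before the flush gives up;
 * the loop exits early as soon as HCLGEVF_STATE_LINK_UPDATING clears or
 * the service task makes progress.
 */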
2235 
hclgevf_set_timer_task(struct hnae3_handle * handle,bool enable)2236 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
2237 {
2238 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2239 
2240 	if (enable) {
2241 		hclgevf_task_schedule(hdev, 0);
2242 	} else {
2243 		set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2244 
2245 		smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */
2246 		hclgevf_flush_link_update(hdev);
2247 	}
2248 }
2249 
hclgevf_ae_start(struct hnae3_handle * handle)2250 static int hclgevf_ae_start(struct hnae3_handle *handle)
2251 {
2252 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2253 
2254 	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2255 	clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state);
2256 
2257 	hclge_comm_reset_tqp_stats(handle);
2258 
2259 	hclgevf_request_link_info(hdev);
2260 
2261 	hclgevf_update_link_mode(hdev);
2262 
2263 	return 0;
2264 }
2265 
hclgevf_ae_stop(struct hnae3_handle * handle)2266 static void hclgevf_ae_stop(struct hnae3_handle *handle)
2267 {
2268 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2269 
2270 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2271 
2272 	if (hdev->reset_type != HNAE3_VF_RESET)
2273 		hclgevf_reset_tqp(handle);
2274 
2275 	hclge_comm_reset_tqp_stats(handle);
2276 	hclgevf_update_link_status(hdev, 0);
2277 }
2278 
hclgevf_set_alive(struct hnae3_handle * handle,bool alive)2279 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
2280 {
2281 #define HCLGEVF_STATE_ALIVE	1
2282 #define HCLGEVF_STATE_NOT_ALIVE	0
2283 
2284 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2285 	struct hclge_vf_to_pf_msg send_msg;
2286 
2287 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
2288 	send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE :
2289 				HCLGEVF_STATE_NOT_ALIVE;
2290 	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
2291 }
2292 
hclgevf_client_start(struct hnae3_handle * handle)2293 static int hclgevf_client_start(struct hnae3_handle *handle)
2294 {
2295 	return hclgevf_set_alive(handle, true);
2296 }
2297 
hclgevf_client_stop(struct hnae3_handle * handle)2298 static void hclgevf_client_stop(struct hnae3_handle *handle)
2299 {
2300 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2301 	int ret;
2302 
2303 	ret = hclgevf_set_alive(handle, false);
2304 	if (ret)
2305 		dev_warn(&hdev->pdev->dev,
2306 			 "%s failed %d\n", __func__, ret);
2307 }
2308 
hclgevf_state_init(struct hclgevf_dev * hdev)2309 static void hclgevf_state_init(struct hclgevf_dev *hdev)
2310 {
2311 	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
2312 	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
2313 	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
2314 
2315 	INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
2316 	/* timer needs to be initialized before misc irq */
2317 	timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
2318 
2319 	mutex_init(&hdev->mbx_resp.mbx_mutex);
2320 	sema_init(&hdev->reset_sem, 1);
2321 
2322 	spin_lock_init(&hdev->mac_table.mac_list_lock);
2323 	INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
2324 	INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);
2325 
2326 	/* bring the device down */
2327 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2328 }
2329 
hclgevf_state_uninit(struct hclgevf_dev * hdev)2330 static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
2331 {
2332 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2333 	set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);
2334 
2335 	if (hdev->service_task.work.func)
2336 		cancel_delayed_work_sync(&hdev->service_task);
2337 
2338 	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
2339 }
2340 
hclgevf_init_msi(struct hclgevf_dev * hdev)2341 static int hclgevf_init_msi(struct hclgevf_dev *hdev)
2342 {
2343 	struct pci_dev *pdev = hdev->pdev;
2344 	int vectors;
2345 	int i;
2346 
2347 	if (hnae3_dev_roce_supported(hdev))
2348 		vectors = pci_alloc_irq_vectors(pdev,
2349 						hdev->roce_base_msix_offset + 1,
2350 						hdev->num_msi,
2351 						PCI_IRQ_MSIX);
2352 	else
2353 		vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2354 						hdev->num_msi,
2355 						PCI_IRQ_MSI | PCI_IRQ_MSIX);
2356 
2357 	if (vectors < 0) {
2358 		dev_err(&pdev->dev,
2359 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2360 			vectors);
2361 		return vectors;
2362 	}
2363 	if (vectors < hdev->num_msi)
2364 		dev_warn(&hdev->pdev->dev,
2365 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2366 			 hdev->num_msi, vectors);
2367 
2368 	hdev->num_msi = vectors;
2369 	hdev->num_msi_left = vectors;
2370 
2371 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2372 					   sizeof(u16), GFP_KERNEL);
2373 	if (!hdev->vector_status) {
2374 		pci_free_irq_vectors(pdev);
2375 		return -ENOMEM;
2376 	}
2377 
2378 	for (i = 0; i < hdev->num_msi; i++)
2379 		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
2380 
2381 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2382 					sizeof(int), GFP_KERNEL);
2383 	if (!hdev->vector_irq) {
2384 		devm_kfree(&pdev->dev, hdev->vector_status);
2385 		pci_free_irq_vectors(pdev);
2386 		return -ENOMEM;
2387 	}
2388 
2389 	return 0;
2390 }
2391 
hclgevf_uninit_msi(struct hclgevf_dev * hdev)2392 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
2393 {
2394 	struct pci_dev *pdev = hdev->pdev;
2395 
2396 	devm_kfree(&pdev->dev, hdev->vector_status);
2397 	devm_kfree(&pdev->dev, hdev->vector_irq);
2398 	pci_free_irq_vectors(pdev);
2399 }
2400 
hclgevf_misc_irq_init(struct hclgevf_dev * hdev)2401 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
2402 {
2403 	int ret;
2404 
2405 	hclgevf_get_misc_vector(hdev);
2406 
2407 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
2408 		 HCLGEVF_NAME, pci_name(hdev->pdev));
2409 	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
2410 			  0, hdev->misc_vector.name, hdev);
2411 	if (ret) {
2412 		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
2413 			hdev->misc_vector.vector_irq);
2414 		return ret;
2415 	}
2416 
2417 	hclgevf_clear_event_cause(hdev, 0);
2418 
2419 	/* enable misc. vector(vector 0) */
2420 	hclgevf_enable_vector(&hdev->misc_vector, true);
2421 
2422 	return ret;
2423 }
2424 
hclgevf_misc_irq_uninit(struct hclgevf_dev * hdev)2425 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
2426 {
2427 	/* disable misc vector(vector 0) */
2428 	hclgevf_enable_vector(&hdev->misc_vector, false);
2429 	synchronize_irq(hdev->misc_vector.vector_irq);
2430 	free_irq(hdev->misc_vector.vector_irq, hdev);
2431 	hclgevf_free_vector(hdev, 0);
2432 }
2433 
hclgevf_info_show(struct hclgevf_dev * hdev)2434 static void hclgevf_info_show(struct hclgevf_dev *hdev)
2435 {
2436 	struct device *dev = &hdev->pdev->dev;
2437 
2438 	dev_info(dev, "VF info begin:\n");
2439 
2440 	dev_info(dev, "Number of task queue pairs: %u\n", hdev->num_tqps);
2441 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
2442 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
2443 	dev_info(dev, "Number of vports: %u\n", hdev->num_alloc_vport);
2444 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
2445 	dev_info(dev, "PF media type of this VF: %u\n",
2446 		 hdev->hw.mac.media_type);
2447 
2448 	dev_info(dev, "VF info end.\n");
2449 }
2450 
hclgevf_init_nic_client_instance(struct hnae3_ae_dev * ae_dev,struct hnae3_client * client)2451 static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
2452 					    struct hnae3_client *client)
2453 {
2454 	struct hclgevf_dev *hdev = ae_dev->priv;
2455 	int rst_cnt = hdev->rst_stats.rst_cnt;
2456 	int ret;
2457 
2458 	ret = client->ops->init_instance(&hdev->nic);
2459 	if (ret)
2460 		return ret;
2461 
2462 	set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
2463 	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
2464 	    rst_cnt != hdev->rst_stats.rst_cnt) {
2465 		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
2466 
2467 		client->ops->uninit_instance(&hdev->nic, 0);
2468 		return -EBUSY;
2469 	}
2470 
2471 	hnae3_set_client_init_flag(client, ae_dev, 1);
2472 
2473 	if (netif_msg_drv(&hdev->nic))
2474 		hclgevf_info_show(hdev);
2475 
2476 	return 0;
2477 }
2478 
hclgevf_init_roce_client_instance(struct hnae3_ae_dev * ae_dev,struct hnae3_client * client)2479 static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
2480 					     struct hnae3_client *client)
2481 {
2482 	struct hclgevf_dev *hdev = ae_dev->priv;
2483 	int ret;
2484 
2485 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
2486 	    !hdev->nic_client)
2487 		return 0;
2488 
2489 	ret = hclgevf_init_roce_base_info(hdev);
2490 	if (ret)
2491 		return ret;
2492 
2493 	ret = client->ops->init_instance(&hdev->roce);
2494 	if (ret)
2495 		return ret;
2496 
2497 	set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
2498 	hnae3_set_client_init_flag(client, ae_dev, 1);
2499 
2500 	return 0;
2501 }
2502 
hclgevf_init_client_instance(struct hnae3_client * client,struct hnae3_ae_dev * ae_dev)2503 static int hclgevf_init_client_instance(struct hnae3_client *client,
2504 					struct hnae3_ae_dev *ae_dev)
2505 {
2506 	struct hclgevf_dev *hdev = ae_dev->priv;
2507 	int ret;
2508 
2509 	switch (client->type) {
2510 	case HNAE3_CLIENT_KNIC:
2511 		hdev->nic_client = client;
2512 		hdev->nic.client = client;
2513 
2514 		ret = hclgevf_init_nic_client_instance(ae_dev, client);
2515 		if (ret)
2516 			goto clear_nic;
2517 
2518 		ret = hclgevf_init_roce_client_instance(ae_dev,
2519 							hdev->roce_client);
2520 		if (ret)
2521 			goto clear_roce;
2522 
2523 		break;
2524 	case HNAE3_CLIENT_ROCE:
2525 		if (hnae3_dev_roce_supported(hdev)) {
2526 			hdev->roce_client = client;
2527 			hdev->roce.client = client;
2528 		}
2529 
2530 		ret = hclgevf_init_roce_client_instance(ae_dev, client);
2531 		if (ret)
2532 			goto clear_roce;
2533 
2534 		break;
2535 	default:
2536 		return -EINVAL;
2537 	}
2538 
2539 	return 0;
2540 
2541 clear_nic:
2542 	hdev->nic_client = NULL;
2543 	hdev->nic.client = NULL;
2544 	return ret;
2545 clear_roce:
2546 	hdev->roce_client = NULL;
2547 	hdev->roce.client = NULL;
2548 	return ret;
2549 }
2550 
hclgevf_uninit_client_instance(struct hnae3_client * client,struct hnae3_ae_dev * ae_dev)2551 static void hclgevf_uninit_client_instance(struct hnae3_client *client,
2552 					   struct hnae3_ae_dev *ae_dev)
2553 {
2554 	struct hclgevf_dev *hdev = ae_dev->priv;
2555 
2556 	/* un-init roce, if it exists */
2557 	if (hdev->roce_client) {
2558 		while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
2559 			msleep(HCLGEVF_WAIT_RESET_DONE);
2560 		clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
2561 
2562 		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
2563 		hdev->roce_client = NULL;
2564 		hdev->roce.client = NULL;
2565 	}
2566 
2567 	/* un-init nic/unic, if this was not called by roce client */
2568 	if (client->ops->uninit_instance && hdev->nic_client &&
2569 	    client->type != HNAE3_CLIENT_ROCE) {
2570 		while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
2571 			msleep(HCLGEVF_WAIT_RESET_DONE);
2572 		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
2573 
2574 		client->ops->uninit_instance(&hdev->nic, 0);
2575 		hdev->nic_client = NULL;
2576 		hdev->nic.client = NULL;
2577 	}
2578 }
2579 
hclgevf_dev_mem_map(struct hclgevf_dev * hdev)2580 static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
2581 {
2582 	struct pci_dev *pdev = hdev->pdev;
2583 	struct hclgevf_hw *hw = &hdev->hw;
2584 
2585 	/* if the device does not have device memory, return directly */
2586 	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR)))
2587 		return 0;
2588 
2589 	hw->hw.mem_base =
2590 		devm_ioremap_wc(&pdev->dev,
2591 				pci_resource_start(pdev, HCLGEVF_MEM_BAR),
2592 				pci_resource_len(pdev, HCLGEVF_MEM_BAR));
2593 	if (!hw->hw.mem_base) {
2594 		dev_err(&pdev->dev, "failed to map device memory\n");
2595 		return -EFAULT;
2596 	}
2597 
2598 	return 0;
2599 }
2600 
hclgevf_pci_init(struct hclgevf_dev * hdev)2601 static int hclgevf_pci_init(struct hclgevf_dev *hdev)
2602 {
2603 	struct pci_dev *pdev = hdev->pdev;
2604 	struct hclgevf_hw *hw;
2605 	int ret;
2606 
2607 	ret = pci_enable_device(pdev);
2608 	if (ret) {
2609 		dev_err(&pdev->dev, "failed to enable PCI device\n");
2610 		return ret;
2611 	}
2612 
2613 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2614 	if (ret) {
2615 		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
2616 		goto err_disable_device;
2617 	}
2618 
2619 	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
2620 	if (ret) {
2621 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
2622 		goto err_disable_device;
2623 	}
2624 
2625 	pci_set_master(pdev);
2626 	hw = &hdev->hw;
2627 	hw->hw.io_base = pci_iomap(pdev, 2, 0);
2628 	if (!hw->hw.io_base) {
2629 		dev_err(&pdev->dev, "can't map configuration register space\n");
2630 		ret = -ENOMEM;
2631 		goto err_release_regions;
2632 	}
2633 
2634 	ret = hclgevf_dev_mem_map(hdev);
2635 	if (ret)
2636 		goto err_unmap_io_base;
2637 
2638 	return 0;
2639 
2640 err_unmap_io_base:
2641 	pci_iounmap(pdev, hdev->hw.hw.io_base);
2642 err_release_regions:
2643 	pci_release_regions(pdev);
2644 err_disable_device:
2645 	pci_disable_device(pdev);
2646 
2647 	return ret;
2648 }
2649 
hclgevf_pci_uninit(struct hclgevf_dev * hdev)2650 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
2651 {
2652 	struct pci_dev *pdev = hdev->pdev;
2653 
2654 	if (hdev->hw.hw.mem_base)
2655 		devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
2656 
2657 	pci_iounmap(pdev, hdev->hw.hw.io_base);
2658 	pci_release_regions(pdev);
2659 	pci_disable_device(pdev);
2660 }
2661 
hclgevf_query_vf_resource(struct hclgevf_dev * hdev)2662 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
2663 {
2664 	struct hclgevf_query_res_cmd *req;
2665 	struct hclge_desc desc;
2666 	int ret;
2667 
2668 	hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RSRC, true);
2669 	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
2670 	if (ret) {
2671 		dev_err(&hdev->pdev->dev,
2672 			"query vf resource failed, ret = %d.\n", ret);
2673 		return ret;
2674 	}
2675 
2676 	req = (struct hclgevf_query_res_cmd *)desc.data;
2677 
2678 	if (hnae3_dev_roce_supported(hdev)) {
2679 		hdev->roce_base_msix_offset =
2680 		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
2681 				HCLGEVF_MSIX_OFT_ROCEE_M,
2682 				HCLGEVF_MSIX_OFT_ROCEE_S);
2683 		hdev->num_roce_msix =
2684 		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
2685 				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
2686 
2687 		/* the NIC's MSI-X vector count always equals the RoCE's. */
2688 		hdev->num_nic_msix = hdev->num_roce_msix;
2689 
2690 		/* VF should have NIC vectors and RoCE vectors; NIC vectors
2691 		 * are laid out before RoCE vectors. The offset is fixed to 64.
2692 		 */
2693 		hdev->num_msi = hdev->num_roce_msix +
2694 				hdev->roce_base_msix_offset;
2695 	} else {
2696 		hdev->num_msi =
2697 		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
2698 				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
2699 
2700 		hdev->num_nic_msix = hdev->num_msi;
2701 	}
2702 
2703 	if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
2704 		dev_err(&hdev->pdev->dev,
2705 			"Only %u MSI resources, not enough for VF (min: 2).\n",
2706 			hdev->num_nic_msix);
2707 		return -EINVAL;
2708 	}
2709 
2710 	return 0;
2711 }
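
/* Example vector layout implied by the logic above (counts are
 * illustrative; the fixed offset of 64 is stated in the comment in this
 * function): with roce_base_msix_offset == 64 and num_roce_msix == 8,
 * the VF requests num_msi == 72 vectors in total, vectors 0..63 for the
 * NIC (vector 0 being the misc vector) and 64..71 for RoCE.
 */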
2712 
hclgevf_set_default_dev_specs(struct hclgevf_dev * hdev)2713 static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
2714 {
2715 #define HCLGEVF_MAX_NON_TSO_BD_NUM			8U
2716 
2717 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2718 
2719 	ae_dev->dev_specs.max_non_tso_bd_num =
2720 					HCLGEVF_MAX_NON_TSO_BD_NUM;
2721 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
2722 	ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
2723 	ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
2724 	ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME;
2725 }
2726 
hclgevf_parse_dev_specs(struct hclgevf_dev * hdev,struct hclge_desc * desc)2727 static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
2728 				    struct hclge_desc *desc)
2729 {
2730 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2731 	struct hclgevf_dev_specs_0_cmd *req0;
2732 	struct hclgevf_dev_specs_1_cmd *req1;
2733 
2734 	req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
2735 	req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data;
2736 
2737 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
2738 	ae_dev->dev_specs.rss_ind_tbl_size =
2739 					le16_to_cpu(req0->rss_ind_tbl_size);
2740 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
2741 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
2742 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
2743 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
2744 }
2745 
hclgevf_check_dev_specs(struct hclgevf_dev * hdev)2746 static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
2747 {
2748 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
2749 
2750 	if (!dev_specs->max_non_tso_bd_num)
2751 		dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM;
2752 	if (!dev_specs->rss_ind_tbl_size)
2753 		dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
2754 	if (!dev_specs->rss_key_size)
2755 		dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
2756 	if (!dev_specs->max_int_gl)
2757 		dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
2758 	if (!dev_specs->max_frm_size)
2759 		dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME;
2760 }
2761 
hclgevf_query_dev_specs(struct hclgevf_dev * hdev)2762 static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
2763 {
2764 	struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
2765 	int ret;
2766 	int i;
2767 
2768 	/* set default specifications as devices lower than version V3 do not
2769 	 * support querying specifications from firmware.
2770 	 */
2771 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
2772 		hclgevf_set_default_dev_specs(hdev);
2773 		return 0;
2774 	}
2775 
2776 	for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
2777 		hclgevf_cmd_setup_basic_desc(&desc[i],
2778 					     HCLGE_OPC_QUERY_DEV_SPECS, true);
2779 		desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2780 	}
2781 	hclgevf_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
2782 
2783 	ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
2784 	if (ret)
2785 		return ret;
2786 
2787 	hclgevf_parse_dev_specs(hdev, desc);
2788 	hclgevf_check_dev_specs(hdev);
2789 
2790 	return 0;
2791 }
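
/* Sketch of the descriptor chaining used above: for a command spanning
 * several buffer descriptors, every descriptor except the last carries
 * HCLGE_COMM_CMD_FLAG_NEXT so the firmware treats the chain as a single
 * command and fills its replies back into the same descriptors, which
 * hclgevf_parse_dev_specs() then decodes.
 */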
2792 
hclgevf_pci_reset(struct hclgevf_dev * hdev)2793 static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
2794 {
2795 	struct pci_dev *pdev = hdev->pdev;
2796 	int ret = 0;
2797 
2798 	if ((hdev->reset_type == HNAE3_VF_FULL_RESET ||
2799 	     hdev->reset_type == HNAE3_FLR_RESET) &&
2800 	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
2801 		hclgevf_misc_irq_uninit(hdev);
2802 		hclgevf_uninit_msi(hdev);
2803 		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2804 	}
2805 
2806 	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
2807 		pci_set_master(pdev);
2808 		ret = hclgevf_init_msi(hdev);
2809 		if (ret) {
2810 			dev_err(&pdev->dev,
2811 				"failed(%d) to init MSI/MSI-X\n", ret);
2812 			return ret;
2813 		}
2814 
2815 		ret = hclgevf_misc_irq_init(hdev);
2816 		if (ret) {
2817 			hclgevf_uninit_msi(hdev);
2818 			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
2819 				ret);
2820 			return ret;
2821 		}
2822 
2823 		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2824 	}
2825 
2826 	return ret;
2827 }
2828 
hclgevf_clear_vport_list(struct hclgevf_dev * hdev)2829 static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
2830 {
2831 	struct hclge_vf_to_pf_msg send_msg;
2832 
2833 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL,
2834 			       HCLGE_MBX_VPORT_LIST_CLEAR);
2835 	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
2836 }
2837 
hclgevf_init_rxd_adv_layout(struct hclgevf_dev * hdev)2838 static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev)
2839 {
2840 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
2841 		hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1);
2842 }
2843 
hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev * hdev)2844 static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev)
2845 {
2846 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
2847 		hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0);
2848 }
2849 
hclgevf_reset_hdev(struct hclgevf_dev * hdev)2850 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
2851 {
2852 	struct pci_dev *pdev = hdev->pdev;
2853 	int ret;
2854 
2855 	ret = hclgevf_pci_reset(hdev);
2856 	if (ret) {
2857 		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
2858 		return ret;
2859 	}
2860 
2861 	hclgevf_arq_init(hdev);
2862 
2863 	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
2864 				  &hdev->fw_version, false,
2865 				  hdev->reset_pending);
2866 	if (ret) {
2867 		dev_err(&pdev->dev, "cmd failed %d\n", ret);
2868 		return ret;
2869 	}
2870 
2871 	ret = hclgevf_rss_init_hw(hdev);
2872 	if (ret) {
2873 		dev_err(&hdev->pdev->dev,
2874 			"failed(%d) to initialize RSS\n", ret);
2875 		return ret;
2876 	}
2877 
2878 	ret = hclgevf_config_gro(hdev);
2879 	if (ret)
2880 		return ret;
2881 
2882 	ret = hclgevf_init_vlan_config(hdev);
2883 	if (ret) {
2884 		dev_err(&hdev->pdev->dev,
2885 			"failed(%d) to initialize VLAN config\n", ret);
2886 		return ret;
2887 	}
2888 
2889 	/* get current port based vlan state from PF */
2890 	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
2891 	if (ret)
2892 		return ret;
2893 
2894 	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
2895 
2896 	hclgevf_init_rxd_adv_layout(hdev);
2897 
2898 	dev_info(&hdev->pdev->dev, "Reset done\n");
2899 
2900 	return 0;
2901 }
2902 
hclgevf_init_hdev(struct hclgevf_dev * hdev)2903 static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
2904 {
2905 	struct pci_dev *pdev = hdev->pdev;
2906 	int ret;
2907 
2908 	ret = hclgevf_pci_init(hdev);
2909 	if (ret)
2910 		return ret;
2911 
2912 	ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
2913 	if (ret)
2914 		goto err_cmd_queue_init;
2915 
2916 	hclgevf_arq_init(hdev);
2917 
2918 	hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclgevf_cmq_ops);
2919 	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
2920 				  &hdev->fw_version, false,
2921 				  hdev->reset_pending);
2922 	if (ret)
2923 		goto err_cmd_init;
2924 
2925 	/* Get vf resource */
2926 	ret = hclgevf_query_vf_resource(hdev);
2927 	if (ret)
2928 		goto err_cmd_init;
2929 
2930 	ret = hclgevf_query_dev_specs(hdev);
2931 	if (ret) {
2932 		dev_err(&pdev->dev,
2933 			"failed to query dev specifications, ret = %d\n", ret);
2934 		goto err_cmd_init;
2935 	}
2936 
2937 	ret = hclgevf_init_msi(hdev);
2938 	if (ret) {
2939 		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
2940 		goto err_cmd_init;
2941 	}
2942 
2943 	hclgevf_state_init(hdev);
2944 	hdev->reset_level = HNAE3_VF_FUNC_RESET;
2945 	hdev->reset_type = HNAE3_NONE_RESET;
2946 
2947 	ret = hclgevf_misc_irq_init(hdev);
2948 	if (ret)
2949 		goto err_misc_irq_init;
2950 
2951 	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2952 
2953 	ret = hclgevf_configure(hdev);
2954 	if (ret) {
2955 		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
2956 		goto err_config;
2957 	}
2958 
2959 	ret = hclgevf_alloc_tqps(hdev);
2960 	if (ret) {
2961 		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
2962 		goto err_config;
2963 	}
2964 
2965 	ret = hclgevf_set_handle_info(hdev);
2966 	if (ret)
2967 		goto err_config;
2968 
2969 	ret = hclgevf_config_gro(hdev);
2970 	if (ret)
2971 		goto err_config;
2972 
2973 	/* Initialize RSS for this VF */
2974 	ret = hclge_comm_rss_init_cfg(&hdev->nic, hdev->ae_dev,
2975 				      &hdev->rss_cfg);
2976 	if (ret) {
2977 		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
2978 		goto err_config;
2979 	}
2980 
2981 	ret = hclgevf_rss_init_hw(hdev);
2982 	if (ret) {
2983 		dev_err(&hdev->pdev->dev,
2984 			"failed(%d) to initialize RSS\n", ret);
2985 		goto err_config;
2986 	}
2987 
2988 	/* ensure the VF tbl list is empty before init */
2989 	ret = hclgevf_clear_vport_list(hdev);
2990 	if (ret) {
2991 		dev_err(&pdev->dev,
2992 			"failed to clear tbl list configuration, ret = %d.\n",
2993 			ret);
2994 		goto err_config;
2995 	}
2996 
2997 	ret = hclgevf_init_vlan_config(hdev);
2998 	if (ret) {
2999 		dev_err(&hdev->pdev->dev,
3000 			"failed(%d) to initialize VLAN config\n", ret);
3001 		goto err_config;
3002 	}
3003 
3004 	hclgevf_init_rxd_adv_layout(hdev);
3005 
3006 	ret = hclgevf_devlink_init(hdev);
3007 	if (ret)
3008 		goto err_config;
3009 
3010 	set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
3011 
3012 	hdev->last_reset_time = jiffies;
3013 	dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
3014 		 HCLGEVF_DRIVER_NAME);
3015 
3016 	hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
3017 
3018 	return 0;
3019 
3020 err_config:
3021 	hclgevf_misc_irq_uninit(hdev);
3022 err_misc_irq_init:
3023 	hclgevf_state_uninit(hdev);
3024 	hclgevf_uninit_msi(hdev);
3025 err_cmd_init:
3026 	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
3027 err_cmd_queue_init:
3028 	hclgevf_pci_uninit(hdev);
3029 	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
3030 	return ret;
3031 }
3032 
hclgevf_uninit_hdev(struct hclgevf_dev * hdev)3033 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
3034 {
3035 	struct hclge_vf_to_pf_msg send_msg;
3036 
3037 	hclgevf_state_uninit(hdev);
3038 	hclgevf_uninit_rxd_adv_layout(hdev);
3039 
3040 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
3041 	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
3042 
3043 	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
3044 		hclgevf_misc_irq_uninit(hdev);
3045 		hclgevf_uninit_msi(hdev);
3046 	}
3047 
3048 	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
3049 	hclgevf_devlink_uninit(hdev);
3050 	hclgevf_pci_uninit(hdev);
3051 	hclgevf_uninit_mac_list(hdev);
3052 }
3053 
hclgevf_init_ae_dev(struct hnae3_ae_dev * ae_dev)3054 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
3055 {
3056 	struct pci_dev *pdev = ae_dev->pdev;
3057 	int ret;
3058 
3059 	ret = hclgevf_alloc_hdev(ae_dev);
3060 	if (ret) {
3061 		dev_err(&pdev->dev, "hclgevf device allocation failed\n");
3062 		return ret;
3063 	}
3064 
3065 	ret = hclgevf_init_hdev(ae_dev->priv);
3066 	if (ret) {
3067 		dev_err(&pdev->dev, "hclgevf device initialization failed\n");
3068 		return ret;
3069 	}
3070 
3071 	return 0;
3072 }
3073 
hclgevf_uninit_ae_dev(struct hnae3_ae_dev * ae_dev)3074 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
3075 {
3076 	struct hclgevf_dev *hdev = ae_dev->priv;
3077 
3078 	hclgevf_uninit_hdev(hdev);
3079 	ae_dev->priv = NULL;
3080 }
3081 
hclgevf_get_max_channels(struct hclgevf_dev * hdev)3082 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
3083 {
3084 	struct hnae3_handle *nic = &hdev->nic;
3085 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
3086 
3087 	return min_t(u32, hdev->rss_size_max,
3088 		     hdev->num_tqps / kinfo->tc_info.num_tc);
3089 }
3090 
3091 /**
3092  * hclgevf_get_channels - Get the current channels enabled and max supported.
3093  * @handle: hardware information for network interface
3094  * @ch: ethtool channels structure
3095  *
3096  * We don't support separate tx and rx queues as channels. The other count
3097  * represents how many queues are being used for control. max_combined counts
3098  * how many queue pairs we can support. They may not be mapped 1 to 1 with
3099  * q_vectors since we support a lot more queue pairs than q_vectors.
3100  **/
hclgevf_get_channels(struct hnae3_handle * handle,struct ethtool_channels * ch)3101 static void hclgevf_get_channels(struct hnae3_handle *handle,
3102 				 struct ethtool_channels *ch)
3103 {
3104 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3105 
3106 	ch->max_combined = hclgevf_get_max_channels(hdev);
3107 	ch->other_count = 0;
3108 	ch->max_other = 0;
3109 	ch->combined_count = handle->kinfo.rss_size;
3110 }
3111 
hclgevf_get_tqps_and_rss_info(struct hnae3_handle * handle,u16 * alloc_tqps,u16 * max_rss_size)3112 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
3113 					  u16 *alloc_tqps, u16 *max_rss_size)
3114 {
3115 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3116 
3117 	*alloc_tqps = hdev->num_tqps;
3118 	*max_rss_size = hdev->rss_size_max;
3119 }
3120 
hclgevf_update_rss_size(struct hnae3_handle * handle,u32 new_tqps_num)3121 static void hclgevf_update_rss_size(struct hnae3_handle *handle,
3122 				    u32 new_tqps_num)
3123 {
3124 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3125 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3126 	u16 max_rss_size;
3127 
3128 	kinfo->req_rss_size = new_tqps_num;
3129 
3130 	max_rss_size = min_t(u16, hdev->rss_size_max,
3131 			     hdev->num_tqps / kinfo->tc_info.num_tc);
3132 
3133 	/* Use the user's configuration when it is not larger than
3134 	 * max_rss_size; otherwise, use the maximum specification value.
3135 	 */
3136 	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
3137 	    kinfo->req_rss_size <= max_rss_size)
3138 		kinfo->rss_size = kinfo->req_rss_size;
3139 	else if (kinfo->rss_size > max_rss_size ||
3140 		 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
3141 		kinfo->rss_size = max_rss_size;
3142 
3143 	kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size;
3144 }
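
/* Worked example with illustrative numbers: given rss_size_max == 16,
 * num_tqps == 32 and num_tc == 4, max_rss_size = min(16, 32 / 4) = 8.
 * A user request of 6 is honored (rss_size = 6, so num_tqps becomes
 * 4 * 6 = 24), while a request of 12 exceeds max_rss_size and is
 * ignored, leaving rss_size unchanged unless it already exceeded the
 * new maximum.
 */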
3145 
hclgevf_set_channels(struct hnae3_handle * handle,u32 new_tqps_num,bool rxfh_configured)3146 static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
3147 				bool rxfh_configured)
3148 {
3149 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3150 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3151 	u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
3152 	u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
3153 	u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
3154 	u16 cur_rss_size = kinfo->rss_size;
3155 	u16 cur_tqps = kinfo->num_tqps;
3156 	u32 *rss_indir;
3157 	unsigned int i;
3158 	int ret;
3159 
3160 	hclgevf_update_rss_size(handle, new_tqps_num);
3161 
3162 	hclge_comm_get_rss_tc_info(kinfo->rss_size, hdev->hw_tc_map,
3163 				   tc_offset, tc_valid, tc_size);
3164 	ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
3165 					 tc_valid, tc_size);
3166 	if (ret)
3167 		return ret;
3168 
3169 	/* RSS indirection table has been configured by user */
3170 	if (rxfh_configured)
3171 		goto out;
3172 
3173 	/* Reinitialize the RSS indirection table according to the new RSS size */
3174 	rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size,
3175 			    sizeof(u32), GFP_KERNEL);
3176 	if (!rss_indir)
3177 		return -ENOMEM;
3178 
3179 	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
3180 		rss_indir[i] = i % kinfo->rss_size;
3181 
3182 	hdev->rss_cfg.rss_size = kinfo->rss_size;
3183 
3184 	ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
3185 	if (ret)
3186 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
3187 			ret);
3188 
3189 	kfree(rss_indir);
3190 
3191 out:
3192 	if (!ret)
3193 		dev_info(&hdev->pdev->dev,
3194 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
3195 			 cur_rss_size, kinfo->rss_size,
3196 			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
3197 
3198 	return ret;
3199 }
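
/* Example of the default spread rebuilt above: with kinfo->rss_size == 6,
 * the indirection table entries are filled as 0, 1, 2, 3, 4, 5, 0, 1, ...
 * across all rss_ind_tbl_size slots, so hashed flows land uniformly on
 * the six active queues of each TC.
 */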
3200 
hclgevf_get_status(struct hnae3_handle * handle)3201 static int hclgevf_get_status(struct hnae3_handle *handle)
3202 {
3203 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3204 
3205 	return hdev->hw.mac.link;
3206 }
3207 
hclgevf_get_ksettings_an_result(struct hnae3_handle * handle,u8 * auto_neg,u32 * speed,u8 * duplex,u32 * lane_num)3208 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
3209 					    u8 *auto_neg, u32 *speed,
3210 					    u8 *duplex, u32 *lane_num)
3211 {
3212 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3213 
3214 	if (speed)
3215 		*speed = hdev->hw.mac.speed;
3216 	if (duplex)
3217 		*duplex = hdev->hw.mac.duplex;
3218 	if (auto_neg)
3219 		*auto_neg = AUTONEG_DISABLE;
3220 }
3221 
hclgevf_update_speed_duplex(struct hclgevf_dev * hdev,u32 speed,u8 duplex)3222 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
3223 				 u8 duplex)
3224 {
3225 	hdev->hw.mac.speed = speed;
3226 	hdev->hw.mac.duplex = duplex;
3227 }
3228 
hclgevf_gro_en(struct hnae3_handle * handle,bool enable)3229 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
3230 {
3231 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3232 	bool gro_en_old = hdev->gro_en;
3233 	int ret;
3234 
3235 	hdev->gro_en = enable;
3236 	ret = hclgevf_config_gro(hdev);
3237 	if (ret)
3238 		hdev->gro_en = gro_en_old;
3239 
3240 	return ret;
3241 }
3242 
hclgevf_get_media_type(struct hnae3_handle * handle,u8 * media_type,u8 * module_type)3243 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
3244 				   u8 *module_type)
3245 {
3246 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3247 
3248 	if (media_type)
3249 		*media_type = hdev->hw.mac.media_type;
3250 
3251 	if (module_type)
3252 		*module_type = hdev->hw.mac.module_type;
3253 }
3254 
static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}

static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->rst_stats.hw_rst_done_cnt;
}

static void hclgevf_get_link_mode(struct hnae3_handle *handle,
				  unsigned long *supported,
				  unsigned long *advertising)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*supported = hdev->hw.mac.supported;
	*advertising = hdev->hw.mac.advertising;
}

void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
				struct hclge_mbx_port_base_vlan *port_base_vlan)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	rtnl_lock();

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	    test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
		dev_warn(&hdev->pdev->dev,
			 "is resetting when updating port based vlan info\n");
		rtnl_unlock();
		return;
	}

	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret) {
		rtnl_unlock();
		return;
	}

	/* send msg to PF and wait for the port based VLAN info to be updated */
	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_PORT_BASE_VLAN_CFG);
	memcpy(send_msg.data, port_base_vlan, sizeof(*port_base_vlan));
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (!ret) {
		if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
			nic->port_base_vlan_state = state;
		else
			nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
	}

	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	rtnl_unlock();
}

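/* Editorial summary of the flow above (all symbols are from this function):
 *
 *	1. quiesce the client:   hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT)
 *	2. notify the PF:        HCLGE_MBX_SET_VLAN / HCLGE_MBX_PORT_BASE_VLAN_CFG
 *	3. cache the new state:  nic->port_base_vlan_state
 *	4. restart the client:   hclgevf_notify_client(hdev, HNAE3_UP_CLIENT)
 *
 * The rtnl lock is held across all four steps so the stack cannot race an
 * open/stop against the reconfiguration.
 */
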
static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.reset_prepare = hclgevf_reset_prepare_general,
	.reset_done = hclgevf_reset_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.client_start = hclgevf_client_start,
	.client_stop = hclgevf_client_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclge_comm_get_rss_key_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_vlan_filter = hclgevf_enable_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.set_channels = hclgevf_set_channels,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_regs_len = hclgevf_get_regs_len,
	.get_regs = hclgevf_get_regs,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
	.set_gro_en = hclgevf_gro_en,
	.set_mtu = hclgevf_set_mtu,
	.get_global_queue_id = hclgevf_get_qid_global,
	.set_timer_task = hclgevf_set_timer_task,
	.get_link_mode = hclgevf_get_link_mode,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.request_update_promisc_mode = hclgevf_request_update_promisc_mode,
	.get_cmdq_stat = hclgevf_get_cmdq_stat,
};

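/* Editorial sketch (assumption: typical hnae3 client usage): consumers do
 * not call the hclgevf_* functions directly; they dispatch through the ops
 * table bound to their handle, e.g.
 *
 *	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
 *
 *	if (ops->get_status(handle))
 *		netif_carrier_on(netdev);
 */
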
static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int __init hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME);
	if (!hclgevf_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

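/* Editorial note: hclgevf_wq backs the deferred service work queued through
 * hclgevf_task_schedule().  A minimal sketch of such scheduling (assuming a
 * delayed_work member named service_task on struct hclgevf_dev):
 *
 *	mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
 *
 * WQ_UNBOUND lets the work run on any CPU instead of the queueing CPU.
 */
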
static void __exit hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
	destroy_workqueue(hclgevf_wq);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);