xref: /linux/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c (revision a34b0e4e21d6be3c3d620aa7f9dfbf0e9550c19e)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
3 
4 #include <linux/etherdevice.h>
5 #include <linux/if_vlan.h>
6 #include <linux/netdevice.h>
7 #include <net/vxlan.h>
8 
9 #include "hinic3_hwif.h"
10 #include "hinic3_nic_cfg.h"
11 #include "hinic3_nic_dev.h"
12 #include "hinic3_nic_io.h"
13 #include "hinic3_rss.h"
14 #include "hinic3_rx.h"
15 #include "hinic3_tx.h"
16 
17 #define HINIC3_LRO_DEFAULT_COAL_PKT_SIZE  32
18 #define HINIC3_LRO_DEFAULT_TIME_LIMIT     16
19 
20 #define VLAN_BITMAP_BITS_SIZE(nic_dev)    (sizeof(*(nic_dev)->vlan_bitmap) * 8)
21 #define VID_LINE(nic_dev, vid)  \
22 	((vid) / VLAN_BITMAP_BITS_SIZE(nic_dev))
23 #define VID_COL(nic_dev, vid)  \
24 	((vid) & (VLAN_BITMAP_BITS_SIZE(nic_dev) - 1))
25 
/* Try to modify the number of queue-pair irqs to the target number,
 * and return the actual number of irqs now held.
 */
static u16 hinic3_qp_irq_change(struct net_device *netdev,
				u16 dst_num_qp_irq)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	struct msix_entry *qps_msix_entries;
	u16 resp_irq_num, irq_num_gap, i;
	u16 idx;
	int err;

	qps_msix_entries = nic_dev->qps_msix_entries;
	if (dst_num_qp_irq > nic_dev->num_qp_irq) {
		/* Grow: allocate only the missing irqs, appended after the
		 * entries already in use.
		 */
		irq_num_gap = dst_num_qp_irq - nic_dev->num_qp_irq;
		err = hinic3_alloc_irqs(nic_dev->hwdev, irq_num_gap,
					&qps_msix_entries[nic_dev->num_qp_irq],
					&resp_irq_num);
		if (err) {
			netdev_err(netdev, "Failed to alloc irqs\n");
			return nic_dev->num_qp_irq;
		}

		/* resp_irq_num is used rather than irq_num_gap, so the
		 * allocator may hand back fewer irqs than requested.
		 */
		nic_dev->num_qp_irq += resp_irq_num;
	} else if (dst_num_qp_irq < nic_dev->num_qp_irq) {
		/* Shrink: release irqs from the tail down to the target and
		 * scrub the freed msix entries.
		 */
		irq_num_gap = nic_dev->num_qp_irq - dst_num_qp_irq;
		for (i = 0; i < irq_num_gap; i++) {
			idx = (nic_dev->num_qp_irq - i) - 1;
			hinic3_free_irq(nic_dev->hwdev,
					qps_msix_entries[idx].vector);
			qps_msix_entries[idx].vector = 0;
			qps_msix_entries[idx].entry = 0;
		}
		nic_dev->num_qp_irq = dst_num_qp_irq;
	}

	return nic_dev->num_qp_irq;
}
64 
65 static void hinic3_config_num_qps(struct net_device *netdev,
66 				  struct hinic3_dyna_txrxq_params *q_params)
67 {
68 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
69 	u16 alloc_num_irq, cur_num_irq;
70 	u16 dst_num_irq;
71 
72 	if (!test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags))
73 		q_params->num_qps = 1;
74 
75 	if (nic_dev->num_qp_irq >= q_params->num_qps)
76 		goto out;
77 
78 	cur_num_irq = nic_dev->num_qp_irq;
79 
80 	alloc_num_irq = hinic3_qp_irq_change(netdev, q_params->num_qps);
81 	if (alloc_num_irq < q_params->num_qps) {
82 		q_params->num_qps = alloc_num_irq;
83 		netdev_warn(netdev, "Can not get enough irqs, adjust num_qps to %u\n",
84 			    q_params->num_qps);
85 
86 		/* The current irq may be in use, we must keep it */
87 		dst_num_irq = max_t(u16, cur_num_irq, q_params->num_qps);
88 		hinic3_qp_irq_change(netdev, dst_num_irq);
89 	}
90 
91 out:
92 	netdev_dbg(netdev, "No need to change irqs, num_qps is %u\n",
93 		   q_params->num_qps);
94 }
95 
96 static int hinic3_setup_num_qps(struct net_device *netdev)
97 {
98 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
99 
100 	nic_dev->num_qp_irq = 0;
101 
102 	nic_dev->qps_msix_entries = kcalloc(nic_dev->max_qps,
103 					    sizeof(struct msix_entry),
104 					    GFP_KERNEL);
105 	if (!nic_dev->qps_msix_entries)
106 		return -ENOMEM;
107 
108 	hinic3_config_num_qps(netdev, &nic_dev->q_params);
109 
110 	return 0;
111 }
112 
113 static void hinic3_destroy_num_qps(struct net_device *netdev)
114 {
115 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
116 	u16 i;
117 
118 	for (i = 0; i < nic_dev->num_qp_irq; i++)
119 		hinic3_free_irq(nic_dev->hwdev,
120 				nic_dev->qps_msix_entries[i].vector);
121 
122 	kfree(nic_dev->qps_msix_entries);
123 }
124 
/* Allocate the per-queue descriptor arrays (tx, rx, irq config) and then
 * the tx/rx queue resources themselves.  On failure everything already
 * allocated is unwound in reverse order and the array pointers are reset
 * to NULL so a subsequent free is safe.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int hinic3_alloc_txrxq_resources(struct net_device *netdev,
					struct hinic3_dyna_txrxq_params *q_params)
{
	int err;

	q_params->txqs_res = kcalloc(q_params->num_qps,
				     sizeof(*q_params->txqs_res), GFP_KERNEL);
	if (!q_params->txqs_res)
		return -ENOMEM;

	q_params->rxqs_res = kcalloc(q_params->num_qps,
				     sizeof(*q_params->rxqs_res), GFP_KERNEL);
	if (!q_params->rxqs_res) {
		err = -ENOMEM;
		goto err_free_txqs_res_arr;
	}

	q_params->irq_cfg = kcalloc(q_params->num_qps,
				    sizeof(*q_params->irq_cfg), GFP_KERNEL);
	if (!q_params->irq_cfg) {
		err = -ENOMEM;
		goto err_free_rxqs_res_arr;
	}

	err = hinic3_alloc_txqs_res(netdev, q_params->num_qps,
				    q_params->sq_depth, q_params->txqs_res);
	if (err) {
		netdev_err(netdev, "Failed to alloc txqs resource\n");
		goto err_free_irq_cfg;
	}

	err = hinic3_alloc_rxqs_res(netdev, q_params->num_qps,
				    q_params->rq_depth, q_params->rxqs_res);
	if (err) {
		netdev_err(netdev, "Failed to alloc rxqs resource\n");
		goto err_free_txqs_res;
	}

	return 0;

err_free_txqs_res:
	hinic3_free_txqs_res(netdev, q_params->num_qps, q_params->sq_depth,
			     q_params->txqs_res);
err_free_irq_cfg:
	kfree(q_params->irq_cfg);
	q_params->irq_cfg = NULL;
err_free_rxqs_res_arr:
	kfree(q_params->rxqs_res);
	q_params->rxqs_res = NULL;
err_free_txqs_res_arr:
	kfree(q_params->txqs_res);
	q_params->txqs_res = NULL;

	return err;
}
180 
181 static void hinic3_free_txrxq_resources(struct net_device *netdev,
182 					struct hinic3_dyna_txrxq_params *q_params)
183 {
184 	hinic3_free_rxqs_res(netdev, q_params->num_qps, q_params->rq_depth,
185 			     q_params->rxqs_res);
186 	hinic3_free_txqs_res(netdev, q_params->num_qps, q_params->sq_depth,
187 			     q_params->txqs_res);
188 
189 	kfree(q_params->irq_cfg);
190 	q_params->irq_cfg = NULL;
191 
192 	kfree(q_params->rxqs_res);
193 	q_params->rxqs_res = NULL;
194 
195 	kfree(q_params->txqs_res);
196 	q_params->txqs_res = NULL;
197 }
198 
199 static int hinic3_configure_txrxqs(struct net_device *netdev,
200 				   struct hinic3_dyna_txrxq_params *q_params)
201 {
202 	int err;
203 
204 	err = hinic3_configure_txqs(netdev, q_params->num_qps,
205 				    q_params->sq_depth, q_params->txqs_res);
206 	if (err) {
207 		netdev_err(netdev, "Failed to configure txqs\n");
208 		return err;
209 	}
210 
211 	err = hinic3_configure_rxqs(netdev, q_params->num_qps,
212 				    q_params->rq_depth, q_params->rxqs_res);
213 	if (err) {
214 		netdev_err(netdev, "Failed to configure rxqs\n");
215 		return err;
216 	}
217 
218 	return 0;
219 }
220 
/* Apply the basic device configuration needed before traffic can flow:
 * MTU limits and port MTU, DCB state, and (when enabled) RSS.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int hinic3_configure(struct net_device *netdev)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	int err;

	netdev->min_mtu = HINIC3_MIN_MTU_SIZE;
	netdev->max_mtu = HINIC3_MAX_JUMBO_FRAME_SIZE;
	err = hinic3_set_port_mtu(netdev, netdev->mtu);
	if (err) {
		netdev_err(netdev, "Failed to set mtu\n");
		return err;
	}

	/* Ensure DCB is disabled.
	 * NOTE(review): the literal (1, 0) arguments are presumably
	 * (op/valid, state) — confirm against hinic3_sync_dcb_state().
	 */
	hinic3_sync_dcb_state(nic_dev->hwdev, 1, 0);

	if (test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags)) {
		err = hinic3_rss_init(netdev);
		if (err) {
			netdev_err(netdev, "Failed to init rss\n");
			return err;
		}
	}

	return 0;
}
247 
248 static void hinic3_remove_configure(struct net_device *netdev)
249 {
250 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
251 
252 	if (test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags))
253 		hinic3_rss_uninit(netdev);
254 }
255 
256 static int hinic3_alloc_channel_resources(struct net_device *netdev,
257 					  struct hinic3_dyna_qp_params *qp_params,
258 					  struct hinic3_dyna_txrxq_params *trxq_params)
259 {
260 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
261 	int err;
262 
263 	qp_params->num_qps = trxq_params->num_qps;
264 	qp_params->sq_depth = trxq_params->sq_depth;
265 	qp_params->rq_depth = trxq_params->rq_depth;
266 
267 	err = hinic3_alloc_qps(nic_dev, qp_params);
268 	if (err) {
269 		netdev_err(netdev, "Failed to alloc qps\n");
270 		return err;
271 	}
272 
273 	err = hinic3_alloc_txrxq_resources(netdev, trxq_params);
274 	if (err) {
275 		netdev_err(netdev, "Failed to alloc txrxq resources\n");
276 		hinic3_free_qps(nic_dev, qp_params);
277 		return err;
278 	}
279 
280 	return 0;
281 }
282 
/* Release channel resources in reverse order of allocation: tx/rx queue
 * resources first, then the queue pairs.
 */
static void hinic3_free_channel_resources(struct net_device *netdev,
					  struct hinic3_dyna_qp_params *qp_params,
					  struct hinic3_dyna_txrxq_params *trxq_params)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);

	hinic3_free_txrxq_resources(netdev, trxq_params);
	hinic3_free_qps(nic_dev, qp_params);
}
292 
/* Bring a channel up: program qp contexts, configure the tx/rx queues,
 * wire up their irqs and apply the device configuration.  Each step is
 * unwound via the goto ladder if a later step fails.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int hinic3_open_channel(struct net_device *netdev)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	int err;

	err = hinic3_init_qp_ctxts(nic_dev);
	if (err) {
		netdev_err(netdev, "Failed to init qps\n");
		return err;
	}

	err = hinic3_configure_txrxqs(netdev, &nic_dev->q_params);
	if (err) {
		netdev_err(netdev, "Failed to configure txrxqs\n");
		goto err_free_qp_ctxts;
	}

	err = hinic3_qps_irq_init(netdev);
	if (err) {
		netdev_err(netdev, "Failed to init txrxq irq\n");
		goto err_free_qp_ctxts;
	}

	err = hinic3_configure(netdev);
	if (err) {
		netdev_err(netdev, "Failed to configure device resources\n");
		goto err_uninit_qps_irq;
	}

	return 0;

err_uninit_qps_irq:
	hinic3_qps_irq_uninit(netdev);
err_free_qp_ctxts:
	hinic3_free_qp_ctxts(nic_dev);

	return err;
}
331 
/* Tear a channel down in reverse order of hinic3_open_channel(). */
static void hinic3_close_channel(struct net_device *netdev)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);

	hinic3_remove_configure(netdev);
	hinic3_qps_irq_uninit(netdev);
	hinic3_free_qp_ctxts(nic_dev);
}
340 
341 static int hinic3_maybe_set_port_state(struct net_device *netdev, bool enable)
342 {
343 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
344 	int err;
345 
346 	mutex_lock(&nic_dev->port_state_mutex);
347 	err = hinic3_set_port_enable(nic_dev->hwdev, enable);
348 	mutex_unlock(&nic_dev->port_state_mutex);
349 
350 	return err;
351 }
352 
353 static void hinic3_print_link_message(struct net_device *netdev,
354 				      bool link_status_up)
355 {
356 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
357 
358 	if (nic_dev->link_status_up == link_status_up)
359 		return;
360 
361 	nic_dev->link_status_up = link_status_up;
362 
363 	netdev_dbg(netdev, "Link is %s\n", str_up_down(link_status_up));
364 }
365 
366 static int hinic3_vport_up(struct net_device *netdev)
367 {
368 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
369 	bool link_status_up;
370 	u16 glb_func_id;
371 	int err;
372 
373 	glb_func_id = hinic3_global_func_id(nic_dev->hwdev);
374 	err = hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, true);
375 	if (err) {
376 		netdev_err(netdev, "Failed to enable vport\n");
377 		goto err_flush_qps_res;
378 	}
379 
380 	err = hinic3_maybe_set_port_state(netdev, true);
381 	if (err) {
382 		netdev_err(netdev, "Failed to enable port\n");
383 		goto err_disable_vport;
384 	}
385 
386 	err = netif_set_real_num_queues(netdev, nic_dev->q_params.num_qps,
387 					nic_dev->q_params.num_qps);
388 	if (err) {
389 		netdev_err(netdev, "Failed to set real number of queues\n");
390 		goto err_disable_vport;
391 	}
392 	netif_tx_start_all_queues(netdev);
393 
394 	err = hinic3_get_link_status(nic_dev->hwdev, &link_status_up);
395 	if (!err && link_status_up)
396 		netif_carrier_on(netdev);
397 
398 	hinic3_print_link_message(netdev, link_status_up);
399 
400 	return 0;
401 
402 err_disable_vport:
403 	hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, false);
404 err_flush_qps_res:
405 	hinic3_flush_qps_res(nic_dev->hwdev);
406 	/* wait to guarantee that no packets will be sent to host */
407 	msleep(100);
408 
409 	return err;
410 }
411 
412 static void hinic3_vport_down(struct net_device *netdev)
413 {
414 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
415 	u16 glb_func_id;
416 
417 	netif_carrier_off(netdev);
418 	netif_tx_disable(netdev);
419 
420 	glb_func_id = hinic3_global_func_id(nic_dev->hwdev);
421 	hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, false);
422 
423 	hinic3_flush_txqs(netdev);
424 	/* wait to guarantee that no packets will be sent to host */
425 	msleep(100);
426 	hinic3_flush_qps_res(nic_dev->hwdev);
427 }
428 
/* ndo_open callback: allocate nicio resources, irqs, channel resources,
 * bring the channel and vport up, then mark the interface as up.
 * Idempotent: a second open while HINIC3_INTF_UP is set is a no-op.
 *
 * Return: 0 on success, negative errno on failure (fully unwound).
 */
static int hinic3_open(struct net_device *netdev)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	struct hinic3_dyna_qp_params qp_params;
	int err;

	if (test_bit(HINIC3_INTF_UP, &nic_dev->flags)) {
		netdev_dbg(netdev, "Netdev already open, do nothing\n");
		return 0;
	}

	err = hinic3_init_nicio_res(nic_dev);
	if (err) {
		netdev_err(netdev, "Failed to init nicio resources\n");
		return err;
	}

	err = hinic3_setup_num_qps(netdev);
	if (err) {
		netdev_err(netdev, "Failed to setup num_qps\n");
		goto err_free_nicio_res;
	}

	err = hinic3_alloc_channel_resources(netdev, &qp_params,
					     &nic_dev->q_params);
	if (err)
		goto err_destroy_num_qps;

	hinic3_init_qps(nic_dev, &qp_params);

	err = hinic3_open_channel(netdev);
	if (err)
		goto err_uninit_qps;

	err = hinic3_vport_up(netdev);
	if (err)
		goto err_close_channel;

	set_bit(HINIC3_INTF_UP, &nic_dev->flags);

	return 0;

err_close_channel:
	hinic3_close_channel(netdev);
err_uninit_qps:
	hinic3_uninit_qps(nic_dev, &qp_params);
	hinic3_free_channel_resources(netdev, &qp_params, &nic_dev->q_params);
err_destroy_num_qps:
	hinic3_destroy_num_qps(netdev);
err_free_nicio_res:
	hinic3_free_nicio_res(nic_dev);

	return err;
}
483 
/* ndo_stop callback: bring the vport and channel down and free the
 * channel resources.  test_and_clear_bit() makes repeated closes no-ops.
 *
 * Return: always 0.
 */
static int hinic3_close(struct net_device *netdev)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	struct hinic3_dyna_qp_params qp_params;

	if (!test_and_clear_bit(HINIC3_INTF_UP, &nic_dev->flags)) {
		netdev_dbg(netdev, "Netdev already close, do nothing\n");
		return 0;
	}

	hinic3_vport_down(netdev);
	hinic3_close_channel(netdev);
	/* NOTE(review): qp_params is passed in uninitialized here;
	 * hinic3_uninit_qps() presumably fills it with the queue resources
	 * so they can be freed below — confirm against hinic3_nic_io.c.
	 */
	hinic3_uninit_qps(nic_dev, &qp_params);
	hinic3_free_channel_resources(netdev, &qp_params, &nic_dev->q_params);

	return 0;
}
501 
502 #define SET_FEATURES_OP_STR(op)  ((op) ? "Enable" : "Disable")
503 
504 static int hinic3_set_feature_rx_csum(struct net_device *netdev,
505 				      netdev_features_t wanted_features,
506 				      netdev_features_t features,
507 				      netdev_features_t *failed_features)
508 {
509 	netdev_features_t changed = wanted_features ^ features;
510 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
511 	struct hinic3_hwdev *hwdev = nic_dev->hwdev;
512 
513 	if (changed & NETIF_F_RXCSUM)
514 		dev_dbg(hwdev->dev, "%s rx csum success\n",
515 			SET_FEATURES_OP_STR(wanted_features & NETIF_F_RXCSUM));
516 
517 	return 0;
518 }
519 
520 static int hinic3_set_feature_tso(struct net_device *netdev,
521 				  netdev_features_t wanted_features,
522 				  netdev_features_t features,
523 				  netdev_features_t *failed_features)
524 {
525 	netdev_features_t changed = wanted_features ^ features;
526 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
527 	struct hinic3_hwdev *hwdev = nic_dev->hwdev;
528 
529 	if (changed & NETIF_F_TSO)
530 		dev_dbg(hwdev->dev, "%s tso success\n",
531 			SET_FEATURES_OP_STR(wanted_features & NETIF_F_TSO));
532 
533 	return 0;
534 }
535 
536 static int hinic3_set_feature_lro(struct net_device *netdev,
537 				  netdev_features_t wanted_features,
538 				  netdev_features_t features,
539 				  netdev_features_t *failed_features)
540 {
541 	netdev_features_t changed = wanted_features ^ features;
542 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
543 	struct hinic3_hwdev *hwdev = nic_dev->hwdev;
544 	bool en = !!(wanted_features & NETIF_F_LRO);
545 	int err;
546 
547 	if (!(changed & NETIF_F_LRO))
548 		return 0;
549 
550 	err = hinic3_set_rx_lro_state(hwdev, en,
551 				      HINIC3_LRO_DEFAULT_TIME_LIMIT,
552 				      HINIC3_LRO_DEFAULT_COAL_PKT_SIZE);
553 	if (err) {
554 		dev_err(hwdev->dev, "%s lro failed\n", SET_FEATURES_OP_STR(en));
555 		*failed_features |= NETIF_F_LRO;
556 	}
557 
558 	return err;
559 }
560 
561 static int hinic3_set_feature_rx_cvlan(struct net_device *netdev,
562 				       netdev_features_t wanted_features,
563 				       netdev_features_t features,
564 				       netdev_features_t *failed_features)
565 {
566 	bool en = !!(wanted_features & NETIF_F_HW_VLAN_CTAG_RX);
567 	netdev_features_t changed = wanted_features ^ features;
568 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
569 	struct hinic3_hwdev *hwdev = nic_dev->hwdev;
570 	int err;
571 
572 	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
573 		return 0;
574 
575 	err = hinic3_set_rx_vlan_offload(hwdev, en);
576 	if (err) {
577 		dev_err(hwdev->dev, "%s rx vlan offload failed\n",
578 			SET_FEATURES_OP_STR(en));
579 		*failed_features |= NETIF_F_HW_VLAN_CTAG_RX;
580 	}
581 
582 	return err;
583 }
584 
585 static int hinic3_set_feature_vlan_filter(struct net_device *netdev,
586 					  netdev_features_t wanted_features,
587 					  netdev_features_t features,
588 					  netdev_features_t *failed_features)
589 {
590 	bool en = !!(wanted_features & NETIF_F_HW_VLAN_CTAG_FILTER);
591 	netdev_features_t changed = wanted_features ^ features;
592 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
593 	struct hinic3_hwdev *hwdev = nic_dev->hwdev;
594 	int err;
595 
596 	if (!(changed & NETIF_F_HW_VLAN_CTAG_FILTER))
597 		return 0;
598 
599 	err = hinic3_set_vlan_filter(hwdev, en);
600 	if (err) {
601 		dev_err(hwdev->dev, "%s rx vlan filter failed\n",
602 			SET_FEATURES_OP_STR(en));
603 		*failed_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
604 	}
605 
606 	return err;
607 }
608 
609 static int hinic3_set_features(struct net_device *netdev,
610 			       netdev_features_t curr,
611 			       netdev_features_t wanted)
612 {
613 	netdev_features_t failed = 0;
614 	int err;
615 
616 	err = hinic3_set_feature_rx_csum(netdev, wanted, curr, &failed) |
617 	      hinic3_set_feature_tso(netdev, wanted, curr, &failed) |
618 	      hinic3_set_feature_lro(netdev, wanted, curr, &failed) |
619 	      hinic3_set_feature_rx_cvlan(netdev, wanted, curr, &failed) |
620 	      hinic3_set_feature_vlan_filter(netdev, wanted, curr, &failed);
621 	if (err) {
622 		netdev->features = wanted ^ failed;
623 		return err;
624 	}
625 
626 	return 0;
627 }
628 
629 static int hinic3_ndo_set_features(struct net_device *netdev,
630 				   netdev_features_t features)
631 {
632 	return hinic3_set_features(netdev, netdev->features, features);
633 }
634 
635 static netdev_features_t hinic3_fix_features(struct net_device *netdev,
636 					     netdev_features_t features)
637 {
638 	netdev_features_t features_tmp = features;
639 
640 	/* If Rx checksum is disabled, then LRO should also be disabled */
641 	if (!(features_tmp & NETIF_F_RXCSUM))
642 		features_tmp &= ~NETIF_F_LRO;
643 
644 	return features_tmp;
645 }
646 
647 static netdev_features_t hinic3_features_check(struct sk_buff *skb,
648 					       struct net_device *dev,
649 					       netdev_features_t features)
650 {
651 	features = vlan_features_check(skb, features);
652 	features = vxlan_features_check(skb, features);
653 
654 	return features;
655 }
656 
657 int hinic3_set_hw_features(struct net_device *netdev)
658 {
659 	netdev_features_t wanted, curr;
660 
661 	wanted = netdev->features;
662 	/* fake current features so all wanted are enabled */
663 	curr = ~wanted;
664 
665 	return hinic3_set_features(netdev, curr, wanted);
666 }
667 
668 static int hinic3_change_mtu(struct net_device *netdev, int new_mtu)
669 {
670 	int err;
671 
672 	err = hinic3_set_port_mtu(netdev, new_mtu);
673 	if (err) {
674 		netdev_err(netdev, "Failed to change port mtu to %d\n",
675 			   new_mtu);
676 		return err;
677 	}
678 
679 	netdev_dbg(netdev, "Change mtu from %u to %d\n", netdev->mtu, new_mtu);
680 	WRITE_ONCE(netdev->mtu, new_mtu);
681 
682 	return 0;
683 }
684 
685 static int hinic3_set_mac_addr(struct net_device *netdev, void *addr)
686 {
687 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
688 	struct sockaddr *saddr = addr;
689 	int err;
690 
691 	if (!is_valid_ether_addr(saddr->sa_data))
692 		return -EADDRNOTAVAIL;
693 
694 	if (ether_addr_equal(netdev->dev_addr, saddr->sa_data))
695 		return 0;
696 
697 	err = hinic3_update_mac(nic_dev->hwdev, netdev->dev_addr,
698 				saddr->sa_data, 0,
699 				hinic3_global_func_id(nic_dev->hwdev));
700 
701 	if (err)
702 		return err;
703 
704 	eth_hw_addr_set(netdev, saddr->sa_data);
705 
706 	return 0;
707 }
708 
709 static int hinic3_vlan_rx_add_vid(struct net_device *netdev,
710 				  __be16 proto, u16 vid)
711 {
712 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
713 	unsigned long *vlan_bitmap = nic_dev->vlan_bitmap;
714 	u32 column, row;
715 	u16 func_id;
716 	int err;
717 
718 	column = VID_COL(nic_dev, vid);
719 	row = VID_LINE(nic_dev, vid);
720 
721 	func_id = hinic3_global_func_id(nic_dev->hwdev);
722 
723 	err = hinic3_add_vlan(nic_dev->hwdev, vid, func_id);
724 	if (err) {
725 		netdev_err(netdev, "Failed to add vlan %u\n", vid);
726 		goto out;
727 	}
728 
729 	set_bit(column, &vlan_bitmap[row]);
730 	netdev_dbg(netdev, "Add vlan %u\n", vid);
731 
732 out:
733 	return err;
734 }
735 
736 static int hinic3_vlan_rx_kill_vid(struct net_device *netdev,
737 				   __be16 proto, u16 vid)
738 {
739 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
740 	unsigned long *vlan_bitmap = nic_dev->vlan_bitmap;
741 	u32 column, row;
742 	u16 func_id;
743 	int err;
744 
745 	column  = VID_COL(nic_dev, vid);
746 	row = VID_LINE(nic_dev, vid);
747 
748 	func_id = hinic3_global_func_id(nic_dev->hwdev);
749 	err = hinic3_del_vlan(nic_dev->hwdev, vid, func_id);
750 	if (err) {
751 		netdev_err(netdev, "Failed to delete vlan %u\n", vid);
752 		goto out;
753 	}
754 
755 	clear_bit(column, &vlan_bitmap[row]);
756 	netdev_dbg(netdev, "Remove vlan %u\n", vid);
757 
758 out:
759 	return err;
760 }
761 
/* ndo_tx_timeout callback: dump the stalled queue's indices and, when the
 * hardware has not consumed all posted descriptors, flag the tx-timeout
 * event (presumably handled by the driver's event worker — confirm).
 */
static void hinic3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	struct hinic3_io_queue *sq;
	u16 sw_pi, hw_ci;

	sq = nic_dev->txqs[txqueue].sq;
	sw_pi = hinic3_get_sq_local_pi(sq);
	hw_ci = hinic3_get_sq_hw_ci(sq);
	netdev_dbg(netdev,
		   "txq%u: sw_pi: %u, hw_ci: %u, sw_ci: %u, napi->state: 0x%lx.\n",
		   txqueue, sw_pi, hw_ci, hinic3_get_sq_local_ci(sq),
		   nic_dev->q_params.irq_cfg[txqueue].napi.state);

	/* producer ahead of hw consumer: descriptors are stuck in hardware */
	if (sw_pi != hw_ci)
		set_bit(HINIC3_EVENT_WORK_TX_TIMEOUT, &nic_dev->event_flag);
}
779 
780 static void hinic3_get_stats64(struct net_device *netdev,
781 			       struct rtnl_link_stats64 *stats)
782 {
783 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
784 	u64 bytes, packets, dropped, errors;
785 	struct hinic3_txq_stats *txq_stats;
786 	struct hinic3_rxq_stats *rxq_stats;
787 	struct hinic3_txq *txq;
788 	struct hinic3_rxq *rxq;
789 	unsigned int start;
790 	int i;
791 
792 	bytes = 0;
793 	packets = 0;
794 	dropped = 0;
795 	for (i = 0; i < nic_dev->max_qps; i++) {
796 		if (!nic_dev->txqs)
797 			break;
798 
799 		txq = &nic_dev->txqs[i];
800 		txq_stats = &txq->txq_stats;
801 		do {
802 			start = u64_stats_fetch_begin(&txq_stats->syncp);
803 			bytes += txq_stats->bytes;
804 			packets += txq_stats->packets;
805 			dropped += txq_stats->dropped;
806 		} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
807 	}
808 	stats->tx_packets = packets;
809 	stats->tx_bytes   = bytes;
810 	stats->tx_dropped = dropped;
811 
812 	bytes = 0;
813 	packets = 0;
814 	errors = 0;
815 	dropped = 0;
816 	for (i = 0; i < nic_dev->max_qps; i++) {
817 		if (!nic_dev->rxqs)
818 			break;
819 
820 		rxq = &nic_dev->rxqs[i];
821 		rxq_stats = &rxq->rxq_stats;
822 		do {
823 			start = u64_stats_fetch_begin(&rxq_stats->syncp);
824 			bytes += rxq_stats->bytes;
825 			packets += rxq_stats->packets;
826 			errors += rxq_stats->csum_errors +
827 				rxq_stats->other_errors;
828 			dropped += rxq_stats->dropped;
829 		} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
830 	}
831 	stats->rx_packets = packets;
832 	stats->rx_bytes   = bytes;
833 	stats->rx_errors  = errors;
834 	stats->rx_dropped = dropped;
835 }
836 
837 static void hinic3_nic_set_rx_mode(struct net_device *netdev)
838 {
839 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
840 
841 	if (netdev_uc_count(netdev) != nic_dev->netdev_uc_cnt ||
842 	    netdev_mc_count(netdev) != nic_dev->netdev_mc_cnt) {
843 		set_bit(HINIC3_UPDATE_MAC_FILTER, &nic_dev->flags);
844 		nic_dev->netdev_uc_cnt = netdev_uc_count(netdev);
845 		nic_dev->netdev_mc_cnt = netdev_mc_count(netdev);
846 	}
847 
848 	queue_work(nic_dev->workq, &nic_dev->rx_mode_work);
849 }
850 
/* net_device_ops table wired to the netdev by hinic3_set_netdev_ops(). */
static const struct net_device_ops hinic3_netdev_ops = {
	.ndo_open             = hinic3_open,
	.ndo_stop             = hinic3_close,
	.ndo_set_features     = hinic3_ndo_set_features,
	.ndo_fix_features     = hinic3_fix_features,
	.ndo_features_check   = hinic3_features_check,
	.ndo_change_mtu       = hinic3_change_mtu,
	.ndo_set_mac_address  = hinic3_set_mac_addr,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_vlan_rx_add_vid  = hinic3_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = hinic3_vlan_rx_kill_vid,
	.ndo_tx_timeout       = hinic3_tx_timeout,
	.ndo_get_stats64      = hinic3_get_stats64,
	.ndo_set_rx_mode      = hinic3_nic_set_rx_mode,
	.ndo_start_xmit       = hinic3_xmit_frame,
};
867 
/* Attach the hinic3 net_device_ops table to @netdev. */
void hinic3_set_netdev_ops(struct net_device *netdev)
{
	netdev->netdev_ops = &hinic3_netdev_ops;
}
872