xref: /linux/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c (revision 189f164e573e18d9f8876dbd3ad8fcbe11f93037)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
3 
4 #include <linux/etherdevice.h>
5 #include <linux/if_vlan.h>
6 #include <linux/netdevice.h>
7 #include <net/vxlan.h>
8 
9 #include "hinic3_hwif.h"
10 #include "hinic3_nic_cfg.h"
11 #include "hinic3_nic_dev.h"
12 #include "hinic3_nic_io.h"
13 #include "hinic3_rss.h"
14 #include "hinic3_rx.h"
15 #include "hinic3_tx.h"
16 
17 #define HINIC3_LRO_DEFAULT_COAL_PKT_SIZE  32
18 #define HINIC3_LRO_DEFAULT_TIME_LIMIT     16
19 
20 #define VLAN_BITMAP_BITS_SIZE(nic_dev)    (sizeof(*(nic_dev)->vlan_bitmap) * 8)
21 #define VID_LINE(nic_dev, vid)  \
22 	((vid) / VLAN_BITMAP_BITS_SIZE(nic_dev))
23 #define VID_COL(nic_dev, vid)  \
24 	((vid) & (VLAN_BITMAP_BITS_SIZE(nic_dev) - 1))
25 
26 /* try to modify the number of irq to the target number,
27  * and return the actual number of irq.
28  */
/* Grow or shrink the pool of queue-pair irqs towards @dst_num_qp_irq and
 * report how many irqs are actually held afterwards (which may be fewer
 * than requested if allocation fails).
 */
static u16 hinic3_qp_irq_change(struct net_device *netdev,
				u16 dst_num_qp_irq)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	struct msix_entry *entries = nic_dev->qps_msix_entries;
	u16 cur = nic_dev->num_qp_irq;
	u16 allocated, gap, pos;
	int err;

	if (dst_num_qp_irq > cur) {
		gap = dst_num_qp_irq - cur;
		err = hinic3_alloc_irqs(nic_dev->hwdev, gap,
					&entries[cur], &allocated);
		if (err) {
			netdev_err(netdev, "Failed to alloc irqs\n");
			return cur;
		}

		nic_dev->num_qp_irq = cur + allocated;
	} else {
		/* Release irqs from the tail down to the requested count;
		 * a no-op when dst_num_qp_irq == cur.
		 */
		for (pos = cur; pos > dst_num_qp_irq; pos--) {
			hinic3_free_irq(nic_dev->hwdev,
					entries[pos - 1].vector);
			entries[pos - 1].vector = 0;
			entries[pos - 1].entry = 0;
		}
		nic_dev->num_qp_irq = dst_num_qp_irq;
	}

	return nic_dev->num_qp_irq;
}
64 
/* Clamp q_params->num_qps to the number of queue-pair irqs that can
 * actually be obtained. Without RSS only a single queue pair is used.
 */
static void hinic3_config_num_qps(struct net_device *netdev,
				  struct hinic3_dyna_txrxq_params *q_params)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	u16 alloc_num_irq, cur_num_irq;
	u16 dst_num_irq;

	if (!test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags))
		q_params->num_qps = 1;

	/* Already holding enough irqs for the requested queue count */
	if (nic_dev->num_qp_irq >= q_params->num_qps)
		goto out;

	cur_num_irq = nic_dev->num_qp_irq;

	alloc_num_irq = hinic3_qp_irq_change(netdev, q_params->num_qps);
	if (alloc_num_irq < q_params->num_qps) {
		/* Could not get enough irqs - run with fewer queue pairs */
		q_params->num_qps = alloc_num_irq;
		netdev_warn(netdev, "Can not get enough irqs, adjust num_qps to %u\n",
			    q_params->num_qps);

		/* The current irq may be in use, we must keep it */
		dst_num_irq = max_t(u16, cur_num_irq, q_params->num_qps);
		hinic3_qp_irq_change(netdev, dst_num_irq);
	}

out:
	/* NOTE(review): this dbg also fires after irqs were changed above,
	 * so the "No need to change" wording is only accurate on the
	 * early-exit path.
	 */
	netdev_dbg(netdev, "No need to change irqs, num_qps is %u\n",
		   q_params->num_qps);
}
95 
hinic3_setup_num_qps(struct net_device * netdev)96 static int hinic3_setup_num_qps(struct net_device *netdev)
97 {
98 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
99 
100 	nic_dev->num_qp_irq = 0;
101 
102 	nic_dev->qps_msix_entries = kzalloc_objs(struct msix_entry,
103 						 nic_dev->max_qps);
104 	if (!nic_dev->qps_msix_entries)
105 		return -ENOMEM;
106 
107 	hinic3_config_num_qps(netdev, &nic_dev->q_params);
108 
109 	return 0;
110 }
111 
hinic3_destroy_num_qps(struct net_device * netdev)112 static void hinic3_destroy_num_qps(struct net_device *netdev)
113 {
114 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
115 	u16 i;
116 
117 	for (i = 0; i < nic_dev->num_qp_irq; i++)
118 		hinic3_free_irq(nic_dev->hwdev,
119 				nic_dev->qps_msix_entries[i].vector);
120 
121 	kfree(nic_dev->qps_msix_entries);
122 }
123 
/* Allocate the per-queue tx/rx resource descriptor arrays plus the
 * resources they describe. On failure everything acquired so far is
 * released in reverse order and the q_params pointers are reset to NULL
 * so a later free pass is harmless.
 */
static int hinic3_alloc_txrxq_resources(struct net_device *netdev,
					struct hinic3_dyna_txrxq_params *q_params)
{
	int err;

	q_params->txqs_res = kzalloc_objs(*q_params->txqs_res,
					  q_params->num_qps);
	if (!q_params->txqs_res)
		return -ENOMEM;

	q_params->rxqs_res = kzalloc_objs(*q_params->rxqs_res,
					  q_params->num_qps);
	if (!q_params->rxqs_res) {
		err = -ENOMEM;
		goto err_free_txqs_res_arr;
	}

	q_params->irq_cfg = kzalloc_objs(*q_params->irq_cfg, q_params->num_qps);
	if (!q_params->irq_cfg) {
		err = -ENOMEM;
		goto err_free_rxqs_res_arr;
	}

	/* Fill the descriptor arrays with the actual queue resources */
	err = hinic3_alloc_txqs_res(netdev, q_params->num_qps,
				    q_params->sq_depth, q_params->txqs_res);
	if (err) {
		netdev_err(netdev, "Failed to alloc txqs resource\n");
		goto err_free_irq_cfg;
	}

	err = hinic3_alloc_rxqs_res(netdev, q_params->num_qps,
				    q_params->rq_depth, q_params->rxqs_res);
	if (err) {
		netdev_err(netdev, "Failed to alloc rxqs resource\n");
		goto err_free_txqs_res;
	}

	return 0;

err_free_txqs_res:
	hinic3_free_txqs_res(netdev, q_params->num_qps, q_params->sq_depth,
			     q_params->txqs_res);
err_free_irq_cfg:
	kfree(q_params->irq_cfg);
	q_params->irq_cfg = NULL;
err_free_rxqs_res_arr:
	kfree(q_params->rxqs_res);
	q_params->rxqs_res = NULL;
err_free_txqs_res_arr:
	kfree(q_params->txqs_res);
	q_params->txqs_res = NULL;

	return err;
}
178 
/* Counterpart of hinic3_alloc_txrxq_resources(): queue resources are
 * freed before the descriptor arrays that describe them, and every
 * pointer is NULLed so a repeated call is a no-op.
 */
static void hinic3_free_txrxq_resources(struct net_device *netdev,
					struct hinic3_dyna_txrxq_params *q_params)
{
	hinic3_free_rxqs_res(netdev, q_params->num_qps, q_params->rq_depth,
			     q_params->rxqs_res);
	hinic3_free_txqs_res(netdev, q_params->num_qps, q_params->sq_depth,
			     q_params->txqs_res);

	kfree(q_params->irq_cfg);
	q_params->irq_cfg = NULL;

	kfree(q_params->rxqs_res);
	q_params->rxqs_res = NULL;

	kfree(q_params->txqs_res);
	q_params->txqs_res = NULL;
}
196 
hinic3_configure_txrxqs(struct net_device * netdev,struct hinic3_dyna_txrxq_params * q_params)197 static int hinic3_configure_txrxqs(struct net_device *netdev,
198 				   struct hinic3_dyna_txrxq_params *q_params)
199 {
200 	int err;
201 
202 	err = hinic3_configure_txqs(netdev, q_params->num_qps,
203 				    q_params->sq_depth, q_params->txqs_res);
204 	if (err) {
205 		netdev_err(netdev, "Failed to configure txqs\n");
206 		return err;
207 	}
208 
209 	err = hinic3_configure_rxqs(netdev, q_params->num_qps,
210 				    q_params->rq_depth, q_params->rxqs_res);
211 	if (err) {
212 		netdev_err(netdev, "Failed to configure rxqs\n");
213 		return err;
214 	}
215 
216 	return 0;
217 }
218 
/* Apply base device configuration after the channel is up: MTU limits,
 * DCB state, and RSS (only when enabled).
 */
static int hinic3_configure(struct net_device *netdev)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	int err;

	netdev->min_mtu = HINIC3_MIN_MTU_SIZE;
	netdev->max_mtu = HINIC3_MAX_JUMBO_FRAME_SIZE;
	err = hinic3_set_port_mtu(netdev, netdev->mtu);
	if (err) {
		netdev_err(netdev, "Failed to set mtu\n");
		return err;
	}

	/* Ensure DCB is disabled */
	/* NOTE(review): the literal args (1, 0) presumably mean
	 * op=sync/enable-change, state=disabled - confirm against the
	 * hinic3_sync_dcb_state() prototype.
	 */
	hinic3_sync_dcb_state(nic_dev->hwdev, 1, 0);

	if (test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags)) {
		err = hinic3_rss_init(netdev);
		if (err) {
			netdev_err(netdev, "Failed to init rss\n");
			return err;
		}
	}

	return 0;
}
245 
hinic3_remove_configure(struct net_device * netdev)246 static void hinic3_remove_configure(struct net_device *netdev)
247 {
248 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
249 
250 	if (test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags))
251 		hinic3_rss_uninit(netdev);
252 }
253 
hinic3_alloc_channel_resources(struct net_device * netdev,struct hinic3_dyna_qp_params * qp_params,struct hinic3_dyna_txrxq_params * trxq_params)254 static int hinic3_alloc_channel_resources(struct net_device *netdev,
255 					  struct hinic3_dyna_qp_params *qp_params,
256 					  struct hinic3_dyna_txrxq_params *trxq_params)
257 {
258 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
259 	int err;
260 
261 	qp_params->num_qps = trxq_params->num_qps;
262 	qp_params->sq_depth = trxq_params->sq_depth;
263 	qp_params->rq_depth = trxq_params->rq_depth;
264 
265 	err = hinic3_alloc_qps(nic_dev, qp_params);
266 	if (err) {
267 		netdev_err(netdev, "Failed to alloc qps\n");
268 		return err;
269 	}
270 
271 	err = hinic3_alloc_txrxq_resources(netdev, trxq_params);
272 	if (err) {
273 		netdev_err(netdev, "Failed to alloc txrxq resources\n");
274 		hinic3_free_qps(nic_dev, qp_params);
275 		return err;
276 	}
277 
278 	return 0;
279 }
280 
/* Release everything hinic3_alloc_channel_resources() acquired. */
static void hinic3_free_channel_resources(struct net_device *netdev,
					  struct hinic3_dyna_qp_params *qp_params,
					  struct hinic3_dyna_txrxq_params *trxq_params)
{
	hinic3_free_txrxq_resources(netdev, trxq_params);
	hinic3_free_qps(netdev_priv(netdev), qp_params);
}
290 
/* Bring the data channel up: qp contexts, queue configuration, per-queue
 * irqs, then device configuration. On error, partially acquired state is
 * unwound in reverse order.
 */
static int hinic3_open_channel(struct net_device *netdev)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	int err;

	err = hinic3_init_qp_ctxts(nic_dev);
	if (err) {
		netdev_err(netdev, "Failed to init qps\n");
		return err;
	}

	err = hinic3_configure_txrxqs(netdev, &nic_dev->q_params);
	if (err) {
		netdev_err(netdev, "Failed to configure txrxqs\n");
		goto err_free_qp_ctxts;
	}

	err = hinic3_qps_irq_init(netdev);
	if (err) {
		netdev_err(netdev, "Failed to init txrxq irq\n");
		goto err_free_qp_ctxts;
	}

	err = hinic3_configure(netdev);
	if (err) {
		netdev_err(netdev, "Failed to configure device resources\n");
		goto err_uninit_qps_irq;
	}

	return 0;

err_uninit_qps_irq:
	hinic3_qps_irq_uninit(netdev);
err_free_qp_ctxts:
	hinic3_free_qp_ctxts(nic_dev);

	return err;
}
329 
/* Tear the data channel down - mirror image of hinic3_open_channel(). */
static void hinic3_close_channel(struct net_device *netdev)
{
	hinic3_remove_configure(netdev);
	hinic3_qps_irq_uninit(netdev);
	hinic3_free_qp_ctxts(netdev_priv(netdev));
}
338 
/* Enable or disable the physical port, serialized against concurrent
 * callers by port_state_mutex.
 */
static int hinic3_maybe_set_port_state(struct net_device *netdev, bool enable)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	int ret;

	mutex_lock(&nic_dev->port_state_mutex);
	ret = hinic3_set_port_enable(nic_dev->hwdev, enable);
	mutex_unlock(&nic_dev->port_state_mutex);

	return ret;
}
350 
/* Log link transitions; silent when the cached state is unchanged. */
static void hinic3_print_link_message(struct net_device *netdev,
				      bool link_status_up)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);

	if (nic_dev->link_status_up != link_status_up) {
		nic_dev->link_status_up = link_status_up;
		netdev_dbg(netdev, "Link is %s\n",
			   str_up_down(link_status_up));
	}
}
363 
hinic3_vport_up(struct net_device * netdev)364 static int hinic3_vport_up(struct net_device *netdev)
365 {
366 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
367 	bool link_status_up;
368 	u16 glb_func_id;
369 	int err;
370 
371 	glb_func_id = hinic3_global_func_id(nic_dev->hwdev);
372 	err = hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, true);
373 	if (err) {
374 		netdev_err(netdev, "Failed to enable vport\n");
375 		goto err_flush_qps_res;
376 	}
377 
378 	err = hinic3_maybe_set_port_state(netdev, true);
379 	if (err) {
380 		netdev_err(netdev, "Failed to enable port\n");
381 		goto err_disable_vport;
382 	}
383 
384 	err = netif_set_real_num_queues(netdev, nic_dev->q_params.num_qps,
385 					nic_dev->q_params.num_qps);
386 	if (err) {
387 		netdev_err(netdev, "Failed to set real number of queues\n");
388 		goto err_disable_vport;
389 	}
390 	netif_tx_start_all_queues(netdev);
391 
392 	err = hinic3_get_link_status(nic_dev->hwdev, &link_status_up);
393 	if (!err && link_status_up)
394 		netif_carrier_on(netdev);
395 
396 	hinic3_print_link_message(netdev, link_status_up);
397 
398 	return 0;
399 
400 err_disable_vport:
401 	hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, false);
402 err_flush_qps_res:
403 	hinic3_flush_qps_res(nic_dev->hwdev);
404 	/* wait to guarantee that no packets will be sent to host */
405 	msleep(100);
406 
407 	return err;
408 }
409 
/* Quiesce the datapath: stop the stack side first (carrier/tx queues),
 * then disable the vport, drain the txqs and flush qp resources. The
 * ordering is deliberate and must not change.
 */
static void hinic3_vport_down(struct net_device *netdev)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	u16 glb_func_id;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	glb_func_id = hinic3_global_func_id(nic_dev->hwdev);
	hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, false);

	hinic3_flush_txqs(netdev);
	/* wait to guarantee that no packets will be sent to host */
	msleep(100);
	hinic3_flush_qps_res(nic_dev->hwdev);
}
426 
/* ndo_open: bring the interface fully up - nicio resources, irqs,
 * channel resources, qps, channel, and finally the vport. Each failure
 * unwinds everything acquired so far in reverse order.
 */
static int hinic3_open(struct net_device *netdev)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	struct hinic3_dyna_qp_params qp_params;
	int err;

	/* Idempotent: a second open while already up does nothing */
	if (test_bit(HINIC3_INTF_UP, &nic_dev->flags)) {
		netdev_dbg(netdev, "Netdev already open, do nothing\n");
		return 0;
	}

	err = hinic3_init_nicio_res(nic_dev);
	if (err) {
		netdev_err(netdev, "Failed to init nicio resources\n");
		return err;
	}

	err = hinic3_setup_num_qps(netdev);
	if (err) {
		netdev_err(netdev, "Failed to setup num_qps\n");
		goto err_free_nicio_res;
	}

	err = hinic3_alloc_channel_resources(netdev, &qp_params,
					     &nic_dev->q_params);
	if (err)
		goto err_destroy_num_qps;

	hinic3_init_qps(nic_dev, &qp_params);

	err = hinic3_open_channel(netdev);
	if (err)
		goto err_uninit_qps;

	err = hinic3_vport_up(netdev);
	if (err)
		goto err_close_channel;

	set_bit(HINIC3_INTF_UP, &nic_dev->flags);

	return 0;

err_close_channel:
	hinic3_close_channel(netdev);
err_uninit_qps:
	hinic3_uninit_qps(nic_dev, &qp_params);
	hinic3_free_channel_resources(netdev, &qp_params, &nic_dev->q_params);
err_destroy_num_qps:
	hinic3_destroy_num_qps(netdev);
err_free_nicio_res:
	hinic3_free_nicio_res(nic_dev);

	return err;
}
481 
/* ndo_stop: reverse of hinic3_open(). */
static int hinic3_close(struct net_device *netdev)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	struct hinic3_dyna_qp_params qp_params;

	/* Idempotent: closing an already-closed netdev does nothing */
	if (!test_and_clear_bit(HINIC3_INTF_UP, &nic_dev->flags)) {
		netdev_dbg(netdev, "Netdev already close, do nothing\n");
		return 0;
	}

	hinic3_vport_down(netdev);
	hinic3_close_channel(netdev);
	/* NOTE(review): qp_params starts uninitialized here - presumably
	 * hinic3_uninit_qps() fills it with the resources that
	 * hinic3_free_channel_resources() then releases; confirm against
	 * hinic3_nic_io.c.
	 */
	hinic3_uninit_qps(nic_dev, &qp_params);
	hinic3_free_channel_resources(netdev, &qp_params, &nic_dev->q_params);

	return 0;
}
499 
500 #define SET_FEATURES_OP_STR(op)  ((op) ? "Enable" : "Disable")
501 
/* Toggle rx checksum offload. No hardware programming is needed; the
 * toggle is only traced. Never fails.
 */
static int hinic3_set_feature_rx_csum(struct net_device *netdev,
				      netdev_features_t wanted_features,
				      netdev_features_t features,
				      netdev_features_t *failed_features)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);

	if ((wanted_features ^ features) & NETIF_F_RXCSUM)
		dev_dbg(nic_dev->hwdev->dev, "%s rx csum success\n",
			SET_FEATURES_OP_STR(wanted_features & NETIF_F_RXCSUM));

	return 0;
}
517 
/* Toggle TSO. No hardware programming is needed; the toggle is only
 * traced. Never fails.
 */
static int hinic3_set_feature_tso(struct net_device *netdev,
				  netdev_features_t wanted_features,
				  netdev_features_t features,
				  netdev_features_t *failed_features)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);

	if ((wanted_features ^ features) & NETIF_F_TSO)
		dev_dbg(nic_dev->hwdev->dev, "%s tso success\n",
			SET_FEATURES_OP_STR(wanted_features & NETIF_F_TSO));

	return 0;
}
533 
/* Program the LRO state in hardware when the LRO feature bit changed.
 * On failure the bit is recorded in *failed_features for the caller.
 */
static int hinic3_set_feature_lro(struct net_device *netdev,
				  netdev_features_t wanted_features,
				  netdev_features_t features,
				  netdev_features_t *failed_features)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	struct hinic3_hwdev *hwdev = nic_dev->hwdev;
	bool en;
	int err;

	if (!((wanted_features ^ features) & NETIF_F_LRO))
		return 0;

	en = !!(wanted_features & NETIF_F_LRO);
	err = hinic3_set_rx_lro_state(hwdev, en,
				      HINIC3_LRO_DEFAULT_TIME_LIMIT,
				      HINIC3_LRO_DEFAULT_COAL_PKT_SIZE);
	if (!err)
		return 0;

	dev_err(hwdev->dev, "%s lro failed\n", SET_FEATURES_OP_STR(en));
	*failed_features |= NETIF_F_LRO;

	return err;
}
558 
/* Program rx vlan (CTAG) stripping when the feature bit changed.
 * On failure the bit is recorded in *failed_features for the caller.
 */
static int hinic3_set_feature_rx_cvlan(struct net_device *netdev,
				       netdev_features_t wanted_features,
				       netdev_features_t features,
				       netdev_features_t *failed_features)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	struct hinic3_hwdev *hwdev = nic_dev->hwdev;
	bool en;
	int err;

	if (!((wanted_features ^ features) & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	en = !!(wanted_features & NETIF_F_HW_VLAN_CTAG_RX);
	err = hinic3_set_rx_vlan_offload(hwdev, en);
	if (!err)
		return 0;

	dev_err(hwdev->dev, "%s rx vlan offload failed\n",
		SET_FEATURES_OP_STR(en));
	*failed_features |= NETIF_F_HW_VLAN_CTAG_RX;

	return err;
}
582 
/* Program the hardware vlan filter when the feature bit changed.
 * On failure the bit is recorded in *failed_features for the caller.
 */
static int hinic3_set_feature_vlan_filter(struct net_device *netdev,
					  netdev_features_t wanted_features,
					  netdev_features_t features,
					  netdev_features_t *failed_features)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	struct hinic3_hwdev *hwdev = nic_dev->hwdev;
	bool en;
	int err;

	if (!((wanted_features ^ features) & NETIF_F_HW_VLAN_CTAG_FILTER))
		return 0;

	en = !!(wanted_features & NETIF_F_HW_VLAN_CTAG_FILTER);
	err = hinic3_set_vlan_filter(hwdev, en);
	if (!err)
		return 0;

	dev_err(hwdev->dev, "%s rx vlan filter failed\n",
		SET_FEATURES_OP_STR(en));
	*failed_features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	return err;
}
606 
/* Apply all feature toggles that differ between @curr and @wanted.
 * Returns 0 on success; on any failure, netdev->features reflects what
 * was actually applied (wanted minus the failed bits).
 */
static int hinic3_set_features(struct net_device *netdev,
			       netdev_features_t curr,
			       netdev_features_t wanted)
{
	netdev_features_t failed = 0;
	int err;

	/* Bitwise OR (not ||) is deliberate: every setter runs even when
	 * an earlier one failed, so 'failed' accumulates all failed bits.
	 */
	err = hinic3_set_feature_rx_csum(netdev, wanted, curr, &failed) |
	      hinic3_set_feature_tso(netdev, wanted, curr, &failed) |
	      hinic3_set_feature_lro(netdev, wanted, curr, &failed) |
	      hinic3_set_feature_rx_cvlan(netdev, wanted, curr, &failed) |
	      hinic3_set_feature_vlan_filter(netdev, wanted, curr, &failed);
	if (err) {
		netdev->features = wanted ^ failed;
		return err;
	}

	return 0;
}
626 
/* ndo_set_features: diff the requested features against the current
 * ones and apply the changes.
 */
static int hinic3_ndo_set_features(struct net_device *netdev,
				   netdev_features_t features)
{
	return hinic3_set_features(netdev, netdev->features, features);
}
632 
/* ndo_fix_features: enforce feature dependencies before the stack
 * commits them.
 */
static netdev_features_t hinic3_fix_features(struct net_device *netdev,
					     netdev_features_t features)
{
	/* If Rx checksum is disabled, then LRO should also be disabled */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	return features;
}
644 
/* ndo_features_check: drop offload bits this skb cannot use (vlan and
 * vxlan constraints).
 */
static netdev_features_t hinic3_features_check(struct sk_buff *skb,
					       struct net_device *dev,
					       netdev_features_t features)
{
	return vxlan_features_check(skb, vlan_features_check(skb, features));
}
654 
hinic3_set_hw_features(struct net_device * netdev)655 int hinic3_set_hw_features(struct net_device *netdev)
656 {
657 	netdev_features_t wanted, curr;
658 
659 	wanted = netdev->features;
660 	/* fake current features so all wanted are enabled */
661 	curr = ~wanted;
662 
663 	return hinic3_set_features(netdev, curr, wanted);
664 }
665 
hinic3_change_mtu(struct net_device * netdev,int new_mtu)666 static int hinic3_change_mtu(struct net_device *netdev, int new_mtu)
667 {
668 	int err;
669 
670 	err = hinic3_set_port_mtu(netdev, new_mtu);
671 	if (err) {
672 		netdev_err(netdev, "Failed to change port mtu to %d\n",
673 			   new_mtu);
674 		return err;
675 	}
676 
677 	netdev_dbg(netdev, "Change mtu from %u to %d\n", netdev->mtu, new_mtu);
678 	WRITE_ONCE(netdev->mtu, new_mtu);
679 
680 	return 0;
681 }
682 
hinic3_set_mac_addr(struct net_device * netdev,void * addr)683 static int hinic3_set_mac_addr(struct net_device *netdev, void *addr)
684 {
685 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
686 	struct sockaddr *saddr = addr;
687 	int err;
688 
689 	if (!is_valid_ether_addr(saddr->sa_data))
690 		return -EADDRNOTAVAIL;
691 
692 	if (ether_addr_equal(netdev->dev_addr, saddr->sa_data))
693 		return 0;
694 
695 	err = hinic3_update_mac(nic_dev->hwdev, netdev->dev_addr,
696 				saddr->sa_data, 0,
697 				hinic3_global_func_id(nic_dev->hwdev));
698 
699 	if (err)
700 		return err;
701 
702 	eth_hw_addr_set(netdev, saddr->sa_data);
703 
704 	return 0;
705 }
706 
/* ndo_vlan_rx_add_vid: register @vid with hardware and mirror it in the
 * driver's software vlan bitmap on success.
 */
static int hinic3_vlan_rx_add_vid(struct net_device *netdev,
				  __be16 proto, u16 vid)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	u16 func_id = hinic3_global_func_id(nic_dev->hwdev);
	int err;

	err = hinic3_add_vlan(nic_dev->hwdev, vid, func_id);
	if (err) {
		netdev_err(netdev, "Failed to add vlan %u\n", vid);
		return err;
	}

	set_bit(VID_COL(nic_dev, vid),
		&nic_dev->vlan_bitmap[VID_LINE(nic_dev, vid)]);
	netdev_dbg(netdev, "Add vlan %u\n", vid);

	return 0;
}
733 
/* ndo_vlan_rx_kill_vid: remove @vid from hardware and clear it from the
 * driver's software vlan bitmap on success.
 */
static int hinic3_vlan_rx_kill_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	u16 func_id = hinic3_global_func_id(nic_dev->hwdev);
	int err;

	err = hinic3_del_vlan(nic_dev->hwdev, vid, func_id);
	if (err) {
		netdev_err(netdev, "Failed to delete vlan %u\n", vid);
		return err;
	}

	clear_bit(VID_COL(nic_dev, vid),
		  &nic_dev->vlan_bitmap[VID_LINE(nic_dev, vid)]);
	netdev_dbg(netdev, "Remove vlan %u\n", vid);

	return 0;
}
759 
/* ndo_tx_timeout: dump the stuck txq's indices and, when the hardware
 * has genuinely fallen behind (producer != hw consumer), flag the
 * timeout event for deferred handling.
 */
static void hinic3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	struct hinic3_io_queue *sq;
	u16 sw_pi, hw_ci;

	sq = nic_dev->txqs[txqueue].sq;
	sw_pi = hinic3_get_sq_local_pi(sq);
	hw_ci = hinic3_get_sq_hw_ci(sq);
	netdev_dbg(netdev,
		   "txq%u: sw_pi: %u, hw_ci: %u, sw_ci: %u, napi->state: 0x%lx.\n",
		   txqueue, sw_pi, hw_ci, hinic3_get_sq_local_ci(sq),
		   nic_dev->q_params.irq_cfg[txqueue].napi.state);

	/* pi == ci means the queue already drained - nothing to recover */
	if (sw_pi != hw_ci)
		set_bit(HINIC3_EVENT_WORK_TX_TIMEOUT, &nic_dev->event_flag);
}
777 
hinic3_get_stats64(struct net_device * netdev,struct rtnl_link_stats64 * stats)778 static void hinic3_get_stats64(struct net_device *netdev,
779 			       struct rtnl_link_stats64 *stats)
780 {
781 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
782 	u64 bytes, packets, dropped, errors;
783 	struct hinic3_txq_stats *txq_stats;
784 	struct hinic3_rxq_stats *rxq_stats;
785 	struct hinic3_txq *txq;
786 	struct hinic3_rxq *rxq;
787 	unsigned int start;
788 	int i;
789 
790 	bytes = 0;
791 	packets = 0;
792 	dropped = 0;
793 	for (i = 0; i < nic_dev->max_qps; i++) {
794 		if (!nic_dev->txqs)
795 			break;
796 
797 		txq = &nic_dev->txqs[i];
798 		txq_stats = &txq->txq_stats;
799 		do {
800 			start = u64_stats_fetch_begin(&txq_stats->syncp);
801 			bytes += txq_stats->bytes;
802 			packets += txq_stats->packets;
803 			dropped += txq_stats->dropped;
804 		} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
805 	}
806 	stats->tx_packets = packets;
807 	stats->tx_bytes   = bytes;
808 	stats->tx_dropped = dropped;
809 
810 	bytes = 0;
811 	packets = 0;
812 	errors = 0;
813 	dropped = 0;
814 	for (i = 0; i < nic_dev->max_qps; i++) {
815 		if (!nic_dev->rxqs)
816 			break;
817 
818 		rxq = &nic_dev->rxqs[i];
819 		rxq_stats = &rxq->rxq_stats;
820 		do {
821 			start = u64_stats_fetch_begin(&rxq_stats->syncp);
822 			bytes += rxq_stats->bytes;
823 			packets += rxq_stats->packets;
824 			errors += rxq_stats->csum_errors +
825 				rxq_stats->other_errors;
826 			dropped += rxq_stats->dropped;
827 		} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
828 	}
829 	stats->rx_packets = packets;
830 	stats->rx_bytes   = bytes;
831 	stats->rx_errors  = errors;
832 	stats->rx_dropped = dropped;
833 }
834 
hinic3_nic_set_rx_mode(struct net_device * netdev)835 static void hinic3_nic_set_rx_mode(struct net_device *netdev)
836 {
837 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
838 
839 	if (netdev_uc_count(netdev) != nic_dev->netdev_uc_cnt ||
840 	    netdev_mc_count(netdev) != nic_dev->netdev_mc_cnt) {
841 		set_bit(HINIC3_UPDATE_MAC_FILTER, &nic_dev->flags);
842 		nic_dev->netdev_uc_cnt = netdev_uc_count(netdev);
843 		nic_dev->netdev_mc_cnt = netdev_mc_count(netdev);
844 	}
845 
846 	queue_work(nic_dev->workq, &nic_dev->rx_mode_work);
847 }
848 
/* netdev callback table registered via hinic3_set_netdev_ops(). */
static const struct net_device_ops hinic3_netdev_ops = {
	.ndo_open             = hinic3_open,
	.ndo_stop             = hinic3_close,
	.ndo_set_features     = hinic3_ndo_set_features,
	.ndo_fix_features     = hinic3_fix_features,
	.ndo_features_check   = hinic3_features_check,
	.ndo_change_mtu       = hinic3_change_mtu,
	.ndo_set_mac_address  = hinic3_set_mac_addr,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_vlan_rx_add_vid  = hinic3_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = hinic3_vlan_rx_kill_vid,
	.ndo_tx_timeout       = hinic3_tx_timeout,
	.ndo_get_stats64      = hinic3_get_stats64,
	.ndo_set_rx_mode      = hinic3_nic_set_rx_mode,
	.ndo_start_xmit       = hinic3_xmit_frame,
};
865 
/* Attach this file's net_device_ops to a freshly created netdev. */
void hinic3_set_netdev_ops(struct net_device *netdev)
{
	netdev->netdev_ops = &hinic3_netdev_ops;
}
870