xref: /linux/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c (revision 8a5f956a9fb7d74fff681145082acfad5afa6bb8)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
3 
4 #include <linux/etherdevice.h>
5 #include <linux/netdevice.h>
6 
7 #include "hinic3_hwif.h"
8 #include "hinic3_nic_cfg.h"
9 #include "hinic3_nic_dev.h"
10 #include "hinic3_nic_io.h"
11 #include "hinic3_rss.h"
12 #include "hinic3_rx.h"
13 #include "hinic3_tx.h"
14 
15 /* try to modify the number of irq to the target number,
16  * and return the actual number of irq.
17  */
18 static u16 hinic3_qp_irq_change(struct net_device *netdev,
19 				u16 dst_num_qp_irq)
20 {
21 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
22 	struct msix_entry *qps_msix_entries;
23 	u16 resp_irq_num, irq_num_gap, i;
24 	u16 idx;
25 	int err;
26 
27 	qps_msix_entries = nic_dev->qps_msix_entries;
28 	if (dst_num_qp_irq > nic_dev->num_qp_irq) {
29 		irq_num_gap = dst_num_qp_irq - nic_dev->num_qp_irq;
30 		err = hinic3_alloc_irqs(nic_dev->hwdev, irq_num_gap,
31 					&qps_msix_entries[nic_dev->num_qp_irq],
32 					&resp_irq_num);
33 		if (err) {
34 			netdev_err(netdev, "Failed to alloc irqs\n");
35 			return nic_dev->num_qp_irq;
36 		}
37 
38 		nic_dev->num_qp_irq += resp_irq_num;
39 	} else if (dst_num_qp_irq < nic_dev->num_qp_irq) {
40 		irq_num_gap = nic_dev->num_qp_irq - dst_num_qp_irq;
41 		for (i = 0; i < irq_num_gap; i++) {
42 			idx = (nic_dev->num_qp_irq - i) - 1;
43 			hinic3_free_irq(nic_dev->hwdev,
44 					qps_msix_entries[idx].vector);
45 			qps_msix_entries[idx].vector = 0;
46 			qps_msix_entries[idx].entry = 0;
47 		}
48 		nic_dev->num_qp_irq = dst_num_qp_irq;
49 	}
50 
51 	return nic_dev->num_qp_irq;
52 }
53 
54 static void hinic3_config_num_qps(struct net_device *netdev,
55 				  struct hinic3_dyna_txrxq_params *q_params)
56 {
57 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
58 	u16 alloc_num_irq, cur_num_irq;
59 	u16 dst_num_irq;
60 
61 	if (!test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags))
62 		q_params->num_qps = 1;
63 
64 	if (nic_dev->num_qp_irq >= q_params->num_qps)
65 		goto out;
66 
67 	cur_num_irq = nic_dev->num_qp_irq;
68 
69 	alloc_num_irq = hinic3_qp_irq_change(netdev, q_params->num_qps);
70 	if (alloc_num_irq < q_params->num_qps) {
71 		q_params->num_qps = alloc_num_irq;
72 		netdev_warn(netdev, "Can not get enough irqs, adjust num_qps to %u\n",
73 			    q_params->num_qps);
74 
75 		/* The current irq may be in use, we must keep it */
76 		dst_num_irq = max_t(u16, cur_num_irq, q_params->num_qps);
77 		hinic3_qp_irq_change(netdev, dst_num_irq);
78 	}
79 
80 out:
81 	netdev_dbg(netdev, "No need to change irqs, num_qps is %u\n",
82 		   q_params->num_qps);
83 }
84 
85 static int hinic3_setup_num_qps(struct net_device *netdev)
86 {
87 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
88 
89 	nic_dev->num_qp_irq = 0;
90 
91 	nic_dev->qps_msix_entries = kcalloc(nic_dev->max_qps,
92 					    sizeof(struct msix_entry),
93 					    GFP_KERNEL);
94 	if (!nic_dev->qps_msix_entries)
95 		return -ENOMEM;
96 
97 	hinic3_config_num_qps(netdev, &nic_dev->q_params);
98 
99 	return 0;
100 }
101 
102 static void hinic3_destroy_num_qps(struct net_device *netdev)
103 {
104 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
105 	u16 i;
106 
107 	for (i = 0; i < nic_dev->num_qp_irq; i++)
108 		hinic3_free_irq(nic_dev->hwdev,
109 				nic_dev->qps_msix_entries[i].vector);
110 
111 	kfree(nic_dev->qps_msix_entries);
112 }
113 
/* Allocate the per-queue resource arrays (tx, rx, irq config) and the
 * tx/rx queue resources themselves for q_params->num_qps queue pairs.
 * On failure everything allocated so far is unwound in reverse order
 * and the freed array pointers are reset to NULL.
 * Return: 0 on success, -ENOMEM or a callee's error code on failure.
 */
static int hinic3_alloc_txrxq_resources(struct net_device *netdev,
					struct hinic3_dyna_txrxq_params *q_params)
{
	int err;

	q_params->txqs_res = kcalloc(q_params->num_qps,
				     sizeof(*q_params->txqs_res), GFP_KERNEL);
	if (!q_params->txqs_res)
		return -ENOMEM;

	q_params->rxqs_res = kcalloc(q_params->num_qps,
				     sizeof(*q_params->rxqs_res), GFP_KERNEL);
	if (!q_params->rxqs_res) {
		err = -ENOMEM;
		goto err_free_txqs_res_arr;
	}

	q_params->irq_cfg = kcalloc(q_params->num_qps,
				    sizeof(*q_params->irq_cfg), GFP_KERNEL);
	if (!q_params->irq_cfg) {
		err = -ENOMEM;
		goto err_free_rxqs_res_arr;
	}

	err = hinic3_alloc_txqs_res(netdev, q_params->num_qps,
				    q_params->sq_depth, q_params->txqs_res);
	if (err) {
		netdev_err(netdev, "Failed to alloc txqs resource\n");
		goto err_free_irq_cfg;
	}

	err = hinic3_alloc_rxqs_res(netdev, q_params->num_qps,
				    q_params->rq_depth, q_params->rxqs_res);
	if (err) {
		netdev_err(netdev, "Failed to alloc rxqs resource\n");
		goto err_free_txqs_res;
	}

	return 0;

	/* Unwind in strict reverse order of allocation. */
err_free_txqs_res:
	hinic3_free_txqs_res(netdev, q_params->num_qps, q_params->sq_depth,
			     q_params->txqs_res);
err_free_irq_cfg:
	kfree(q_params->irq_cfg);
	q_params->irq_cfg = NULL;
err_free_rxqs_res_arr:
	kfree(q_params->rxqs_res);
	q_params->rxqs_res = NULL;
err_free_txqs_res_arr:
	kfree(q_params->txqs_res);
	q_params->txqs_res = NULL;

	return err;
}
169 
/* Free everything allocated by hinic3_alloc_txrxq_resources(): first
 * the rx/tx queue resources, then the resource arrays. Pointers are
 * reset to NULL so a stale q_params can be detected.
 */
static void hinic3_free_txrxq_resources(struct net_device *netdev,
					struct hinic3_dyna_txrxq_params *q_params)
{
	hinic3_free_rxqs_res(netdev, q_params->num_qps, q_params->rq_depth,
			     q_params->rxqs_res);
	hinic3_free_txqs_res(netdev, q_params->num_qps, q_params->sq_depth,
			     q_params->txqs_res);

	kfree(q_params->irq_cfg);
	q_params->irq_cfg = NULL;

	kfree(q_params->rxqs_res);
	q_params->rxqs_res = NULL;

	kfree(q_params->txqs_res);
	q_params->txqs_res = NULL;
}
187 
188 static int hinic3_configure_txrxqs(struct net_device *netdev,
189 				   struct hinic3_dyna_txrxq_params *q_params)
190 {
191 	int err;
192 
193 	err = hinic3_configure_txqs(netdev, q_params->num_qps,
194 				    q_params->sq_depth, q_params->txqs_res);
195 	if (err) {
196 		netdev_err(netdev, "Failed to configure txqs\n");
197 		return err;
198 	}
199 
200 	err = hinic3_configure_rxqs(netdev, q_params->num_qps,
201 				    q_params->rq_depth, q_params->rxqs_res);
202 	if (err) {
203 		netdev_err(netdev, "Failed to configure rxqs\n");
204 		return err;
205 	}
206 
207 	return 0;
208 }
209 
210 static int hinic3_configure(struct net_device *netdev)
211 {
212 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
213 	int err;
214 
215 	netdev->min_mtu = HINIC3_MIN_MTU_SIZE;
216 	netdev->max_mtu = HINIC3_MAX_JUMBO_FRAME_SIZE;
217 	err = hinic3_set_port_mtu(netdev, netdev->mtu);
218 	if (err) {
219 		netdev_err(netdev, "Failed to set mtu\n");
220 		return err;
221 	}
222 
223 	/* Ensure DCB is disabled */
224 	hinic3_sync_dcb_state(nic_dev->hwdev, 1, 0);
225 
226 	if (test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags)) {
227 		err = hinic3_rss_init(netdev);
228 		if (err) {
229 			netdev_err(netdev, "Failed to init rss\n");
230 			return err;
231 		}
232 	}
233 
234 	return 0;
235 }
236 
237 static void hinic3_remove_configure(struct net_device *netdev)
238 {
239 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
240 
241 	if (test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags))
242 		hinic3_rss_uninit(netdev);
243 }
244 
245 static int hinic3_alloc_channel_resources(struct net_device *netdev,
246 					  struct hinic3_dyna_qp_params *qp_params,
247 					  struct hinic3_dyna_txrxq_params *trxq_params)
248 {
249 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
250 	int err;
251 
252 	qp_params->num_qps = trxq_params->num_qps;
253 	qp_params->sq_depth = trxq_params->sq_depth;
254 	qp_params->rq_depth = trxq_params->rq_depth;
255 
256 	err = hinic3_alloc_qps(nic_dev, qp_params);
257 	if (err) {
258 		netdev_err(netdev, "Failed to alloc qps\n");
259 		return err;
260 	}
261 
262 	err = hinic3_alloc_txrxq_resources(netdev, trxq_params);
263 	if (err) {
264 		netdev_err(netdev, "Failed to alloc txrxq resources\n");
265 		hinic3_free_qps(nic_dev, qp_params);
266 		return err;
267 	}
268 
269 	return 0;
270 }
271 
/* Counterpart of hinic3_alloc_channel_resources(): release the txrxq
 * resource arrays, then the queue pairs themselves.
 */
static void hinic3_free_channel_resources(struct net_device *netdev,
					  struct hinic3_dyna_qp_params *qp_params,
					  struct hinic3_dyna_txrxq_params *trxq_params)
{
	hinic3_free_txrxq_resources(netdev, trxq_params);
	hinic3_free_qps(netdev_priv(netdev), qp_params);
}
281 
282 static int hinic3_open_channel(struct net_device *netdev)
283 {
284 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
285 	int err;
286 
287 	err = hinic3_init_qp_ctxts(nic_dev);
288 	if (err) {
289 		netdev_err(netdev, "Failed to init qps\n");
290 		return err;
291 	}
292 
293 	err = hinic3_configure_txrxqs(netdev, &nic_dev->q_params);
294 	if (err) {
295 		netdev_err(netdev, "Failed to configure txrxqs\n");
296 		goto err_free_qp_ctxts;
297 	}
298 
299 	err = hinic3_qps_irq_init(netdev);
300 	if (err) {
301 		netdev_err(netdev, "Failed to init txrxq irq\n");
302 		goto err_free_qp_ctxts;
303 	}
304 
305 	err = hinic3_configure(netdev);
306 	if (err) {
307 		netdev_err(netdev, "Failed to init txrxq irq\n");
308 		goto err_uninit_qps_irq;
309 	}
310 
311 	return 0;
312 
313 err_uninit_qps_irq:
314 	hinic3_qps_irq_uninit(netdev);
315 err_free_qp_ctxts:
316 	hinic3_free_qp_ctxts(nic_dev);
317 
318 	return err;
319 }
320 
/* Tear down the data channel in reverse order of hinic3_open_channel():
 * port configuration, queue irqs, then the qp contexts in HW.
 */
static void hinic3_close_channel(struct net_device *netdev)
{
	hinic3_remove_configure(netdev);
	hinic3_qps_irq_uninit(netdev);
	hinic3_free_qp_ctxts(netdev_priv(netdev));
}
329 
/* Enable the vport in hardware and start transmission. Carrier is
 * turned on only when the link is already reported up (otherwise the
 * link-event path is expected to do it later).
 * Return: 0 on success, negative error code otherwise.
 */
static int hinic3_vport_up(struct net_device *netdev)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	bool link_status_up;
	u16 glb_func_id;
	int err;

	glb_func_id = hinic3_global_func_id(nic_dev->hwdev);
	err = hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, true);
	if (err) {
		netdev_err(netdev, "Failed to enable vport\n");
		goto err_flush_qps_res;
	}

	/* NOTE(review): if setting the queue counts fails, the vport stays
	 * enabled in HW while qps are flushed — confirm whether it should
	 * be disabled again on this path.
	 */
	err = netif_set_real_num_queues(netdev, nic_dev->q_params.num_qps,
					nic_dev->q_params.num_qps);
	if (err) {
		netdev_err(netdev, "Failed to set real number of queues\n");
		goto err_flush_qps_res;
	}
	netif_tx_start_all_queues(netdev);

	/* Best effort: ignore a query failure and leave carrier off. */
	err = hinic3_get_link_status(nic_dev->hwdev, &link_status_up);
	if (!err && link_status_up)
		netif_carrier_on(netdev);

	return 0;

err_flush_qps_res:
	hinic3_flush_qps_res(nic_dev->hwdev);
	/* wait to guarantee that no packets will be sent to host */
	msleep(100);

	return err;
}
365 
/* Stop transmission and disable the vport in hardware, then drain the
 * queues. The ordering (stop tx first, disable vport, flush, delay,
 * flush qp resources) guarantees no packet is in flight afterwards.
 */
static void hinic3_vport_down(struct net_device *netdev)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	u16 glb_func_id;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	glb_func_id = hinic3_global_func_id(nic_dev->hwdev);
	hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, false);

	hinic3_flush_txqs(netdev);
	/* wait to guarantee that no packets will be sent to host */
	msleep(100);
	hinic3_flush_qps_res(nic_dev->hwdev);
}
382 
/* ndo_open callback: build the full datapath — nicio resources, irqs,
 * channel resources, qps, channel, vport — unwinding every completed
 * stage in reverse order on failure.
 * Return: 0 on success, negative error code otherwise.
 */
static int hinic3_open(struct net_device *netdev)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	struct hinic3_dyna_qp_params qp_params;
	int err;

	err = hinic3_init_nicio_res(nic_dev);
	if (err) {
		netdev_err(netdev, "Failed to init nicio resources\n");
		return err;
	}

	err = hinic3_setup_num_qps(netdev);
	if (err) {
		netdev_err(netdev, "Failed to setup num_qps\n");
		goto err_free_nicio_res;
	}

	/* qp_params is filled in by the alloc call below. */
	err = hinic3_alloc_channel_resources(netdev, &qp_params,
					     &nic_dev->q_params);
	if (err)
		goto err_destroy_num_qps;

	hinic3_init_qps(nic_dev, &qp_params);

	err = hinic3_open_channel(netdev);
	if (err)
		goto err_uninit_qps;

	err = hinic3_vport_up(netdev);
	if (err)
		goto err_close_channel;

	return 0;

err_close_channel:
	hinic3_close_channel(netdev);
err_uninit_qps:
	hinic3_uninit_qps(nic_dev, &qp_params);
	hinic3_free_channel_resources(netdev, &qp_params, &nic_dev->q_params);
err_destroy_num_qps:
	hinic3_destroy_num_qps(netdev);
err_free_nicio_res:
	hinic3_free_nicio_res(nic_dev);

	return err;
}
430 
/* ndo_stop callback: stop the vport and release the datapath.
 * qp_params starts uninitialized here; hinic3_uninit_qps() presumably
 * fills it with the queue memory to be released by the subsequent
 * free call — confirm against hinic3_nic_io.
 * Return: always 0.
 */
static int hinic3_close(struct net_device *netdev)
{
	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
	struct hinic3_dyna_qp_params qp_params;

	hinic3_vport_down(netdev);
	hinic3_close_channel(netdev);
	hinic3_uninit_qps(nic_dev, &qp_params);
	hinic3_free_channel_resources(netdev, &qp_params, &nic_dev->q_params);

	return 0;
}
443 
444 static int hinic3_change_mtu(struct net_device *netdev, int new_mtu)
445 {
446 	int err;
447 
448 	err = hinic3_set_port_mtu(netdev, new_mtu);
449 	if (err) {
450 		netdev_err(netdev, "Failed to change port mtu to %d\n",
451 			   new_mtu);
452 		return err;
453 	}
454 
455 	netdev_dbg(netdev, "Change mtu from %u to %d\n", netdev->mtu, new_mtu);
456 	WRITE_ONCE(netdev->mtu, new_mtu);
457 
458 	return 0;
459 }
460 
461 static int hinic3_set_mac_addr(struct net_device *netdev, void *addr)
462 {
463 	struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
464 	struct sockaddr *saddr = addr;
465 	int err;
466 
467 	if (!is_valid_ether_addr(saddr->sa_data))
468 		return -EADDRNOTAVAIL;
469 
470 	if (ether_addr_equal(netdev->dev_addr, saddr->sa_data))
471 		return 0;
472 
473 	err = hinic3_update_mac(nic_dev->hwdev, netdev->dev_addr,
474 				saddr->sa_data, 0,
475 				hinic3_global_func_id(nic_dev->hwdev));
476 
477 	if (err)
478 		return err;
479 
480 	eth_hw_addr_set(netdev, saddr->sa_data);
481 
482 	return 0;
483 }
484 
/* Netdev callbacks implemented by this driver. */
static const struct net_device_ops hinic3_netdev_ops = {
	.ndo_open             = hinic3_open,
	.ndo_stop             = hinic3_close,
	.ndo_change_mtu       = hinic3_change_mtu,
	.ndo_set_mac_address  = hinic3_set_mac_addr,
	.ndo_start_xmit       = hinic3_xmit_frame,
};
492 
/* Attach this driver's net_device_ops to @netdev. */
void hinic3_set_netdev_ops(struct net_device *netdev)
{
	netdev->netdev_ops = &hinic3_netdev_ops;
}
497