// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2024 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/phy.h>
#include "hbg_common.h"
#include "hbg_diagnose.h"
#include "hbg_err.h"
#include "hbg_ethtool.h"
#include "hbg_hw.h"
#include "hbg_irq.h"
#include "hbg_mdio.h"
#include "hbg_txrx.h"
#include "hbg_debugfs.h"

#define HBG_SUPPORT_FEATURES (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
			     NETIF_F_RXCSUM)

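/* Mask or unmask every interrupt source the driver owns by walking the
 * per-vector info array; used to quiesce the device around open/stop.
 */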
static void hbg_all_irq_enable(struct hbg_priv *priv, bool enabled)
{
	const struct hbg_irq_info *info;
	u32 i;

	for (i = 0; i < priv->vectors.info_array_len; i++) {
		info = &priv->vectors.info_array[i];
		hbg_hw_irq_enable(priv, info->mask, enabled);
	}
}

static int hbg_net_open(struct net_device *netdev)
{
	struct hbg_priv *priv = netdev_priv(netdev);
	int ret;

	ret = hbg_txrx_init(priv);
	if (ret)
		return ret;

	hbg_all_irq_enable(priv, true);
	hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);
	netif_start_queue(netdev);
	hbg_phy_start(priv);

	return 0;
}

/* This function can only be called after hbg_txrx_uninit() */
static int hbg_hw_txrx_clear(struct hbg_priv *priv)
{
	int ret;

	/* After the ring buffers have been released, do a reset to
	 * release the RX ring buffer held in the hardware FIFO
	 */
	ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET);
	if (ret)
		return ret;

	/* After the reset, the registers need to be reconfigured */
	return hbg_rebuild(priv);
}

static int hbg_net_stop(struct net_device *netdev)
{
	struct hbg_priv *priv = netdev_priv(netdev);

	hbg_phy_stop(priv);
	netif_stop_queue(netdev);
	hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);
	hbg_all_irq_enable(priv, false);
	hbg_txrx_uninit(priv);
	return hbg_hw_txrx_clear(priv);
}

static void hbg_update_promisc_mode(struct net_device *netdev, bool overflow)
{
	struct hbg_priv *priv = netdev_priv(netdev);

	/* The MAC filter is enabled only when the table has not
	 * overflowed and IFF_PROMISC is not set in netdev->flags;
	 * otherwise the filter is disabled.
	 */
	priv->filter.enabled = !(overflow || (netdev->flags & IFF_PROMISC));
	hbg_hw_set_mac_filter_enable(priv, priv->filter.enabled);
}

static void hbg_set_mac_to_mac_table(struct hbg_priv *priv,
				     u32 index, const u8 *addr)
{
	if (addr) {
		ether_addr_copy(priv->filter.mac_table[index].addr, addr);
		hbg_hw_set_uc_addr(priv, ether_addr_to_u64(addr), index);
	} else {
		eth_zero_addr(priv->filter.mac_table[index].addr);
		hbg_hw_set_uc_addr(priv, 0, index);
	}
}

static int hbg_get_index_from_mac_table(struct hbg_priv *priv,
					const u8 *addr, u32 *index)
{
	u32 i;

	for (i = 0; i < priv->filter.table_max_len; i++)
		if (ether_addr_equal(priv->filter.mac_table[i].addr, addr)) {
			*index = i;
			return 0;
		}

	return -EINVAL;
}

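/* The MAC table is a fixed-size array with no free list: an all-zero
 * entry marks a free slot. Insertion is a linear scan, so lookups and
 * adds are O(table_max_len), and -ENOSPC is returned once every slot
 * is taken. Re-adding an existing address succeeds without a new slot.
 */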
static int hbg_add_mac_to_filter(struct hbg_priv *priv, const u8 *addr)
{
	u32 index;

	/* already exists */
	if (!hbg_get_index_from_mac_table(priv, addr, &index))
		return 0;

	for (index = 0; index < priv->filter.table_max_len; index++)
		if (is_zero_ether_addr(priv->filter.mac_table[index].addr)) {
			hbg_set_mac_to_mac_table(priv, index, addr);
			return 0;
		}

	return -ENOSPC;
}

static void hbg_del_mac_from_filter(struct hbg_priv *priv, const u8 *addr)
{
	u32 index;

	/* does not exist */
	if (hbg_get_index_from_mac_table(priv, addr, &index))
		return;

	hbg_set_mac_to_mac_table(priv, index, NULL);
}

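/* hbg_uc_sync()/hbg_uc_unsync() are the callbacks handed to
 * __dev_uc_sync(): the core calls sync for each address newly added to
 * the device's unicast list and unsync for each address removed from
 * it. unsync skips the device's own address, which lives at index 0.
 */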
static int hbg_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct hbg_priv *priv = netdev_priv(netdev);

	return hbg_add_mac_to_filter(priv, addr);
}

static int hbg_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
	struct hbg_priv *priv = netdev_priv(netdev);

	if (ether_addr_equal(netdev->dev_addr, addr))
		return 0;

	hbg_del_mac_from_filter(priv, addr);
	return 0;
}

static void hbg_net_set_rx_mode(struct net_device *netdev)
{
	int ret;

	ret = __dev_uc_sync(netdev, hbg_uc_sync, hbg_uc_unsync);

	/* A non-zero ret means the unicast MAC table has overflowed */
	hbg_update_promisc_mode(netdev, !!ret);
}

static int hbg_net_set_mac_address(struct net_device *netdev, void *addr)
{
	struct hbg_priv *priv = netdev_priv(netdev);
	u8 *mac_addr;
	bool exists;
	u32 index;

	mac_addr = ((struct sockaddr *)addr)->sa_data;

	if (!is_valid_ether_addr(mac_addr))
		return -EADDRNOTAVAIL;

	/* The host MAC address always sits at index 0. If the new MAC
	 * address already exists elsewhere in the table, delete that
	 * entry and install the new address at index 0.
	 */
	exists = !hbg_get_index_from_mac_table(priv, mac_addr, &index);
	hbg_set_mac_to_mac_table(priv, 0, mac_addr);
	if (exists)
		hbg_set_mac_to_mac_table(priv, index, NULL);

	hbg_hw_set_rx_pause_mac_addr(priv, ether_addr_to_u64(mac_addr));
	dev_addr_set(netdev, mac_addr);
	return 0;
}

static int hbg_net_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct hbg_priv *priv = netdev_priv(netdev);

	if (netif_running(netdev))
		return -EBUSY;

	dev_dbg(&priv->pdev->dev,
		"change mtu from %u to %u\n", netdev->mtu, new_mtu);

	hbg_hw_set_mtu(priv, new_mtu);
	WRITE_ONCE(netdev->mtu, new_mtu);

	return 0;
}

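/* On a TX watchdog timeout, snapshot the ring and hardware FIFO state
 * into tout_log_buf and emit it as a single netdev_info() line; the
 * scnprintf() return-value accumulation keeps all writes bounded by
 * HBG_TX_TIMEOUT_BUF_LEN.
 */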
static void hbg_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct hbg_priv *priv = netdev_priv(netdev);
	struct hbg_ring *ring = &priv->tx_ring;
	char *buf = ring->tout_log_buf;
	u32 pos = 0;

	priv->stats.tx_timeout_cnt++;

	pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos,
			 "tx_timeout cnt: %llu\n", priv->stats.tx_timeout_cnt);
	pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos,
			 "ring used num: %u, fifo used num: %u\n",
			 hbg_get_queue_used_num(ring),
			 hbg_hw_get_fifo_used_num(priv, HBG_DIR_TX));
	pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos,
			 "ntc: %u, ntu: %u, irq enabled: %u\n",
			 ring->ntc, ring->ntu,
			 hbg_hw_irq_is_enabled(priv, HBG_INT_MSK_TX_B));

	netdev_info(netdev, "%s", buf);
}

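/* Fold the driver-private hardware counters on top of the generic
 * per-cpu byte/packet totals collected by dev_get_tstats64() (the
 * netdev is registered with NETDEV_PCPU_STAT_TSTATS). Note the "+="
 * accumulation: dev_get_tstats64() fills the base fields first.
 */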
static void hbg_net_get_stats(struct net_device *netdev,
			      struct rtnl_link_stats64 *stats)
{
	struct hbg_priv *priv = netdev_priv(netdev);
	struct hbg_stats *h_stats = &priv->stats;

	hbg_update_stats(priv);
	dev_get_tstats64(netdev, stats);

	/* fifo empty */
	stats->tx_fifo_errors += h_stats->tx_drop_cnt;

	stats->tx_dropped += h_stats->tx_excessive_length_drop_cnt +
			     h_stats->tx_drop_cnt;
	stats->tx_errors += h_stats->tx_add_cs_fail_cnt +
			    h_stats->tx_bufrl_err_cnt +
			    h_stats->tx_underrun_err_cnt +
			    h_stats->tx_crc_err_cnt;
	stats->rx_errors += h_stats->rx_data_error_cnt;
	stats->multicast += h_stats->rx_mc_pkt_cnt;
	stats->rx_dropped += h_stats->rx_desc_drop;
	stats->rx_length_errors += h_stats->rx_frame_very_long_err_cnt +
				   h_stats->rx_frame_long_err_cnt +
				   h_stats->rx_frame_runt_err_cnt +
				   h_stats->rx_frame_short_err_cnt +
				   h_stats->rx_lengthfield_err_cnt;
	stats->rx_frame_errors += h_stats->rx_desc_l2_err_cnt +
				  h_stats->rx_desc_l3l4_err_cnt;
	stats->rx_fifo_errors += h_stats->rx_overflow_cnt +
				 h_stats->rx_overrun_cnt;
	stats->rx_crc_errors += h_stats->rx_fcs_error_cnt;
}

static const struct net_device_ops hbg_netdev_ops = {
	.ndo_open		= hbg_net_open,
	.ndo_stop		= hbg_net_stop,
	.ndo_start_xmit		= hbg_net_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= hbg_net_set_mac_address,
	.ndo_change_mtu		= hbg_net_change_mtu,
	.ndo_tx_timeout		= hbg_net_tx_timeout,
	.ndo_set_rx_mode	= hbg_net_set_rx_mode,
	.ndo_get_stats64	= hbg_net_get_stats,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
};

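/* Periodic housekeeping worker. It handles deferred events flagged
 * from IRQ/error context (reset, NP link fix-up), pushes diagnose
 * messages, and re-arms itself every second via
 * schedule_delayed_work().
 */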
static void hbg_service_task(struct work_struct *work)
{
	struct hbg_priv *priv = container_of(work, struct hbg_priv,
					     service_task.work);

	if (test_and_clear_bit(HBG_NIC_STATE_NEED_RESET, &priv->state))
		hbg_err_reset(priv);

	if (test_and_clear_bit(HBG_NIC_STATE_NP_LINK_FAIL, &priv->state))
		hbg_fix_np_link_fail(priv);

	hbg_diagnose_message_push(priv);

	/* The statistics registers are u32. To prevent them from
	 * overflowing, the driver dumps the statistics every 30 seconds.
	 */
	if (time_after(jiffies, priv->last_update_stats_time + 30 * HZ)) {
		hbg_update_stats(priv);
		priv->last_update_stats_time = jiffies;
	}

	schedule_delayed_work(&priv->service_task,
			      msecs_to_jiffies(MSEC_PER_SEC));
}

void hbg_err_reset_task_schedule(struct hbg_priv *priv)
{
	set_bit(HBG_NIC_STATE_NEED_RESET, &priv->state);
	schedule_delayed_work(&priv->service_task, 0);
}

void hbg_np_link_fail_task_schedule(struct hbg_priv *priv)
{
	set_bit(HBG_NIC_STATE_NP_LINK_FAIL, &priv->state);
	schedule_delayed_work(&priv->service_task, 0);
}

static void hbg_cancel_delayed_work_sync(void *data)
{
	cancel_delayed_work_sync(data);
}

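/* devm_add_action_or_reset() ties cancel_delayed_work_sync() to device
 * teardown, so the service task is guaranteed to be stopped before the
 * devres-managed resources it touches are released; if registering the
 * action fails, the cancel runs immediately and an error is returned.
 */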
static int hbg_delaywork_init(struct hbg_priv *priv)
{
	INIT_DELAYED_WORK(&priv->service_task, hbg_service_task);
	schedule_delayed_work(&priv->service_task, 0);
	return devm_add_action_or_reset(&priv->pdev->dev,
					hbg_cancel_delayed_work_sync,
					&priv->service_task);
}

static int hbg_mac_filter_init(struct hbg_priv *priv)
{
	struct hbg_dev_specs *dev_specs = &priv->dev_specs;
	struct hbg_mac_filter *filter = &priv->filter;
	struct hbg_mac_table_entry *tmp_table;

	tmp_table = devm_kcalloc(&priv->pdev->dev, dev_specs->uc_mac_num,
				 sizeof(*tmp_table), GFP_KERNEL);
	if (!tmp_table)
		return -ENOMEM;

	filter->mac_table = tmp_table;
	filter->table_max_len = dev_specs->uc_mac_num;
	filter->enabled = true;

	hbg_hw_set_mac_filter_enable(priv, filter->enabled);
	return 0;
}

static void hbg_init_user_def(struct hbg_priv *priv)
{
	struct ethtool_pauseparam *pause_param = &priv->user_def.pause_param;

	priv->mac.pause_autoneg = HBG_STATUS_ENABLE;

	pause_param->autoneg = priv->mac.pause_autoneg;
	hbg_hw_get_pause_enable(priv, &pause_param->tx_pause,
				&pause_param->rx_pause);
}

static int hbg_init(struct hbg_priv *priv)
{
	int ret;

	ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_INIT);
	if (ret)
		return ret;

	ret = hbg_hw_init(priv);
	if (ret)
		return ret;

	ret = hbg_irq_init(priv);
	if (ret)
		return ret;

	ret = hbg_mdio_init(priv);
	if (ret)
		return ret;

	ret = hbg_mac_filter_init(priv);
	if (ret)
		return ret;

	ret = hbg_delaywork_init(priv);
	if (ret)
		return ret;

	hbg_debugfs_init(priv);
	hbg_init_user_def(priv);
	return 0;
}

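/* All PCI setup uses the managed (pcim_*) API, so the device is
 * disabled and BAR 0 unmapped automatically when the driver detaches;
 * no explicit PCI teardown appears anywhere in this file. DMA is
 * restricted to a 32-bit mask.
 */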
static int hbg_pci_init(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct hbg_priv *priv = netdev_priv(netdev);
	struct device *dev = &pdev->dev;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return dev_err_probe(dev, ret, "failed to enable PCI device\n");

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return dev_err_probe(dev, ret, "failed to set PCI DMA mask\n");

	ret = pcim_iomap_regions(pdev, BIT(0), dev_driver_string(dev));
	if (ret)
		return dev_err_probe(dev, ret, "failed to map PCI bar space\n");

	priv->io_base = pcim_iomap_table(pdev)[0];
	if (!priv->io_base)
		return dev_err_probe(dev, -ENOMEM, "failed to get io base\n");

	pci_set_master(pdev);
	return 0;
}

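/* Probe relies entirely on devres: devm_alloc_etherdev(),
 * devm_register_netdev() and the devm actions registered in hbg_init()
 * unwind in reverse order on probe failure or device removal, which is
 * why hbg_driver below needs no .remove callback.
 */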
static int hbg_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct hbg_priv *priv;
	int ret;

	netdev = devm_alloc_etherdev(dev, sizeof(struct hbg_priv));
	if (!netdev)
		return -ENOMEM;

	pci_set_drvdata(pdev, netdev);
	SET_NETDEV_DEV(netdev, dev);

	priv = netdev_priv(netdev);
	priv->netdev = netdev;
	priv->pdev = pdev;

	ret = hbg_pci_init(pdev);
	if (ret)
		return ret;

	ret = hbg_init(priv);
	if (ret)
		return ret;

	/* set default features */
	netdev->features |= HBG_SUPPORT_FEATURES;
	netdev->hw_features |= HBG_SUPPORT_FEATURES;
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	netdev->max_mtu = priv->dev_specs.max_mtu;
	netdev->min_mtu = priv->dev_specs.min_mtu;
	netdev->netdev_ops = &hbg_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;

	hbg_hw_set_mtu(priv, ETH_DATA_LEN);
	hbg_net_set_mac_address(priv->netdev, &priv->dev_specs.mac_addr);
	hbg_ethtool_set_ops(netdev);

	ret = devm_register_netdev(dev, netdev);
	if (ret)
		return dev_err_probe(dev, ret, "failed to register netdev\n");

	netif_carrier_off(netdev);
	return 0;
}

static const struct pci_device_id hbg_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, 0x3730), 0},
	{ }
};
MODULE_DEVICE_TABLE(pci, hbg_pci_tbl);

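/* No .err_handler is set here statically; hbg_set_pci_err_handler() in
 * hbg_module_init() is expected to install it before
 * pci_register_driver() runs.
 */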
static struct pci_driver hbg_driver = {
	.name		= "hibmcge",
	.id_table	= hbg_pci_tbl,
	.probe		= hbg_probe,
};

static int __init hbg_module_init(void)
{
	int ret;

	hbg_debugfs_register();
	hbg_set_pci_err_handler(&hbg_driver);
	ret = pci_register_driver(&hbg_driver);
	if (ret)
		hbg_debugfs_unregister();

	return ret;
}
module_init(hbg_module_init);

static void __exit hbg_module_exit(void)
{
	pci_unregister_driver(&hbg_driver);
	hbg_debugfs_unregister();
}
module_exit(hbg_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("hibmcge driver");
MODULE_VERSION("1.0");