xref: /linux/drivers/net/ethernet/amd/xgbe/xgbe-main.c (revision abacaf559950eec0d99d37ff6b92049409af5943)
1 // SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
2 /*
3  * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
4  * Copyright (c) 2014, Synopsys, Inc.
5  * All rights reserved
6  */
7 
8 #include <linux/module.h>
9 #include <linux/device.h>
10 #include <linux/spinlock.h>
11 #include <linux/netdevice.h>
12 #include <linux/etherdevice.h>
13 #include <linux/io.h>
14 #include <linux/notifier.h>
15 
16 #include "xgbe.h"
17 #include "xgbe-common.h"
18 
19 MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
20 MODULE_LICENSE("Dual BSD/GPL");
21 MODULE_DESCRIPTION(XGBE_DRV_DESC);
22 
/* Module parameter: netif message level bitmap; -1 selects the driver
 * default (default_msg_level below) via netif_msg_init().
 */
static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, " Network interface message level setting");

/* Message categories enabled when "debug" is left at -1 */
static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
				      NETIF_MSG_IFUP);
29 
xgbe_default_config(struct xgbe_prv_data * pdata)30 static void xgbe_default_config(struct xgbe_prv_data *pdata)
31 {
32 	DBGPR("-->xgbe_default_config\n");
33 
34 	pdata->blen = DMA_SBMR_BLEN_64;
35 	pdata->pbl = DMA_PBL_128;
36 	pdata->aal = 1;
37 	pdata->rd_osr_limit = 8;
38 	pdata->wr_osr_limit = 8;
39 	pdata->tx_sf_mode = MTL_TSF_ENABLE;
40 	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
41 	pdata->tx_osp_mode = DMA_OSP_ENABLE;
42 	pdata->rx_sf_mode = MTL_RSF_DISABLE;
43 	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
44 	pdata->pause_autoneg = 1;
45 	pdata->tx_pause = 1;
46 	pdata->rx_pause = 1;
47 	pdata->phy_speed = SPEED_UNKNOWN;
48 	pdata->power_down = 0;
49 
50 	DBGPR("<--xgbe_default_config\n");
51 }
52 
xgbe_init_all_fptrs(struct xgbe_prv_data * pdata)53 static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
54 {
55 	xgbe_init_function_ptrs_dev(&pdata->hw_if);
56 	xgbe_init_function_ptrs_phy(&pdata->phy_if);
57 	xgbe_init_function_ptrs_i2c(&pdata->i2c_if);
58 	xgbe_init_function_ptrs_desc(&pdata->desc_if);
59 
60 	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
61 }
62 
xgbe_alloc_pdata(struct device * dev)63 struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
64 {
65 	struct xgbe_prv_data *pdata;
66 	struct net_device *netdev;
67 
68 	netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
69 				   XGBE_MAX_DMA_CHANNELS);
70 	if (!netdev) {
71 		dev_err(dev, "alloc_etherdev_mq failed\n");
72 		return ERR_PTR(-ENOMEM);
73 	}
74 	SET_NETDEV_DEV(netdev, dev);
75 	pdata = netdev_priv(netdev);
76 	pdata->netdev = netdev;
77 	pdata->dev = dev;
78 
79 	spin_lock_init(&pdata->xpcs_lock);
80 	mutex_init(&pdata->rss_mutex);
81 	spin_lock_init(&pdata->tstamp_lock);
82 	mutex_init(&pdata->i2c_mutex);
83 	init_completion(&pdata->i2c_complete);
84 	init_completion(&pdata->mdio_complete);
85 
86 	pdata->msg_enable = netif_msg_init(debug, default_msg_level);
87 
88 	set_bit(XGBE_DOWN, &pdata->dev_state);
89 	set_bit(XGBE_STOPPED, &pdata->dev_state);
90 
91 	return pdata;
92 }
93 
xgbe_free_pdata(struct xgbe_prv_data * pdata)94 void xgbe_free_pdata(struct xgbe_prv_data *pdata)
95 {
96 	struct net_device *netdev = pdata->netdev;
97 
98 	free_netdev(netdev);
99 }
100 
xgbe_set_counts(struct xgbe_prv_data * pdata)101 void xgbe_set_counts(struct xgbe_prv_data *pdata)
102 {
103 	/* Set all the function pointers */
104 	xgbe_init_all_fptrs(pdata);
105 
106 	/* Populate the hardware features */
107 	xgbe_get_all_hw_features(pdata);
108 
109 	/* Set default max values if not provided */
110 	if (!pdata->tx_max_channel_count)
111 		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
112 	if (!pdata->rx_max_channel_count)
113 		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;
114 
115 	if (!pdata->tx_max_q_count)
116 		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
117 	if (!pdata->rx_max_q_count)
118 		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;
119 
120 	/* Calculate the number of Tx and Rx rings to be created
121 	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
122 	 *   the number of Tx queues to the number of Tx channels
123 	 *   enabled
124 	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
125 	 *   number of Rx queues or maximum allowed
126 	 */
127 	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
128 				     pdata->hw_feat.tx_ch_cnt);
129 	pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
130 				     pdata->tx_max_channel_count);
131 	pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
132 				     pdata->tx_max_q_count);
133 
134 	pdata->tx_q_count = pdata->tx_ring_count;
135 
136 	pdata->rx_ring_count = min_t(unsigned int, num_online_cpus(),
137 				     pdata->hw_feat.rx_ch_cnt);
138 	pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
139 				     pdata->rx_max_channel_count);
140 
141 	pdata->rx_q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt,
142 				  pdata->rx_max_q_count);
143 
144 	if (netif_msg_probe(pdata)) {
145 		dev_dbg(pdata->dev, "TX/RX DMA channel count = %u/%u\n",
146 			pdata->tx_ring_count, pdata->rx_ring_count);
147 		dev_dbg(pdata->dev, "TX/RX hardware queue count = %u/%u\n",
148 			pdata->tx_q_count, pdata->rx_q_count);
149 	}
150 }
151 
/*
 * xgbe_config_netdev - finish hardware/netdev setup and register the device
 * @pdata: driver private data (allocated by xgbe_alloc_pdata())
 *
 * Probe-path sequence: software-reset the MAC, apply the default
 * configuration, set the DMA mask from the hardware-reported width,
 * initialize the PHY layer, populate the net_device feature flags and
 * finally register the net_device.  The order is significant; each step
 * returns early on failure.
 *
 * Returns 0 on success, a negative errno otherwise.  On failure the
 * caller is responsible for freeing pdata; nothing registered here is
 * left behind (register_netdev is the last fallible step).
 */
int xgbe_config_netdev(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct device *dev = pdata->dev;
	int ret;

	/* dev_irq/xgmac_regs/mac_addr were filled in by the bus probe code */
	netdev->irq = pdata->dev_irq;
	netdev->base_addr = (unsigned long)pdata->xgmac_regs;
	eth_hw_addr_set(netdev, pdata->mac_addr);

	/* Initialize ECC timestamps */
	pdata->tx_sec_period = jiffies;
	pdata->tx_ded_period = jiffies;
	pdata->rx_sec_period = jiffies;
	pdata->rx_ded_period = jiffies;
	pdata->desc_sec_period = jiffies;
	pdata->desc_ded_period = jiffies;

	/* Issue software reset to device */
	ret = pdata->hw_if.exit(pdata);
	if (ret) {
		dev_err(dev, "software reset failed\n");
		return ret;
	}

	/* Set default configuration data */
	xgbe_default_config(pdata);

	/* Set the DMA mask */
	ret = dma_set_mask_and_coherent(dev,
					DMA_BIT_MASK(pdata->hw_feat.dma_width));
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed\n");
		return ret;
	}

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	/* Set and validate the number of descriptors for a ring
	 * (ring handling relies on power-of-2 counts, hence the
	 * compile-time checks)
	 */
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
	pdata->tx_desc_count = XGBE_TX_DESC_CNT;

	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
	pdata->rx_desc_count = XGBE_RX_DESC_CNT;

	/* Adjust the number of queues based on interrupts assigned */
	if (pdata->channel_irq_count) {
		pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
					     pdata->channel_irq_count);
		pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
					     pdata->channel_irq_count);

		if (netif_msg_probe(pdata))
			dev_dbg(pdata->dev,
				"adjusted TX/RX DMA channel count = %u/%u\n",
				pdata->tx_ring_count, pdata->rx_ring_count);
	}

	/* Initialize RSS hash key */
	netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));

	/* Enable RSS hashing on IPv4 and TCP/UDP-over-IPv4 traffic */
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

	/* Call MDIO/PHY initialization routine */
	pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround;
	ret = pdata->phy_if.phy_init(pdata);
	if (ret)
		return ret;

	/* Set device operations */
	netdev->netdev_ops = xgbe_get_netdev_ops();
	netdev->ethtool_ops = xgbe_get_ethtool_ops();
#ifdef CONFIG_AMD_XGBE_DCB
	netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
#endif

	/* Set device features */
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_RXCSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_GRO |
			      NETIF_F_HW_VLAN_CTAG_RX |
			      NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_FILTER;

	if (pdata->hw_feat.rss)
		netdev->hw_features |= NETIF_F_RXHASH;

	/* VXLAN/tunnel offloads only when the hardware supports them */
	if (pdata->hw_feat.vxn) {
		netdev->hw_enc_features = NETIF_F_SG |
					  NETIF_F_IP_CSUM |
					  NETIF_F_IPV6_CSUM |
					  NETIF_F_RXCSUM |
					  NETIF_F_TSO |
					  NETIF_F_TSO6 |
					  NETIF_F_GRO |
					  NETIF_F_GSO_UDP_TUNNEL |
					  NETIF_F_GSO_UDP_TUNNEL_CSUM;

		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
				       NETIF_F_GSO_UDP_TUNNEL_CSUM;

		netdev->udp_tunnel_nic_info = xgbe_get_udp_tunnel_info();
	}

	netdev->vlan_features |= NETIF_F_SG |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_TSO |
				 NETIF_F_TSO6;

	/* Everything advertised in hw_features starts out enabled */
	netdev->features |= netdev->hw_features;
	pdata->netdev_features = netdev->features;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->min_mtu = 0;
	netdev->max_mtu = XGMAC_GIANT_PACKET_MTU - XGBE_ETH_FRAME_HDR;

	/* Use default watchdog timeout */
	netdev->watchdog_timeo = 0;

	xgbe_init_rx_coalesce(pdata);
	xgbe_init_tx_coalesce(pdata);

	netif_carrier_off(netdev);
	ret = register_netdev(netdev);
	if (ret) {
		dev_err(dev, "net device registration failed\n");
		return ret;
	}

	if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
		xgbe_ptp_register(pdata);

	xgbe_debugfs_init(pdata);

	netif_dbg(pdata, drv, pdata->netdev, "%u Tx software queues\n",
		  pdata->tx_ring_count);
	netif_dbg(pdata, drv, pdata->netdev, "%u Rx software queues\n",
		  pdata->rx_ring_count);

	return 0;
}
304 
xgbe_deconfig_netdev(struct xgbe_prv_data * pdata)305 void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata)
306 {
307 	struct net_device *netdev = pdata->netdev;
308 
309 	xgbe_debugfs_exit(pdata);
310 
311 	if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
312 		xgbe_ptp_unregister(pdata);
313 
314 	unregister_netdev(netdev);
315 
316 	pdata->phy_if.phy_exit(pdata);
317 }
318 
xgbe_netdev_event(struct notifier_block * nb,unsigned long event,void * data)319 static int xgbe_netdev_event(struct notifier_block *nb, unsigned long event,
320 			     void *data)
321 {
322 	struct net_device *netdev = netdev_notifier_info_to_dev(data);
323 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
324 
325 	if (netdev->netdev_ops != xgbe_get_netdev_ops())
326 		goto out;
327 
328 	switch (event) {
329 	case NETDEV_CHANGENAME:
330 		xgbe_debugfs_rename(pdata);
331 		break;
332 
333 	default:
334 		break;
335 	}
336 
337 out:
338 	return NOTIFY_DONE;
339 }
340 
/* Notifier used to track netdev rename events (see xgbe_netdev_event) */
static struct notifier_block xgbe_netdev_notifier = {
	.notifier_call = xgbe_netdev_event,
};
344 
xgbe_mod_init(void)345 static int __init xgbe_mod_init(void)
346 {
347 	int ret;
348 
349 	ret = register_netdevice_notifier(&xgbe_netdev_notifier);
350 	if (ret)
351 		return ret;
352 
353 	ret = xgbe_platform_init();
354 	if (ret)
355 		goto err_platform_init;
356 
357 	ret = xgbe_pci_init();
358 	if (ret)
359 		goto err_pci_init;
360 
361 	return 0;
362 
363 err_pci_init:
364 	xgbe_platform_exit();
365 err_platform_init:
366 	unregister_netdevice_notifier(&xgbe_netdev_notifier);
367 	return ret;
368 }
369 
xgbe_mod_exit(void)370 static void __exit xgbe_mod_exit(void)
371 {
372 	xgbe_pci_exit();
373 
374 	xgbe_platform_exit();
375 
376 	unregister_netdevice_notifier(&xgbe_netdev_notifier);
377 }
378 
379 module_init(xgbe_mod_init);
380 module_exit(xgbe_mod_exit);
381