// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
 * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
 * Copyright (c) 2014, Synopsys, Inc.
 * All rights reserved
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/log2.h>

#include "xgbe.h"
#include "xgbe-common.h"

static int xgbe_config_multi_msi(struct xgbe_prv_data *pdata)
{
	unsigned int vector_count;
	unsigned int i, j;
	int ret;

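	/* Budget one vector for each of the device, ECC, I2C and AN
	 * interrupts (XGBE_MSI_BASE_COUNT) plus one per Tx/Rx channel,
	 * i.e. the larger of the Rx and Tx ring counts.
	 */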
	vector_count = XGBE_MSI_BASE_COUNT;
	vector_count += max(pdata->rx_ring_count,
			    pdata->tx_ring_count);

	ret = pci_alloc_irq_vectors(pdata->pcidev, XGBE_MSI_MIN_COUNT,
				    vector_count, PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_info(pdata->dev, "multi MSI/MSI-X enablement failed\n");
		return ret;
	}

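	/* Interrupt handling is deferred to BH work when MSI/MSI-X
	 * vectors are in use.
	 */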
	pdata->isr_as_bh_work = 1;
	pdata->irq_count = ret;

	pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0);
	pdata->ecc_irq = pci_irq_vector(pdata->pcidev, 1);
	pdata->i2c_irq = pci_irq_vector(pdata->pcidev, 2);
	pdata->an_irq = pci_irq_vector(pdata->pcidev, 3);

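	/* Vectors beyond the base count are handed out to the DMA channels */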
	for (i = XGBE_MSI_BASE_COUNT, j = 0; i < ret; i++, j++)
		pdata->channel_irq[j] = pci_irq_vector(pdata->pcidev, i);
	pdata->channel_irq_count = j;

	pdata->per_channel_irq = 1;
	pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL;

	if (netif_msg_probe(pdata))
		dev_dbg(pdata->dev, "multi %s interrupts enabled\n",
			pdata->pcidev->msix_enabled ? "MSI-X" : "MSI");

	return 0;
}

static int xgbe_config_irqs(struct xgbe_prv_data *pdata)
{
	int ret;

	ret = xgbe_config_multi_msi(pdata);
	if (!ret)
		goto out;

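	/* Multi-vector allocation failed; fall back to a single MSI or
	 * legacy INTx interrupt shared by all sources.
	 */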
	ret = pci_alloc_irq_vectors(pdata->pcidev, 1, 1,
				    PCI_IRQ_INTX | PCI_IRQ_MSI);
	if (ret < 0) {
		dev_info(pdata->dev, "single IRQ enablement failed\n");
		return ret;
	}

	pdata->isr_as_bh_work = pdata->pcidev->msi_enabled ? 1 : 0;
	pdata->irq_count = 1;
	pdata->channel_irq_count = 1;

	pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0);
	pdata->ecc_irq = pci_irq_vector(pdata->pcidev, 0);
	pdata->i2c_irq = pci_irq_vector(pdata->pcidev, 0);
	pdata->an_irq = pci_irq_vector(pdata->pcidev, 0);

	if (netif_msg_probe(pdata))
		dev_dbg(pdata->dev, "single %s interrupt enabled\n",
			pdata->pcidev->msi_enabled ? "MSI" : "legacy");

out:
	if (netif_msg_probe(pdata)) {
		unsigned int i;

		dev_dbg(pdata->dev, " dev irq=%d\n", pdata->dev_irq);
		dev_dbg(pdata->dev, " ecc irq=%d\n", pdata->ecc_irq);
		dev_dbg(pdata->dev, " i2c irq=%d\n", pdata->i2c_irq);
		dev_dbg(pdata->dev, "  an irq=%d\n", pdata->an_irq);
		for (i = 0; i < pdata->channel_irq_count; i++)
			dev_dbg(pdata->dev, " dma%u irq=%d\n",
				i, pdata->channel_irq[i]);
	}

	return 0;
}

static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct xgbe_prv_data *pdata;
	struct device *dev = &pdev->dev;
	void __iomem * const *iomap_table;
	struct pci_dev *rdev;
	unsigned int ma_lo, ma_hi;
	unsigned int reg;
	int bar_mask;
	int ret;

	pdata = xgbe_alloc_pdata(dev);
	if (IS_ERR(pdata)) {
		ret = PTR_ERR(pdata);
		goto err_alloc;
	}

	pdata->pcidev = pdev;
	pci_set_drvdata(pdev, pdata);

	/* Get the version data */
	pdata->vdata = (struct xgbe_version_data *)id->driver_data;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "pcim_enable_device failed\n");
		goto err_pci_enable;
	}

	/* Obtain the mmio areas for the device */
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	ret = pcim_iomap_regions(pdev, bar_mask, XGBE_DRV_NAME);
	if (ret) {
		dev_err(dev, "pcim_iomap_regions failed\n");
		goto err_pci_enable;
	}

	iomap_table = pcim_iomap_table(pdev);
	if (!iomap_table) {
		dev_err(dev, "pcim_iomap_table failed\n");
		ret = -ENOMEM;
		goto err_pci_enable;
	}

	pdata->xgmac_regs = iomap_table[XGBE_XGMAC_BAR];
	if (!pdata->xgmac_regs) {
		dev_err(dev, "xgmac ioremap failed\n");
		ret = -ENOMEM;
		goto err_pci_enable;
	}
	pdata->xprop_regs = pdata->xgmac_regs + XGBE_MAC_PROP_OFFSET;
	pdata->xi2c_regs = pdata->xgmac_regs + XGBE_I2C_CTRL_OFFSET;
	if (netif_msg_probe(pdata)) {
		dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
		dev_dbg(dev, "xprop_regs = %p\n", pdata->xprop_regs);
		dev_dbg(dev, "xi2c_regs  = %p\n", pdata->xi2c_regs);
	}

	pdata->xpcs_regs = iomap_table[XGBE_XPCS_BAR];
	if (!pdata->xpcs_regs) {
		dev_err(dev, "xpcs ioremap failed\n");
		ret = -ENOMEM;
		goto err_pci_enable;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xpcs_regs  = %p\n", pdata->xpcs_regs);

	/* Set the PCS indirect addressing definition registers */
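	/* The window registers differ per platform; identify the SoC from
	 * the root complex (bus 0, device 0, function 0) vendor/device ID.
	 */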
	rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
	if (rdev &&
	    (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
	} else if (rdev && (rdev->vendor == PCI_VENDOR_ID_AMD) &&
		   (rdev->device == 0x14b5)) {
		pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;

		/* Yellow Carp devices do not need cdr workaround */
		pdata->vdata->an_cdr_workaround = 0;

		/* Yellow Carp devices do not need rrc */
		pdata->vdata->enable_rrc = 0;
	} else {
		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	}
	pci_dev_put(rdev);

	/* Configure the PCS indirect addressing support */
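	/* The OFFSET field is in units of 64 bytes and the SIZE field
	 * encodes a power-of-two window size (128 << SIZE bytes).
	 */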
	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
	if (netif_msg_probe(pdata)) {
		dev_dbg(dev, "xpcs window def  = %#010x\n",
			pdata->xpcs_window_def_reg);
		dev_dbg(dev, "xpcs window sel  = %#010x\n",
			pdata->xpcs_window_sel_reg);
		dev_dbg(dev, "xpcs window      = %#010x\n",
			pdata->xpcs_window);
		dev_dbg(dev, "xpcs window size = %#010x\n",
			pdata->xpcs_window_size);
		dev_dbg(dev, "xpcs window mask = %#010x\n",
			pdata->xpcs_window_mask);
	}

	pci_set_master(pdev);

	/* Enable all interrupts in the hardware */
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	ma_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	ma_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr[0] = ma_lo & 0xff;
	pdata->mac_addr[1] = (ma_lo >> 8) & 0xff;
	pdata->mac_addr[2] = (ma_lo >> 16) & 0xff;
	pdata->mac_addr[3] = (ma_lo >> 24) & 0xff;
	pdata->mac_addr[4] = ma_hi & 0xff;
	pdata->mac_addr[5] = (ma_hi >> 8) & 0xff;
	if (!XP_GET_BITS(ma_hi, XP_MAC_ADDR_HI, VALID) ||
	    !is_valid_ether_addr(pdata->mac_addr)) {
		dev_err(dev, "invalid mac address\n");
		ret = -EINVAL;
		goto err_pci_enable;
	}

	/* Clock settings */
	pdata->sysclk_rate = XGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = XGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->arcr = XGBE_DMA_PCI_ARCR;
	pdata->awcr = XGBE_DMA_PCI_AWCR;
	pdata->awarcr = XGBE_DMA_PCI_AWARCR;

	/* Read the port property registers */
	pdata->pp0 = XP_IOREAD(pdata, XP_PROP_0);
	pdata->pp1 = XP_IOREAD(pdata, XP_PROP_1);
	pdata->pp2 = XP_IOREAD(pdata, XP_PROP_2);
	pdata->pp3 = XP_IOREAD(pdata, XP_PROP_3);
	pdata->pp4 = XP_IOREAD(pdata, XP_PROP_4);
	if (netif_msg_probe(pdata)) {
		dev_dbg(dev, "port property 0 = %#010x\n", pdata->pp0);
		dev_dbg(dev, "port property 1 = %#010x\n", pdata->pp1);
		dev_dbg(dev, "port property 2 = %#010x\n", pdata->pp2);
		dev_dbg(dev, "port property 3 = %#010x\n", pdata->pp3);
		dev_dbg(dev, "port property 4 = %#010x\n", pdata->pp4);
	}

	/* Set the maximum channels and queues */
	pdata->tx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
						  MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
						  MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
					    MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
					    MAX_RX_QUEUES);
	if (netif_msg_probe(pdata)) {
		dev_dbg(dev, "max tx/rx channel count = %u/%u\n",
			pdata->tx_max_channel_count,
			pdata->rx_max_channel_count);
		dev_dbg(dev, "max tx/rx hw queue count = %u/%u\n",
			pdata->tx_max_q_count, pdata->rx_max_q_count);
	}

	/* Set the hardware channel and queue counts */
	xgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
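	/* The port property registers report FIFO sizes in units of 16KB;
	 * clamp the result to the per-version maximums.
	 */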
	pdata->tx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2,
					      TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size,
				      pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2,
					      RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size,
				      pdata->vdata->rx_max_fifo_size);
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "max tx/rx max fifo size = %u/%u\n",
			pdata->tx_max_fifo_size, pdata->rx_max_fifo_size);

	/* Configure interrupt support */
	ret = xgbe_config_irqs(pdata);
	if (ret)
		goto err_pci_enable;

	/* Configure the netdev resource */
	ret = xgbe_config_netdev(pdata);
	if (ret)
		goto err_irq_vectors;

	netdev_notice(pdata->netdev, "net device enabled\n");

	return 0;

err_irq_vectors:
	pci_free_irq_vectors(pdata->pcidev);

err_pci_enable:
	xgbe_free_pdata(pdata);

err_alloc:
	dev_notice(dev, "net device not enabled\n");

	return ret;
}

static void xgbe_pci_remove(struct pci_dev *pdev)
{
	struct xgbe_prv_data *pdata = pci_get_drvdata(pdev);

	xgbe_deconfig_netdev(pdata);

	pci_free_irq_vectors(pdata->pcidev);

	/* Disable all interrupts in the hardware */
	XP_IOWRITE(pdata, XP_INT_EN, 0x0);

	xgbe_free_pdata(pdata);
}

static int __maybe_unused xgbe_pci_suspend(struct device *dev)
{
	struct xgbe_prv_data *pdata = dev_get_drvdata(dev);
	struct net_device *netdev = pdata->netdev;
	int ret = 0;

	if (netif_running(netdev))
		ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);

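	/* Place the PCS in low-power mode while suspended */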
	pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
	pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);

	return ret;
}

static int __maybe_unused xgbe_pci_resume(struct device *dev)
{
	struct xgbe_prv_data *pdata = dev_get_drvdata(dev);
	struct net_device *netdev = pdata->netdev;
	int ret = 0;

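	/* Re-enable all interrupts in the hardware */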
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

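	/* Bring the PCS out of low-power mode */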
	pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);

	if (netif_running(netdev)) {
		ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);

		/* Schedule a restart in case the link or phy state changed
		 * while we were powered down.
		 */
		schedule_work(&pdata->restart_work);
	}

	return ret;
}

static struct xgbe_version_data xgbe_v2a = {
	.init_function_ptrs_phy_impl	= xgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= XGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 229376,
	.rx_max_fifo_size		= 229376,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.irq_reissue_support		= 1,
	.tx_desc_prefetch		= 5,
	.rx_desc_prefetch		= 5,
	.an_cdr_workaround		= 1,
	.enable_rrc			= 1,
};

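/* xgbe_v2b differs from xgbe_v2a only in the maximum Tx/Rx FIFO sizes */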
static struct xgbe_version_data xgbe_v2b = {
	.init_function_ptrs_phy_impl	= xgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= XGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 65536,
	.rx_max_fifo_size		= 65536,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.irq_reissue_support		= 1,
	.tx_desc_prefetch		= 5,
	.rx_desc_prefetch		= 5,
	.an_cdr_workaround		= 1,
	.enable_rrc			= 1,
};

static const struct pci_device_id xgbe_pci_table[] = {
	{ PCI_VDEVICE(AMD, 0x1458),
	  .driver_data = (kernel_ulong_t)&xgbe_v2a },
	{ PCI_VDEVICE(AMD, 0x1459),
	  .driver_data = (kernel_ulong_t)&xgbe_v2b },
	/* Last entry must be zero */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, xgbe_pci_table);

static SIMPLE_DEV_PM_OPS(xgbe_pci_pm_ops, xgbe_pci_suspend, xgbe_pci_resume);

static struct pci_driver xgbe_driver = {
	.name = XGBE_DRV_NAME,
	.id_table = xgbe_pci_table,
	.probe = xgbe_pci_probe,
	.remove = xgbe_pci_remove,
	.driver = {
		.pm = &xgbe_pci_pm_ops,
	}
};

int xgbe_pci_init(void)
{
	return pci_register_driver(&xgbe_driver);
}

void xgbe_pci_exit(void)
{
	pci_unregister_driver(&xgbe_driver);
}