xref: /linux/drivers/net/ethernet/amd/xgbe/xgbe-pci.c (revision 22c55fb9eb92395d999b8404d73e58540d11bdd8)
// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
 * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
 * Copyright (c) 2014, Synopsys, Inc.
 * All rights reserved
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/log2.h>
#include "xgbe-smn.h"

#include "xgbe.h"
#include "xgbe-common.h"

static int xgbe_config_multi_msi(struct xgbe_prv_data *pdata)
{
	unsigned int vector_count;
	unsigned int i, j;
	int ret;

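	/* One vector each for the device, ECC, I2C and AN interrupts
	 * (XGBE_MSI_BASE_COUNT), plus one vector per DMA channel (the
	 * larger of the Rx and Tx ring counts).
	 */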
	vector_count = XGBE_MSI_BASE_COUNT;
	vector_count += max(pdata->rx_ring_count,
			    pdata->tx_ring_count);

	ret = pci_alloc_irq_vectors(pdata->pcidev, XGBE_MSI_MIN_COUNT,
				    vector_count, PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_info(pdata->dev, "multi MSI/MSI-X enablement failed\n");
		return ret;
	}

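	/* With MSI/MSI-X, defer interrupt handling to bottom-half work */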
	pdata->isr_as_bh_work = 1;
	pdata->irq_count = ret;

	pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0);
	pdata->ecc_irq = pci_irq_vector(pdata->pcidev, 1);
	pdata->i2c_irq = pci_irq_vector(pdata->pcidev, 2);
	pdata->an_irq = pci_irq_vector(pdata->pcidev, 3);

	for (i = XGBE_MSI_BASE_COUNT, j = 0; i < ret; i++, j++)
		pdata->channel_irq[j] = pci_irq_vector(pdata->pcidev, i);
	pdata->channel_irq_count = j;

	pdata->per_channel_irq = 1;
	pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL;

	if (netif_msg_probe(pdata))
		dev_dbg(pdata->dev, "multi %s interrupts enabled\n",
			pdata->pcidev->msix_enabled ? "MSI-X" : "MSI");

	return 0;
}

static int xgbe_config_irqs(struct xgbe_prv_data *pdata)
{
	int ret;

	ret = xgbe_config_multi_msi(pdata);
	if (!ret)
		goto out;

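	/* Fall back to a single shared vector (MSI or INTx); all interrupt
	 * sources are then serviced through vector 0.
	 */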
	ret = pci_alloc_irq_vectors(pdata->pcidev, 1, 1,
				    PCI_IRQ_INTX | PCI_IRQ_MSI);
	if (ret < 0) {
		dev_info(pdata->dev, "single IRQ enablement failed\n");
		return ret;
	}

	pdata->isr_as_bh_work = pdata->pcidev->msi_enabled ? 1 : 0;
	pdata->irq_count = 1;
	pdata->channel_irq_count = 1;

	pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0);
	pdata->ecc_irq = pci_irq_vector(pdata->pcidev, 0);
	pdata->i2c_irq = pci_irq_vector(pdata->pcidev, 0);
	pdata->an_irq = pci_irq_vector(pdata->pcidev, 0);

	if (netif_msg_probe(pdata))
		dev_dbg(pdata->dev, "single %s interrupt enabled\n",
			pdata->pcidev->msi_enabled ? "MSI" : "legacy");

out:
	if (netif_msg_probe(pdata)) {
		unsigned int i;

		dev_dbg(pdata->dev, " dev irq=%d\n", pdata->dev_irq);
		dev_dbg(pdata->dev, " ecc irq=%d\n", pdata->ecc_irq);
		dev_dbg(pdata->dev, " i2c irq=%d\n", pdata->i2c_irq);
		dev_dbg(pdata->dev, "  an irq=%d\n", pdata->an_irq);
		for (i = 0; i < pdata->channel_irq_count; i++)
			dev_dbg(pdata->dev, " dma%u irq=%d\n",
				i, pdata->channel_irq[i]);
	}

	return 0;
}

static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap_table;
	unsigned int port_addr_size, reg;
	struct device *dev = &pdev->dev;
	struct xgbe_prv_data *pdata;
	unsigned int ma_lo, ma_hi;
	struct pci_dev *rdev;
	int bar_mask, ret;
	u32 address;

	pdata = xgbe_alloc_pdata(dev);
	if (IS_ERR(pdata)) {
		ret = PTR_ERR(pdata);
		goto err_alloc;
	}

	pdata->pcidev = pdev;
	pci_set_drvdata(pdev, pdata);

	/* Get the version data */
	pdata->vdata = (struct xgbe_version_data *)id->driver_data;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "pcim_enable_device failed\n");
		goto err_pci_enable;
	}

	/* Obtain the mmio areas for the device */
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	ret = pcim_iomap_regions(pdev, bar_mask, XGBE_DRV_NAME);
	if (ret) {
		dev_err(dev, "pcim_iomap_regions failed\n");
		goto err_pci_enable;
	}

	iomap_table = pcim_iomap_table(pdev);
	if (!iomap_table) {
		dev_err(dev, "pcim_iomap_table failed\n");
		ret = -ENOMEM;
		goto err_pci_enable;
	}

	pdata->xgmac_regs = iomap_table[XGBE_XGMAC_BAR];
	if (!pdata->xgmac_regs) {
		dev_err(dev, "xgmac ioremap failed\n");
		ret = -ENOMEM;
		goto err_pci_enable;
	}
	pdata->xprop_regs = pdata->xgmac_regs + XGBE_MAC_PROP_OFFSET;
	pdata->xi2c_regs = pdata->xgmac_regs + XGBE_I2C_CTRL_OFFSET;
	if (netif_msg_probe(pdata)) {
		dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
		dev_dbg(dev, "xprop_regs = %p\n", pdata->xprop_regs);
		dev_dbg(dev, "xi2c_regs  = %p\n", pdata->xi2c_regs);
	}

	pdata->xpcs_regs = iomap_table[XGBE_XPCS_BAR];
	if (!pdata->xpcs_regs) {
		dev_err(dev, "xpcs ioremap failed\n");
		ret = -ENOMEM;
		goto err_pci_enable;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xpcs_regs  = %p\n", pdata->xpcs_regs);

	/* Set the PCS indirect addressing definition registers */
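	/* The root complex device ID identifies the SoC and selects the
	 * matching PCS window definition/select registers below.
	 */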
	rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
	if (rdev && rdev->vendor == PCI_VENDOR_ID_AMD) {
		switch (rdev->device) {
		case XGBE_RV_PCI_DEVICE_ID:
			pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
			pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
			break;
		case XGBE_YC_PCI_DEVICE_ID:
			pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
			pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;

			/* Yellow Carp devices do not need cdr workaround */
			pdata->vdata->an_cdr_workaround = 0;

			/* Yellow Carp devices do not need rrc */
			pdata->vdata->enable_rrc = 0;
			break;
		case XGBE_RN_PCI_DEVICE_ID:
			pdata->xpcs_window_def_reg = PCS_V3_RN_WINDOW_DEF;
			pdata->xpcs_window_sel_reg = PCS_V3_RN_WINDOW_SELECT;
			break;
		default:
			pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
			pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
			break;
		}
	} else {
		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	}
	pci_dev_put(rdev);

	/* Configure the PCS indirect addressing support */
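	/* V3 hardware exposes the PCS window registers through the SMN
	 * interface; derive the per-port SMN base address from the port ID.
	 */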
	if (pdata->vdata->xpcs_access == XGBE_XPCS_ACCESS_V3) {
		reg = XP_IOREAD(pdata, XP_PROP_0);
		port_addr_size = PCS_RN_PORT_ADDR_SIZE *
				 XP_GET_BITS(reg, XP_PROP_0, PORT_ID);
		pdata->smn_base = PCS_RN_SMN_BASE_ADDR + port_addr_size;

		address = pdata->smn_base + (pdata->xpcs_window_def_reg);
		ret = amd_smn_read(0, address, &reg);
		if (ret) {
			pci_err(pdata->pcidev, "Failed to read data\n");
			goto err_pci_enable;
		}
	} else {
		reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
	}

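	/* The window definition encodes the offset in 64-byte units and the
	 * window size as a power of two (128 << SIZE bytes).
	 */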
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
	if (netif_msg_probe(pdata)) {
		dev_dbg(dev, "xpcs window def  = %#010x\n",
			pdata->xpcs_window_def_reg);
		dev_dbg(dev, "xpcs window sel  = %#010x\n",
			pdata->xpcs_window_sel_reg);
		dev_dbg(dev, "xpcs window      = %#010x\n",
			pdata->xpcs_window);
		dev_dbg(dev, "xpcs window size = %#010x\n",
			pdata->xpcs_window_size);
		dev_dbg(dev, "xpcs window mask = %#010x\n",
			pdata->xpcs_window_mask);
	}

	pci_set_master(pdev);

	/* Enable all interrupts in the hardware */
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
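	/* The first four bytes come from the LO port property register and
	 * the last two from the HI register, which also carries a valid flag.
	 */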
	ma_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	ma_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr[0] = ma_lo & 0xff;
	pdata->mac_addr[1] = (ma_lo >> 8) & 0xff;
	pdata->mac_addr[2] = (ma_lo >> 16) & 0xff;
	pdata->mac_addr[3] = (ma_lo >> 24) & 0xff;
	pdata->mac_addr[4] = ma_hi & 0xff;
	pdata->mac_addr[5] = (ma_hi >> 8) & 0xff;
	if (!XP_GET_BITS(ma_hi, XP_MAC_ADDR_HI, VALID) ||
	    !is_valid_ether_addr(pdata->mac_addr)) {
		dev_err(dev, "invalid mac address\n");
		ret = -EINVAL;
		goto err_pci_enable;
	}

	/* Clock settings */
	pdata->sysclk_rate = XGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = XGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->arcr = XGBE_DMA_PCI_ARCR;
	pdata->awcr = XGBE_DMA_PCI_AWCR;
	pdata->awarcr = XGBE_DMA_PCI_AWARCR;

	/* Read the port property registers */
	pdata->pp0 = XP_IOREAD(pdata, XP_PROP_0);
	pdata->pp1 = XP_IOREAD(pdata, XP_PROP_1);
	pdata->pp2 = XP_IOREAD(pdata, XP_PROP_2);
	pdata->pp3 = XP_IOREAD(pdata, XP_PROP_3);
	pdata->pp4 = XP_IOREAD(pdata, XP_PROP_4);
	if (netif_msg_probe(pdata)) {
		dev_dbg(dev, "port property 0 = %#010x\n", pdata->pp0);
		dev_dbg(dev, "port property 1 = %#010x\n", pdata->pp1);
		dev_dbg(dev, "port property 2 = %#010x\n", pdata->pp2);
		dev_dbg(dev, "port property 3 = %#010x\n", pdata->pp3);
		dev_dbg(dev, "port property 4 = %#010x\n", pdata->pp4);
	}

	/* Set the maximum channels and queues */
	pdata->tx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
						  MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
						  MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
					    MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
					    MAX_RX_QUEUES);
	if (netif_msg_probe(pdata)) {
		dev_dbg(dev, "max tx/rx channel count = %u/%u\n",
			pdata->tx_max_channel_count,
			pdata->rx_max_channel_count);
		dev_dbg(dev, "max tx/rx hw queue count = %u/%u\n",
			pdata->tx_max_q_count, pdata->rx_max_q_count);
	}

	/* Set the hardware channel and queue counts */
	xgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
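	/* The hardware reports the FIFO sizes in units of 16KB; clamp them
	 * to the per-version maximums.
	 */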
	pdata->tx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2,
					      TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size,
				      pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2,
					      RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size,
				      pdata->vdata->rx_max_fifo_size);
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "max tx/rx max fifo size = %u/%u\n",
			pdata->tx_max_fifo_size, pdata->rx_max_fifo_size);

	/* Configure interrupt support */
	ret = xgbe_config_irqs(pdata);
	if (ret)
		goto err_pci_enable;

	/* Configure the netdev resource */
	ret = xgbe_config_netdev(pdata);
	if (ret)
		goto err_irq_vectors;

	netdev_notice(pdata->netdev, "net device enabled\n");

	return 0;

err_irq_vectors:
	pci_free_irq_vectors(pdata->pcidev);

err_pci_enable:
	xgbe_free_pdata(pdata);

err_alloc:
	dev_notice(dev, "net device not enabled\n");

	return ret;
}

static void xgbe_pci_remove(struct pci_dev *pdev)
{
	struct xgbe_prv_data *pdata = pci_get_drvdata(pdev);

	xgbe_deconfig_netdev(pdata);

	pci_free_irq_vectors(pdata->pcidev);

	/* Disable all interrupts in the hardware */
	XP_IOWRITE(pdata, XP_INT_EN, 0x0);

	xgbe_free_pdata(pdata);
}

static int __maybe_unused xgbe_pci_suspend(struct device *dev)
{
	struct xgbe_prv_data *pdata = dev_get_drvdata(dev);
	struct net_device *netdev = pdata->netdev;
	int ret = 0;

	if (netif_running(netdev))
		ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);

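	/* Put the PCS into low-power mode while suspended */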
	pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
	pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);

	return ret;
}

static int __maybe_unused xgbe_pci_resume(struct device *dev)
{
	struct xgbe_prv_data *pdata = dev_get_drvdata(dev);
	struct net_device *netdev = pdata->netdev;
	int ret = 0;

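	/* Re-enable all interrupts in the hardware and take the PCS back
	 * out of low-power mode.
	 */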
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);

	if (netif_running(netdev)) {
		ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);

		/* Schedule a restart in case the link or phy state changed
		 * while we were powered down.
		 */
		schedule_work(&pdata->restart_work);
	}

	return ret;
}

static struct xgbe_version_data xgbe_v3 = {
	.init_function_ptrs_phy_impl	= xgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= XGBE_XPCS_ACCESS_V3,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 65536,
	.rx_max_fifo_size		= 65536,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.irq_reissue_support		= 1,
	.tx_desc_prefetch		= 5,
	.rx_desc_prefetch		= 5,
	.an_cdr_workaround		= 0,
	.enable_rrc			= 0,
};

static struct xgbe_version_data xgbe_v2a = {
	.init_function_ptrs_phy_impl	= xgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= XGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 229376,
	.rx_max_fifo_size		= 229376,
	.tx_tstamp_workaround		= 1,
	.tstamp_ptp_clock_freq		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.irq_reissue_support		= 1,
	.tx_desc_prefetch		= 5,
	.rx_desc_prefetch		= 5,
	.an_cdr_workaround		= 1,
	.enable_rrc			= 1,
};

static struct xgbe_version_data xgbe_v2b = {
	.init_function_ptrs_phy_impl	= xgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= XGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 65536,
	.rx_max_fifo_size		= 65536,
	.tx_tstamp_workaround		= 1,
	.tstamp_ptp_clock_freq		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.irq_reissue_support		= 1,
	.tx_desc_prefetch		= 5,
	.rx_desc_prefetch		= 5,
	.an_cdr_workaround		= 1,
	.enable_rrc			= 1,
};

static const struct pci_device_id xgbe_pci_table[] = {
	{ PCI_VDEVICE(AMD, 0x1458),
	  .driver_data = (kernel_ulong_t)&xgbe_v2a },
	{ PCI_VDEVICE(AMD, 0x1459),
	  .driver_data = (kernel_ulong_t)&xgbe_v2b },
	{ PCI_VDEVICE(AMD, 0x1641),
	  .driver_data = (kernel_ulong_t)&xgbe_v3 },
	/* Last entry must be zero */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, xgbe_pci_table);

static SIMPLE_DEV_PM_OPS(xgbe_pci_pm_ops, xgbe_pci_suspend, xgbe_pci_resume);

static struct pci_driver xgbe_driver = {
	.name = XGBE_DRV_NAME,
	.id_table = xgbe_pci_table,
	.probe = xgbe_pci_probe,
	.remove = xgbe_pci_remove,
	.driver = {
		.pm = &xgbe_pci_pm_ops,
	}
};

int xgbe_pci_init(void)
{
	return pci_register_driver(&xgbe_driver);
}

void xgbe_pci_exit(void)
{
	pci_unregister_driver(&xgbe_driver);
}