// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for AMD MDB PCIe Bridge
 *
 * Copyright (C) 2024-2025, Advanced Micro Devices, Inc.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

#define AMD_MDB_TLP_IR_STATUS_MISC		0x4C0
#define AMD_MDB_TLP_IR_MASK_MISC		0x4C4
#define AMD_MDB_TLP_IR_ENABLE_MISC		0x4C8
#define AMD_MDB_TLP_IR_DISABLE_MISC		0x4CC

#define AMD_MDB_TLP_PCIE_INTX_MASK	GENMASK(23, 16)

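/*
 * The INTx field is 8 bits wide and each INTx line appears to use every
 * other bit, so AMD_MDB_PCIE_INTR_INTX_ASSERT() selects the assert bit
 * for line 'x'.
 */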
#define AMD_MDB_PCIE_INTR_INTX_ASSERT(x)	BIT((x) * 2)

/* Interrupt bit definitions. */
#define AMD_MDB_PCIE_INTR_CMPL_TIMEOUT		15
#define AMD_MDB_PCIE_INTR_INTX			16
#define AMD_MDB_PCIE_INTR_PM_PME_RCVD		24
#define AMD_MDB_PCIE_INTR_PME_TO_ACK_RCVD	25
#define AMD_MDB_PCIE_INTR_MISC_CORRECTABLE	26
#define AMD_MDB_PCIE_INTR_NONFATAL		27
#define AMD_MDB_PCIE_INTR_FATAL			28

#define IMR(x) BIT(AMD_MDB_PCIE_INTR_ ##x)
#define AMD_MDB_PCIE_IMR_ALL_MASK			\
	(						\
		IMR(CMPL_TIMEOUT)	|		\
		IMR(PM_PME_RCVD)	|		\
		IMR(PME_TO_ACK_RCVD)	|		\
		IMR(MISC_CORRECTABLE)	|		\
		IMR(NONFATAL)		|		\
		IMR(FATAL)		|		\
		AMD_MDB_TLP_PCIE_INTX_MASK		\
	)

/**
 * struct amd_mdb_pcie - PCIe port information
 * @pci: DesignWare PCIe controller structure
 * @slcr: MDB System Level Control and Status Register (SLCR) base
 * @intx_domain: INTx IRQ domain pointer
 * @mdb_domain: MDB IRQ domain pointer
 * @perst_gpio: GPIO descriptor for PERST# signal handling
 * @intx_irq: INTx interrupt number
 */
struct amd_mdb_pcie {
	struct dw_pcie			pci;
	void __iomem			*slcr;
	struct irq_domain		*intx_domain;
	struct irq_domain		*mdb_domain;
	struct gpio_desc		*perst_gpio;
	int				intx_irq;
};

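/*
 * No MDB-specific host-init callbacks are needed; the DWC core defaults
 * are used.
 */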
static const struct dw_pcie_host_ops amd_mdb_pcie_host_ops = {
};

static void amd_mdb_intx_irq_mask(struct irq_data *data)
{
	struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data);
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *port = &pci->pp;
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK,
			 AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq));

	/*
	 * Writing '1' to a bit in AMD_MDB_TLP_IR_DISABLE_MISC disables that
	 * interrupt, writing '0' has no effect.
	 */
	writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void amd_mdb_intx_irq_unmask(struct irq_data *data)
{
	struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data);
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *port = &pci->pp;
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK,
			 AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq));

	/*
	 * Writing '1' to a bit in AMD_MDB_TLP_IR_ENABLE_MISC enables that
	 * interrupt, writing '0' has no effect.
	 */
	writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip amd_mdb_intx_irq_chip = {
	.name		= "AMD MDB INTx",
	.irq_mask	= amd_mdb_intx_irq_mask,
	.irq_unmask	= amd_mdb_intx_irq_unmask,
};

/**
 * amd_mdb_pcie_intx_map - Set the INTx handler and mark the IRQ as valid
 * @domain: IRQ domain
 * @irq: Virtual IRQ number
 * @hwirq: Hardware interrupt number
 *
 * Return: Always returns '0'.
 */
static int amd_mdb_pcie_intx_map(struct irq_domain *domain,
				 unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &amd_mdb_intx_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

/* INTx IRQ domain operations. */
static const struct irq_domain_ops amd_intx_domain_ops = {
	.map = amd_mdb_pcie_intx_map,
};

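/*
 * Demultiplex the INTx interrupt: read the pending INTA..INTD bits from
 * the SLCR status register and dispatch each set bit to the INTx domain.
 */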
static irqreturn_t dw_pcie_rp_intx(int irq, void *args)
{
	struct amd_mdb_pcie *pcie = args;
	unsigned long val;
	int i, int_status;

	val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
	int_status = FIELD_GET(AMD_MDB_TLP_PCIE_INTX_MASK, val);

	for (i = 0; i < PCI_NUM_INTX; i++) {
		if (int_status & AMD_MDB_PCIE_INTR_INTX_ASSERT(i))
			generic_handle_domain_irq(pcie->intx_domain, i);
	}

	return IRQ_HANDLED;
}

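/*
 * Table of reportable events, indexed by bit position in the MISC
 * interrupt registers. _IC() keeps the symbolic name (used as the IRQ
 * name when the interrupt is requested) next to the human-readable
 * description.
 */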
#define _IC(x, s)[AMD_MDB_PCIE_INTR_ ## x] = { __stringify(x), s }

static const struct {
	const char	*sym;
	const char	*str;
} intr_cause[32] = {
	_IC(CMPL_TIMEOUT,	"Completion timeout"),
	_IC(PM_PME_RCVD,	"PM_PME message received"),
	_IC(PME_TO_ACK_RCVD,	"PME_TO_ACK message received"),
	_IC(MISC_CORRECTABLE,	"Correctable error message"),
	_IC(NONFATAL,		"Non fatal error message"),
	_IC(FATAL,		"Fatal error message"),
};

static void amd_mdb_event_irq_mask(struct irq_data *d)
{
	struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *port = &pci->pp;
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = BIT(d->hwirq);
	writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void amd_mdb_event_irq_unmask(struct irq_data *d)
{
	struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *port = &pci->pp;
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = BIT(d->hwirq);
	writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip amd_mdb_event_irq_chip = {
	.name		= "AMD MDB RC-Event",
	.irq_mask	= amd_mdb_event_irq_mask,
	.irq_unmask	= amd_mdb_event_irq_unmask,
};

static int amd_mdb_pcie_event_map(struct irq_domain *domain,
				  unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &amd_mdb_event_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

static const struct irq_domain_ops event_domain_ops = {
	.map = amd_mdb_pcie_event_map,
};

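/*
 * Top-level event handler: dispatch every pending, unmasked status bit
 * to the MDB domain, then write the handled bits back to the status
 * register (assumed write-one-to-clear) to acknowledge them.
 */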
static irqreturn_t amd_mdb_pcie_event(int irq, void *args)
{
	struct amd_mdb_pcie *pcie = args;
	unsigned long val;
	int i;

	val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
	val &= ~readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_MASK_MISC);
	for_each_set_bit(i, &val, 32)
		generic_handle_domain_irq(pcie->mdb_domain, i);
	writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);

	return IRQ_HANDLED;
}

static void amd_mdb_pcie_free_irq_domains(struct amd_mdb_pcie *pcie)
{
	if (pcie->intx_domain) {
		irq_domain_remove(pcie->intx_domain);
		pcie->intx_domain = NULL;
	}

	if (pcie->mdb_domain) {
		irq_domain_remove(pcie->mdb_domain);
		pcie->mdb_domain = NULL;
	}
}

static int amd_mdb_pcie_init_port(struct amd_mdb_pcie *pcie)
{
	unsigned long val;

	/* Disable all TLP interrupts. */
	writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK,
		       pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);

	/* Clear pending TLP interrupts. */
	val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
	val &= AMD_MDB_PCIE_IMR_ALL_MASK;
	writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);

	/* Enable all TLP interrupts. */
	writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK,
		       pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);

	return 0;
}

/**
 * amd_mdb_pcie_init_irq_domains - Initialize IRQ domains
 * @pcie: PCIe port information
 * @pdev: Platform device
 *
 * Return: '0' on success, or a negative error value on failure.
 */
static int amd_mdb_pcie_init_irq_domains(struct amd_mdb_pcie *pcie,
					 struct platform_device *pdev)
{
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;
	int err;

	pcie_intc_node = of_get_child_by_name(node, "interrupt-controller");
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -ENODEV;
	}

	pcie->mdb_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
						    &event_domain_ops, pcie);
	if (!pcie->mdb_domain) {
		err = -ENOMEM;
		dev_err(dev, "Failed to add MDB domain\n");
		goto out;
	}

	irq_domain_update_bus_token(pcie->mdb_domain, DOMAIN_BUS_NEXUS);

	pcie->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
						     PCI_NUM_INTX, &amd_intx_domain_ops, pcie);
	if (!pcie->intx_domain) {
		err = -ENOMEM;
		dev_err(dev, "Failed to add INTx domain\n");
		goto mdb_out;
	}

	of_node_put(pcie_intc_node);
	irq_domain_update_bus_token(pcie->intx_domain, DOMAIN_BUS_WIRED);

	raw_spin_lock_init(&pp->lock);

	return 0;
mdb_out:
	amd_mdb_pcie_free_irq_domains(pcie);
out:
	of_node_put(pcie_intc_node);
	return err;
}

static irqreturn_t amd_mdb_pcie_intr_handler(int irq, void *args)
{
	struct amd_mdb_pcie *pcie = args;
	struct device *dev;
	struct irq_data *d;

	dev = pcie->pci.dev;

	/*
	 * In the future, error reporting will be hooked to the AER subsystem.
	 * Currently, the driver prints a warning message to the user.
	 */
	d = irq_domain_get_irq_data(pcie->mdb_domain, irq);
	if (intr_cause[d->hwirq].str)
		dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
	else
		dev_warn_once(dev, "Unknown IRQ %ld\n", d->hwirq);

	return IRQ_HANDLED;
}

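/*
 * Set up interrupt handling: request a per-event IRQ for each known
 * cause, the INTx demultiplexer IRQ, and the top-level platform IRQ.
 */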
static int amd_mdb_setup_irq(struct amd_mdb_pcie *pcie,
			     struct platform_device *pdev)
{
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int i, irq, err;

	amd_mdb_pcie_init_port(pcie);

	pp->irq = platform_get_irq(pdev, 0);
	if (pp->irq < 0)
		return pp->irq;

	for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
		if (!intr_cause[i].str)
			continue;

		irq = irq_create_mapping(pcie->mdb_domain, i);
		if (!irq) {
			dev_err(dev, "Failed to map MDB domain interrupt\n");
			return -ENOMEM;
		}

		err = devm_request_irq(dev, irq, amd_mdb_pcie_intr_handler,
				       IRQF_NO_THREAD, intr_cause[i].sym, pcie);
		if (err) {
			dev_err(dev, "Failed to request IRQ %d, err=%d\n",
				irq, err);
			return err;
		}
	}

	pcie->intx_irq = irq_create_mapping(pcie->mdb_domain,
					    AMD_MDB_PCIE_INTR_INTX);
	if (!pcie->intx_irq) {
		dev_err(dev, "Failed to map INTx interrupt\n");
		return -ENXIO;
	}

	err = devm_request_irq(dev, pcie->intx_irq, dw_pcie_rp_intx,
			       IRQF_NO_THREAD, NULL, pcie);
	if (err) {
		dev_err(dev, "Failed to request INTx IRQ %d, err=%d\n",
			pcie->intx_irq, err);
		return err;
	}

	/* Plug the main event handler. */
	err = devm_request_irq(dev, pp->irq, amd_mdb_pcie_event, IRQF_NO_THREAD,
			       "amd_mdb pcie_irq", pcie);
	if (err) {
		dev_err(dev, "Failed to request event IRQ %d, err=%d\n",
			pp->irq, err);
		return err;
	}

	return 0;
}

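/*
 * Look up the Root Port child node (name prefixed with "pcie") and
 * request its PERST# GPIO in the asserted state.
 */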
static int amd_mdb_parse_pcie_port(struct amd_mdb_pcie *pcie)
{
	struct device *dev = pcie->pci.dev;
	struct device_node *pcie_port_node __maybe_unused;

	/*
	 * This platform currently supports only one Root Port, so the loop
	 * will execute only once.
	 * TODO: Enhance the driver to handle multiple Root Ports in the future.
	 */
	for_each_child_of_node_with_prefix(dev->of_node, pcie_port_node, "pcie") {
		pcie->perst_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(pcie_port_node),
							 "reset", GPIOD_OUT_HIGH, NULL);
		if (IS_ERR(pcie->perst_gpio))
			return dev_err_probe(dev, PTR_ERR(pcie->perst_gpio),
					     "Failed to request reset GPIO\n");
		return 0;
	}

	return -ENODEV;
}

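/*
 * Map the SLCR region, initialize the IRQ domains and handlers, release
 * PERST#, then hand the Root Port over to the DWC core.
 */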
static int amd_mdb_add_pcie_port(struct amd_mdb_pcie *pcie,
				 struct platform_device *pdev)
{
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int err;

	pcie->slcr = devm_platform_ioremap_resource_byname(pdev, "slcr");
	if (IS_ERR(pcie->slcr))
		return PTR_ERR(pcie->slcr);

	err = amd_mdb_pcie_init_irq_domains(pcie, pdev);
	if (err)
		return err;

	err = amd_mdb_setup_irq(pcie, pdev);
	if (err) {
		dev_err(dev, "Failed to set up interrupts, err=%d\n", err);
		goto out;
	}

	pp->ops = &amd_mdb_pcie_host_ops;

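	/*
	 * Deassert PERST# once the power-valid delay (PCIE_T_PVPERL_MS)
	 * has elapsed, then wait PCIE_RESET_CONFIG_WAIT_MS before the
	 * first configuration access.
	 */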
	if (pcie->perst_gpio) {
		mdelay(PCIE_T_PVPERL_MS);
		gpiod_set_value_cansleep(pcie->perst_gpio, 0);
		mdelay(PCIE_RESET_CONFIG_WAIT_MS);
	}

	err = dw_pcie_host_init(pp);
	if (err) {
		dev_err(dev, "Failed to initialize host, err=%d\n", err);
		goto out;
	}

	return 0;

out:
	amd_mdb_pcie_free_irq_domains(pcie);
	return err;
}

static int amd_mdb_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct amd_mdb_pcie *pcie;
	struct dw_pcie *pci;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = &pcie->pci;
	pci->dev = dev;

	platform_set_drvdata(pdev, pcie);

	ret = amd_mdb_parse_pcie_port(pcie);
	/*
	 * If amd_mdb_parse_pcie_port() returns -ENODEV, the PCIe Bridge node
	 * was not found in the device tree. This is not a fatal error; it
	 * triggers a fallback where the reset GPIO is acquired directly from
	 * the PCIe Host Bridge node.
	 */
	if (ret) {
		if (ret != -ENODEV)
			return ret;

		pcie->perst_gpio = devm_gpiod_get_optional(dev, "reset",
							   GPIOD_OUT_HIGH);
		if (IS_ERR(pcie->perst_gpio))
			return dev_err_probe(dev, PTR_ERR(pcie->perst_gpio),
					     "Failed to request reset GPIO\n");
	}

	return amd_mdb_add_pcie_port(pcie, pdev);
}

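/*
 * Illustrative device-tree shape, inferred from the names this driver
 * looks up (the authoritative layout is the "amd,versal2-mdb-host"
 * binding):
 *
 *	- compatible = "amd,versal2-mdb-host";
 *	- reg-names including "slcr";
 *	- an "interrupt-controller" child node for the event/INTx domains;
 *	- an optional "pcie*" Root Port child carrying "reset-gpios".
 */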
static const struct of_device_id amd_mdb_pcie_of_match[] = {
	{
		.compatible = "amd,versal2-mdb-host",
	},
	{},
};

static struct platform_driver amd_mdb_pcie_driver = {
	.driver = {
		.name	= "amd-mdb-pcie",
		.of_match_table = amd_mdb_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = amd_mdb_pcie_probe,
};

builtin_platform_driver(amd_mdb_pcie_driver);