xref: /linux/drivers/pci/controller/plda/pcie-plda-host.c (revision 4602c370bdf6946b4e954a3db0ef5958aac2b7b4)
// SPDX-License-Identifier: GPL-2.0
/*
 * PLDA PCIe XpressRich host controller driver
 *
 * Copyright (C) 2023 Microchip Co. Ltd
 *
 * Author: Daire McNamara <daire.mcnamara@microchip.com>
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/pci_regs.h>
#include <linux/pci-ecam.h>

#include "pcie-plda.h"

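/*
 * Chained handler for the bridge MSI interrupt: clear the MSI bit in
 * ISTATUS_LOCAL, then dispatch every vector pending in ISTATUS_MSI to
 * the MSI IRQ domain.
 */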
static void plda_handle_msi(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	struct plda_msi *msi = &port->msi;
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long status;
	u32 bit;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	if (status & PM_MSI_INT_MSI_MASK) {
		writel_relaxed(status & PM_MSI_INT_MSI_MASK,
			       bridge_base_addr + ISTATUS_LOCAL);
		status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
		for_each_set_bit(bit, &status, msi->num_vectors) {
			ret = generic_handle_domain_irq(msi->dev_domain, bit);
			if (ret)
				dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
						    bit);
		}
	}

	chained_irq_exit(chip, desc);
}

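/* Ack a single MSI vector by writing its bit back to ISTATUS_MSI. */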
static void plda_msi_bottom_irq_ack(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr = port->bridge_addr;
	u32 bitpos = data->hwirq;

	writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
}

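/*
 * Compose the MSI message: the doorbell address is the port's MSI vector
 * physical address, the payload is the hwirq (vector) number.
 */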
static void plda_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	phys_addr_t addr = port->msi.vector_phy;

	msg->address_lo = lower_32_bits(addr);
	msg->address_hi = upper_32_bits(addr);
	msg->data = data->hwirq;

	dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}

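/*
 * All MSI vectors are funnelled through the bridge's single parent
 * interrupt, so per-vector affinity is not supported.
 */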
static int plda_msi_set_affinity(struct irq_data *irq_data,
				 const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static struct irq_chip plda_msi_bottom_irq_chip = {
	.name = "PLDA MSI",
	.irq_ack = plda_msi_bottom_irq_ack,
	.irq_compose_msi_msg = plda_compose_msi_msg,
	.irq_set_affinity = plda_msi_set_affinity,
};

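/* Allocate the first free MSI vector from the bitmap, under msi->lock. */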
static int plda_irq_msi_domain_alloc(struct irq_domain *domain,
				     unsigned int virq,
				     unsigned int nr_irqs,
				     void *args)
{
	struct plda_pcie_rp *port = domain->host_data;
	struct plda_msi *msi = &port->msi;
	unsigned long bit;

	mutex_lock(&msi->lock);
	bit = find_first_zero_bit(msi->used, msi->num_vectors);
	if (bit >= msi->num_vectors) {
		mutex_unlock(&msi->lock);
		return -ENOSPC;
	}

	set_bit(bit, msi->used);

	irq_domain_set_info(domain, virq, bit, &plda_msi_bottom_irq_chip,
			    domain->host_data, handle_edge_irq, NULL, NULL);

	mutex_unlock(&msi->lock);

	return 0;
}

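/* Return an MSI vector to the bitmap; report an error if it was unused. */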
static void plda_irq_msi_domain_free(struct irq_domain *domain,
				     unsigned int virq,
				     unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(d);
	struct plda_msi *msi = &port->msi;

	mutex_lock(&msi->lock);

	if (test_bit(d->hwirq, msi->used))
		__clear_bit(d->hwirq, msi->used);
	else
		dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);

	mutex_unlock(&msi->lock);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= plda_irq_msi_domain_alloc,
	.free	= plda_irq_msi_domain_free,
};

static struct irq_chip plda_msi_irq_chip = {
	.name = "PLDA PCIe MSI",
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info plda_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX),
	.chip = &plda_msi_irq_chip,
};

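/*
 * Create the two-level MSI hierarchy: an inner domain handing out the
 * bridge's MSI vectors and a PCI MSI domain stacked on top of it.
 */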
static int plda_allocate_msi_domains(struct plda_pcie_rp *port)
{
	struct device *dev = port->dev;
	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
	struct plda_msi *msi = &port->msi;

	mutex_init(&port->msi.lock);

	msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors,
						&msi_domain_ops, port);
	if (!msi->dev_domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
						    &plda_msi_domain_info,
						    msi->dev_domain);
	if (!msi->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		irq_domain_remove(msi->dev_domain);
		return -ENOMEM;
	}

	return 0;
}

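/*
 * Chained handler for the bridge INTx interrupt: demultiplex the INTx
 * bits of ISTATUS_LOCAL and dispatch each one to the INTx IRQ domain.
 */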
static void plda_handle_intx(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long status;
	u32 bit;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	if (status & PM_MSI_INT_INTX_MASK) {
		status &= PM_MSI_INT_INTX_MASK;
		status >>= PM_MSI_INT_INTX_SHIFT;
		for_each_set_bit(bit, &status, PCI_NUM_INTX) {
			ret = generic_handle_domain_irq(port->intx_domain, bit);
			if (ret)
				dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
						    bit);
		}
	}

	chained_irq_exit(chip, desc);
}

static void plda_ack_intx_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr = port->bridge_addr;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);

	writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
}

static void plda_mask_intx_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long flags;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
	val &= ~mask;
	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void plda_unmask_intx_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long flags;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
	val |= mask;
	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip plda_intx_irq_chip = {
	.name = "PLDA PCIe INTx",
	.irq_ack = plda_ack_intx_irq,
	.irq_mask = plda_mask_intx_irq,
	.irq_unmask = plda_unmask_intx_irq,
};

static int plda_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &plda_intx_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = plda_pcie_intx_map,
};

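/*
 * Collect pending events from ISTATUS_LOCAL and remap them into the
 * driver's event numbering: the system/MSI status bits are shifted down
 * so that the four INTx bits collapse into a single INTx event, while
 * the lower event bits keep their register positions.
 */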
static u32 plda_get_events(struct plda_pcie_rp *port)
{
	u32 events, val, origin;

	origin = readl_relaxed(port->bridge_addr + ISTATUS_LOCAL);

	/* MSI and system events */
	val = (origin & SYS_AND_MSI_MASK) >> PM_MSI_INT_MSI_SHIFT;
	events = val << (PM_MSI_INT_MSI_SHIFT - PCI_NUM_INTX + 1);

	/* INTx events */
	if (origin & PM_MSI_INT_INTX_MASK)
		events |= BIT(PM_MSI_INT_INTX_SHIFT);

	/* The remaining bits match the register layout */
	events |= origin & GENMASK(P_ATR_EVT_DOORBELL_SHIFT, 0);

	return events;
}

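/*
 * Default handler for event interrupts, used when a platform driver does
 * not provide its own ->request_event_irq() callback.  No device-specific
 * handling is needed; the event is acked by the irq chip.
 */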
static irqreturn_t plda_event_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

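/*
 * Top-level chained handler: read the pending events and dispatch each
 * one to the event IRQ domain.
 */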
static void plda_handle_event(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	unsigned long events;
	u32 bit;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	events = port->event_ops->get_events(port);

	for_each_set_bit(bit, &events, port->num_events)
		generic_handle_domain_irq(port->event_domain, bit);

	chained_irq_exit(chip, desc);
}

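/*
 * Translate an event hwirq back into its ISTATUS_LOCAL/IMASK_LOCAL bit
 * mask (the inverse of the remapping done by plda_get_events()).
 */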
static u32 plda_hwirq_to_mask(int hwirq)
{
	u32 mask;

	/* hwirqs 0 - 23 map directly to register bits */
	if (hwirq < EVENT_PM_MSI_INT_INTX)
		mask = BIT(hwirq);
	else if (hwirq == EVENT_PM_MSI_INT_INTX)
		mask = PM_MSI_INT_INTX_MASK;
	else
		mask = BIT(hwirq + PCI_NUM_INTX - 1);

	return mask;
}

static void plda_ack_event_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);

	writel_relaxed(plda_hwirq_to_mask(data->hwirq),
		       port->bridge_addr + ISTATUS_LOCAL);
}

static void plda_mask_event_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	u32 mask, val;

	mask = plda_hwirq_to_mask(data->hwirq);

	raw_spin_lock(&port->lock);
	val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
	val &= ~mask;
	writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
	raw_spin_unlock(&port->lock);
}

static void plda_unmask_event_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	u32 mask, val;

	mask = plda_hwirq_to_mask(data->hwirq);

	raw_spin_lock(&port->lock);
	val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
	val |= mask;
	writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
	raw_spin_unlock(&port->lock);
}

static struct irq_chip plda_event_irq_chip = {
	.name = "PLDA PCIe EVENT",
	.irq_ack = plda_ack_event_irq,
	.irq_mask = plda_mask_event_irq,
	.irq_unmask = plda_unmask_event_irq,
};

static const struct plda_event_ops plda_event_ops = {
	.get_events = plda_get_events,
};

static int plda_pcie_event_map(struct irq_domain *domain, unsigned int irq,
			       irq_hw_number_t hwirq)
{
	struct plda_pcie_rp *port = (void *)domain->host_data;

	irq_set_chip_and_handler(irq, port->event_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops plda_event_domain_ops = {
	.map = plda_pcie_event_map,
};

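/*
 * Create the event and INTx IRQ domains from the interrupt-controller
 * child node, then the MSI domains.
 */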
static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;

	/* Setup INTx */
	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "failed to find PCIe Intc node\n");
		return -EINVAL;
	}

	port->event_domain = irq_domain_add_linear(pcie_intc_node,
						   port->num_events,
						   &plda_event_domain_ops,
						   port);
	if (!port->event_domain) {
		dev_err(dev, "failed to get event domain\n");
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);

	port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						  &intx_domain_ops, port);
	if (!port->intx_domain) {
		dev_err(dev, "failed to get an INTx IRQ domain\n");
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);

	of_node_put(pcie_intc_node);
	raw_spin_lock_init(&port->lock);

	return plda_allocate_msi_domains(port);
}

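/**
 * plda_init_interrupts() - set up the PLDA host interrupt hierarchy
 * @pdev: platform device providing the bridge interrupt
 * @port: PLDA PCIe root port
 * @event: platform-specific event numbers and optional IRQ request hook
 *
 * Create the event, INTx and MSI IRQ domains, request an IRQ for every
 * event, and install the chained INTx, MSI and top-level event handlers.
 */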
int plda_init_interrupts(struct platform_device *pdev,
			 struct plda_pcie_rp *port,
			 const struct plda_event *event)
{
	struct device *dev = &pdev->dev;
	int irq;
	int i, intx_irq, msi_irq, event_irq;
	int ret;

	if (!port->event_ops)
		port->event_ops = &plda_event_ops;

	if (!port->event_irq_chip)
		port->event_irq_chip = &plda_event_irq_chip;

	ret = plda_pcie_init_irq_domains(port);
	if (ret) {
		dev_err(dev, "failed creating IRQ domains\n");
		return ret;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	for (i = 0; i < port->num_events; i++) {
		event_irq = irq_create_mapping(port->event_domain, i);
		if (!event_irq) {
			dev_err(dev, "failed to map hwirq %d\n", i);
			return -ENXIO;
		}

		if (event->request_event_irq)
			ret = event->request_event_irq(port, event_irq, i);
		else
			ret = devm_request_irq(dev, event_irq,
					       plda_event_handler,
					       0, NULL, port);

		if (ret) {
			dev_err(dev, "failed to request IRQ %d\n", event_irq);
			return ret;
		}
	}

	intx_irq = irq_create_mapping(port->event_domain,
				      event->intx_event);
	if (!intx_irq) {
		dev_err(dev, "failed to map INTx interrupt\n");
		return -ENXIO;
	}

	/* Plug the INTx chained handler */
	irq_set_chained_handler_and_data(intx_irq, plda_handle_intx, port);

	msi_irq = irq_create_mapping(port->event_domain,
				     event->msi_event);
	if (!msi_irq)
		return -ENXIO;

	/* Plug the MSI chained handler */
	irq_set_chained_handler_and_data(msi_irq, plda_handle_msi, port);

	/* Plug the main event chained handler */
	irq_set_chained_handler_and_data(irq, plda_handle_event, port);

	return 0;
}
EXPORT_SYMBOL_GPL(plda_init_interrupts);

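/*
 * Program address translation (ATR) table entry @index to translate the
 * AXI window of @size bytes starting at @axi_addr to @pci_addr.  Entry 0
 * targets the PCIe configuration interface; all other entries target the
 * TX/RX interface.
 */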
void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
			    phys_addr_t axi_addr, phys_addr_t pci_addr,
			    size_t size)
{
	u32 atr_sz = ilog2(size) - 1;
	u32 val;

	if (index == 0)
		val = PCIE_CONFIG_INTERFACE;
	else
		val = PCIE_TX_RX_INTERFACE;

	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_PARAM);

	val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) |
			    ATR_IMPL_ENABLE;
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_SRCADDR_PARAM);

	val = upper_32_bits(axi_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_SRC_ADDR);

	val = lower_32_bits(pci_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_ADDR_LSB);

	val = upper_32_bits(pci_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_ADDR_UDW);

	val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
	val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
	writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
	writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
}
EXPORT_SYMBOL_GPL(plda_pcie_setup_window);

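/*
 * Walk the host bridge's memory windows and program an ATR entry for
 * each, starting at index 1 (entry 0 is reserved for config accesses).
 */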
int plda_pcie_setup_iomems(struct platform_device *pdev,
			   struct plda_pcie_rp *port)
{
	void __iomem *bridge_base_addr = port->bridge_addr;
	struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
	struct resource_entry *entry;
	u64 pci_addr;
	u32 index = 1;

	resource_list_for_each_entry(entry, &bridge->windows) {
		if (resource_type(entry->res) == IORESOURCE_MEM) {
			pci_addr = entry->res->start - entry->offset;
			plda_pcie_setup_window(bridge_base_addr, index,
					       entry->res->start, pci_addr,
					       resource_size(entry->res));
			index++;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(plda_pcie_setup_iomems);