xref: /linux/drivers/pci/controller/plda/pcie-plda-host.c (revision a576fff39eecb89befbb0bf567a5b5d889199d56)
// SPDX-License-Identifier: GPL-2.0
/*
 * PLDA PCIe XpressRich host controller driver
 *
 * Copyright (C) 2023 Microchip Co. Ltd
 *
 * Author: Daire McNamara <daire.mcnamara@microchip.com>
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/pci_regs.h>
#include <linux/pci-ecam.h>

#include "pcie-plda.h"

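/*
 * Chained handler for the aggregated MSI interrupt: acknowledge the MSI
 * group bit(s) in ISTATUS_LOCAL, then dispatch every vector pending in
 * ISTATUS_MSI to the inner MSI IRQ domain.
 */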
static void plda_handle_msi(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	struct plda_msi *msi = &port->msi;
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long status;
	u32 bit;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	if (status & PM_MSI_INT_MSI_MASK) {
		writel_relaxed(status & PM_MSI_INT_MSI_MASK,
			       bridge_base_addr + ISTATUS_LOCAL);
		status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
		for_each_set_bit(bit, &status, msi->num_vectors) {
			ret = generic_handle_domain_irq(msi->dev_domain, bit);
			if (ret)
				dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
						    bit);
		}
	}

	chained_irq_exit(chip, desc);
}

static void plda_msi_bottom_irq_ack(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr = port->bridge_addr;
	u32 bitpos = data->hwirq;

	writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
}

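/*
 * Compose the MSI message for a vector: the write address is the fixed
 * MSI doorbell (msi.vector_phy) and the payload is the hwirq number.
 */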
static void plda_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	phys_addr_t addr = port->msi.vector_phy;

	msg->address_lo = lower_32_bits(addr);
	msg->address_hi = upper_32_bits(addr);
	msg->data = data->hwirq;

	dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}

static int plda_msi_set_affinity(struct irq_data *irq_data,
				 const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static struct irq_chip plda_msi_bottom_irq_chip = {
	.name = "PLDA MSI",
	.irq_ack = plda_msi_bottom_irq_ack,
	.irq_compose_msi_msg = plda_compose_msi_msg,
	.irq_set_affinity = plda_msi_set_affinity,
};

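/*
 * Inner MSI domain ops: vectors are handed out from a simple bitmap
 * (msi->used) protected by msi->lock; each hwirq is the vector's bit
 * position in ISTATUS_MSI.
 */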
static int plda_irq_msi_domain_alloc(struct irq_domain *domain,
				     unsigned int virq,
				     unsigned int nr_irqs,
				     void *args)
{
	struct plda_pcie_rp *port = domain->host_data;
	struct plda_msi *msi = &port->msi;
	unsigned long bit;

	mutex_lock(&msi->lock);
	bit = find_first_zero_bit(msi->used, msi->num_vectors);
	if (bit >= msi->num_vectors) {
		mutex_unlock(&msi->lock);
		return -ENOSPC;
	}

	set_bit(bit, msi->used);

	irq_domain_set_info(domain, virq, bit, &plda_msi_bottom_irq_chip,
			    domain->host_data, handle_edge_irq, NULL, NULL);

	mutex_unlock(&msi->lock);

	return 0;
}

static void plda_irq_msi_domain_free(struct irq_domain *domain,
				     unsigned int virq,
				     unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(d);
	struct plda_msi *msi = &port->msi;

	mutex_lock(&msi->lock);

	if (test_bit(d->hwirq, msi->used))
		__clear_bit(d->hwirq, msi->used);
	else
		dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);

	mutex_unlock(&msi->lock);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= plda_irq_msi_domain_alloc,
	.free	= plda_irq_msi_domain_free,
};

static struct irq_chip plda_msi_irq_chip = {
	.name = "PLDA PCIe MSI",
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info plda_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX),
	.chip = &plda_msi_irq_chip,
};

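/*
 * Create the two-level MSI hierarchy: an inner "dev" domain that models
 * the controller's MSI vectors and a PCI MSI domain stacked on top of it
 * for the PCI core.
 */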
static int plda_allocate_msi_domains(struct plda_pcie_rp *port)
{
	struct device *dev = port->dev;
	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
	struct plda_msi *msi = &port->msi;

	mutex_init(&port->msi.lock);

	msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors,
						&msi_domain_ops, port);
	if (!msi->dev_domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
						    &plda_msi_domain_info,
						    msi->dev_domain);
	if (!msi->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		irq_domain_remove(msi->dev_domain);
		return -ENOMEM;
	}

	return 0;
}

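/*
 * Chained handler for the aggregated INTx interrupt: extract the INTA-INTD
 * bits from ISTATUS_LOCAL and dispatch each asserted line to the INTx
 * domain. The status bits are cleared by plda_ack_intx_irq().
 */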
static void plda_handle_intx(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long status;
	u32 bit;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	if (status & PM_MSI_INT_INTX_MASK) {
		status &= PM_MSI_INT_INTX_MASK;
		status >>= PM_MSI_INT_INTX_SHIFT;
		for_each_set_bit(bit, &status, PCI_NUM_INTX) {
			ret = generic_handle_domain_irq(port->intx_domain, bit);
			if (ret)
				dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
						    bit);
		}
	}

	chained_irq_exit(chip, desc);
}

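/*
 * INTx irqchip callbacks: ack clears the per-line status bit in
 * ISTATUS_LOCAL, while mask/unmask toggle the corresponding bit in
 * IMASK_LOCAL under port->lock.
 */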
static void plda_ack_intx_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr = port->bridge_addr;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);

	writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
}

static void plda_mask_intx_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long flags;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
	val &= ~mask;
	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void plda_unmask_intx_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long flags;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
	val |= mask;
	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip plda_intx_irq_chip = {
	.name = "PLDA PCIe INTx",
	.irq_ack = plda_ack_intx_irq,
	.irq_mask = plda_mask_intx_irq,
	.irq_unmask = plda_unmask_intx_irq,
};

static int plda_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &plda_intx_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = plda_pcie_intx_map,
};

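/*
 * Translate the raw ISTATUS_LOCAL value into the driver's event bit space:
 * bits up to P_ATR_EVT_DOORBELL_SHIFT keep their positions, the four INTx
 * bits collapse into the single EVENT_PM_MSI_INT_INTX bit, and the MSI and
 * system events are packed in directly above it.
 */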
static u32 plda_get_events(struct plda_pcie_rp *port)
{
	u32 events, val, origin;

	origin = readl_relaxed(port->bridge_addr + ISTATUS_LOCAL);

	/* MSI event and system events */
	val = (origin & SYS_AND_MSI_MASK) >> PM_MSI_INT_MSI_SHIFT;
	events = val << (PM_MSI_INT_MSI_SHIFT - PCI_NUM_INTX + 1);

	/* INTx events */
	if (origin & PM_MSI_INT_INTX_MASK)
		events |= BIT(PM_MSI_INT_INTX_SHIFT);

	/* The remaining events keep their register bit positions */
	events |= origin & GENMASK(P_ATR_EVT_DOORBELL_SHIFT, 0);

	return events;
}

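/*
 * Default handler for event interrupts that need no extra processing:
 * the irqchip ->irq_ack() callback already clears the status, so there is
 * nothing left to do here.
 */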
static irqreturn_t plda_event_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

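/*
 * Chained handler for the controller's top-level interrupt: read the
 * pending events, filter them against the events this port cares about
 * (events_bitmap) and demultiplex each one into the event domain.
 */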
static void plda_handle_event(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	unsigned long events;
	u32 bit;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	events = port->event_ops->get_events(port);

	events &= port->events_bitmap;
	for_each_set_bit(bit, &events, port->num_events)
		generic_handle_domain_irq(port->event_domain, bit);

	chained_irq_exit(chip, desc);
}

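/*
 * Convert an event domain hwirq back to its ISTATUS_LOCAL/IMASK_LOCAL
 * bit mask; the single INTx event expands to all four INTA-INTD bits.
 */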
static u32 plda_hwirq_to_mask(int hwirq)
{
	u32 mask;

	/* hwirqs 0 - 23 match the register bit positions */
	if (hwirq < EVENT_PM_MSI_INT_INTX)
		mask = BIT(hwirq);
	else if (hwirq == EVENT_PM_MSI_INT_INTX)
		mask = PM_MSI_INT_INTX_MASK;
	else
		mask = BIT(hwirq + PCI_NUM_INTX - 1);

	return mask;
}

static void plda_ack_event_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);

	writel_relaxed(plda_hwirq_to_mask(data->hwirq),
		       port->bridge_addr + ISTATUS_LOCAL);
}

static void plda_mask_event_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	u32 mask, val;

	mask = plda_hwirq_to_mask(data->hwirq);

	raw_spin_lock(&port->lock);
	val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
	val &= ~mask;
	writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
	raw_spin_unlock(&port->lock);
}

static void plda_unmask_event_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	u32 mask, val;

	mask = plda_hwirq_to_mask(data->hwirq);

	raw_spin_lock(&port->lock);
	val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
	val |= mask;
	writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
	raw_spin_unlock(&port->lock);
}

static struct irq_chip plda_event_irq_chip = {
	.name = "PLDA PCIe EVENT",
	.irq_ack = plda_ack_event_irq,
	.irq_mask = plda_mask_event_irq,
	.irq_unmask = plda_unmask_event_irq,
};

static const struct plda_event_ops plda_event_ops = {
	.get_events = plda_get_events,
};

static int plda_pcie_event_map(struct irq_domain *domain, unsigned int irq,
			       irq_hw_number_t hwirq)
{
	struct plda_pcie_rp *port = (void *)domain->host_data;

	irq_set_chip_and_handler(irq, port->event_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops plda_event_domain_ops = {
	.map = plda_pcie_event_map,
};

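/*
 * Create the IRQ domains used by the driver: an event domain and an INTx
 * domain, both rooted at the interrupt controller child node of the host
 * bridge DT node, followed by the MSI domains.
 */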
static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;

	/* Setup INTx */
	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "failed to find PCIe Intc node\n");
		return -EINVAL;
	}

	port->event_domain = irq_domain_add_linear(pcie_intc_node,
						   port->num_events,
						   &plda_event_domain_ops,
						   port);
	if (!port->event_domain) {
		dev_err(dev, "failed to get event domain\n");
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);

	port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						  &intx_domain_ops, port);
	if (!port->intx_domain) {
		dev_err(dev, "failed to get an INTx IRQ domain\n");
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);

	of_node_put(pcie_intc_node);
	raw_spin_lock_init(&port->lock);

	return plda_allocate_msi_domains(port);
}

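/*
 * Wire up interrupt handling for a PLDA-based host: create the IRQ
 * domains, request a handler for every event in events_bitmap, chain the
 * INTx and MSI demultiplexers onto their respective event interrupts and
 * finally chain the event demultiplexer onto the platform interrupt.
 */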
int plda_init_interrupts(struct platform_device *pdev,
			 struct plda_pcie_rp *port,
			 const struct plda_event *event)
{
	struct device *dev = &pdev->dev;
	int irq;
	int intx_irq, msi_irq, event_irq;
	int ret;
	u32 i;

	if (!port->event_ops)
		port->event_ops = &plda_event_ops;

	if (!port->event_irq_chip)
		port->event_irq_chip = &plda_event_irq_chip;

	ret = plda_pcie_init_irq_domains(port);
	if (ret) {
		dev_err(dev, "failed creating IRQ domains\n");
		return ret;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	for_each_set_bit(i, &port->events_bitmap, port->num_events) {
		event_irq = irq_create_mapping(port->event_domain, i);
		if (!event_irq) {
			dev_err(dev, "failed to map hwirq %u\n", i);
			return -ENXIO;
		}

		if (event->request_event_irq)
			ret = event->request_event_irq(port, event_irq, i);
		else
			ret = devm_request_irq(dev, event_irq,
					       plda_event_handler,
					       0, NULL, port);

		if (ret) {
			dev_err(dev, "failed to request IRQ %d\n", event_irq);
			return ret;
		}
	}

	intx_irq = irq_create_mapping(port->event_domain,
				      event->intx_event);
	if (!intx_irq) {
		dev_err(dev, "failed to map INTx interrupt\n");
		return -ENXIO;
	}

	/* Plug the INTx chained handler */
	irq_set_chained_handler_and_data(intx_irq, plda_handle_intx, port);

	msi_irq = irq_create_mapping(port->event_domain,
				     event->msi_event);
	if (!msi_irq)
		return -ENXIO;

	/* Plug the MSI chained handler */
	irq_set_chained_handler_and_data(msi_irq, plda_handle_msi, port);

	/* Plug the main event chained handler */
	irq_set_chained_handler_and_data(irq, plda_handle_event, port);

	return 0;
}
EXPORT_SYMBOL_GPL(plda_init_interrupts);

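/*
 * Program one AXI-to-PCI address translation (ATR) window: select the
 * target interface (config space for window 0, TX/RX otherwise), then
 * write the AXI source address, window size and translated PCI address.
 * The trailing writes set the size field of the PCIe-side ATR0 window and
 * zero its source address.
 */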
void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
			    phys_addr_t axi_addr, phys_addr_t pci_addr,
			    size_t size)
{
	u32 atr_sz = ilog2(size) - 1;
	u32 val;

	if (index == 0)
		val = PCIE_CONFIG_INTERFACE;
	else
		val = PCIE_TX_RX_INTERFACE;

	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_PARAM);

	val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) |
			    ATR_IMPL_ENABLE;
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_SRCADDR_PARAM);

	val = upper_32_bits(axi_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_SRC_ADDR);

	val = lower_32_bits(pci_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_ADDR_LSB);

	val = upper_32_bits(pci_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_ADDR_UDW);

	val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
	val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
	writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
	writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
}
EXPORT_SYMBOL_GPL(plda_pcie_setup_window);

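/*
 * Program a translation window for every MEM resource of the host bridge.
 * Window 0 is reserved for configuration space, so entries start at
 * index 1.
 */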
int plda_pcie_setup_iomems(struct platform_device *pdev,
			   struct plda_pcie_rp *port)
{
	void __iomem *bridge_base_addr = port->bridge_addr;
	struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
	struct resource_entry *entry;
	u64 pci_addr;
	u32 index = 1;

	resource_list_for_each_entry(entry, &bridge->windows) {
		if (resource_type(entry->res) == IORESOURCE_MEM) {
			pci_addr = entry->res->start - entry->offset;
			plda_pcie_setup_window(bridge_base_addr, index,
					       entry->res->start, pci_addr,
					       resource_size(entry->res));
			index++;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(plda_pcie_setup_iomems);