xref: /linux/drivers/pci/controller/plda/pcie-plda-host.c (revision 4602c370bdf6946b4e954a3db0ef5958aac2b7b4)
139bd5f82SMinda Chen // SPDX-License-Identifier: GPL-2.0
239bd5f82SMinda Chen /*
339bd5f82SMinda Chen  * PLDA PCIe XpressRich host controller driver
439bd5f82SMinda Chen  *
539bd5f82SMinda Chen  * Copyright (C) 2023 Microchip Co. Ltd
639bd5f82SMinda Chen  *
739bd5f82SMinda Chen  * Author: Daire McNamara <daire.mcnamara@microchip.com>
839bd5f82SMinda Chen  */
939bd5f82SMinda Chen 
10*4602c370SMinda Chen #include <linux/irqchip/chained_irq.h>
11*4602c370SMinda Chen #include <linux/irqdomain.h>
12*4602c370SMinda Chen #include <linux/msi.h>
13*4602c370SMinda Chen #include <linux/pci_regs.h>
1439bd5f82SMinda Chen #include <linux/pci-ecam.h>
1539bd5f82SMinda Chen 
1639bd5f82SMinda Chen #include "pcie-plda.h"
1739bd5f82SMinda Chen 
/*
 * Chained handler for the aggregated MSI interrupt.
 *
 * Acknowledges the MSI summary bit in ISTATUS_LOCAL first, then reads the
 * per-vector ISTATUS_MSI register and dispatches every pending vector into
 * the MSI IRQ domain.
 */
static void plda_handle_msi(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	struct plda_msi *msi = &port->msi;
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long status;
	u32 bit;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	if (status & PM_MSI_INT_MSI_MASK) {
		/* Clear the summary bit before scanning individual vectors. */
		writel_relaxed(status & PM_MSI_INT_MSI_MASK,
			       bridge_base_addr + ISTATUS_LOCAL);
		status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
		for_each_set_bit(bit, &status, msi->num_vectors) {
			ret = generic_handle_domain_irq(msi->dev_domain, bit);
			if (ret)
				dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
						    bit);
		}
	}

	chained_irq_exit(chip, desc);
}
46*4602c370SMinda Chen 
47*4602c370SMinda Chen static void plda_msi_bottom_irq_ack(struct irq_data *data)
48*4602c370SMinda Chen {
49*4602c370SMinda Chen 	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
50*4602c370SMinda Chen 	void __iomem *bridge_base_addr = port->bridge_addr;
51*4602c370SMinda Chen 	u32 bitpos = data->hwirq;
52*4602c370SMinda Chen 
53*4602c370SMinda Chen 	writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
54*4602c370SMinda Chen }
55*4602c370SMinda Chen 
56*4602c370SMinda Chen static void plda_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
57*4602c370SMinda Chen {
58*4602c370SMinda Chen 	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
59*4602c370SMinda Chen 	phys_addr_t addr = port->msi.vector_phy;
60*4602c370SMinda Chen 
61*4602c370SMinda Chen 	msg->address_lo = lower_32_bits(addr);
62*4602c370SMinda Chen 	msg->address_hi = upper_32_bits(addr);
63*4602c370SMinda Chen 	msg->data = data->hwirq;
64*4602c370SMinda Chen 
65*4602c370SMinda Chen 	dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
66*4602c370SMinda Chen 		(int)data->hwirq, msg->address_hi, msg->address_lo);
67*4602c370SMinda Chen }
68*4602c370SMinda Chen 
/*
 * Per-vector affinity cannot be set for this controller; MSIs funnel
 * through the single parent interrupt, so always refuse.
 */
static int plda_msi_set_affinity(struct irq_data *irq_data,
				 const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
74*4602c370SMinda Chen 
/* Bottom-level (hardware-facing) irqchip for individual MSI vectors. */
static struct irq_chip plda_msi_bottom_irq_chip = {
	.name = "PLDA MSI",
	.irq_ack = plda_msi_bottom_irq_ack,
	.irq_compose_msi_msg = plda_compose_msi_msg,
	.irq_set_affinity = plda_msi_set_affinity,
};
81*4602c370SMinda Chen 
82*4602c370SMinda Chen static int plda_irq_msi_domain_alloc(struct irq_domain *domain,
83*4602c370SMinda Chen 				     unsigned int virq,
84*4602c370SMinda Chen 				     unsigned int nr_irqs,
85*4602c370SMinda Chen 				     void *args)
86*4602c370SMinda Chen {
87*4602c370SMinda Chen 	struct plda_pcie_rp *port = domain->host_data;
88*4602c370SMinda Chen 	struct plda_msi *msi = &port->msi;
89*4602c370SMinda Chen 	unsigned long bit;
90*4602c370SMinda Chen 
91*4602c370SMinda Chen 	mutex_lock(&msi->lock);
92*4602c370SMinda Chen 	bit = find_first_zero_bit(msi->used, msi->num_vectors);
93*4602c370SMinda Chen 	if (bit >= msi->num_vectors) {
94*4602c370SMinda Chen 		mutex_unlock(&msi->lock);
95*4602c370SMinda Chen 		return -ENOSPC;
96*4602c370SMinda Chen 	}
97*4602c370SMinda Chen 
98*4602c370SMinda Chen 	set_bit(bit, msi->used);
99*4602c370SMinda Chen 
100*4602c370SMinda Chen 	irq_domain_set_info(domain, virq, bit, &plda_msi_bottom_irq_chip,
101*4602c370SMinda Chen 			    domain->host_data, handle_edge_irq, NULL, NULL);
102*4602c370SMinda Chen 
103*4602c370SMinda Chen 	mutex_unlock(&msi->lock);
104*4602c370SMinda Chen 
105*4602c370SMinda Chen 	return 0;
106*4602c370SMinda Chen }
107*4602c370SMinda Chen 
108*4602c370SMinda Chen static void plda_irq_msi_domain_free(struct irq_domain *domain,
109*4602c370SMinda Chen 				     unsigned int virq,
110*4602c370SMinda Chen 				     unsigned int nr_irqs)
111*4602c370SMinda Chen {
112*4602c370SMinda Chen 	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
113*4602c370SMinda Chen 	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(d);
114*4602c370SMinda Chen 	struct plda_msi *msi = &port->msi;
115*4602c370SMinda Chen 
116*4602c370SMinda Chen 	mutex_lock(&msi->lock);
117*4602c370SMinda Chen 
118*4602c370SMinda Chen 	if (test_bit(d->hwirq, msi->used))
119*4602c370SMinda Chen 		__clear_bit(d->hwirq, msi->used);
120*4602c370SMinda Chen 	else
121*4602c370SMinda Chen 		dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);
122*4602c370SMinda Chen 
123*4602c370SMinda Chen 	mutex_unlock(&msi->lock);
124*4602c370SMinda Chen }
125*4602c370SMinda Chen 
/* Inner (vector-level) MSI domain operations. */
static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= plda_irq_msi_domain_alloc,
	.free	= plda_irq_msi_domain_free,
};
130*4602c370SMinda Chen 
/* Top-level PCI/MSI irqchip; masking is done via the PCI MSI capability. */
static struct irq_chip plda_msi_irq_chip = {
	.name = "PLDA PCIe MSI",
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
137*4602c370SMinda Chen 
/* MSI domain description: default ops/chip ops plus MSI-X support. */
static struct msi_domain_info plda_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX),
	.chip = &plda_msi_irq_chip,
};
143*4602c370SMinda Chen 
144*4602c370SMinda Chen static int plda_allocate_msi_domains(struct plda_pcie_rp *port)
145*4602c370SMinda Chen {
146*4602c370SMinda Chen 	struct device *dev = port->dev;
147*4602c370SMinda Chen 	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
148*4602c370SMinda Chen 	struct plda_msi *msi = &port->msi;
149*4602c370SMinda Chen 
150*4602c370SMinda Chen 	mutex_init(&port->msi.lock);
151*4602c370SMinda Chen 
152*4602c370SMinda Chen 	msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors,
153*4602c370SMinda Chen 						&msi_domain_ops, port);
154*4602c370SMinda Chen 	if (!msi->dev_domain) {
155*4602c370SMinda Chen 		dev_err(dev, "failed to create IRQ domain\n");
156*4602c370SMinda Chen 		return -ENOMEM;
157*4602c370SMinda Chen 	}
158*4602c370SMinda Chen 
159*4602c370SMinda Chen 	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
160*4602c370SMinda Chen 						    &plda_msi_domain_info,
161*4602c370SMinda Chen 						    msi->dev_domain);
162*4602c370SMinda Chen 	if (!msi->msi_domain) {
163*4602c370SMinda Chen 		dev_err(dev, "failed to create MSI domain\n");
164*4602c370SMinda Chen 		irq_domain_remove(msi->dev_domain);
165*4602c370SMinda Chen 		return -ENOMEM;
166*4602c370SMinda Chen 	}
167*4602c370SMinda Chen 
168*4602c370SMinda Chen 	return 0;
169*4602c370SMinda Chen }
170*4602c370SMinda Chen 
/*
 * Chained handler for the aggregated INTx interrupt.
 *
 * Extracts the four INTx status bits from ISTATUS_LOCAL and dispatches
 * each pending line into the INTx IRQ domain.
 */
static void plda_handle_intx(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long status;
	u32 bit;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	if (status & PM_MSI_INT_INTX_MASK) {
		/* Shift the INTx field down so bits 0..3 are INTA..INTD. */
		status &= PM_MSI_INT_INTX_MASK;
		status >>= PM_MSI_INT_INTX_SHIFT;
		for_each_set_bit(bit, &status, PCI_NUM_INTX) {
			ret = generic_handle_domain_irq(port->intx_domain, bit);
			if (ret)
				dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
						    bit);
		}
	}

	chained_irq_exit(chip, desc);
}
197*4602c370SMinda Chen 
198*4602c370SMinda Chen static void plda_ack_intx_irq(struct irq_data *data)
199*4602c370SMinda Chen {
200*4602c370SMinda Chen 	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
201*4602c370SMinda Chen 	void __iomem *bridge_base_addr = port->bridge_addr;
202*4602c370SMinda Chen 	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
203*4602c370SMinda Chen 
204*4602c370SMinda Chen 	writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
205*4602c370SMinda Chen }
206*4602c370SMinda Chen 
207*4602c370SMinda Chen static void plda_mask_intx_irq(struct irq_data *data)
208*4602c370SMinda Chen {
209*4602c370SMinda Chen 	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
210*4602c370SMinda Chen 	void __iomem *bridge_base_addr = port->bridge_addr;
211*4602c370SMinda Chen 	unsigned long flags;
212*4602c370SMinda Chen 	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
213*4602c370SMinda Chen 	u32 val;
214*4602c370SMinda Chen 
215*4602c370SMinda Chen 	raw_spin_lock_irqsave(&port->lock, flags);
216*4602c370SMinda Chen 	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
217*4602c370SMinda Chen 	val &= ~mask;
218*4602c370SMinda Chen 	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
219*4602c370SMinda Chen 	raw_spin_unlock_irqrestore(&port->lock, flags);
220*4602c370SMinda Chen }
221*4602c370SMinda Chen 
222*4602c370SMinda Chen static void plda_unmask_intx_irq(struct irq_data *data)
223*4602c370SMinda Chen {
224*4602c370SMinda Chen 	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
225*4602c370SMinda Chen 	void __iomem *bridge_base_addr = port->bridge_addr;
226*4602c370SMinda Chen 	unsigned long flags;
227*4602c370SMinda Chen 	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
228*4602c370SMinda Chen 	u32 val;
229*4602c370SMinda Chen 
230*4602c370SMinda Chen 	raw_spin_lock_irqsave(&port->lock, flags);
231*4602c370SMinda Chen 	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
232*4602c370SMinda Chen 	val |= mask;
233*4602c370SMinda Chen 	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
234*4602c370SMinda Chen 	raw_spin_unlock_irqrestore(&port->lock, flags);
235*4602c370SMinda Chen }
236*4602c370SMinda Chen 
/* irqchip for the four legacy INTx lines. */
static struct irq_chip plda_intx_irq_chip = {
	.name = "PLDA PCIe INTx",
	.irq_ack = plda_ack_intx_irq,
	.irq_mask = plda_mask_intx_irq,
	.irq_unmask = plda_unmask_intx_irq,
};
243*4602c370SMinda Chen 
244*4602c370SMinda Chen static int plda_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
245*4602c370SMinda Chen 			      irq_hw_number_t hwirq)
246*4602c370SMinda Chen {
247*4602c370SMinda Chen 	irq_set_chip_and_handler(irq, &plda_intx_irq_chip, handle_level_irq);
248*4602c370SMinda Chen 	irq_set_chip_data(irq, domain->host_data);
249*4602c370SMinda Chen 
250*4602c370SMinda Chen 	return 0;
251*4602c370SMinda Chen }
252*4602c370SMinda Chen 
/* INTx domain operations. */
static const struct irq_domain_ops intx_domain_ops = {
	.map = plda_pcie_intx_map,
};
256*4602c370SMinda Chen 
/*
 * Translate the raw ISTATUS_LOCAL register value into the driver's event
 * bitmap.  The four INTx status bits are collapsed into a single event
 * bit, so everything above them shifts down accordingly.
 */
static u32 plda_get_events(struct plda_pcie_rp *port)
{
	u32 events, val, origin;

	origin = readl_relaxed(port->bridge_addr + ISTATUS_LOCAL);

	/*
	 * MSI event and sys events: pull the field above the INTx bits down,
	 * then reposition it just past the collapsed INTx slot.  Assumes the
	 * field layout defined by SYS_AND_MSI_MASK/PM_MSI_INT_MSI_SHIFT in
	 * pcie-plda.h (not visible here) — verify against the header.
	 */
	val = (origin & SYS_AND_MSI_MASK) >> PM_MSI_INT_MSI_SHIFT;
	events = val << (PM_MSI_INT_MSI_SHIFT - PCI_NUM_INTX + 1);

	/* INTx events: any pending INTx line sets the single INTx event bit. */
	if (origin & PM_MSI_INT_INTX_MASK)
		events |= BIT(PM_MSI_INT_INTX_SHIFT);

	/* Events at or below the doorbell bit keep their register positions. */
	events |= origin & GENMASK(P_ATR_EVT_DOORBELL_SHIFT, 0);

	return events;
}
276*4602c370SMinda Chen 
/*
 * Default per-event handler, used when the platform supplies no
 * request_event_irq callback; the irqchip ack already cleared the event,
 * so nothing more needs doing here.
 */
static irqreturn_t plda_event_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}
281*4602c370SMinda Chen 
282*4602c370SMinda Chen static void plda_handle_event(struct irq_desc *desc)
283*4602c370SMinda Chen {
284*4602c370SMinda Chen 	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
285*4602c370SMinda Chen 	unsigned long events;
286*4602c370SMinda Chen 	u32 bit;
287*4602c370SMinda Chen 	struct irq_chip *chip = irq_desc_get_chip(desc);
288*4602c370SMinda Chen 
289*4602c370SMinda Chen 	chained_irq_enter(chip, desc);
290*4602c370SMinda Chen 
291*4602c370SMinda Chen 	events = port->event_ops->get_events(port);
292*4602c370SMinda Chen 
293*4602c370SMinda Chen 	for_each_set_bit(bit, &events, port->num_events)
294*4602c370SMinda Chen 		generic_handle_domain_irq(port->event_domain, bit);
295*4602c370SMinda Chen 
296*4602c370SMinda Chen 	chained_irq_exit(chip, desc);
297*4602c370SMinda Chen }
298*4602c370SMinda Chen 
299*4602c370SMinda Chen static u32 plda_hwirq_to_mask(int hwirq)
300*4602c370SMinda Chen {
301*4602c370SMinda Chen 	u32 mask;
302*4602c370SMinda Chen 
303*4602c370SMinda Chen 	/* hwirq 23 - 0 are the same with register */
304*4602c370SMinda Chen 	if (hwirq < EVENT_PM_MSI_INT_INTX)
305*4602c370SMinda Chen 		mask = BIT(hwirq);
306*4602c370SMinda Chen 	else if (hwirq == EVENT_PM_MSI_INT_INTX)
307*4602c370SMinda Chen 		mask = PM_MSI_INT_INTX_MASK;
308*4602c370SMinda Chen 	else
309*4602c370SMinda Chen 		mask = BIT(hwirq + PCI_NUM_INTX - 1);
310*4602c370SMinda Chen 
311*4602c370SMinda Chen 	return mask;
312*4602c370SMinda Chen }
313*4602c370SMinda Chen 
314*4602c370SMinda Chen static void plda_ack_event_irq(struct irq_data *data)
315*4602c370SMinda Chen {
316*4602c370SMinda Chen 	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
317*4602c370SMinda Chen 
318*4602c370SMinda Chen 	writel_relaxed(plda_hwirq_to_mask(data->hwirq),
319*4602c370SMinda Chen 		       port->bridge_addr + ISTATUS_LOCAL);
320*4602c370SMinda Chen }
321*4602c370SMinda Chen 
322*4602c370SMinda Chen static void plda_mask_event_irq(struct irq_data *data)
323*4602c370SMinda Chen {
324*4602c370SMinda Chen 	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
325*4602c370SMinda Chen 	u32 mask, val;
326*4602c370SMinda Chen 
327*4602c370SMinda Chen 	mask = plda_hwirq_to_mask(data->hwirq);
328*4602c370SMinda Chen 
329*4602c370SMinda Chen 	raw_spin_lock(&port->lock);
330*4602c370SMinda Chen 	val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
331*4602c370SMinda Chen 	val &= ~mask;
332*4602c370SMinda Chen 	writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
333*4602c370SMinda Chen 	raw_spin_unlock(&port->lock);
334*4602c370SMinda Chen }
335*4602c370SMinda Chen 
336*4602c370SMinda Chen static void plda_unmask_event_irq(struct irq_data *data)
337*4602c370SMinda Chen {
338*4602c370SMinda Chen 	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
339*4602c370SMinda Chen 	u32 mask, val;
340*4602c370SMinda Chen 
341*4602c370SMinda Chen 	mask = plda_hwirq_to_mask(data->hwirq);
342*4602c370SMinda Chen 
343*4602c370SMinda Chen 	raw_spin_lock(&port->lock);
344*4602c370SMinda Chen 	val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
345*4602c370SMinda Chen 	val |= mask;
346*4602c370SMinda Chen 	writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
347*4602c370SMinda Chen 	raw_spin_unlock(&port->lock);
348*4602c370SMinda Chen }
349*4602c370SMinda Chen 
/* Generic irqchip for PLDA events; platforms may override via
 * port->event_irq_chip (see plda_init_interrupts()). */
static struct irq_chip plda_event_irq_chip = {
	.name = "PLDA PCIe EVENT",
	.irq_ack = plda_ack_event_irq,
	.irq_mask = plda_mask_event_irq,
	.irq_unmask = plda_unmask_event_irq,
};
356*4602c370SMinda Chen 
/* Default event ops; platforms may override via port->event_ops. */
static const struct plda_event_ops plda_event_ops = {
	.get_events = plda_get_events,
};
360*4602c370SMinda Chen 
361*4602c370SMinda Chen static int plda_pcie_event_map(struct irq_domain *domain, unsigned int irq,
362*4602c370SMinda Chen 			       irq_hw_number_t hwirq)
363*4602c370SMinda Chen {
364*4602c370SMinda Chen 	struct plda_pcie_rp *port = (void *)domain->host_data;
365*4602c370SMinda Chen 
366*4602c370SMinda Chen 	irq_set_chip_and_handler(irq, port->event_irq_chip, handle_level_irq);
367*4602c370SMinda Chen 	irq_set_chip_data(irq, domain->host_data);
368*4602c370SMinda Chen 
369*4602c370SMinda Chen 	return 0;
370*4602c370SMinda Chen }
371*4602c370SMinda Chen 
/* Event domain operations. */
static const struct irq_domain_ops plda_event_domain_ops = {
	.map = plda_pcie_event_map,
};
375*4602c370SMinda Chen 
376*4602c370SMinda Chen static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port)
377*4602c370SMinda Chen {
378*4602c370SMinda Chen 	struct device *dev = port->dev;
379*4602c370SMinda Chen 	struct device_node *node = dev->of_node;
380*4602c370SMinda Chen 	struct device_node *pcie_intc_node;
381*4602c370SMinda Chen 
382*4602c370SMinda Chen 	/* Setup INTx */
383*4602c370SMinda Chen 	pcie_intc_node = of_get_next_child(node, NULL);
384*4602c370SMinda Chen 	if (!pcie_intc_node) {
385*4602c370SMinda Chen 		dev_err(dev, "failed to find PCIe Intc node\n");
386*4602c370SMinda Chen 		return -EINVAL;
387*4602c370SMinda Chen 	}
388*4602c370SMinda Chen 
389*4602c370SMinda Chen 	port->event_domain = irq_domain_add_linear(pcie_intc_node,
390*4602c370SMinda Chen 						   port->num_events,
391*4602c370SMinda Chen 						   &plda_event_domain_ops,
392*4602c370SMinda Chen 						   port);
393*4602c370SMinda Chen 	if (!port->event_domain) {
394*4602c370SMinda Chen 		dev_err(dev, "failed to get event domain\n");
395*4602c370SMinda Chen 		of_node_put(pcie_intc_node);
396*4602c370SMinda Chen 		return -ENOMEM;
397*4602c370SMinda Chen 	}
398*4602c370SMinda Chen 
399*4602c370SMinda Chen 	irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);
400*4602c370SMinda Chen 
401*4602c370SMinda Chen 	port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
402*4602c370SMinda Chen 						  &intx_domain_ops, port);
403*4602c370SMinda Chen 	if (!port->intx_domain) {
404*4602c370SMinda Chen 		dev_err(dev, "failed to get an INTx IRQ domain\n");
405*4602c370SMinda Chen 		of_node_put(pcie_intc_node);
406*4602c370SMinda Chen 		return -ENOMEM;
407*4602c370SMinda Chen 	}
408*4602c370SMinda Chen 
409*4602c370SMinda Chen 	irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);
410*4602c370SMinda Chen 
411*4602c370SMinda Chen 	of_node_put(pcie_intc_node);
412*4602c370SMinda Chen 	raw_spin_lock_init(&port->lock);
413*4602c370SMinda Chen 
414*4602c370SMinda Chen 	return plda_allocate_msi_domains(port);
415*4602c370SMinda Chen }
416*4602c370SMinda Chen 
417*4602c370SMinda Chen int plda_init_interrupts(struct platform_device *pdev,
418*4602c370SMinda Chen 			 struct plda_pcie_rp *port,
419*4602c370SMinda Chen 			 const struct plda_event *event)
420*4602c370SMinda Chen {
421*4602c370SMinda Chen 	struct device *dev = &pdev->dev;
422*4602c370SMinda Chen 	int irq;
423*4602c370SMinda Chen 	int i, intx_irq, msi_irq, event_irq;
424*4602c370SMinda Chen 	int ret;
425*4602c370SMinda Chen 
426*4602c370SMinda Chen 	if (!port->event_ops)
427*4602c370SMinda Chen 		port->event_ops = &plda_event_ops;
428*4602c370SMinda Chen 
429*4602c370SMinda Chen 	if (!port->event_irq_chip)
430*4602c370SMinda Chen 		port->event_irq_chip = &plda_event_irq_chip;
431*4602c370SMinda Chen 
432*4602c370SMinda Chen 	ret = plda_pcie_init_irq_domains(port);
433*4602c370SMinda Chen 	if (ret) {
434*4602c370SMinda Chen 		dev_err(dev, "failed creating IRQ domains\n");
435*4602c370SMinda Chen 		return ret;
436*4602c370SMinda Chen 	}
437*4602c370SMinda Chen 
438*4602c370SMinda Chen 	irq = platform_get_irq(pdev, 0);
439*4602c370SMinda Chen 	if (irq < 0)
440*4602c370SMinda Chen 		return -ENODEV;
441*4602c370SMinda Chen 
442*4602c370SMinda Chen 	for (i = 0; i < port->num_events; i++) {
443*4602c370SMinda Chen 		event_irq = irq_create_mapping(port->event_domain, i);
444*4602c370SMinda Chen 		if (!event_irq) {
445*4602c370SMinda Chen 			dev_err(dev, "failed to map hwirq %d\n", i);
446*4602c370SMinda Chen 			return -ENXIO;
447*4602c370SMinda Chen 		}
448*4602c370SMinda Chen 
449*4602c370SMinda Chen 		if (event->request_event_irq)
450*4602c370SMinda Chen 			ret = event->request_event_irq(port, event_irq, i);
451*4602c370SMinda Chen 		else
452*4602c370SMinda Chen 			ret = devm_request_irq(dev, event_irq,
453*4602c370SMinda Chen 					       plda_event_handler,
454*4602c370SMinda Chen 					       0, NULL, port);
455*4602c370SMinda Chen 
456*4602c370SMinda Chen 		if (ret) {
457*4602c370SMinda Chen 			dev_err(dev, "failed to request IRQ %d\n", event_irq);
458*4602c370SMinda Chen 			return ret;
459*4602c370SMinda Chen 		}
460*4602c370SMinda Chen 	}
461*4602c370SMinda Chen 
462*4602c370SMinda Chen 	intx_irq = irq_create_mapping(port->event_domain,
463*4602c370SMinda Chen 				      event->intx_event);
464*4602c370SMinda Chen 	if (!intx_irq) {
465*4602c370SMinda Chen 		dev_err(dev, "failed to map INTx interrupt\n");
466*4602c370SMinda Chen 		return -ENXIO;
467*4602c370SMinda Chen 	}
468*4602c370SMinda Chen 
469*4602c370SMinda Chen 	/* Plug the INTx chained handler */
470*4602c370SMinda Chen 	irq_set_chained_handler_and_data(intx_irq, plda_handle_intx, port);
471*4602c370SMinda Chen 
472*4602c370SMinda Chen 	msi_irq = irq_create_mapping(port->event_domain,
473*4602c370SMinda Chen 				     event->msi_event);
474*4602c370SMinda Chen 	if (!msi_irq)
475*4602c370SMinda Chen 		return -ENXIO;
476*4602c370SMinda Chen 
477*4602c370SMinda Chen 	/* Plug the MSI chained handler */
478*4602c370SMinda Chen 	irq_set_chained_handler_and_data(msi_irq, plda_handle_msi, port);
479*4602c370SMinda Chen 
480*4602c370SMinda Chen 	/* Plug the main event chained handler */
481*4602c370SMinda Chen 	irq_set_chained_handler_and_data(irq, plda_handle_event, port);
482*4602c370SMinda Chen 
483*4602c370SMinda Chen 	return 0;
484*4602c370SMinda Chen }
485*4602c370SMinda Chen EXPORT_SYMBOL_GPL(plda_init_interrupts);
486*4602c370SMinda Chen 
48739bd5f82SMinda Chen void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
48839bd5f82SMinda Chen 			    phys_addr_t axi_addr, phys_addr_t pci_addr,
48939bd5f82SMinda Chen 			    size_t size)
49039bd5f82SMinda Chen {
49139bd5f82SMinda Chen 	u32 atr_sz = ilog2(size) - 1;
49239bd5f82SMinda Chen 	u32 val;
49339bd5f82SMinda Chen 
49439bd5f82SMinda Chen 	if (index == 0)
49539bd5f82SMinda Chen 		val = PCIE_CONFIG_INTERFACE;
49639bd5f82SMinda Chen 	else
49739bd5f82SMinda Chen 		val = PCIE_TX_RX_INTERFACE;
49839bd5f82SMinda Chen 
49939bd5f82SMinda Chen 	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
50039bd5f82SMinda Chen 	       ATR0_AXI4_SLV0_TRSL_PARAM);
50139bd5f82SMinda Chen 
50239bd5f82SMinda Chen 	val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) |
50339bd5f82SMinda Chen 			    ATR_IMPL_ENABLE;
50439bd5f82SMinda Chen 	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
50539bd5f82SMinda Chen 	       ATR0_AXI4_SLV0_SRCADDR_PARAM);
50639bd5f82SMinda Chen 
50739bd5f82SMinda Chen 	val = upper_32_bits(axi_addr);
50839bd5f82SMinda Chen 	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
50939bd5f82SMinda Chen 	       ATR0_AXI4_SLV0_SRC_ADDR);
51039bd5f82SMinda Chen 
51139bd5f82SMinda Chen 	val = lower_32_bits(pci_addr);
51239bd5f82SMinda Chen 	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
51339bd5f82SMinda Chen 	       ATR0_AXI4_SLV0_TRSL_ADDR_LSB);
51439bd5f82SMinda Chen 
51539bd5f82SMinda Chen 	val = upper_32_bits(pci_addr);
51639bd5f82SMinda Chen 	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
51739bd5f82SMinda Chen 	       ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
51839bd5f82SMinda Chen 
51939bd5f82SMinda Chen 	val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
52039bd5f82SMinda Chen 	val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
52139bd5f82SMinda Chen 	writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
52239bd5f82SMinda Chen 	writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
52339bd5f82SMinda Chen }
52439bd5f82SMinda Chen EXPORT_SYMBOL_GPL(plda_pcie_setup_window);
52539bd5f82SMinda Chen 
52639bd5f82SMinda Chen int plda_pcie_setup_iomems(struct platform_device *pdev,
52739bd5f82SMinda Chen 			   struct plda_pcie_rp *port)
52839bd5f82SMinda Chen {
52939bd5f82SMinda Chen 	void __iomem *bridge_base_addr = port->bridge_addr;
53039bd5f82SMinda Chen 	struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
53139bd5f82SMinda Chen 	struct resource_entry *entry;
53239bd5f82SMinda Chen 	u64 pci_addr;
53339bd5f82SMinda Chen 	u32 index = 1;
53439bd5f82SMinda Chen 
53539bd5f82SMinda Chen 	resource_list_for_each_entry(entry, &bridge->windows) {
53639bd5f82SMinda Chen 		if (resource_type(entry->res) == IORESOURCE_MEM) {
53739bd5f82SMinda Chen 			pci_addr = entry->res->start - entry->offset;
53839bd5f82SMinda Chen 			plda_pcie_setup_window(bridge_base_addr, index,
53939bd5f82SMinda Chen 					       entry->res->start, pci_addr,
54039bd5f82SMinda Chen 					       resource_size(entry->res));
54139bd5f82SMinda Chen 			index++;
54239bd5f82SMinda Chen 		}
54339bd5f82SMinda Chen 	}
54439bd5f82SMinda Chen 
54539bd5f82SMinda Chen 	return 0;
54639bd5f82SMinda Chen }
54739bd5f82SMinda Chen EXPORT_SYMBOL_GPL(plda_pcie_setup_iomems);
548