// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip AXI PCIe Bridge host controller driver
 *
 * Copyright (c) 2018 - 2020 Microchip Corporation. All rights reserved.
 *
 * Author: Daire McNamara <daire.mcnamara@microchip.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-plda.h"

/* PCIe Bridge Phy and Controller Phy offsets */
#define MC_PCIE1_BRIDGE_ADDR			0x00008000u
#define MC_PCIE1_CTRL_ADDR			0x0000a000u

#define MC_PCIE_BRIDGE_ADDR			(MC_PCIE1_BRIDGE_ADDR)
#define MC_PCIE_CTRL_ADDR			(MC_PCIE1_CTRL_ADDR)
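/*
 * Both register blocks live inside the AXI register region ioremapped in
 * mc_host_probe(); the offsets above are relative to axi_base_addr. Only
 * the PCIE1 instance's offsets are used by this driver.
 */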

/* PCIe Controller Phy Regs */
#define SEC_ERROR_EVENT_CNT			0x20
#define DED_ERROR_EVENT_CNT			0x24
#define SEC_ERROR_INT				0x28
#define  SEC_ERROR_INT_TX_RAM_SEC_ERR_INT	GENMASK(3, 0)
#define  SEC_ERROR_INT_RX_RAM_SEC_ERR_INT	GENMASK(7, 4)
#define  SEC_ERROR_INT_PCIE2AXI_RAM_SEC_ERR_INT	GENMASK(11, 8)
#define  SEC_ERROR_INT_AXI2PCIE_RAM_SEC_ERR_INT	GENMASK(15, 12)
#define  SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT	GENMASK(15, 0)
#define  NUM_SEC_ERROR_INTS			(4)
#define SEC_ERROR_INT_MASK			0x2c
#define DED_ERROR_INT				0x30
#define  DED_ERROR_INT_TX_RAM_DED_ERR_INT	GENMASK(3, 0)
#define  DED_ERROR_INT_RX_RAM_DED_ERR_INT	GENMASK(7, 4)
#define  DED_ERROR_INT_PCIE2AXI_RAM_DED_ERR_INT	GENMASK(11, 8)
#define  DED_ERROR_INT_AXI2PCIE_RAM_DED_ERR_INT	GENMASK(15, 12)
#define  DED_ERROR_INT_ALL_RAM_DED_ERR_INT	GENMASK(15, 0)
#define  NUM_DED_ERROR_INTS			(4)
#define DED_ERROR_INT_MASK			0x34
#define ECC_CONTROL				0x38
#define  ECC_CONTROL_TX_RAM_INJ_ERROR_0		BIT(0)
#define  ECC_CONTROL_TX_RAM_INJ_ERROR_1		BIT(1)
#define  ECC_CONTROL_TX_RAM_INJ_ERROR_2		BIT(2)
#define  ECC_CONTROL_TX_RAM_INJ_ERROR_3		BIT(3)
#define  ECC_CONTROL_RX_RAM_INJ_ERROR_0		BIT(4)
#define  ECC_CONTROL_RX_RAM_INJ_ERROR_1		BIT(5)
#define  ECC_CONTROL_RX_RAM_INJ_ERROR_2		BIT(6)
#define  ECC_CONTROL_RX_RAM_INJ_ERROR_3		BIT(7)
#define  ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_0	BIT(8)
#define  ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_1	BIT(9)
#define  ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_2	BIT(10)
#define  ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_3	BIT(11)
#define  ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_0	BIT(12)
#define  ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_1	BIT(13)
#define  ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_2	BIT(14)
#define  ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_3	BIT(15)
#define  ECC_CONTROL_TX_RAM_ECC_BYPASS		BIT(24)
#define  ECC_CONTROL_RX_RAM_ECC_BYPASS		BIT(25)
#define  ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS	BIT(26)
#define  ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS	BIT(27)
#define PCIE_EVENT_INT				0x14c
#define  PCIE_EVENT_INT_L2_EXIT_INT		BIT(0)
#define  PCIE_EVENT_INT_HOTRST_EXIT_INT		BIT(1)
#define  PCIE_EVENT_INT_DLUP_EXIT_INT		BIT(2)
#define  PCIE_EVENT_INT_MASK			GENMASK(2, 0)
#define  PCIE_EVENT_INT_L2_EXIT_INT_MASK	BIT(16)
#define  PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK	BIT(17)
#define  PCIE_EVENT_INT_DLUP_EXIT_INT_MASK	BIT(18)
#define  PCIE_EVENT_INT_ENB_MASK		GENMASK(18, 16)
#define  PCIE_EVENT_INT_ENB_SHIFT		16
#define  NUM_PCIE_EVENTS			(3)

/* PCIe Config space MSI capability structure */
#define MC_MSI_CAP_CTRL_OFFSET			0xe0u

/* Events */
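/*
 * Event numbering: the Microchip-specific events below occupy hwirqs
 * 0..NUM_MC_EVENTS - 1 in the event IRQ domain; the generic PLDA bridge
 * events (PLDA_*) are appended after them.
 */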
#define EVENT_PCIE_L2_EXIT			0
#define EVENT_PCIE_HOTRST_EXIT			1
#define EVENT_PCIE_DLUP_EXIT			2
#define EVENT_SEC_TX_RAM_SEC_ERR		3
#define EVENT_SEC_RX_RAM_SEC_ERR		4
#define EVENT_SEC_PCIE2AXI_RAM_SEC_ERR		5
#define EVENT_SEC_AXI2PCIE_RAM_SEC_ERR		6
#define EVENT_DED_TX_RAM_DED_ERR		7
#define EVENT_DED_RX_RAM_DED_ERR		8
#define EVENT_DED_PCIE2AXI_RAM_DED_ERR		9
#define EVENT_DED_AXI2PCIE_RAM_DED_ERR		10
#define EVENT_LOCAL_DMA_END_ENGINE_0		11
#define EVENT_LOCAL_DMA_END_ENGINE_1		12
#define EVENT_LOCAL_DMA_ERROR_ENGINE_0		13
#define EVENT_LOCAL_DMA_ERROR_ENGINE_1		14
#define NUM_MC_EVENTS				15
#define EVENT_LOCAL_A_ATR_EVT_POST_ERR		(NUM_MC_EVENTS + PLDA_AXI_POST_ERR)
#define EVENT_LOCAL_A_ATR_EVT_FETCH_ERR		(NUM_MC_EVENTS + PLDA_AXI_FETCH_ERR)
#define EVENT_LOCAL_A_ATR_EVT_DISCARD_ERR	(NUM_MC_EVENTS + PLDA_AXI_DISCARD_ERR)
#define EVENT_LOCAL_A_ATR_EVT_DOORBELL		(NUM_MC_EVENTS + PLDA_AXI_DOORBELL)
#define EVENT_LOCAL_P_ATR_EVT_POST_ERR		(NUM_MC_EVENTS + PLDA_PCIE_POST_ERR)
#define EVENT_LOCAL_P_ATR_EVT_FETCH_ERR		(NUM_MC_EVENTS + PLDA_PCIE_FETCH_ERR)
#define EVENT_LOCAL_P_ATR_EVT_DISCARD_ERR	(NUM_MC_EVENTS + PLDA_PCIE_DISCARD_ERR)
#define EVENT_LOCAL_P_ATR_EVT_DOORBELL		(NUM_MC_EVENTS + PLDA_PCIE_DOORBELL)
#define EVENT_LOCAL_PM_MSI_INT_INTX		(NUM_MC_EVENTS + PLDA_INTX)
#define EVENT_LOCAL_PM_MSI_INT_MSI		(NUM_MC_EVENTS + PLDA_MSI)
#define EVENT_LOCAL_PM_MSI_INT_AER_EVT		(NUM_MC_EVENTS + PLDA_AER_EVENT)
#define EVENT_LOCAL_PM_MSI_INT_EVENTS		(NUM_MC_EVENTS + PLDA_MISC_EVENTS)
#define EVENT_LOCAL_PM_MSI_INT_SYS_ERR		(NUM_MC_EVENTS + PLDA_SYS_ERR)
#define NUM_EVENTS				(NUM_MC_EVENTS + PLDA_INT_EVENT_NUM)

#define PCIE_EVENT_CAUSE(x, s)	\
	[EVENT_PCIE_ ## x] = { __stringify(x), s }

#define SEC_ERROR_CAUSE(x, s) \
	[EVENT_SEC_ ## x] = { __stringify(x), s }

#define DED_ERROR_CAUSE(x, s) \
	[EVENT_DED_ ## x] = { __stringify(x), s }

#define LOCAL_EVENT_CAUSE(x, s) \
	[EVENT_LOCAL_ ## x] = { __stringify(x), s }

#define PCIE_EVENT(x) \
	.base = MC_PCIE_CTRL_ADDR, \
	.offset = PCIE_EVENT_INT, \
	.mask_offset = PCIE_EVENT_INT, \
	.mask_high = 1, \
	.mask = PCIE_EVENT_INT_ ## x ## _INT, \
	.enb_mask = PCIE_EVENT_INT_ENB_MASK

#define SEC_EVENT(x) \
	.base = MC_PCIE_CTRL_ADDR, \
	.offset = SEC_ERROR_INT, \
	.mask_offset = SEC_ERROR_INT_MASK, \
	.mask = SEC_ERROR_INT_ ## x ## _INT, \
	.mask_high = 1, \
	.enb_mask = 0

#define DED_EVENT(x) \
	.base = MC_PCIE_CTRL_ADDR, \
	.offset = DED_ERROR_INT, \
	.mask_offset = DED_ERROR_INT_MASK, \
	.mask_high = 1, \
	.mask = DED_ERROR_INT_ ## x ## _INT, \
	.enb_mask = 0

#define LOCAL_EVENT(x) \
	.base = MC_PCIE_BRIDGE_ADDR, \
	.offset = ISTATUS_LOCAL, \
	.mask_offset = IMASK_LOCAL, \
	.mask_high = 0, \
	.mask = x ## _MASK, \
	.enb_mask = 0

#define PCIE_EVENT_TO_EVENT_MAP(x) \
	{ PCIE_EVENT_INT_ ## x ## _INT, EVENT_PCIE_ ## x }

#define SEC_ERROR_TO_EVENT_MAP(x) \
	{ SEC_ERROR_INT_ ## x ## _INT, EVENT_SEC_ ## x }

#define DED_ERROR_TO_EVENT_MAP(x) \
	{ DED_ERROR_INT_ ## x ## _INT, EVENT_DED_ ## x }

#define LOCAL_STATUS_TO_EVENT_MAP(x) \
	{ x ## _MASK, EVENT_LOCAL_ ## x }

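/*
 * Relates a field in a hardware status register to the hwirq number of
 * the corresponding event in the event IRQ domain (see reg_to_event()).
 */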
struct event_map {
	u32 reg_mask;
	u32 event_bit;
};

struct mc_pcie {
	struct plda_pcie_rp plda;
	void __iomem *axi_base_addr;
};

struct cause {
	const char *sym;
	const char *str;
};

static const struct cause event_cause[NUM_EVENTS] = {
	PCIE_EVENT_CAUSE(L2_EXIT, "L2 exit event"),
	PCIE_EVENT_CAUSE(HOTRST_EXIT, "Hot reset exit event"),
	PCIE_EVENT_CAUSE(DLUP_EXIT, "DLUP exit event"),
	SEC_ERROR_CAUSE(TX_RAM_SEC_ERR, "sec error in tx buffer"),
	SEC_ERROR_CAUSE(RX_RAM_SEC_ERR, "sec error in rx buffer"),
	SEC_ERROR_CAUSE(PCIE2AXI_RAM_SEC_ERR, "sec error in pcie2axi buffer"),
	SEC_ERROR_CAUSE(AXI2PCIE_RAM_SEC_ERR, "sec error in axi2pcie buffer"),
	DED_ERROR_CAUSE(TX_RAM_DED_ERR, "ded error in tx buffer"),
	DED_ERROR_CAUSE(RX_RAM_DED_ERR, "ded error in rx buffer"),
	DED_ERROR_CAUSE(PCIE2AXI_RAM_DED_ERR, "ded error in pcie2axi buffer"),
	DED_ERROR_CAUSE(AXI2PCIE_RAM_DED_ERR, "ded error in axi2pcie buffer"),
	LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_0, "dma engine 0 error"),
	LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_1, "dma engine 1 error"),
	LOCAL_EVENT_CAUSE(A_ATR_EVT_POST_ERR, "axi write request error"),
	LOCAL_EVENT_CAUSE(A_ATR_EVT_FETCH_ERR, "axi read request error"),
	LOCAL_EVENT_CAUSE(A_ATR_EVT_DISCARD_ERR, "axi read timeout"),
	LOCAL_EVENT_CAUSE(P_ATR_EVT_POST_ERR, "pcie write request error"),
	LOCAL_EVENT_CAUSE(P_ATR_EVT_FETCH_ERR, "pcie read request error"),
	LOCAL_EVENT_CAUSE(P_ATR_EVT_DISCARD_ERR, "pcie read timeout"),
	LOCAL_EVENT_CAUSE(PM_MSI_INT_AER_EVT, "aer event"),
	LOCAL_EVENT_CAUSE(PM_MSI_INT_EVENTS, "pm/ltr/hotplug event"),
	LOCAL_EVENT_CAUSE(PM_MSI_INT_SYS_ERR, "system error"),
};

static struct event_map pcie_event_to_event[] = {
	PCIE_EVENT_TO_EVENT_MAP(L2_EXIT),
	PCIE_EVENT_TO_EVENT_MAP(HOTRST_EXIT),
	PCIE_EVENT_TO_EVENT_MAP(DLUP_EXIT),
};

static struct event_map sec_error_to_event[] = {
	SEC_ERROR_TO_EVENT_MAP(TX_RAM_SEC_ERR),
	SEC_ERROR_TO_EVENT_MAP(RX_RAM_SEC_ERR),
	SEC_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_SEC_ERR),
	SEC_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_SEC_ERR),
};

static struct event_map ded_error_to_event[] = {
	DED_ERROR_TO_EVENT_MAP(TX_RAM_DED_ERR),
	DED_ERROR_TO_EVENT_MAP(RX_RAM_DED_ERR),
	DED_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_DED_ERR),
	DED_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_DED_ERR),
};

static struct event_map local_status_to_event[] = {
	LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_0),
	LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_1),
	LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_0),
	LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_1),
	LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_POST_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_FETCH_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DISCARD_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DOORBELL),
	LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_POST_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_FETCH_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DISCARD_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DOORBELL),
	LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_INTX),
	LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_MSI),
	LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_AER_EVT),
	LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_EVENTS),
	LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_SYS_ERR),
};

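/*
 * Describes where the status and mask bits for each event live: @base
 * selects the bridge or controller register block, @offset/@mask_offset
 * locate the status and mask registers, @mask_high is set when writing a
 * 1 to the mask register masks the event (controller registers) rather
 * than enabling it (IMASK_LOCAL), and a non-zero @enb_mask indicates that
 * the mask bits sit in the upper half of the same register
 * (PCIE_EVENT_INT).
 */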
static struct {
	u32 base;
	u32 offset;
	u32 mask;
	u32 shift;
	u32 enb_mask;
	u32 mask_high;
	u32 mask_offset;
} event_descs[] = {
	{ PCIE_EVENT(L2_EXIT) },
	{ PCIE_EVENT(HOTRST_EXIT) },
	{ PCIE_EVENT(DLUP_EXIT) },
	{ SEC_EVENT(TX_RAM_SEC_ERR) },
	{ SEC_EVENT(RX_RAM_SEC_ERR) },
	{ SEC_EVENT(PCIE2AXI_RAM_SEC_ERR) },
	{ SEC_EVENT(AXI2PCIE_RAM_SEC_ERR) },
	{ DED_EVENT(TX_RAM_DED_ERR) },
	{ DED_EVENT(RX_RAM_DED_ERR) },
	{ DED_EVENT(PCIE2AXI_RAM_DED_ERR) },
	{ DED_EVENT(AXI2PCIE_RAM_DED_ERR) },
	{ LOCAL_EVENT(DMA_END_ENGINE_0) },
	{ LOCAL_EVENT(DMA_END_ENGINE_1) },
	{ LOCAL_EVENT(DMA_ERROR_ENGINE_0) },
	{ LOCAL_EVENT(DMA_ERROR_ENGINE_1) },
	{ LOCAL_EVENT(A_ATR_EVT_POST_ERR) },
	{ LOCAL_EVENT(A_ATR_EVT_FETCH_ERR) },
	{ LOCAL_EVENT(A_ATR_EVT_DISCARD_ERR) },
	{ LOCAL_EVENT(A_ATR_EVT_DOORBELL) },
	{ LOCAL_EVENT(P_ATR_EVT_POST_ERR) },
	{ LOCAL_EVENT(P_ATR_EVT_FETCH_ERR) },
	{ LOCAL_EVENT(P_ATR_EVT_DISCARD_ERR) },
	{ LOCAL_EVENT(P_ATR_EVT_DOORBELL) },
	{ LOCAL_EVENT(PM_MSI_INT_INTX) },
	{ LOCAL_EVENT(PM_MSI_INT_MSI) },
	{ LOCAL_EVENT(PM_MSI_INT_AER_EVT) },
	{ LOCAL_EVENT(PM_MSI_INT_EVENTS) },
	{ LOCAL_EVENT(PM_MSI_INT_SYS_ERR) },
};

static char poss_clks[][5] = { "fic0", "fic1", "fic2", "fic3" };

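/*
 * Only a single controller instance is supported: the generic ECAM
 * ->init() hook (mc_platform_init()) receives only the config window, so,
 * presumably for that reason, the port is kept in a file-scope pointer
 * rather than in driver data.
 */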
static struct mc_pcie *port;

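/*
 * Fix up the root port's MSI capability (at MC_MSI_CAP_CTRL_OFFSET in its
 * config space): flag MSI as enabled, set the queue size to what the
 * capability advertises and point the message address at the bridge's MSI
 * target address read back in mc_host_probe().
 */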
static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *ecam)
{
	struct plda_msi *msi = &port->plda.msi;
	u16 reg;
	u8 queue_size;

	/* Fixup MSI enable flag */
	reg = readw_relaxed(ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
	reg |= PCI_MSI_FLAGS_ENABLE;
	writew_relaxed(reg, ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);

	/* Fixup PCI MSI queue flags */
	queue_size = FIELD_GET(PCI_MSI_FLAGS_QMASK, reg);
	reg |= FIELD_PREP(PCI_MSI_FLAGS_QSIZE, queue_size);
	writew_relaxed(reg, ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);

	/* Fixup MSI addr fields */
	writel_relaxed(lower_32_bits(msi->vector_phy),
		       ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_LO);
	writel_relaxed(upper_32_bits(msi->vector_phy),
		       ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_HI);
}

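/*
 * Chained handler for the MSI event: acknowledge PM_MSI_INT_MSI in
 * ISTATUS_LOCAL, then demultiplex ISTATUS_MSI into the individual vectors
 * of the MSI domain.
 */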
static void plda_handle_msi(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	struct plda_msi *msi = &port->msi;
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long status;
	u32 bit;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	if (status & PM_MSI_INT_MSI_MASK) {
		writel_relaxed(status & PM_MSI_INT_MSI_MASK, bridge_base_addr + ISTATUS_LOCAL);
		status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
		for_each_set_bit(bit, &status, msi->num_vectors) {
			ret = generic_handle_domain_irq(msi->dev_domain, bit);
			if (ret)
				dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
						    bit);
		}
	}

	chained_irq_exit(chip, desc);
}

static void plda_msi_bottom_irq_ack(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr = port->bridge_addr;
	u32 bitpos = data->hwirq;

	writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
}

static void plda_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	phys_addr_t addr = port->msi.vector_phy;

	msg->address_lo = lower_32_bits(addr);
	msg->address_hi = upper_32_bits(addr);
	msg->data = data->hwirq;

	dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}

static int plda_msi_set_affinity(struct irq_data *irq_data,
				 const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static struct irq_chip plda_msi_bottom_irq_chip = {
	.name = "PLDA MSI",
	.irq_ack = plda_msi_bottom_irq_ack,
	.irq_compose_msi_msg = plda_compose_msi_msg,
	.irq_set_affinity = plda_msi_set_affinity,
};

static int plda_irq_msi_domain_alloc(struct irq_domain *domain,
				     unsigned int virq,
				     unsigned int nr_irqs,
				     void *args)
{
	struct plda_pcie_rp *port = domain->host_data;
	struct plda_msi *msi = &port->msi;
	unsigned long bit;

	mutex_lock(&msi->lock);
	bit = find_first_zero_bit(msi->used, msi->num_vectors);
	if (bit >= msi->num_vectors) {
		mutex_unlock(&msi->lock);
		return -ENOSPC;
	}

	set_bit(bit, msi->used);

	irq_domain_set_info(domain, virq, bit, &plda_msi_bottom_irq_chip,
			    domain->host_data, handle_edge_irq, NULL, NULL);

	mutex_unlock(&msi->lock);

	return 0;
}

static void plda_irq_msi_domain_free(struct irq_domain *domain,
				     unsigned int virq,
				     unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(d);
	struct plda_msi *msi = &port->msi;

	mutex_lock(&msi->lock);

	if (test_bit(d->hwirq, msi->used))
		__clear_bit(d->hwirq, msi->used);
	else
		dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);

	mutex_unlock(&msi->lock);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= plda_irq_msi_domain_alloc,
	.free	= plda_irq_msi_domain_free,
};

static struct irq_chip plda_msi_irq_chip = {
	.name = "PLDA PCIe MSI",
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info plda_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX),
	.chip = &plda_msi_irq_chip,
};

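/*
 * Standard two-level MSI setup: the inner dev_domain hands out hwirqs
 * backed by the bitmap in struct plda_msi, and a PCI MSI domain is stacked
 * on top of it for the PCI core to use.
 */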
static int plda_allocate_msi_domains(struct plda_pcie_rp *port)
{
	struct device *dev = port->dev;
	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
	struct plda_msi *msi = &port->msi;

	mutex_init(&port->msi.lock);

	msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors,
						&msi_domain_ops, port);
	if (!msi->dev_domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
						    &plda_msi_domain_info,
						    msi->dev_domain);
	if (!msi->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		irq_domain_remove(msi->dev_domain);
		return -ENOMEM;
	}

	return 0;
}

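/*
 * Chained handler for the INTx event: the four INTx sources share a bit
 * field in ISTATUS_LOCAL and are demultiplexed into the INTx domain here.
 */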
static void plda_handle_intx(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long status;
	u32 bit;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	if (status & PM_MSI_INT_INTX_MASK) {
		status &= PM_MSI_INT_INTX_MASK;
		status >>= PM_MSI_INT_INTX_SHIFT;
		for_each_set_bit(bit, &status, PCI_NUM_INTX) {
			ret = generic_handle_domain_irq(port->intx_domain, bit);
			if (ret)
				dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
						    bit);
		}
	}

	chained_irq_exit(chip, desc);
}

static void plda_ack_intx_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr = port->bridge_addr;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);

	writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
}

static void plda_mask_intx_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long flags;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
	val &= ~mask;
	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void plda_unmask_intx_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long flags;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
	val |= mask;
	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip plda_intx_irq_chip = {
	.name = "PLDA PCIe INTx",
	.irq_ack = plda_ack_intx_irq,
	.irq_mask = plda_mask_intx_irq,
	.irq_unmask = plda_unmask_intx_irq,
};

static int plda_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &plda_intx_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = plda_pcie_intx_map,
};

static inline u32 reg_to_event(u32 reg, struct event_map field)
{
	return (reg & field.reg_mask) ? BIT(field.event_bit) : 0;
}

static u32 pcie_events(struct mc_pcie *port)
{
	void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
	u32 reg = readl_relaxed(ctrl_base_addr + PCIE_EVENT_INT);
	u32 val = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(pcie_event_to_event); i++)
		val |= reg_to_event(reg, pcie_event_to_event[i]);

	return val;
}

static u32 sec_errors(struct mc_pcie *port)
{
	void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
	u32 reg = readl_relaxed(ctrl_base_addr + SEC_ERROR_INT);
	u32 val = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(sec_error_to_event); i++)
		val |= reg_to_event(reg, sec_error_to_event[i]);

	return val;
}

static u32 ded_errors(struct mc_pcie *port)
{
	void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
	u32 reg = readl_relaxed(ctrl_base_addr + DED_ERROR_INT);
	u32 val = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(ded_error_to_event); i++)
		val |= reg_to_event(reg, ded_error_to_event[i]);

	return val;
}

static u32 local_events(struct mc_pcie *port)
{
	void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	u32 reg = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	u32 val = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(local_status_to_event); i++)
		val |= reg_to_event(reg, local_status_to_event[i]);

	return val;
}

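/*
 * Gather the status bits scattered across PCIE_EVENT_INT, SEC_ERROR_INT,
 * DED_ERROR_INT and ISTATUS_LOCAL into a single bitmap indexed by the
 * EVENT_* numbers defined above.
 */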
static u32 mc_get_events(struct plda_pcie_rp *port)
{
	struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
	u32 events = 0;

	events |= pcie_events(mc_port);
	events |= sec_errors(mc_port);
	events |= ded_errors(mc_port);
	events |= local_events(mc_port);

	return events;
}

static irqreturn_t mc_event_handler(int irq, void *dev_id)
{
	struct plda_pcie_rp *port = dev_id;
	struct device *dev = port->dev;
	struct irq_data *data;

	data = irq_domain_get_irq_data(port->event_domain, irq);

	if (event_cause[data->hwirq].str)
		dev_err_ratelimited(dev, "%s\n", event_cause[data->hwirq].str);
	else
		dev_err_ratelimited(dev, "bad event IRQ %ld\n", data->hwirq);

	return IRQ_HANDLED;
}

static irqreturn_t plda_event_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static void plda_handle_event(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	unsigned long events;
	u32 bit;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	events = mc_get_events(port);

	for_each_set_bit(bit, &events, port->num_events)
		generic_handle_domain_irq(port->event_domain, bit);

	chained_irq_exit(chip, desc);
}

static void mc_ack_event_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
	u32 event = data->hwirq;
	void __iomem *addr;
	u32 mask;

	addr = mc_port->axi_base_addr + event_descs[event].base +
		event_descs[event].offset;
	mask = event_descs[event].mask;
	mask |= event_descs[event].enb_mask;

	writel_relaxed(mask, addr);
}

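/*
 * Mask/unmask honour the per-event description in event_descs[]: for
 * "mask_high" registers a 1 in the mask register disables the event, for
 * IMASK_LOCAL a 1 enables it, and for PCIE_EVENT_INT the mask bits live in
 * the upper half of the register (PCIE_EVENT_INT_ENB_SHIFT/_MASK).
 */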
static void mc_mask_event_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
	u32 event = data->hwirq;
	void __iomem *addr;
	u32 mask;
	u32 val;

	addr = mc_port->axi_base_addr + event_descs[event].base +
		event_descs[event].mask_offset;
	mask = event_descs[event].mask;
	if (event_descs[event].enb_mask) {
		mask <<= PCIE_EVENT_INT_ENB_SHIFT;
		mask &= PCIE_EVENT_INT_ENB_MASK;
	}

	if (!event_descs[event].mask_high)
		mask = ~mask;

	raw_spin_lock(&port->lock);
	val = readl_relaxed(addr);
	if (event_descs[event].mask_high)
		val |= mask;
	else
		val &= mask;

	writel_relaxed(val, addr);
	raw_spin_unlock(&port->lock);
}

static void mc_unmask_event_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
	u32 event = data->hwirq;
	void __iomem *addr;
	u32 mask;
	u32 val;

	addr = mc_port->axi_base_addr + event_descs[event].base +
		event_descs[event].mask_offset;
	mask = event_descs[event].mask;

	if (event_descs[event].enb_mask)
		mask <<= PCIE_EVENT_INT_ENB_SHIFT;

	if (event_descs[event].mask_high)
		mask = ~mask;

	if (event_descs[event].enb_mask)
		mask &= PCIE_EVENT_INT_ENB_MASK;

	raw_spin_lock(&port->lock);
	val = readl_relaxed(addr);
	if (event_descs[event].mask_high)
		val &= mask;
	else
		val |= mask;
	writel_relaxed(val, addr);
	raw_spin_unlock(&port->lock);
}

static struct irq_chip mc_event_irq_chip = {
	.name = "Microchip PCIe EVENT",
	.irq_ack = mc_ack_event_irq,
	.irq_mask = mc_mask_event_irq,
	.irq_unmask = mc_unmask_event_irq,
};

static int plda_pcie_event_map(struct irq_domain *domain, unsigned int irq,
			       irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &mc_event_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops plda_event_domain_ops = {
	.map = plda_pcie_event_map,
};

static inline void mc_pcie_deinit_clk(void *data)
{
	struct clk *clk = data;

	clk_disable_unprepare(clk);
}

static inline struct clk *mc_pcie_init_clk(struct device *dev, const char *id)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get_optional(dev, id);
	if (IS_ERR(clk))
		return clk;
	if (!clk)
		return clk;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ERR_PTR(ret);

	devm_add_action_or_reset(dev, mc_pcie_deinit_clk, clk);

	return clk;
}

static int mc_pcie_init_clks(struct device *dev)
{
	int i;
	struct clk *fic;

	/*
	 * PCIe may be clocked via Fabric Interface using between 1 and 4
	 * clocks. Scan DT for clocks and enable them if present
	 */
	for (i = 0; i < ARRAY_SIZE(poss_clks); i++) {
		fic = mc_pcie_init_clk(dev, poss_clks[i]);
		if (IS_ERR(fic))
			return PTR_ERR(fic);
	}

	return 0;
}

static int mc_request_event_irq(struct plda_pcie_rp *plda, int event_irq,
				int event)
{
	return devm_request_irq(plda->dev, event_irq, mc_event_handler,
				0, event_cause[event].sym, plda);
}

static const struct plda_event mc_event = {
	.request_event_irq = mc_request_event_irq,
};

static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;

	/* Setup INTx */
	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "failed to find PCIe Intc node\n");
		return -EINVAL;
	}

	port->event_domain = irq_domain_add_linear(pcie_intc_node,
						   port->num_events,
						   &plda_event_domain_ops,
						   port);
	if (!port->event_domain) {
		dev_err(dev, "failed to get event domain\n");
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);

	port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						  &intx_domain_ops, port);
	if (!port->intx_domain) {
		dev_err(dev, "failed to get an INTx IRQ domain\n");
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);

	of_node_put(pcie_intc_node);
	raw_spin_lock_init(&port->lock);

	return plda_allocate_msi_domains(port);
}

static inline void mc_clear_secs(struct mc_pcie *port)
{
	void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;

	writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT, ctrl_base_addr +
		       SEC_ERROR_INT);
	writel_relaxed(0, ctrl_base_addr + SEC_ERROR_EVENT_CNT);
}

static inline void mc_clear_deds(struct mc_pcie *port)
{
	void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;

	writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT, ctrl_base_addr +
		       DED_ERROR_INT);
	writel_relaxed(0, ctrl_base_addr + DED_ERROR_EVENT_CNT);
}

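/*
 * Quiesce all interrupt sources before any IRQ domains exist: enable the
 * RAM ECC bypasses, mask the SEC/DED, local, PCIe event and host
 * interrupts, and clear any status that may have been left asserted by
 * earlier boot stages.
 */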
static void mc_disable_interrupts(struct mc_pcie *port)
{
	void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
	u32 val;

	/* Ensure ECC bypass is enabled */
	val = ECC_CONTROL_TX_RAM_ECC_BYPASS |
	      ECC_CONTROL_RX_RAM_ECC_BYPASS |
	      ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS |
	      ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS;
	writel_relaxed(val, ctrl_base_addr + ECC_CONTROL);

	/* Disable SEC errors and clear any outstanding */
	writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT, ctrl_base_addr +
		       SEC_ERROR_INT_MASK);
	mc_clear_secs(port);

	/* Disable DED errors and clear any outstanding */
	writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT, ctrl_base_addr +
		       DED_ERROR_INT_MASK);
	mc_clear_deds(port);

	/* Disable local interrupts and clear any outstanding */
	writel_relaxed(0, bridge_base_addr + IMASK_LOCAL);
	writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_LOCAL);
	writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_MSI);

	/* Disable PCIe events and clear any outstanding */
	val = PCIE_EVENT_INT_L2_EXIT_INT |
	      PCIE_EVENT_INT_HOTRST_EXIT_INT |
	      PCIE_EVENT_INT_DLUP_EXIT_INT |
	      PCIE_EVENT_INT_L2_EXIT_INT_MASK |
	      PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK |
	      PCIE_EVENT_INT_DLUP_EXIT_INT_MASK;
	writel_relaxed(val, ctrl_base_addr + PCIE_EVENT_INT);

	/* Disable host interrupts and clear any outstanding */
	writel_relaxed(0, bridge_base_addr + IMASK_HOST);
	writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_HOST);
}

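/*
 * The controller exposes a single parent interrupt. Every event gets a
 * mapping in the event (nexus) domain and its own handler; the INTx and
 * MSI events are additionally used as chained parents for the INTx and
 * MSI domains, and the parent interrupt itself is chained to
 * plda_handle_event().
 */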
static int plda_init_interrupts(struct platform_device *pdev,
				struct plda_pcie_rp *port,
				const struct plda_event *event)
{
	struct device *dev = &pdev->dev;
	int irq;
	int i, intx_irq, msi_irq, event_irq;
	int ret;

	ret = plda_pcie_init_irq_domains(port);
	if (ret) {
		dev_err(dev, "failed creating IRQ domains\n");
		return ret;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	for (i = 0; i < port->num_events; i++) {
		event_irq = irq_create_mapping(port->event_domain, i);
		if (!event_irq) {
			dev_err(dev, "failed to map hwirq %d\n", i);
			return -ENXIO;
		}

		if (event->request_event_irq)
			ret = event->request_event_irq(port, event_irq, i);
		else
			ret = devm_request_irq(dev, event_irq,
					       plda_event_handler,
					       0, NULL, port);

		if (ret) {
			dev_err(dev, "failed to request IRQ %d\n", event_irq);
			return ret;
		}
	}

	intx_irq = irq_create_mapping(port->event_domain,
				      EVENT_LOCAL_PM_MSI_INT_INTX);
	if (!intx_irq) {
		dev_err(dev, "failed to map INTx interrupt\n");
		return -ENXIO;
	}

	/* Plug the INTx chained handler */
	irq_set_chained_handler_and_data(intx_irq, plda_handle_intx, port);

	msi_irq = irq_create_mapping(port->event_domain,
				     EVENT_LOCAL_PM_MSI_INT_MSI);
	if (!msi_irq)
		return -ENXIO;

	/* Plug the MSI chained handler */
	irq_set_chained_handler_and_data(msi_irq, plda_handle_msi, port);

	/* Plug the main event chained handler */
	irq_set_chained_handler_and_data(irq, plda_handle_event, port);

	return 0;
}

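/*
 * ECAM ->init() hook, called from pci_host_common_probe() once the config
 * window has been mapped: program address translation window 0 for config
 * space, fix up the root port's MSI capability, set up the remaining
 * outbound windows and finally wire up the interrupts.
 */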
static int mc_platform_init(struct pci_config_window *cfg)
{
	struct device *dev = cfg->parent;
	struct platform_device *pdev = to_platform_device(dev);
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	int ret;

	/* Configure address translation table 0 for PCIe config space */
	plda_pcie_setup_window(bridge_base_addr, 0, cfg->res.start,
			       cfg->res.start,
			       resource_size(&cfg->res));

	/* Need some fixups in config space */
	mc_pcie_enable_msi(port, cfg->win);

	/* Configure non-config space outbound ranges */
	ret = plda_pcie_setup_iomems(pdev, &port->plda);
	if (ret)
		return ret;

	/* Address translation is up; safe to enable interrupts */
	ret = plda_init_interrupts(pdev, &port->plda, &mc_event);
	if (ret)
		return ret;

	return 0;
}

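/*
 * Probe order: map the AXI register region (platform resource 1), silence
 * all interrupt sources, read the MSI vector count and target address back
 * from the FPGA design, enable any optional FIC clocks, then hand over to
 * pci_host_common_probe(), which maps the ECAM region and calls
 * mc_platform_init().
 */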
static int mc_host_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	void __iomem *bridge_base_addr;
	struct plda_pcie_rp *plda;
	int ret;
	u32 val;

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	plda = &port->plda;
	plda->dev = dev;

	port->axi_base_addr = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(port->axi_base_addr))
		return PTR_ERR(port->axi_base_addr);

	mc_disable_interrupts(port);

	bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	plda->bridge_addr = bridge_base_addr;
	plda->num_events = NUM_EVENTS;

	/* Allow enabling MSI by disabling MSI-X */
	val = readl(bridge_base_addr + PCIE_PCI_IRQ_DW0);
	val &= ~MSIX_CAP_MASK;
	writel(val, bridge_base_addr + PCIE_PCI_IRQ_DW0);

	/* Pick num vectors from bitfile programmed onto FPGA fabric */
	val = readl(bridge_base_addr + PCIE_PCI_IRQ_DW0);
	val &= NUM_MSI_MSGS_MASK;
	val >>= NUM_MSI_MSGS_SHIFT;

	plda->msi.num_vectors = 1 << val;

	/* Pick vector address from design */
	plda->msi.vector_phy = readl_relaxed(bridge_base_addr + IMSI_ADDR);

	ret = mc_pcie_init_clks(dev);
	if (ret) {
		dev_err(dev, "failed to get clock resources, error %d\n", ret);
		return -ENODEV;
	}

	return pci_host_common_probe(pdev);
}

static const struct pci_ecam_ops mc_ecam_ops = {
	.init = mc_platform_init,
	.pci_ops = {
		.map_bus = pci_ecam_map_bus,
		.read = pci_generic_config_read,
		.write = pci_generic_config_write,
	}
};

static const struct of_device_id mc_pcie_of_match[] = {
	{
		.compatible = "microchip,pcie-host-1.0",
		.data = &mc_ecam_ops,
	},
	{},
};

MODULE_DEVICE_TABLE(of, mc_pcie_of_match);

static struct platform_driver mc_pcie_driver = {
	.probe = mc_host_probe,
	.driver = {
		.name = "microchip-pcie",
		.of_match_table = mc_pcie_of_match,
		.suppress_bind_attrs = true,
	},
};

builtin_platform_driver(mc_pcie_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microchip PCIe host controller driver");
MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");