xref: /linux/drivers/pci/controller/plda/pcie-plda-host.c (revision 4436e6da008fee87d54c038e983e5be9a6baf8fb)
// SPDX-License-Identifier: GPL-2.0
/*
 * PLDA PCIe XpressRich host controller driver
 *
 * Copyright (C) 2023 Microchip Co. Ltd
 *		      StarFive Co. Ltd
 *
 * Author: Daire McNamara <daire.mcnamara@microchip.com>
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/pci_regs.h>
#include <linux/pci-ecam.h>

#include "pcie-plda.h"

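/*
 * The bridge presents configuration space in the standard ECAM layout,
 * so a config access is just an offset from config_base.
 */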
void __iomem *plda_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
				int where)
{
	struct plda_pcie_rp *pcie = bus->sysdata;

	return pcie->config_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
}
EXPORT_SYMBOL_GPL(plda_pcie_map_bus);

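/*
 * Chained handler for the bridge's MSI event.  The pending MSI bits in
 * ISTATUS_LOCAL are written back to acknowledge them (the ISTATUS
 * registers are assumed to be write-one-to-clear, which is what the
 * write-back of the pending bits implies), then each bit set in
 * ISTATUS_MSI is demultiplexed to its virtual IRQ in the inner MSI
 * domain.
 */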
static void plda_handle_msi(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	struct plda_msi *msi = &port->msi;
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long status;
	u32 bit;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	if (status & PM_MSI_INT_MSI_MASK) {
		writel_relaxed(status & PM_MSI_INT_MSI_MASK,
			       bridge_base_addr + ISTATUS_LOCAL);
		status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
		for_each_set_bit(bit, &status, msi->num_vectors) {
			ret = generic_handle_domain_irq(msi->dev_domain, bit);
			if (ret)
				dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
						    bit);
		}
	}

	chained_irq_exit(chip, desc);
}

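/* Acknowledge a single MSI vector by clearing its bit in ISTATUS_MSI */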
static void plda_msi_bottom_irq_ack(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr = port->bridge_addr;
	u32 bitpos = data->hwirq;

	writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
}

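/*
 * Every vector shares the single doorbell address (msi.vector_phy);
 * vectors are distinguished purely by the MSI data payload, which is the
 * hwirq number.
 */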
static void plda_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	phys_addr_t addr = port->msi.vector_phy;

	msg->address_lo = lower_32_bits(addr);
	msg->address_hi = upper_32_bits(addr);
	msg->data = data->hwirq;

	dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}

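/*
 * All MSIs funnel through one wired bridge interrupt, so per-vector CPU
 * affinity cannot be honoured.
 */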
static int plda_msi_set_affinity(struct irq_data *irq_data,
				 const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static struct irq_chip plda_msi_bottom_irq_chip = {
	.name = "PLDA MSI",
	.irq_ack = plda_msi_bottom_irq_ack,
	.irq_compose_msi_msg = plda_compose_msi_msg,
	.irq_set_affinity = plda_msi_set_affinity,
};

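/*
 * Allocate one free vector from the bitmap under msi->lock.  nr_irqs is
 * not consulted, so each call hands out a single vector; the domain info
 * below does not set MSI_FLAG_MULTI_PCI_MSI, so contiguous multi-vector
 * blocks are never requested.
 */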
static int plda_irq_msi_domain_alloc(struct irq_domain *domain,
				     unsigned int virq,
				     unsigned int nr_irqs,
				     void *args)
{
	struct plda_pcie_rp *port = domain->host_data;
	struct plda_msi *msi = &port->msi;
	unsigned long bit;

	mutex_lock(&msi->lock);
	bit = find_first_zero_bit(msi->used, msi->num_vectors);
	if (bit >= msi->num_vectors) {
		mutex_unlock(&msi->lock);
		return -ENOSPC;
	}

	set_bit(bit, msi->used);

	irq_domain_set_info(domain, virq, bit, &plda_msi_bottom_irq_chip,
			    domain->host_data, handle_edge_irq, NULL, NULL);

	mutex_unlock(&msi->lock);

	return 0;
}

static void plda_irq_msi_domain_free(struct irq_domain *domain,
				     unsigned int virq,
				     unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(d);
	struct plda_msi *msi = &port->msi;

	mutex_lock(&msi->lock);

	if (test_bit(d->hwirq, msi->used))
		__clear_bit(d->hwirq, msi->used);
	else
		dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);

	mutex_unlock(&msi->lock);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= plda_irq_msi_domain_alloc,
	.free	= plda_irq_msi_domain_free,
};

static struct irq_chip plda_msi_irq_chip = {
	.name = "PLDA PCIe MSI",
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info plda_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX),
	.chip = &plda_msi_irq_chip,
};

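/*
 * Build the two-level MSI hierarchy: an inner linear domain that maps
 * hwirqs onto the bridge's vectors, and a PCI MSI domain stacked on top
 * of it.
 */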
static int plda_allocate_msi_domains(struct plda_pcie_rp *port)
{
	struct device *dev = port->dev;
	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
	struct plda_msi *msi = &port->msi;

	mutex_init(&port->msi.lock);

	msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors,
						&msi_domain_ops, port);
	if (!msi->dev_domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
						    &plda_msi_domain_info,
						    msi->dev_domain);
	if (!msi->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		irq_domain_remove(msi->dev_domain);
		return -ENOMEM;
	}

	return 0;
}

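/*
 * Chained handler for the bridge's INTx event: pending INTA-INTD bits in
 * ISTATUS_LOCAL are demultiplexed to the INTx domain.  Acknowledgement
 * is left to plda_ack_intx_irq(), since INTx is level triggered.
 */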
static void plda_handle_intx(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long status;
	u32 bit;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	if (status & PM_MSI_INT_INTX_MASK) {
		status &= PM_MSI_INT_INTX_MASK;
		status >>= PM_MSI_INT_INTX_SHIFT;
		for_each_set_bit(bit, &status, PCI_NUM_INTX) {
			ret = generic_handle_domain_irq(port->intx_domain, bit);
			if (ret)
				dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
						    bit);
		}
	}

	chained_irq_exit(chip, desc);
}

static void plda_ack_intx_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr = port->bridge_addr;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);

	writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
}

static void plda_mask_intx_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long flags;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
	val &= ~mask;
	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void plda_unmask_intx_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long flags;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
	val |= mask;
	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip plda_intx_irq_chip = {
	.name = "PLDA PCIe INTx",
	.irq_ack = plda_ack_intx_irq,
	.irq_mask = plda_mask_intx_irq,
	.irq_unmask = plda_unmask_intx_irq,
};

static int plda_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &plda_intx_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = plda_pcie_intx_map,
};

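/*
 * Collapse ISTATUS_LOCAL into the driver's event numbering: the four
 * INTx bits become a single event, the system/MSI bits are shifted down
 * to close the resulting gap, and the low bits (up to the doorbell
 * event) map 1:1 to the register.
 */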
static u32 plda_get_events(struct plda_pcie_rp *port)
{
	u32 events, val, origin;

	origin = readl_relaxed(port->bridge_addr + ISTATUS_LOCAL);

	/* MSI event and sys events */
	val = (origin & SYS_AND_MSI_MASK) >> PM_MSI_INT_MSI_SHIFT;
	events = val << (PM_MSI_INT_MSI_SHIFT - PCI_NUM_INTX + 1);

	/* INTx events */
	if (origin & PM_MSI_INT_INTX_MASK)
		events |= BIT(PM_MSI_INT_INTX_SHIFT);

	/* the remaining events map 1:1 to the register bits */
	events |= origin & GENMASK(P_ATR_EVT_DOORBELL_SHIFT, 0);

	return events;
}

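/*
 * Default handler for events that need no device-specific handling;
 * acknowledgement is already done through plda_ack_event_irq() by the
 * irqchip, so there is nothing left to do here.
 */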
static irqreturn_t plda_event_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static void plda_handle_event(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	unsigned long events;
	u32 bit;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	events = port->event_ops->get_events(port);

	events &= port->events_bitmap;
	for_each_set_bit(bit, &events, port->num_events)
		generic_handle_domain_irq(port->event_domain, bit);

	chained_irq_exit(chip, desc);
}

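/*
 * Translate a driver event number back into its ISTATUS_LOCAL/IMASK_LOCAL
 * register mask, undoing the compaction done in plda_get_events().
 */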
static u32 plda_hwirq_to_mask(int hwirq)
{
	u32 mask;

	/* hwirqs 0 - 23 map directly to register bits */
	if (hwirq < EVENT_PM_MSI_INT_INTX)
		mask = BIT(hwirq);
	else if (hwirq == EVENT_PM_MSI_INT_INTX)
		mask = PM_MSI_INT_INTX_MASK;
	else
		mask = BIT(hwirq + PCI_NUM_INTX - 1);

	return mask;
}

static void plda_ack_event_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);

	writel_relaxed(plda_hwirq_to_mask(data->hwirq),
		       port->bridge_addr + ISTATUS_LOCAL);
}

static void plda_mask_event_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	u32 mask, val;

	mask = plda_hwirq_to_mask(data->hwirq);

	raw_spin_lock(&port->lock);
	val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
	val &= ~mask;
	writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
	raw_spin_unlock(&port->lock);
}

static void plda_unmask_event_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	u32 mask, val;

	mask = plda_hwirq_to_mask(data->hwirq);

	raw_spin_lock(&port->lock);
	val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
	val |= mask;
	writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
	raw_spin_unlock(&port->lock);
}

static struct irq_chip plda_event_irq_chip = {
	.name = "PLDA PCIe EVENT",
	.irq_ack = plda_ack_event_irq,
	.irq_mask = plda_mask_event_irq,
	.irq_unmask = plda_unmask_event_irq,
};

static const struct plda_event_ops plda_event_ops = {
	.get_events = plda_get_events,
};

static int plda_pcie_event_map(struct irq_domain *domain, unsigned int irq,
			       irq_hw_number_t hwirq)
{
	struct plda_pcie_rp *port = (void *)domain->host_data;

	irq_set_chip_and_handler(irq, port->event_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops plda_event_domain_ops = {
	.map = plda_pcie_event_map,
};

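/*
 * Create the event and INTx IRQ domains.  Both hang off the first child
 * node of the controller (the interrupt-controller node in DT); the MSI
 * domains are created last.
 */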
static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;

	/* Setup INTx */
	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "failed to find PCIe Intc node\n");
		return -EINVAL;
	}

	port->event_domain = irq_domain_add_linear(pcie_intc_node,
						   port->num_events,
						   &plda_event_domain_ops,
						   port);
	if (!port->event_domain) {
		dev_err(dev, "failed to get event domain\n");
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);

	port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						  &intx_domain_ops, port);
	if (!port->intx_domain) {
		dev_err(dev, "failed to get an INTx IRQ domain\n");
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);

	of_node_put(pcie_intc_node);
	raw_spin_lock_init(&port->lock);

	return plda_allocate_msi_domains(port);
}

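/*
 * Set up all interrupt handling for the port: fill in default ops,
 * create the IRQ domains, map and request every event set in
 * events_bitmap (via a device-specific hook when one is provided), and
 * chain the INTx, MSI and top-level event handlers.
 */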
int plda_init_interrupts(struct platform_device *pdev,
			 struct plda_pcie_rp *port,
			 const struct plda_event *event)
{
	struct device *dev = &pdev->dev;
	int event_irq, ret;
	u32 i;

	if (!port->event_ops)
		port->event_ops = &plda_event_ops;

	if (!port->event_irq_chip)
		port->event_irq_chip = &plda_event_irq_chip;

	ret = plda_pcie_init_irq_domains(port);
	if (ret) {
		dev_err(dev, "failed creating IRQ domains\n");
		return ret;
	}

	port->irq = platform_get_irq(pdev, 0);
	if (port->irq < 0)
		return -ENODEV;

	for_each_set_bit(i, &port->events_bitmap, port->num_events) {
		event_irq = irq_create_mapping(port->event_domain, i);
		if (!event_irq) {
			dev_err(dev, "failed to map hwirq %d\n", i);
			return -ENXIO;
		}

		if (event->request_event_irq)
			ret = event->request_event_irq(port, event_irq, i);
		else
			ret = devm_request_irq(dev, event_irq,
					       plda_event_handler,
					       0, NULL, port);

		if (ret) {
			dev_err(dev, "failed to request IRQ %d\n", event_irq);
			return ret;
		}
	}

	port->intx_irq = irq_create_mapping(port->event_domain,
					    event->intx_event);
	if (!port->intx_irq) {
		dev_err(dev, "failed to map INTx interrupt\n");
		return -ENXIO;
	}

	/* Plug the INTx chained handler */
	irq_set_chained_handler_and_data(port->intx_irq, plda_handle_intx, port);

	port->msi_irq = irq_create_mapping(port->event_domain,
					   event->msi_event);
	if (!port->msi_irq)
		return -ENXIO;

	/* Plug the MSI chained handler */
	irq_set_chained_handler_and_data(port->msi_irq, plda_handle_msi, port);

	/* Plug the main event chained handler */
	irq_set_chained_handler_and_data(port->irq, plda_handle_event, port);

	return 0;
}
EXPORT_SYMBOL_GPL(plda_init_interrupts);

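/*
 * Program one address translation (ATR) window.  Window 0 is reserved
 * for the config space interface; all others translate AXI addresses to
 * PCIe addresses.  The hardware appears to encode the window size as
 * log2(size) - 1 in the source address parameter, so size is assumed to
 * be a power of two.
 */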
void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
			    phys_addr_t axi_addr, phys_addr_t pci_addr,
			    size_t size)
{
	u32 atr_sz = ilog2(size) - 1;
	u32 val;

	if (index == 0)
		val = PCIE_CONFIG_INTERFACE;
	else
		val = PCIE_TX_RX_INTERFACE;

	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_PARAM);

	val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) |
	      ATR_IMPL_ENABLE;
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_SRCADDR_PARAM);

	val = upper_32_bits(axi_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_SRC_ADDR);

	val = lower_32_bits(pci_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_ADDR_LSB);

	val = upper_32_bits(pci_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_ADDR_UDW);

	val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
	val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
	writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
	writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
}
EXPORT_SYMBOL_GPL(plda_pcie_setup_window);

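/*
 * Program a translation window for each MEM resource of the host bridge.
 * Index 0 is already taken by the config space window, so memory windows
 * start at index 1.
 */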
int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
			   struct plda_pcie_rp *port)
{
	void __iomem *bridge_base_addr = port->bridge_addr;
	struct resource_entry *entry;
	u64 pci_addr;
	u32 index = 1;

	resource_list_for_each_entry(entry, &bridge->windows) {
		if (resource_type(entry->res) == IORESOURCE_MEM) {
			pci_addr = entry->res->start - entry->offset;
			plda_pcie_setup_window(bridge_base_addr, index,
					       entry->res->start, pci_addr,
					       resource_size(entry->res));
			index++;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(plda_pcie_setup_iomems);

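/* Unchain the demux handlers before removing the IRQ domains they feed */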
static void plda_pcie_irq_domain_deinit(struct plda_pcie_rp *pcie)
{
	irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
	irq_set_chained_handler_and_data(pcie->msi_irq, NULL, NULL);
	irq_set_chained_handler_and_data(pcie->intx_irq, NULL, NULL);

	irq_domain_remove(pcie->msi.msi_domain);
	irq_domain_remove(pcie->msi.dev_domain);

	irq_domain_remove(pcie->intx_domain);
	irq_domain_remove(pcie->event_domain);
}

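/*
 * Common probe path for PLDA-based host drivers: map the "apb" and "cfg"
 * regions, run the device-specific host_init hook, program the config
 * and memory translation windows, wire up interrupts, and hand the
 * bridge to the PCI core.
 */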
int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
			const struct plda_event *plda_event)
{
	struct device *dev = port->dev;
	struct pci_host_bridge *bridge;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *cfg_res;
	int ret;

	port->bridge_addr =
		devm_platform_ioremap_resource_byname(pdev, "apb");
	if (IS_ERR(port->bridge_addr))
		return dev_err_probe(dev, PTR_ERR(port->bridge_addr),
				     "failed to map reg memory\n");

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	if (!cfg_res)
		return dev_err_probe(dev, -ENODEV,
				     "failed to get config memory\n");

	port->config_base = devm_ioremap_resource(dev, cfg_res);
	if (IS_ERR(port->config_base))
		return dev_err_probe(dev, PTR_ERR(port->config_base),
				     "failed to map config memory\n");

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return dev_err_probe(dev, -ENOMEM,
				     "failed to alloc bridge\n");

	if (port->host_ops && port->host_ops->host_init) {
		ret = port->host_ops->host_init(port);
		if (ret)
			return ret;
	}

	port->bridge = bridge;
	plda_pcie_setup_window(port->bridge_addr, 0, cfg_res->start, 0,
			       resource_size(cfg_res));
	plda_pcie_setup_iomems(bridge, port);
	plda_set_default_msi(&port->msi);
	ret = plda_init_interrupts(pdev, port, plda_event);
	if (ret)
		goto err_host;

	/* Set default bus ops */
	bridge->ops = ops;
	bridge->sysdata = port;

	ret = pci_host_probe(bridge);
	if (ret < 0) {
		dev_err_probe(dev, ret, "failed to probe pci host\n");
		goto err_probe;
	}

	return ret;

err_probe:
	plda_pcie_irq_domain_deinit(port);
err_host:
	if (port->host_ops && port->host_ops->host_deinit)
		port->host_ops->host_deinit(port);

	return ret;
}
EXPORT_SYMBOL_GPL(plda_pcie_host_init);

void plda_pcie_host_deinit(struct plda_pcie_rp *port)
{
	pci_stop_root_bus(port->bridge->bus);
	pci_remove_root_bus(port->bridge->bus);

	plda_pcie_irq_domain_deinit(port);

	if (port->host_ops && port->host_ops->host_deinit)
		port->host_ops->host_deinit(port);
}
EXPORT_SYMBOL_GPL(plda_pcie_host_deinit);