xref: /linux/drivers/pci/controller/plda/pcie-plda-host.c (revision 2bd1bea5fa6aa79bc563a57919730eb809651b28)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * PLDA PCIe XpressRich host controller driver
4  *
5  * Copyright (C) 2023 Microchip Co. Ltd
6  *		      StarFive Co. Ltd
7  *
8  * Author: Daire McNamara <daire.mcnamara@microchip.com>
9  */
10 
11 #include <linux/align.h>
12 #include <linux/bitfield.h>
13 #include <linux/irqchip/chained_irq.h>
14 #include <linux/irqdomain.h>
15 #include <linux/msi.h>
16 #include <linux/pci_regs.h>
17 #include <linux/pci-ecam.h>
18 #include <linux/wordpart.h>
19 
20 #include "pcie-plda.h"
21 
plda_pcie_map_bus(struct pci_bus * bus,unsigned int devfn,int where)22 void __iomem *plda_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
23 				int where)
24 {
25 	struct plda_pcie_rp *pcie = bus->sysdata;
26 
27 	return pcie->config_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
28 }
29 EXPORT_SYMBOL_GPL(plda_pcie_map_bus);
30 
/*
 * Chained handler for the MSI event: acknowledge the aggregate MSI bits
 * in ISTATUS_LOCAL, then demultiplex ISTATUS_MSI into the per-vector
 * interrupts of the inner MSI domain.
 */
static void plda_handle_msi(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	struct plda_msi *msi = &port->msi;
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long status;
	u32 bit;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	if (status & PM_MSI_INT_MSI_MASK) {
		/*
		 * Clear the summary bits before reading the per-vector
		 * status so a new MSI arriving afterwards re-raises the
		 * summary interrupt.
		 */
		writel_relaxed(status & PM_MSI_INT_MSI_MASK,
			       bridge_base_addr + ISTATUS_LOCAL);
		status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
		for_each_set_bit(bit, &status, msi->num_vectors) {
			ret = generic_handle_domain_irq(msi->dev_domain, bit);
			if (ret)
				dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
						    bit);
		}
	}

	chained_irq_exit(chip, desc);
}
59 
plda_msi_bottom_irq_ack(struct irq_data * data)60 static void plda_msi_bottom_irq_ack(struct irq_data *data)
61 {
62 	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
63 	void __iomem *bridge_base_addr = port->bridge_addr;
64 	u32 bitpos = data->hwirq;
65 
66 	writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
67 }
68 
plda_compose_msi_msg(struct irq_data * data,struct msi_msg * msg)69 static void plda_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
70 {
71 	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
72 	phys_addr_t addr = port->msi.vector_phy;
73 
74 	msg->address_lo = lower_32_bits(addr);
75 	msg->address_hi = upper_32_bits(addr);
76 	msg->data = data->hwirq;
77 
78 	dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
79 		(int)data->hwirq, msg->address_hi, msg->address_lo);
80 }
81 
/* Inner (hwirq-level) chip: acks the per-vector MSI status bit. */
static struct irq_chip plda_msi_bottom_irq_chip = {
	.name = "PLDA MSI",
	.irq_ack = plda_msi_bottom_irq_ack,
	.irq_compose_msi_msg = plda_compose_msi_msg,
};
87 
/*
 * Allocate one MSI vector from the port's bitmap of free vectors.
 *
 * NOTE(review): @nr_irqs is not consulted when picking bits, so only a
 * single vector per call is actually reserved — confirm callers never
 * request multi-vector allocations from this domain.
 */
static int plda_irq_msi_domain_alloc(struct irq_domain *domain,
				     unsigned int virq,
				     unsigned int nr_irqs,
				     void *args)
{
	struct plda_pcie_rp *port = domain->host_data;
	struct plda_msi *msi = &port->msi;
	unsigned long bit;

	mutex_lock(&msi->lock);
	bit = find_first_zero_bit(msi->used, msi->num_vectors);
	if (bit >= msi->num_vectors) {
		/* All vectors in use. */
		mutex_unlock(&msi->lock);
		return -ENOSPC;
	}

	set_bit(bit, msi->used);

	irq_domain_set_info(domain, virq, bit, &plda_msi_bottom_irq_chip,
			    domain->host_data, handle_edge_irq, NULL, NULL);

	mutex_unlock(&msi->lock);

	return 0;
}
113 
/* Return an MSI vector to the free bitmap; warn on double free. */
static void plda_irq_msi_domain_free(struct irq_domain *domain,
				     unsigned int virq,
				     unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(d);
	struct plda_msi *msi = &port->msi;

	mutex_lock(&msi->lock);

	if (test_bit(d->hwirq, msi->used))
		__clear_bit(d->hwirq, msi->used);
	else
		dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);

	mutex_unlock(&msi->lock);
}
131 
/* Ops for the inner MSI hwirq domain. */
static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= plda_irq_msi_domain_alloc,
	.free	= plda_irq_msi_domain_free,
};
136 
/* Top-level PCI/MSI chip; ack is forwarded to the inner chip. */
static struct irq_chip plda_msi_irq_chip = {
	.name = "PLDA PCIe MSI",
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
143 
/* PCI MSI domain: MSI-X capable, no IRQ affinity support. */
static struct msi_domain_info plda_msi_domain_info = {
	.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		 MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
	.chip = &plda_msi_irq_chip,
};
149 
/*
 * Create the inner (hwirq) MSI domain and the PCI MSI domain stacked on
 * top of it.  The inner domain is anonymous (NULL fwnode); the PCI MSI
 * domain is tied to the device's fwnode.
 */
static int plda_allocate_msi_domains(struct plda_pcie_rp *port)
{
	struct device *dev = port->dev;
	struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node);
	struct plda_msi *msi = &port->msi;

	mutex_init(&port->msi.lock);

	msi->dev_domain = irq_domain_create_linear(NULL, msi->num_vectors, &msi_domain_ops, port);
	if (!msi->dev_domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
						    &plda_msi_domain_info,
						    msi->dev_domain);
	if (!msi->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		/* Undo the inner domain on failure. */
		irq_domain_remove(msi->dev_domain);
		return -ENOMEM;
	}

	return 0;
}
175 
/*
 * Chained handler for the INTx event: demultiplex the four INTx status
 * bits of ISTATUS_LOCAL into the INTx domain.
 */
static void plda_handle_intx(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long status;
	u32 bit;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	if (status & PM_MSI_INT_INTX_MASK) {
		/* Normalize so bit 0 is INTA. */
		status &= PM_MSI_INT_INTX_MASK;
		status >>= PM_MSI_INT_INTX_SHIFT;
		for_each_set_bit(bit, &status, PCI_NUM_INTX) {
			ret = generic_handle_domain_irq(port->intx_domain, bit);
			if (ret)
				dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
						    bit);
		}
	}

	chained_irq_exit(chip, desc);
}
202 
plda_ack_intx_irq(struct irq_data * data)203 static void plda_ack_intx_irq(struct irq_data *data)
204 {
205 	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
206 	void __iomem *bridge_base_addr = port->bridge_addr;
207 	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
208 
209 	writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
210 }
211 
plda_mask_intx_irq(struct irq_data * data)212 static void plda_mask_intx_irq(struct irq_data *data)
213 {
214 	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
215 	void __iomem *bridge_base_addr = port->bridge_addr;
216 	unsigned long flags;
217 	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
218 	u32 val;
219 
220 	raw_spin_lock_irqsave(&port->lock, flags);
221 	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
222 	val &= ~mask;
223 	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
224 	raw_spin_unlock_irqrestore(&port->lock, flags);
225 }
226 
plda_unmask_intx_irq(struct irq_data * data)227 static void plda_unmask_intx_irq(struct irq_data *data)
228 {
229 	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
230 	void __iomem *bridge_base_addr = port->bridge_addr;
231 	unsigned long flags;
232 	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
233 	u32 val;
234 
235 	raw_spin_lock_irqsave(&port->lock, flags);
236 	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
237 	val |= mask;
238 	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
239 	raw_spin_unlock_irqrestore(&port->lock, flags);
240 }
241 
/* Per-line INTx chip backed by ISTATUS_LOCAL/IMASK_LOCAL. */
static struct irq_chip plda_intx_irq_chip = {
	.name = "PLDA PCIe INTx",
	.irq_ack = plda_ack_intx_irq,
	.irq_mask = plda_mask_intx_irq,
	.irq_unmask = plda_unmask_intx_irq,
};
248 
/* Install the level-triggered INTx chip on a newly mapped virq. */
static int plda_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, domain->host_data);
	irq_set_chip_and_handler(irq, &plda_intx_irq_chip, handle_level_irq);

	return 0;
}
257 
/* Ops for the four-line INTx domain. */
static const struct irq_domain_ops intx_domain_ops = {
	.map = plda_pcie_intx_map,
};
261 
/*
 * Collapse ISTATUS_LOCAL into the event-domain hwirq numbering:
 *  - system/MSI bits are shifted down past the INTx field, whose four
 *    status bits are folded into the single INTx event bit;
 *  - bits up to P_ATR_EVT_DOORBELL_SHIFT keep their register positions.
 * This is the inverse of the mapping done by plda_hwirq_to_mask().
 */
static u32 plda_get_events(struct plda_pcie_rp *port)
{
	u32 events, val, origin;

	origin = readl_relaxed(port->bridge_addr + ISTATUS_LOCAL);

	/* MSI event and sys events */
	val = (origin & SYS_AND_MSI_MASK) >> PM_MSI_INT_MSI_SHIFT;
	events = val << (PM_MSI_INT_MSI_SHIFT - PCI_NUM_INTX + 1);

	/* INTx events */
	if (origin & PM_MSI_INT_INTX_MASK)
		events |= BIT(PM_MSI_INT_INTX_SHIFT);

	/* remains are same with register */
	events |= origin & GENMASK(P_ATR_EVT_DOORBELL_SHIFT, 0);

	return events;
}
281 
/*
 * Default handler for events that have no platform-specific
 * request_event_irq hook; the event is acked via the event irqchip
 * (events are mapped with handle_level_irq), so nothing is done here.
 */
static irqreturn_t plda_event_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}
286 
/*
 * Chained handler for the controller's top-level interrupt: fetch the
 * pending events via the (overridable) get_events op, filter by the
 * events this port enabled, and dispatch into the event domain.
 */
static void plda_handle_event(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	unsigned long events;
	u32 bit;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	events = port->event_ops->get_events(port);

	/* Only dispatch events this port registered for. */
	events &= port->events_bitmap;
	for_each_set_bit(bit, &events, port->num_events)
		generic_handle_domain_irq(port->event_domain, bit);

	chained_irq_exit(chip, desc);
}
304 
plda_hwirq_to_mask(int hwirq)305 static u32 plda_hwirq_to_mask(int hwirq)
306 {
307 	u32 mask;
308 
309 	/* hwirq 23 - 0 are the same with register */
310 	if (hwirq < EVENT_PM_MSI_INT_INTX)
311 		mask = BIT(hwirq);
312 	else if (hwirq == EVENT_PM_MSI_INT_INTX)
313 		mask = PM_MSI_INT_INTX_MASK;
314 	else
315 		mask = BIT(hwirq + PCI_NUM_INTX - 1);
316 
317 	return mask;
318 }
319 
plda_ack_event_irq(struct irq_data * data)320 static void plda_ack_event_irq(struct irq_data *data)
321 {
322 	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
323 
324 	writel_relaxed(plda_hwirq_to_mask(data->hwirq),
325 		       port->bridge_addr + ISTATUS_LOCAL);
326 }
327 
plda_mask_event_irq(struct irq_data * data)328 static void plda_mask_event_irq(struct irq_data *data)
329 {
330 	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
331 	u32 mask, val;
332 
333 	mask = plda_hwirq_to_mask(data->hwirq);
334 
335 	raw_spin_lock(&port->lock);
336 	val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
337 	val &= ~mask;
338 	writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
339 	raw_spin_unlock(&port->lock);
340 }
341 
plda_unmask_event_irq(struct irq_data * data)342 static void plda_unmask_event_irq(struct irq_data *data)
343 {
344 	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
345 	u32 mask, val;
346 
347 	mask = plda_hwirq_to_mask(data->hwirq);
348 
349 	raw_spin_lock(&port->lock);
350 	val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
351 	val |= mask;
352 	writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
353 	raw_spin_unlock(&port->lock);
354 }
355 
/* Default event chip; platforms may supply their own via event_irq_chip. */
static struct irq_chip plda_event_irq_chip = {
	.name = "PLDA PCIe EVENT",
	.irq_ack = plda_ack_event_irq,
	.irq_mask = plda_mask_event_irq,
	.irq_unmask = plda_unmask_event_irq,
};
362 
/* Default event ops: read pending events straight from ISTATUS_LOCAL. */
static const struct plda_event_ops plda_event_ops = {
	.get_events = plda_get_events,
};
366 
/* Install the (possibly platform-overridden) event chip on a new virq. */
static int plda_pcie_event_map(struct irq_domain *domain, unsigned int irq,
			       irq_hw_number_t hwirq)
{
	struct plda_pcie_rp *port = domain->host_data;

	irq_set_chip_data(irq, port);
	irq_set_chip_and_handler(irq, port->event_irq_chip, handle_level_irq);

	return 0;
}
377 
/* Ops for the top-level event domain. */
static const struct irq_domain_ops plda_event_domain_ops = {
	.map = plda_pcie_event_map,
};
381 
plda_pcie_init_irq_domains(struct plda_pcie_rp * port)382 static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port)
383 {
384 	struct device *dev = port->dev;
385 	struct device_node *node = dev->of_node;
386 	struct device_node *pcie_intc_node;
387 
388 	/* Setup INTx */
389 	pcie_intc_node = of_get_next_child(node, NULL);
390 	if (!pcie_intc_node) {
391 		dev_err(dev, "failed to find PCIe Intc node\n");
392 		return -EINVAL;
393 	}
394 
395 	port->event_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
396 						      port->num_events, &plda_event_domain_ops,
397 						      port);
398 	if (!port->event_domain) {
399 		dev_err(dev, "failed to get event domain\n");
400 		of_node_put(pcie_intc_node);
401 		return -ENOMEM;
402 	}
403 
404 	irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);
405 
406 	port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
407 						     &intx_domain_ops, port);
408 	if (!port->intx_domain) {
409 		dev_err(dev, "failed to get an INTx IRQ domain\n");
410 		of_node_put(pcie_intc_node);
411 		return -ENOMEM;
412 	}
413 
414 	irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);
415 
416 	of_node_put(pcie_intc_node);
417 	raw_spin_lock_init(&port->lock);
418 
419 	return plda_allocate_msi_domains(port);
420 }
421 
/**
 * plda_init_interrupts() - Set up event, INTx and MSI interrupt handling
 * @pdev: platform device providing the top-level interrupt
 * @port: root port instance; num_events and events_bitmap must be set,
 *        event_ops/event_irq_chip may be pre-set by the platform
 * @event: platform description of the INTx/MSI event numbers and an
 *         optional per-event request hook
 *
 * Return: 0 on success, negative errno otherwise.
 */
int plda_init_interrupts(struct platform_device *pdev,
			 struct plda_pcie_rp *port,
			 const struct plda_event *event)
{
	struct device *dev = &pdev->dev;
	int event_irq, ret;
	u32 i;

	/* Fall back to the generic ops/chip unless the platform set them. */
	if (!port->event_ops)
		port->event_ops = &plda_event_ops;

	if (!port->event_irq_chip)
		port->event_irq_chip = &plda_event_irq_chip;

	ret = plda_pcie_init_irq_domains(port);
	if (ret) {
		dev_err(dev, "failed creating IRQ domains\n");
		return ret;
	}

	port->irq = platform_get_irq(pdev, 0);
	if (port->irq < 0)
		return -ENODEV;

	/* Map and request every event the platform opted into. */
	for_each_set_bit(i, &port->events_bitmap, port->num_events) {
		event_irq = irq_create_mapping(port->event_domain, i);
		if (!event_irq) {
			dev_err(dev, "failed to map hwirq %d\n", i);
			return -ENXIO;
		}

		if (event->request_event_irq)
			ret = event->request_event_irq(port, event_irq, i);
		else
			ret = devm_request_irq(dev, event_irq,
					       plda_event_handler,
					       0, NULL, port);

		if (ret) {
			dev_err(dev, "failed to request IRQ %d\n", event_irq);
			return ret;
		}
	}

	port->intx_irq = irq_create_mapping(port->event_domain,
					    event->intx_event);
	if (!port->intx_irq) {
		dev_err(dev, "failed to map INTx interrupt\n");
		return -ENXIO;
	}

	/* Plug the INTx chained handler */
	irq_set_chained_handler_and_data(port->intx_irq, plda_handle_intx, port);

	port->msi_irq = irq_create_mapping(port->event_domain,
					   event->msi_event);
	if (!port->msi_irq)
		return -ENXIO;

	/* Plug the MSI chained handler */
	irq_set_chained_handler_and_data(port->msi_irq, plda_handle_msi, port);

	/* Plug the main event chained handler */
	irq_set_chained_handler_and_data(port->irq, plda_handle_event, port);

	return 0;
}
EXPORT_SYMBOL_GPL(plda_init_interrupts);
490 
/*
 * Program one outbound address translation table entry: AXI source
 * window @axi_addr of @size bytes translated to PCI address @pci_addr.
 * Entry 0 targets config space; all others target PCIe TX/RX space.
 */
void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
			    phys_addr_t axi_addr, phys_addr_t pci_addr,
			    size_t size)
{
	void __iomem *table = bridge_base_addr + index * ATR_ENTRY_SIZE;
	u32 atr_sz = ilog2(size) - 1;
	u32 val;

	val = (index == 0) ? PCIE_CONFIG_INTERFACE : PCIE_TX_RX_INTERFACE;
	writel(val, table + ATR0_AXI4_SLV0_TRSL_PARAM);

	/* Low source word carries the window size field and enable bit. */
	val = ALIGN_DOWN(lower_32_bits(axi_addr), SZ_4K);
	val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
	val |= ATR_IMPL_ENABLE;
	writel(val, table + ATR0_AXI4_SLV0_SRCADDR_PARAM);

	writel(upper_32_bits(axi_addr), table + ATR0_AXI4_SLV0_SRC_ADDR);
	writel(lower_32_bits(pci_addr), table + ATR0_AXI4_SLV0_TRSL_ADDR_LSB);
	writel(upper_32_bits(pci_addr), table + ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
}
EXPORT_SYMBOL_GPL(plda_pcie_setup_window);
525 
plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp * port)526 void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port)
527 {
528 	void __iomem *bridge_base_addr = port->bridge_addr;
529 	u32 val;
530 
531 	val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
532 	val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
533 	writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
534 	writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
535 }
536 EXPORT_SYMBOL_GPL(plda_pcie_setup_inbound_address_translation);
537 
plda_pcie_setup_iomems(struct pci_host_bridge * bridge,struct plda_pcie_rp * port)538 int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
539 			   struct plda_pcie_rp *port)
540 {
541 	void __iomem *bridge_base_addr = port->bridge_addr;
542 	struct resource_entry *entry;
543 	u64 pci_addr;
544 	u32 index = 1;
545 
546 	resource_list_for_each_entry(entry, &bridge->windows) {
547 		if (resource_type(entry->res) == IORESOURCE_MEM) {
548 			pci_addr = entry->res->start - entry->offset;
549 			plda_pcie_setup_window(bridge_base_addr, index,
550 					       entry->res->start, pci_addr,
551 					       resource_size(entry->res));
552 			index++;
553 		}
554 	}
555 
556 	return 0;
557 }
558 EXPORT_SYMBOL_GPL(plda_pcie_setup_iomems);
559 
/*
 * Unwind plda_init_interrupts()/plda_pcie_init_irq_domains(): detach the
 * chained handlers, then remove the MSI, INTx and event domains.
 */
static void plda_pcie_irq_domain_deinit(struct plda_pcie_rp *pcie)
{
	irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
	irq_set_chained_handler_and_data(pcie->msi_irq, NULL, NULL);
	irq_set_chained_handler_and_data(pcie->intx_irq, NULL, NULL);

	irq_domain_remove(pcie->msi.msi_domain);
	irq_domain_remove(pcie->msi.dev_domain);

	irq_domain_remove(pcie->intx_domain);
	irq_domain_remove(pcie->event_domain);
}
572 
plda_pcie_host_init(struct plda_pcie_rp * port,struct pci_ops * ops,const struct plda_event * plda_event)573 int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
574 			const struct plda_event *plda_event)
575 {
576 	struct device *dev = port->dev;
577 	struct pci_host_bridge *bridge;
578 	struct platform_device *pdev = to_platform_device(dev);
579 	struct resource *cfg_res;
580 	int ret;
581 
582 	pdev = to_platform_device(dev);
583 
584 	port->bridge_addr =
585 		devm_platform_ioremap_resource_byname(pdev, "apb");
586 
587 	if (IS_ERR(port->bridge_addr))
588 		return dev_err_probe(dev, PTR_ERR(port->bridge_addr),
589 				     "failed to map reg memory\n");
590 
591 	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
592 	if (!cfg_res)
593 		return dev_err_probe(dev, -ENODEV,
594 				     "failed to get config memory\n");
595 
596 	port->config_base = devm_ioremap_resource(dev, cfg_res);
597 	if (IS_ERR(port->config_base))
598 		return dev_err_probe(dev, PTR_ERR(port->config_base),
599 				     "failed to map config memory\n");
600 
601 	bridge = devm_pci_alloc_host_bridge(dev, 0);
602 	if (!bridge)
603 		return dev_err_probe(dev, -ENOMEM,
604 				     "failed to alloc bridge\n");
605 
606 	if (port->host_ops && port->host_ops->host_init) {
607 		ret = port->host_ops->host_init(port);
608 		if (ret)
609 			return ret;
610 	}
611 
612 	port->bridge = bridge;
613 	plda_pcie_setup_window(port->bridge_addr, 0, cfg_res->start, 0,
614 			       resource_size(cfg_res));
615 	plda_pcie_setup_iomems(bridge, port);
616 	plda_set_default_msi(&port->msi);
617 	ret = plda_init_interrupts(pdev, port, plda_event);
618 	if (ret)
619 		goto err_host;
620 
621 	/* Set default bus ops */
622 	bridge->ops = ops;
623 	bridge->sysdata = port;
624 
625 	ret = pci_host_probe(bridge);
626 	if (ret < 0) {
627 		dev_err_probe(dev, ret, "failed to probe pci host\n");
628 		goto err_probe;
629 	}
630 
631 	return ret;
632 
633 err_probe:
634 	plda_pcie_irq_domain_deinit(port);
635 err_host:
636 	if (port->host_ops && port->host_ops->host_deinit)
637 		port->host_ops->host_deinit(port);
638 
639 	return ret;
640 }
641 EXPORT_SYMBOL_GPL(plda_pcie_host_init);
642 
/*
 * Unwind plda_pcie_host_init(): remove the root bus, tear down the IRQ
 * domains and run the optional platform deinit hook.
 */
void plda_pcie_host_deinit(struct plda_pcie_rp *port)
{
	pci_stop_root_bus(port->bridge->bus);
	pci_remove_root_bus(port->bridge->bus);

	plda_pcie_irq_domain_deinit(port);

	if (port->host_ops && port->host_ops->host_deinit)
		port->host_ops->host_deinit(port);
}
EXPORT_SYMBOL_GPL(plda_pcie_host_deinit);
654