1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * PLDA PCIe XpressRich host controller driver
4 *
5 * Copyright (C) 2023 Microchip Co. Ltd
6 * StarFive Co. Ltd
7 *
8 * Author: Daire McNamara <daire.mcnamara@microchip.com>
9 */
10
11 #include <linux/align.h>
12 #include <linux/bitfield.h>
13 #include <linux/irqchip/chained_irq.h>
14 #include <linux/irqchip/irq-msi-lib.h>
15 #include <linux/irqdomain.h>
16 #include <linux/msi.h>
17 #include <linux/pci_regs.h>
18 #include <linux/pci-ecam.h>
19 #include <linux/wordpart.h>
20
21 #include "pcie-plda.h"
22
plda_pcie_map_bus(struct pci_bus * bus,unsigned int devfn,int where)23 void __iomem *plda_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
24 int where)
25 {
26 struct plda_pcie_rp *pcie = bus->sysdata;
27
28 return pcie->config_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
29 }
30 EXPORT_SYMBOL_GPL(plda_pcie_map_bus);
31
/*
 * Chained handler for the MSI group interrupt.
 *
 * ISTATUS_LOCAL carries a summary of pending MSIs; the per-vector
 * status lives in ISTATUS_MSI.  The summary bits are written back
 * first (assumed write-1-to-clear — matches the ack style used
 * elsewhere in this file), then each pending vector is demuxed into
 * the MSI irq domain.
 */
static void plda_handle_msi(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	struct plda_msi *msi = &port->msi;
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long status;
	u32 bit;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	if (status & PM_MSI_INT_MSI_MASK) {
		/* Clear the MSI summary bits before reading per-vector status */
		writel_relaxed(status & PM_MSI_INT_MSI_MASK,
			       bridge_base_addr + ISTATUS_LOCAL);
		status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
		for_each_set_bit(bit, &status, msi->num_vectors) {
			ret = generic_handle_domain_irq(msi->dev_domain, bit);
			if (ret)
				dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
						    bit);
		}
	}

	chained_irq_exit(chip, desc);
}
60
plda_msi_bottom_irq_ack(struct irq_data * data)61 static void plda_msi_bottom_irq_ack(struct irq_data *data)
62 {
63 struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
64 void __iomem *bridge_base_addr = port->bridge_addr;
65 u32 bitpos = data->hwirq;
66
67 writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
68 }
69
plda_compose_msi_msg(struct irq_data * data,struct msi_msg * msg)70 static void plda_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
71 {
72 struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
73 phys_addr_t addr = port->msi.vector_phy;
74
75 msg->address_lo = lower_32_bits(addr);
76 msg->address_hi = upper_32_bits(addr);
77 msg->data = data->hwirq;
78
79 dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
80 (int)data->hwirq, msg->address_hi, msg->address_lo);
81 }
82
/*
 * irq_chip for individual MSI vectors: ack clears the vector's
 * ISTATUS_MSI bit, compose programs the doorbell address/data pair.
 */
static struct irq_chip plda_msi_bottom_irq_chip = {
	.name = "PLDA MSI",
	.irq_ack = plda_msi_bottom_irq_ack,
	.irq_compose_msi_msg = plda_compose_msi_msg,
};
88
plda_irq_msi_domain_alloc(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs,void * args)89 static int plda_irq_msi_domain_alloc(struct irq_domain *domain,
90 unsigned int virq,
91 unsigned int nr_irqs,
92 void *args)
93 {
94 struct plda_pcie_rp *port = domain->host_data;
95 struct plda_msi *msi = &port->msi;
96 unsigned long bit;
97
98 mutex_lock(&msi->lock);
99 bit = find_first_zero_bit(msi->used, msi->num_vectors);
100 if (bit >= msi->num_vectors) {
101 mutex_unlock(&msi->lock);
102 return -ENOSPC;
103 }
104
105 set_bit(bit, msi->used);
106
107 irq_domain_set_info(domain, virq, bit, &plda_msi_bottom_irq_chip,
108 domain->host_data, handle_edge_irq, NULL, NULL);
109
110 mutex_unlock(&msi->lock);
111
112 return 0;
113 }
114
/*
 * Return an MSI vector to the allocation bitmap.  Takes msi->lock to
 * serialize against plda_irq_msi_domain_alloc(); freeing a vector that
 * was never allocated is reported but otherwise ignored.
 */
static void plda_irq_msi_domain_free(struct irq_domain *domain,
				     unsigned int virq,
				     unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(d);
	struct plda_msi *msi = &port->msi;

	mutex_lock(&msi->lock);

	if (test_bit(d->hwirq, msi->used))
		__clear_bit(d->hwirq, msi->used);
	else
		dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);

	mutex_unlock(&msi->lock);
}
132
/* alloc/free ops for the per-vector MSI irq domain */
static const struct irq_domain_ops msi_domain_ops = {
	.alloc = plda_irq_msi_domain_alloc,
	.free = plda_irq_msi_domain_free,
};
137
/* Feature flags required from / supported for child MSI domains */
#define PLDA_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
				 MSI_FLAG_USE_DEF_CHIP_OPS | \
				 MSI_FLAG_NO_AFFINITY)
#define PLDA_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
				  MSI_FLAG_PCI_MSIX)

/* Parent-domain description passed to msi_create_parent_irq_domain() */
static const struct msi_parent_ops plda_msi_parent_ops = {
	.required_flags = PLDA_MSI_FLAGS_REQUIRED,
	.supported_flags = PLDA_MSI_FLAGS_SUPPORTED,
	.chip_flags = MSI_CHIP_FLAG_SET_ACK,
	.bus_select_token = DOMAIN_BUS_PCI_MSI,
	.prefix = "PLDA-",
	.init_dev_msi_info = msi_lib_init_dev_msi_info,
};
152
plda_allocate_msi_domains(struct plda_pcie_rp * port)153 static int plda_allocate_msi_domains(struct plda_pcie_rp *port)
154 {
155 struct device *dev = port->dev;
156 struct plda_msi *msi = &port->msi;
157
158 mutex_init(&port->msi.lock);
159
160 struct irq_domain_info info = {
161 .fwnode = dev_fwnode(dev),
162 .ops = &msi_domain_ops,
163 .host_data = port,
164 .size = msi->num_vectors,
165 };
166
167 msi->dev_domain = msi_create_parent_irq_domain(&info, &plda_msi_parent_ops);
168 if (!msi->dev_domain) {
169 dev_err(dev, "failed to create IRQ domain\n");
170 return -ENOMEM;
171 }
172
173 return 0;
174 }
175
/*
 * Chained handler for the INTx group interrupt: extract the four INTx
 * bits from ISTATUS_LOCAL and demux each asserted line into the INTx
 * domain.  Per-line ack/mask is done by the INTx irq_chip callbacks.
 */
static void plda_handle_intx(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	void __iomem *bridge_base_addr = port->bridge_addr;
	unsigned long status;
	u32 bit;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	if (status & PM_MSI_INT_INTX_MASK) {
		/* Shift the INTx field down so bit 0 == INTA */
		status &= PM_MSI_INT_INTX_MASK;
		status >>= PM_MSI_INT_INTX_SHIFT;
		for_each_set_bit(bit, &status, PCI_NUM_INTX) {
			ret = generic_handle_domain_irq(port->intx_domain, bit);
			if (ret)
				dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
						    bit);
		}
	}

	chained_irq_exit(chip, desc);
}
202
plda_ack_intx_irq(struct irq_data * data)203 static void plda_ack_intx_irq(struct irq_data *data)
204 {
205 struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
206 void __iomem *bridge_base_addr = port->bridge_addr;
207 u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
208
209 writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
210 }
211
plda_mask_intx_irq(struct irq_data * data)212 static void plda_mask_intx_irq(struct irq_data *data)
213 {
214 struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
215 void __iomem *bridge_base_addr = port->bridge_addr;
216 unsigned long flags;
217 u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
218 u32 val;
219
220 raw_spin_lock_irqsave(&port->lock, flags);
221 val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
222 val &= ~mask;
223 writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
224 raw_spin_unlock_irqrestore(&port->lock, flags);
225 }
226
plda_unmask_intx_irq(struct irq_data * data)227 static void plda_unmask_intx_irq(struct irq_data *data)
228 {
229 struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
230 void __iomem *bridge_base_addr = port->bridge_addr;
231 unsigned long flags;
232 u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
233 u32 val;
234
235 raw_spin_lock_irqsave(&port->lock, flags);
236 val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
237 val |= mask;
238 writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
239 raw_spin_unlock_irqrestore(&port->lock, flags);
240 }
241
/* irq_chip for the four INTx lines (bits at PM_MSI_INT_INTX_SHIFT) */
static struct irq_chip plda_intx_irq_chip = {
	.name = "PLDA PCIe INTx",
	.irq_ack = plda_ack_intx_irq,
	.irq_mask = plda_mask_intx_irq,
	.irq_unmask = plda_unmask_intx_irq,
};
248
/* .map callback for the INTx domain: level-triggered, chip data = port. */
static int plda_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, domain->host_data);
	irq_set_chip_and_handler(irq, &plda_intx_irq_chip, handle_level_irq);

	return 0;
}
257
/* Linear domain ops for the INTx domain */
static const struct irq_domain_ops intx_domain_ops = {
	.map = plda_pcie_intx_map,
};
261
/*
 * Translate the raw ISTATUS_LOCAL register into the driver's event
 * numbering (the reverse of plda_hwirq_to_mask()):
 *  - the MSI/system bits are shifted down so they sit just above the
 *    single combined INTx event,
 *  - all INTx sources collapse into one event bit,
 *  - bits up to P_ATR_EVT_DOORBELL_SHIFT map 1:1 to the register.
 */
static u32 plda_get_events(struct plda_pcie_rp *port)
{
	u32 events, val, origin;

	origin = readl_relaxed(port->bridge_addr + ISTATUS_LOCAL);

	/* MSI event and sys events */
	val = (origin & SYS_AND_MSI_MASK) >> PM_MSI_INT_MSI_SHIFT;
	events = val << (PM_MSI_INT_MSI_SHIFT - PCI_NUM_INTX + 1);

	/* INTx events */
	if (origin & PM_MSI_INT_INTX_MASK)
		events |= BIT(PM_MSI_INT_INTX_SHIFT);

	/* remains are same with register */
	events |= origin & GENMASK(P_ATR_EVT_DOORBELL_SHIFT, 0);

	return events;
}
281
/*
 * Default threaded-context handler for event interrupts that need no
 * device-specific processing; ack/mask is done by the event irq_chip.
 */
static irqreturn_t plda_event_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}
286
/*
 * Chained handler for the top-level controller interrupt: fetch the
 * event status via the (possibly platform-specific) get_events hook,
 * filter against the events this instance registered for, and demux
 * each pending event into the event domain.
 */
static void plda_handle_event(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	unsigned long events;
	u32 bit;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	events = port->event_ops->get_events(port);

	events &= port->events_bitmap;
	for_each_set_bit(bit, &events, port->num_events)
		generic_handle_domain_irq(port->event_domain, bit);

	chained_irq_exit(chip, desc);
}
304
/*
 * Convert a driver event number (hwirq) into its ISTATUS_LOCAL /
 * IMASK_LOCAL register mask (reverse of plda_get_events()).
 */
static u32 plda_hwirq_to_mask(int hwirq)
{
	u32 mask;

	/* hwirq 23 - 0 are the same with register */
	if (hwirq < EVENT_PM_MSI_INT_INTX)
		mask = BIT(hwirq);
	else if (hwirq == EVENT_PM_MSI_INT_INTX)
		/* the single INTx event covers all four INTx register bits */
		mask = PM_MSI_INT_INTX_MASK;
	else
		/* events above INTx skip the remaining INTx register bits */
		mask = BIT(hwirq + PCI_NUM_INTX - 1);

	return mask;
}
319
plda_ack_event_irq(struct irq_data * data)320 static void plda_ack_event_irq(struct irq_data *data)
321 {
322 struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
323
324 writel_relaxed(plda_hwirq_to_mask(data->hwirq),
325 port->bridge_addr + ISTATUS_LOCAL);
326 }
327
plda_mask_event_irq(struct irq_data * data)328 static void plda_mask_event_irq(struct irq_data *data)
329 {
330 struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
331 u32 mask, val;
332
333 mask = plda_hwirq_to_mask(data->hwirq);
334
335 raw_spin_lock(&port->lock);
336 val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
337 val &= ~mask;
338 writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
339 raw_spin_unlock(&port->lock);
340 }
341
plda_unmask_event_irq(struct irq_data * data)342 static void plda_unmask_event_irq(struct irq_data *data)
343 {
344 struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
345 u32 mask, val;
346
347 mask = plda_hwirq_to_mask(data->hwirq);
348
349 raw_spin_lock(&port->lock);
350 val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
351 val |= mask;
352 writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
353 raw_spin_unlock(&port->lock);
354 }
355
/* Default event irq_chip (overridable via port->event_irq_chip) */
static struct irq_chip plda_event_irq_chip = {
	.name = "PLDA PCIe EVENT",
	.irq_ack = plda_ack_event_irq,
	.irq_mask = plda_mask_event_irq,
	.irq_unmask = plda_unmask_event_irq,
};

/* Default event accessors (overridable via port->event_ops) */
static const struct plda_event_ops plda_event_ops = {
	.get_events = plda_get_events,
};
366
/* .map callback for the event domain: use the port's (possibly
 * platform-specific) event irq_chip, level-triggered.
 */
static int plda_pcie_event_map(struct irq_domain *domain, unsigned int irq,
			       irq_hw_number_t hwirq)
{
	struct plda_pcie_rp *port = domain->host_data;

	irq_set_chip_and_handler(irq, port->event_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, port);

	return 0;
}
377
/* Linear domain ops for the event domain */
static const struct irq_domain_ops plda_event_domain_ops = {
	.map = plda_pcie_event_map,
};
381
plda_pcie_init_irq_domains(struct plda_pcie_rp * port)382 static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port)
383 {
384 struct device *dev = port->dev;
385 struct device_node *node = dev->of_node;
386 struct device_node *pcie_intc_node;
387
388 /* Setup INTx */
389 pcie_intc_node = of_get_next_child(node, NULL);
390 if (!pcie_intc_node) {
391 dev_err(dev, "failed to find PCIe Intc node\n");
392 return -EINVAL;
393 }
394
395 port->event_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
396 port->num_events, &plda_event_domain_ops,
397 port);
398 if (!port->event_domain) {
399 dev_err(dev, "failed to get event domain\n");
400 of_node_put(pcie_intc_node);
401 return -ENOMEM;
402 }
403
404 irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);
405
406 port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
407 &intx_domain_ops, port);
408 if (!port->intx_domain) {
409 dev_err(dev, "failed to get an INTx IRQ domain\n");
410 of_node_put(pcie_intc_node);
411 return -ENOMEM;
412 }
413
414 irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);
415
416 of_node_put(pcie_intc_node);
417 raw_spin_lock_init(&port->lock);
418
419 return plda_allocate_msi_domains(port);
420 }
421
plda_init_interrupts(struct platform_device * pdev,struct plda_pcie_rp * port,const struct plda_event * event)422 int plda_init_interrupts(struct platform_device *pdev,
423 struct plda_pcie_rp *port,
424 const struct plda_event *event)
425 {
426 struct device *dev = &pdev->dev;
427 int event_irq, ret;
428 u32 i;
429
430 if (!port->event_ops)
431 port->event_ops = &plda_event_ops;
432
433 if (!port->event_irq_chip)
434 port->event_irq_chip = &plda_event_irq_chip;
435
436 ret = plda_pcie_init_irq_domains(port);
437 if (ret) {
438 dev_err(dev, "failed creating IRQ domains\n");
439 return ret;
440 }
441
442 port->irq = platform_get_irq(pdev, 0);
443 if (port->irq < 0)
444 return -ENODEV;
445
446 for_each_set_bit(i, &port->events_bitmap, port->num_events) {
447 event_irq = irq_create_mapping(port->event_domain, i);
448 if (!event_irq) {
449 dev_err(dev, "failed to map hwirq %d\n", i);
450 return -ENXIO;
451 }
452
453 if (event->request_event_irq)
454 ret = event->request_event_irq(port, event_irq, i);
455 else
456 ret = devm_request_irq(dev, event_irq,
457 plda_event_handler,
458 0, NULL, port);
459
460 if (ret) {
461 dev_err(dev, "failed to request IRQ %d\n", event_irq);
462 return ret;
463 }
464 }
465
466 port->intx_irq = irq_create_mapping(port->event_domain,
467 event->intx_event);
468 if (!port->intx_irq) {
469 dev_err(dev, "failed to map INTx interrupt\n");
470 return -ENXIO;
471 }
472
473 /* Plug the INTx chained handler */
474 irq_set_chained_handler_and_data(port->intx_irq, plda_handle_intx, port);
475
476 port->msi_irq = irq_create_mapping(port->event_domain,
477 event->msi_event);
478 if (!port->msi_irq)
479 return -ENXIO;
480
481 /* Plug the MSI chained handler */
482 irq_set_chained_handler_and_data(port->msi_irq, plda_handle_msi, port);
483
484 /* Plug the main event chained handler */
485 irq_set_chained_handler_and_data(port->irq, plda_handle_event, port);
486
487 return 0;
488 }
489 EXPORT_SYMBOL_GPL(plda_init_interrupts);
490
/*
 * Program one outbound (AXI -> PCIe) address translation table entry.
 * Entry 0 targets config space; all others target the TX/RX interface.
 */
void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
			    phys_addr_t axi_addr, phys_addr_t pci_addr,
			    size_t size)
{
	void __iomem *entry = bridge_base_addr + index * ATR_ENTRY_SIZE;
	u32 atr_sz = ilog2(size) - 1;
	u32 val;

	val = (index == 0) ? PCIE_CONFIG_INTERFACE : PCIE_TX_RX_INTERFACE;
	writel(val, entry + ATR0_AXI4_SLV0_TRSL_PARAM);

	/* Source address, window size encoding, enable bit */
	val = ALIGN_DOWN(lower_32_bits(axi_addr), SZ_4K);
	val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
	val |= ATR_IMPL_ENABLE;
	writel(val, entry + ATR0_AXI4_SLV0_SRCADDR_PARAM);

	writel(upper_32_bits(axi_addr), entry + ATR0_AXI4_SLV0_SRC_ADDR);
	writel(lower_32_bits(pci_addr), entry + ATR0_AXI4_SLV0_TRSL_ADDR_LSB);
	writel(upper_32_bits(pci_addr), entry + ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
}
EXPORT_SYMBOL_GPL(plda_pcie_setup_window);
525
/*
 * Program inbound (PCIe -> AXI) translation window 0: set the size
 * field in the SRCADDR_PARAM register and zero the source address.
 * NOTE(review): exact window semantics follow the ATR0_PCIE_WIN0_*
 * definitions in pcie-plda.h — confirm against controller docs.
 */
void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port)
{
	void __iomem *bridge_base_addr = port->bridge_addr;
	u32 val;

	val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
	val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
	writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
	writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
}
EXPORT_SYMBOL_GPL(plda_pcie_setup_inbound_address_translation);
537
plda_pcie_setup_iomems(struct pci_host_bridge * bridge,struct plda_pcie_rp * port)538 int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
539 struct plda_pcie_rp *port)
540 {
541 void __iomem *bridge_base_addr = port->bridge_addr;
542 struct resource_entry *entry;
543 u64 pci_addr;
544 u32 index = 1;
545
546 resource_list_for_each_entry(entry, &bridge->windows) {
547 if (resource_type(entry->res) == IORESOURCE_MEM) {
548 pci_addr = entry->res->start - entry->offset;
549 plda_pcie_setup_window(bridge_base_addr, index,
550 entry->res->start, pci_addr,
551 resource_size(entry->res));
552 index++;
553 }
554 }
555
556 return 0;
557 }
558 EXPORT_SYMBOL_GPL(plda_pcie_setup_iomems);
559
/*
 * Tear down the interrupt machinery: detach the chained handlers first
 * so no further demuxing can run, then remove the irq domains.
 */
static void plda_pcie_irq_domain_deinit(struct plda_pcie_rp *pcie)
{
	irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
	irq_set_chained_handler_and_data(pcie->msi_irq, NULL, NULL);
	irq_set_chained_handler_and_data(pcie->intx_irq, NULL, NULL);

	irq_domain_remove(pcie->msi.dev_domain);

	irq_domain_remove(pcie->intx_domain);
	irq_domain_remove(pcie->event_domain);
}
571
plda_pcie_host_init(struct plda_pcie_rp * port,struct pci_ops * ops,const struct plda_event * plda_event)572 int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
573 const struct plda_event *plda_event)
574 {
575 struct device *dev = port->dev;
576 struct pci_host_bridge *bridge;
577 struct platform_device *pdev = to_platform_device(dev);
578 struct resource *cfg_res;
579 int ret;
580
581 pdev = to_platform_device(dev);
582
583 port->bridge_addr =
584 devm_platform_ioremap_resource_byname(pdev, "apb");
585
586 if (IS_ERR(port->bridge_addr))
587 return dev_err_probe(dev, PTR_ERR(port->bridge_addr),
588 "failed to map reg memory\n");
589
590 cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
591 if (!cfg_res)
592 return dev_err_probe(dev, -ENODEV,
593 "failed to get config memory\n");
594
595 port->config_base = devm_ioremap_resource(dev, cfg_res);
596 if (IS_ERR(port->config_base))
597 return dev_err_probe(dev, PTR_ERR(port->config_base),
598 "failed to map config memory\n");
599
600 bridge = devm_pci_alloc_host_bridge(dev, 0);
601 if (!bridge)
602 return dev_err_probe(dev, -ENOMEM,
603 "failed to alloc bridge\n");
604
605 if (port->host_ops && port->host_ops->host_init) {
606 ret = port->host_ops->host_init(port);
607 if (ret)
608 return ret;
609 }
610
611 port->bridge = bridge;
612 plda_pcie_setup_window(port->bridge_addr, 0, cfg_res->start, 0,
613 resource_size(cfg_res));
614 plda_pcie_setup_iomems(bridge, port);
615 plda_set_default_msi(&port->msi);
616 ret = plda_init_interrupts(pdev, port, plda_event);
617 if (ret)
618 goto err_host;
619
620 /* Set default bus ops */
621 bridge->ops = ops;
622 bridge->sysdata = port;
623
624 ret = pci_host_probe(bridge);
625 if (ret < 0) {
626 dev_err_probe(dev, ret, "failed to probe pci host\n");
627 goto err_probe;
628 }
629
630 return ret;
631
632 err_probe:
633 plda_pcie_irq_domain_deinit(port);
634 err_host:
635 if (port->host_ops && port->host_ops->host_deinit)
636 port->host_ops->host_deinit(port);
637
638 return ret;
639 }
640 EXPORT_SYMBOL_GPL(plda_pcie_host_init);
641
/*
 * Undo plda_pcie_host_init() in reverse order: remove the root bus,
 * tear down the interrupt machinery, then run the platform's
 * host_deinit hook.
 */
void plda_pcie_host_deinit(struct plda_pcie_rp *port)
{
	pci_stop_root_bus(port->bridge->bus);
	pci_remove_root_bus(port->bridge->bus);

	plda_pcie_irq_domain_deinit(port);

	if (port->host_ops && port->host_ops->host_deinit)
		port->host_ops->host_deinit(port);
}
EXPORT_SYMBOL_GPL(plda_pcie_host_deinit);
653