Lines matching "msi", "-", "map" in the PLDA PCIe host controller driver (pcie-plda-host.c)
1 // SPDX-License-Identifier: GPL-2.0
14 #include <linux/irqchip/irq-msi-lib.h>
16 #include <linux/msi.h>
18 #include <linux/pci-ecam.h>
21 #include "pcie-plda.h"
26 struct plda_pcie_rp *pcie = bus->sysdata; in plda_pcie_map_bus()
28 return pcie->config_base + PCIE_ECAM_OFFSET(bus->number, devfn, where); in plda_pcie_map_bus()
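
For reference, PCIE_ECAM_OFFSET() from <linux/pci-ecam.h> packs the bus number, devfn and register offset into the standard ECAM layout (bus in bits 27:20, devfn in bits 19:12, register in bits 11:0). A minimal stand-alone sketch of the same arithmetic, with arbitrary example values:

/* Illustration of the offset math used by plda_pcie_map_bus() above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t ecam_offset(uint8_t bus, uint8_t devfn, uint16_t where)
{
	return ((uint32_t)bus << 20) | ((uint32_t)devfn << 12) | where;
}

int main(void)
{
	/* bus 1, device 0, function 0, config register 0x10 (BAR0) */
	printf("offset = %#x\n", ecam_offset(1, 0, 0x10));	/* -> 0x100010 */
	return 0;
}
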
36 struct device *dev = port->dev; in plda_handle_msi()
37 struct plda_msi *msi = &port->msi; in plda_handle_msi()
38 void __iomem *bridge_base_addr = port->bridge_addr; in plda_handle_msi()
50 for_each_set_bit(bit, &status, msi->num_vectors) { in plda_handle_msi()
51 ret = generic_handle_domain_irq(msi->dev_domain, bit); in plda_handle_msi()
53 dev_err_ratelimited(dev, "bad MSI IRQ %d\n", in plda_handle_msi()
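
The fragments above sit inside a chained interrupt handler. A minimal sketch of that overall shape, assuming the pending-vector bits are read from an ISTATUS_MSI register (the register name and the chained_irq_enter()/chained_irq_exit() bracketing are assumptions, not visible in the matched lines):

static void plda_handle_msi_sketch(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	struct plda_msi *msi = &port->msi;
	unsigned long status;
	u32 bit;

	chained_irq_enter(chip, desc);

	/* ISTATUS_MSI is an assumed name for the per-vector status register */
	status = readl_relaxed(port->bridge_addr + ISTATUS_MSI);
	for_each_set_bit(bit, &status, msi->num_vectors) {
		if (generic_handle_domain_irq(msi->dev_domain, bit))
			dev_err_ratelimited(dev, "bad MSI IRQ %d\n", bit);
	}

	chained_irq_exit(chip, desc);
}
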
64 void __iomem *bridge_base_addr = port->bridge_addr; in plda_msi_bottom_irq_ack()
65 u32 bitpos = data->hwirq; in plda_msi_bottom_irq_ack()
73 phys_addr_t addr = port->msi.vector_phy; in plda_compose_msi_msg()
75 msg->address_lo = lower_32_bits(addr); in plda_compose_msi_msg()
76 msg->address_hi = upper_32_bits(addr); in plda_compose_msi_msg()
77 msg->data = data->hwirq; in plda_compose_msi_msg()
79 dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n", in plda_compose_msi_msg()
80 (int)data->hwirq, msg->address_hi, msg->address_lo); in plda_compose_msi_msg()
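
/* Read together, plda_compose_msi_msg() and plda_msi_bottom_irq_ack() above
 * implement a conventional MSI doorbell: the endpoint is told to write `data`
 * (the hwirq number) to the doorbell address held in port->msi.vector_phy,
 * and the ack callback clears the matching per-vector bit (BIT(data->hwirq))
 * in the bridge's status registers. The exact status register written by the
 * ack path is not visible in the matched lines.
 */
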
84 .name = "PLDA MSI",
94 struct plda_pcie_rp *port = domain->host_data; in plda_irq_msi_domain_alloc()
95 struct plda_msi *msi = &port->msi; in plda_irq_msi_domain_alloc()
98 mutex_lock(&msi->lock); in plda_irq_msi_domain_alloc()
99 bit = find_first_zero_bit(msi->used, msi->num_vectors); in plda_irq_msi_domain_alloc()
100 if (bit >= msi->num_vectors) { in plda_irq_msi_domain_alloc()
101 mutex_unlock(&msi->lock); in plda_irq_msi_domain_alloc()
102 return -ENOSPC; in plda_irq_msi_domain_alloc()
105 set_bit(bit, msi->used); in plda_irq_msi_domain_alloc()
108 domain->host_data, handle_edge_irq, NULL, NULL); in plda_irq_msi_domain_alloc()
110 mutex_unlock(&msi->lock); in plda_irq_msi_domain_alloc()
121 struct plda_msi *msi = &port->msi; in plda_irq_msi_domain_free()
123 mutex_lock(&msi->lock); in plda_irq_msi_domain_free()
125 if (test_bit(d->hwirq, msi->used)) in plda_irq_msi_domain_free()
126 __clear_bit(d->hwirq, msi->used); in plda_irq_msi_domain_free()
128 dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq); in plda_irq_msi_domain_free()
130 mutex_unlock(&msi->lock); in plda_irq_msi_domain_free()
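
The allocator above hands out MSI vectors first-fit from the msi->used bitmap and the free path returns them. Both callbacks are typically wired into the MSI parent domain through an irq_domain_ops table along these lines (the table name is hypothetical):

static const struct irq_domain_ops plda_msi_domain_ops_sketch = {
	.alloc	= plda_irq_msi_domain_alloc,
	.free	= plda_irq_msi_domain_free,
};
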
149 .prefix = "PLDA-",
155 struct device *dev = port->dev; in plda_allocate_msi_domains()
156 struct plda_msi *msi = &port->msi; in plda_allocate_msi_domains()
158 mutex_init(&port->msi.lock); in plda_allocate_msi_domains()
164 .size = msi->num_vectors, in plda_allocate_msi_domains()
167 msi->dev_domain = msi_create_parent_irq_domain(&info, &plda_msi_parent_ops); in plda_allocate_msi_domains()
168 if (!msi->dev_domain) { in plda_allocate_msi_domains()
170 return -ENOMEM; in plda_allocate_msi_domains()
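
A sketch of the setup around the msi_create_parent_irq_domain() call above, assuming the usual irq_domain_info fields; only .size is visible in the matched lines, while .fwnode, .ops and .host_data are assumptions (plda_msi_domain_ops_sketch is the hypothetical table from the sketch above):

	struct irq_domain_info info = {
		.fwnode		= dev_fwnode(dev),
		.ops		= &plda_msi_domain_ops_sketch,
		.host_data	= port,
		.size		= msi->num_vectors,
	};

	msi->dev_domain = msi_create_parent_irq_domain(&info, &plda_msi_parent_ops);
	if (!msi->dev_domain) {
		dev_err(dev, "failed to create MSI parent IRQ domain\n");
		return -ENOMEM;
	}
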
180 struct device *dev = port->dev; in plda_handle_intx()
181 void __iomem *bridge_base_addr = port->bridge_addr; in plda_handle_intx()
193 ret = generic_handle_domain_irq(port->intx_domain, bit); in plda_handle_intx()
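
/* plda_handle_intx() mirrors the MSI path: it is installed as a chained
 * handler (see the irq_set_chained_handler_and_data() call further down) and
 * demultiplexes the INTA..INTD status bits into port->intx_domain via
 * generic_handle_domain_irq(). The status read itself is not among the
 * matched lines.
 */
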
206 void __iomem *bridge_base_addr = port->bridge_addr; in plda_ack_intx_irq()
207 u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT); in plda_ack_intx_irq()
215 void __iomem *bridge_base_addr = port->bridge_addr; in plda_mask_intx_irq()
217 u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT); in plda_mask_intx_irq()
220 raw_spin_lock_irqsave(&port->lock, flags); in plda_mask_intx_irq()
224 raw_spin_unlock_irqrestore(&port->lock, flags); in plda_mask_intx_irq()
230 void __iomem *bridge_base_addr = port->bridge_addr; in plda_unmask_intx_irq()
232 u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT); in plda_unmask_intx_irq()
235 raw_spin_lock_irqsave(&port->lock, flags); in plda_unmask_intx_irq()
239 raw_spin_unlock_irqrestore(&port->lock, flags); in plda_unmask_intx_irq()
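
The INTx mask/unmask paths above do a read-modify-write of the bridge's interrupt mask under port->lock. A sketch of the unmask side, assuming the same IMASK_LOCAL register that the event mask/unmask helpers below manipulate:

static void plda_unmask_intx_irq_sketch(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr = port->bridge_addr;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);	/* register name assumed */
	val |= mask;
	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}
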
253 irq_set_chip_data(irq, domain->host_data); in plda_pcie_intx_map()
259 .map = plda_pcie_intx_map,
266 origin = readl_relaxed(port->bridge_addr + ISTATUS_LOCAL); in plda_get_events()
268 /* MSI event and sys events */ in plda_get_events()
270 events = val << (PM_MSI_INT_MSI_SHIFT - PCI_NUM_INTX + 1); in plda_get_events()
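
/* plda_get_events() condenses the raw ISTATUS_LOCAL value into the driver's
 * compact event numbering: the shift above lines the MSI/system-event bits up
 * with the event hwirqs that plda_handle_event() dispatches below. How `val`
 * is masked out of `origin` before the shift is not part of the matched lines.
 */
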
296 events = port->event_ops->get_events(port); in plda_handle_event()
298 events &= port->events_bitmap; in plda_handle_event()
299 for_each_set_bit(bit, &events, port->num_events) in plda_handle_event()
300 generic_handle_domain_irq(port->event_domain, bit); in plda_handle_event()
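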
309 /* hwirq 23 - 0 map directly to the register bits */ in plda_hwirq_to_mask()
315 mask = BIT(hwirq + PCI_NUM_INTX - 1); in plda_hwirq_to_mask()
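
The helper above converts an event hwirq back into an ISTATUS_LOCAL/IMASK_LOCAL bit mask. A sketch of its likely overall shape, assuming an EVENT_PM_MSI_INT_INTX event number and a PM_MSI_INT_INTX_MASK covering the four INTx bits (both names are assumptions; only the final branch and the comment are visible above):

static u32 plda_hwirq_to_mask_sketch(int hwirq)
{
	u32 mask;

	if (hwirq < EVENT_PM_MSI_INT_INTX)	/* hwirq 23 - 0 map 1:1 to register bits */
		mask = BIT(hwirq);
	else if (hwirq == EVENT_PM_MSI_INT_INTX)
		mask = PM_MSI_INT_INTX_MASK;	/* one event summarising INTA..INTD */
	else
		mask = BIT(hwirq + PCI_NUM_INTX - 1);

	return mask;
}
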
324 writel_relaxed(plda_hwirq_to_mask(data->hwirq), in plda_ack_event_irq()
325 port->bridge_addr + ISTATUS_LOCAL); in plda_ack_event_irq()
333 mask = plda_hwirq_to_mask(data->hwirq); in plda_mask_event_irq()
335 raw_spin_lock(&port->lock); in plda_mask_event_irq()
336 val = readl_relaxed(port->bridge_addr + IMASK_LOCAL); in plda_mask_event_irq()
338 writel_relaxed(val, port->bridge_addr + IMASK_LOCAL); in plda_mask_event_irq()
339 raw_spin_unlock(&port->lock); in plda_mask_event_irq()
347 mask = plda_hwirq_to_mask(data->hwirq); in plda_unmask_event_irq()
349 raw_spin_lock(&port->lock); in plda_unmask_event_irq()
350 val = readl_relaxed(port->bridge_addr + IMASK_LOCAL); in plda_unmask_event_irq()
352 writel_relaxed(val, port->bridge_addr + IMASK_LOCAL); in plda_unmask_event_irq()
353 raw_spin_unlock(&port->lock); in plda_unmask_event_irq()
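
The ack/mask/unmask callbacks above form the default event irq_chip that plda_init_interrupts() falls back to (port->event_irq_chip = &plda_event_irq_chip further down). A sketch of that chip; the .name string is a guess:

static const struct irq_chip plda_event_irq_chip_sketch = {
	.name		= "PLDA PCIe EVENT",
	.irq_ack	= plda_ack_event_irq,
	.irq_mask	= plda_mask_event_irq,
	.irq_unmask	= plda_unmask_event_irq,
};
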
370 struct plda_pcie_rp *port = (void *)domain->host_data; in plda_pcie_event_map()
372 irq_set_chip_and_handler(irq, port->event_irq_chip, handle_level_irq); in plda_pcie_event_map()
373 irq_set_chip_data(irq, domain->host_data); in plda_pcie_event_map()
379 .map = plda_pcie_event_map,
384 struct device *dev = port->dev; in plda_pcie_init_irq_domains()
385 struct device_node *node = dev->of_node; in plda_pcie_init_irq_domains()
392 return -EINVAL; in plda_pcie_init_irq_domains()
395 port->event_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), in plda_pcie_init_irq_domains()
396 port->num_events, &plda_event_domain_ops, in plda_pcie_init_irq_domains()
398 if (!port->event_domain) { in plda_pcie_init_irq_domains()
401 return -ENOMEM; in plda_pcie_init_irq_domains()
404 irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS); in plda_pcie_init_irq_domains()
406 port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX, in plda_pcie_init_irq_domains()
408 if (!port->intx_domain) { in plda_pcie_init_irq_domains()
411 return -ENOMEM; in plda_pcie_init_irq_domains()
414 irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED); in plda_pcie_init_irq_domains()
417 raw_spin_lock_init(&port->lock); in plda_pcie_init_irq_domains()
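
/* The elided lines above look up pcie_intc_node, the interrupt-controller
 * child of the host bridge's DT node, and presumably drop the reference with
 * of_node_put() once both domains exist. Both the event domain and the INTx
 * domain are created against that node's fwnode, so interrupts described
 * under the bridge's child interrupt controller resolve through these two
 * linear domains.
 */
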
426 struct device *dev = &pdev->dev; in plda_init_interrupts()
430 if (!port->event_ops) in plda_init_interrupts()
431 port->event_ops = &plda_event_ops; in plda_init_interrupts()
433 if (!port->event_irq_chip) in plda_init_interrupts()
434 port->event_irq_chip = &plda_event_irq_chip; in plda_init_interrupts()
442 port->irq = platform_get_irq(pdev, 0); in plda_init_interrupts()
443 if (port->irq < 0) in plda_init_interrupts()
444 return -ENODEV; in plda_init_interrupts()
446 for_each_set_bit(i, &port->events_bitmap, port->num_events) { in plda_init_interrupts()
447 event_irq = irq_create_mapping(port->event_domain, i); in plda_init_interrupts()
449 dev_err(dev, "failed to map hwirq %d\n", i); in plda_init_interrupts()
450 return -ENXIO; in plda_init_interrupts()
453 if (event->request_event_irq) in plda_init_interrupts()
454 ret = event->request_event_irq(port, event_irq, i); in plda_init_interrupts()
466 port->intx_irq = irq_create_mapping(port->event_domain, in plda_init_interrupts()
467 event->intx_event); in plda_init_interrupts()
468 if (!port->intx_irq) { in plda_init_interrupts()
469 dev_err(dev, "failed to map INTx interrupt\n"); in plda_init_interrupts()
470 return -ENXIO; in plda_init_interrupts()
474 irq_set_chained_handler_and_data(port->intx_irq, plda_handle_intx, port); in plda_init_interrupts()
476 port->msi_irq = irq_create_mapping(port->event_domain, in plda_init_interrupts()
477 event->msi_event); in plda_init_interrupts()
478 if (!port->msi_irq) in plda_init_interrupts()
479 return -ENXIO; in plda_init_interrupts()
481 /* Plug the MSI chained handler */ in plda_init_interrupts()
482 irq_set_chained_handler_and_data(port->msi_irq, plda_handle_msi, port); in plda_init_interrupts()
485 irq_set_chained_handler_and_data(port->irq, plda_handle_event, port); in plda_init_interrupts()
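
/* Chained-interrupt topology wired up by plda_init_interrupts() (sketch):
 *
 *   port->irq (platform IRQ)
 *     -> plda_handle_event()                    chained handler
 *          -> port->event_domain                per-event hwirqs, handle_level_irq
 *               -> intx_event: plda_handle_intx() -> port->intx_domain (INTA..INTD)
 *               -> msi_event:  plda_handle_msi()  -> port->msi.dev_domain (MSI vectors)
 *               -> other events: per-event handlers requested via
 *                  event->request_event_irq() in the loop above
 */
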
495 u32 atr_sz = ilog2(size) - 1; in plda_pcie_setup_window()
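
The outbound ATR window size is encoded as log2(size) - 1. A stand-alone sketch of that arithmetic for a power-of-two window; the register packing itself is not shown in the matched lines:

#include <stdio.h>

/* Mirrors the atr_sz computation in plda_pcie_setup_window() above. */
static unsigned int ilog2_ull(unsigned long long v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned long long size = 0x10000000ULL;	/* 256 MiB example window */

	printf("atr_sz = %u\n", ilog2_ull(size) - 1);	/* prints 27 */
	return 0;
}
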
528 void __iomem *bridge_base_addr = port->bridge_addr; in plda_pcie_setup_inbound_address_translation()
541 void __iomem *bridge_base_addr = port->bridge_addr; in plda_pcie_setup_iomems()
546 resource_list_for_each_entry(entry, &bridge->windows) { in plda_pcie_setup_iomems()
547 if (resource_type(entry->res) == IORESOURCE_MEM) { in plda_pcie_setup_iomems()
548 pci_addr = entry->res->start - entry->offset; in plda_pcie_setup_iomems()
550 entry->res->start, pci_addr, in plda_pcie_setup_iomems()
551 resource_size(entry->res)); in plda_pcie_setup_iomems()
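
/* Each IORESOURCE_MEM window of the host bridge gets its own outbound ATR
 * entry: the CPU-side base is entry->res->start and the PCI-side base
 * subtracts entry->offset, so DT "ranges" translations are honoured. The
 * elided lines presumably pass an incrementing table index to
 * plda_pcie_setup_window(); index 0 is already used for config space in
 * plda_pcie_host_init() below.
 */
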
562 irq_set_chained_handler_and_data(pcie->irq, NULL, NULL); in plda_pcie_irq_domain_deinit()
563 irq_set_chained_handler_and_data(pcie->msi_irq, NULL, NULL); in plda_pcie_irq_domain_deinit()
564 irq_set_chained_handler_and_data(pcie->intx_irq, NULL, NULL); in plda_pcie_irq_domain_deinit()
566 irq_domain_remove(pcie->msi.dev_domain); in plda_pcie_irq_domain_deinit()
568 irq_domain_remove(pcie->intx_domain); in plda_pcie_irq_domain_deinit()
569 irq_domain_remove(pcie->event_domain); in plda_pcie_irq_domain_deinit()
575 struct device *dev = port->dev; in plda_pcie_host_init()
583 port->bridge_addr = in plda_pcie_host_init()
586 if (IS_ERR(port->bridge_addr)) in plda_pcie_host_init()
587 return dev_err_probe(dev, PTR_ERR(port->bridge_addr), in plda_pcie_host_init()
588 "failed to map reg memory\n"); in plda_pcie_host_init()
592 return dev_err_probe(dev, -ENODEV, in plda_pcie_host_init()
595 port->config_base = devm_ioremap_resource(dev, cfg_res); in plda_pcie_host_init()
596 if (IS_ERR(port->config_base)) in plda_pcie_host_init()
597 return dev_err_probe(dev, PTR_ERR(port->config_base), in plda_pcie_host_init()
598 "failed to map config memory\n"); in plda_pcie_host_init()
602 return -ENOMEM; in plda_pcie_host_init()
604 if (port->host_ops && port->host_ops->host_init) { in plda_pcie_host_init()
605 ret = port->host_ops->host_init(port); in plda_pcie_host_init()
610 port->bridge = bridge; in plda_pcie_host_init()
611 plda_pcie_setup_window(port->bridge_addr, 0, cfg_res->start, 0, in plda_pcie_host_init()
614 plda_set_default_msi(&port->msi); in plda_pcie_host_init()
620 bridge->ops = ops; in plda_pcie_host_init()
621 bridge->sysdata = port; in plda_pcie_host_init()
634 if (port->host_ops && port->host_ops->host_deinit) in plda_pcie_host_init()
635 port->host_ops->host_deinit(port); in plda_pcie_host_init()
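
/* Summary of the init sequence visible above: map the bridge register space,
 * map the config-space resource (cfg_res), allocate the host bridge, run the
 * optional host_ops->host_init() hook, program outbound ATR window 0 to cover
 * config space, apply the default MSI setup, then hand the bridge to the PCI
 * core with ops and sysdata = port (presumably via pci_host_probe(), which is
 * not among the matched lines). The error path above unwinds through
 * host_ops->host_deinit().
 */
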
643 pci_stop_root_bus(port->bridge->bus); in plda_pcie_host_deinit()
644 pci_remove_root_bus(port->bridge->bus); in plda_pcie_host_deinit()
648 if (port->host_ops && port->host_ops->host_deinit) in plda_pcie_host_deinit()
649 port->host_ops->host_deinit(port); in plda_pcie_host_deinit()