// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_child_pcie_ops;

static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip = &dw_pcie_msi_irq_chip,
};

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
	int i, pos;
	unsigned long val;
	u32 status, num_ctrls;
	irqreturn_t ret = IRQ_NONE;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
					   (i * MSI_REG_CTRL_BLOCK_SIZE));
		if (!status)
			continue;

		ret = IRQ_HANDLED;
		val = status;
		pos = 0;
		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			generic_handle_domain_irq(pp->irq_domain,
						  (i * MAX_MSI_IRQS_PER_CTRL) +
						  pos);
			pos++;
		}
	}

	return ret;
}

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct dw_pcie_rp *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}

static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}

static int dw_pci_msi_set_affinity(struct irq_data *d,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};

static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct dw_pcie_rp *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct dw_pcie_rp *pp = domain->host_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc = dw_pcie_irq_domain_alloc,
	.free = dw_pcie_irq_domain_free,
};
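
/*
 * Two IRQ domains are stacked here: a linear domain that hands out this
 * controller's MSI vectors (using dw_pcie_msi_domain_ops above), and a
 * PCI/MSI domain on top of it that the PCI core allocates from.
 */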
int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
						  &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}

static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
	u32 ctrl;

	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
							 NULL, NULL);
	}

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);
}

static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target = (u64)pp->msi_data;

	if (!pci_msi_enabled() || !pp->has_msi_ctrl)
		return;

	/* Program the msi_data */
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
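
/*
 * Some platforms route each MSI controller to its own interrupt line.
 * Pick up any per-controller "msi0", "msi1", ... IRQs from the devicetree
 * and size num_vectors to match the number of lines found.
 */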
static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u32 ctrl, max_vectors;
	int irq;

	/* Parse any "msiX" IRQs described in the devicetree */
	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
		char msi_name[] = "msiX";

		msi_name[3] = '0' + ctrl;
		irq = platform_get_irq_byname_optional(pdev, msi_name);
		if (irq == -ENXIO)
			break;
		if (irq < 0)
			return dev_err_probe(dev, irq,
					     "Failed to parse MSI IRQ '%s'\n",
					     msi_name);

		pp->msi_irq[ctrl] = irq;
	}

	/* If no "msiX" IRQs, caller should fallback to "msi" IRQ */
	if (ctrl == 0)
		return -ENXIO;

	max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
	if (pp->num_vectors > max_vectors) {
		dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
			 max_vectors);
		pp->num_vectors = max_vectors;
	}
	if (!pp->num_vectors)
		pp->num_vectors = max_vectors;

	return 0;
}

static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u64 *msi_vaddr;
	int ret;
	u32 ctrl, num_ctrls;

	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
		pp->irq_mask[ctrl] = ~0;

	if (!pp->msi_irq[0]) {
		ret = dw_pcie_parse_split_msi_irq(pp);
		if (ret < 0 && ret != -ENXIO)
			return ret;
	}

	if (!pp->num_vectors)
		pp->num_vectors = MSI_DEF_NUM_VECTORS;
	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	if (!pp->msi_irq[0]) {
		pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
		if (pp->msi_irq[0] < 0) {
			pp->msi_irq[0] = platform_get_irq(pdev, 0);
			if (pp->msi_irq[0] < 0)
				return pp->msi_irq[0];
		}
	}

	dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);

	pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

	ret = dw_pcie_allocate_domains(pp);
	if (ret)
		return ret;

	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
							 dw_chained_msi_isr, pp);
	}

	/*
	 * Even though the iMSI-RX Module supports 64-bit addresses some
	 * peripheral PCIe devices may lack 64-bit message support. In
	 * order not to miss MSI TLPs from those devices the MSI target
	 * address has to be within the lowest 4GB.
	 *
	 * Note until there is a better alternative found the reservation is
	 * done by allocating from the artificially limited DMA-coherent
	 * memory.
	 */
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (ret)
		dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");

	msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
					GFP_KERNEL);
	if (!msi_vaddr) {
		dev_err(dev, "Failed to alloc and map MSI data\n");
		dw_pcie_free_msi(pp);
		return -ENOMEM;
	}

	return 0;
}

int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win;
	struct pci_host_bridge *bridge;
	struct resource *res;
	int ret;

	raw_spin_lock_init(&pp->lock);

	ret = dw_pcie_get_resources(pci);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (res) {
		pp->cfg0_size = resource_size(res);
		pp->cfg0_base = res->start;

		pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
		if (IS_ERR(pp->va_cfg0_base))
			return PTR_ERR(pp->va_cfg0_base);
	} else {
		dev_err(dev, "Missing *config* reg space\n");
		return -ENODEV;
	}

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	pp->bridge = bridge;

	/* Get the I/O range from DT */
	win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
	if (win) {
		pp->io_size = resource_size(win->res);
		pp->io_bus_addr = win->res->start - win->offset;
		pp->io_base = pci_pio_to_address(win->res->start);
	}

	/* Set default bus ops */
	bridge->ops = &dw_pcie_ops;
	bridge->child_ops = &dw_child_pcie_ops;
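
	/* Let the platform glue run its own setup before MSI and DBI access */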
	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			return ret;
	}

	if (pci_msi_enabled()) {
		pp->has_msi_ctrl = !(pp->ops->msi_host_init ||
				     of_property_read_bool(np, "msi-parent") ||
				     of_property_read_bool(np, "msi-map"));

		/*
		 * For the has_msi_ctrl case the default assignment is handled
		 * in the dw_pcie_msi_host_init().
		 */
		if (!pp->has_msi_ctrl && !pp->num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else if (pp->num_vectors > MAX_MSI_IRQS) {
			dev_err(dev, "Invalid number of vectors\n");
			ret = -EINVAL;
			goto err_deinit_host;
		}

		if (pp->ops->msi_host_init) {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		} else if (pp->has_msi_ctrl) {
			ret = dw_pcie_msi_host_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		}
	}

	dw_pcie_version_detect(pci);

	dw_pcie_iatu_detect(pci);

	ret = dw_pcie_setup_rc(pp);
	if (ret)
		goto err_free_msi;

	if (!dw_pcie_link_up(pci)) {
		ret = dw_pcie_start_link(pci);
		if (ret)
			goto err_free_msi;
	}

	/* Ignore errors, the link may come up later */
	dw_pcie_wait_for_link(pci);

	bridge->sysdata = pp;

	ret = pci_host_probe(bridge);
	if (ret)
		goto err_stop_link;

	return 0;

err_stop_link:
	dw_pcie_stop_link(pci);

err_free_msi:
	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

err_deinit_host:
	if (pp->ops->host_deinit)
		pp->ops->host_deinit(pp);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);
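
/*
 * Tear down what dw_pcie_host_init() set up: remove the root bus, bring
 * the link down, release the MSI resources and let the platform undo its
 * own host_init work.
 */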
void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	pci_stop_root_bus(pp->bridge->bus);
	pci_remove_root_bus(pp->bridge->bus);

	dw_pcie_stop_link(pci);

	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

	if (pp->ops->host_deinit)
		pp->ops->host_deinit(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);

static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int type, ret;
	u32 busdev;

	/*
	 * Checking whether the link is up here is a last line of defense
	 * against platforms that forward errors on the system bus as
	 * SError upon PCI configuration transactions issued when the link
	 * is down. This check is racy by definition and does not stop
	 * the system from triggering an SError if the link goes down
	 * after this check is performed.
	 */
	if (!dw_pcie_link_up(pci))
		return NULL;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (pci_is_root_bus(bus->parent))
		type = PCIE_ATU_TYPE_CFG0;
	else
		type = PCIE_ATU_TYPE_CFG1;

	ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
					pp->cfg0_size);
	if (ret)
		return NULL;

	return pp->va_cfg0_base + where;
}

static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 *val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int ret;

	ret = pci_generic_config_read(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	if (pp->cfg0_io_shared) {
		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
						pp->io_base, pp->io_bus_addr,
						pp->io_size);
		if (ret)
			return PCIBIOS_SET_FAILED;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int ret;

	ret = pci_generic_config_write(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	if (pp->cfg0_io_shared) {
		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
						pp->io_base, pp->io_bus_addr,
						pp->io_size);
		if (ret)
			return PCIBIOS_SET_FAILED;
	}

	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops dw_child_pcie_ops = {
	.map_bus = dw_pcie_other_conf_map_bus,
	.read = dw_pcie_rd_other_conf,
	.write = dw_pcie_wr_other_conf,
};

void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (PCI_SLOT(devfn) > 0)
		return NULL;

	return pci->dbi_base + where;
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);

static struct pci_ops dw_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};
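
/*
 * Program the outbound iATU windows from the host bridge MEM/IO ranges and
 * the inbound windows from dma-ranges. Outbound window 0 stays reserved
 * for config accesses; if no outbound window is left for the I/O range,
 * the config window gets reused for it (cfg0_io_shared).
 */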
static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct resource_entry *entry;
	int i, ret;

	/* Note the very first outbound ATU is used for CFG IOs */
	if (!pci->num_ob_windows) {
		dev_err(pci->dev, "No outbound iATU found\n");
		return -EINVAL;
	}

	/*
	 * Ensure all out/inbound windows are disabled before proceeding with
	 * the MEM/IO (dma-)ranges setups.
	 */
	for (i = 0; i < pci->num_ob_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);

	for (i = 0; i < pci->num_ib_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i);

	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->windows) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ob_windows <= ++i)
			break;

		ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
						entry->res->start,
						entry->res->start - entry->offset,
						resource_size(entry->res));
		if (ret) {
			dev_err(pci->dev, "Failed to set MEM range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pp->io_size) {
		if (pci->num_ob_windows > ++i) {
			ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
							pp->io_base,
							pp->io_bus_addr,
							pp->io_size);
			if (ret) {
				dev_err(pci->dev, "Failed to set IO range %pr\n",
					entry->res);
				return ret;
			}
		} else {
			pp->cfg0_io_shared = true;
		}
	}

	if (pci->num_ob_windows <= i)
		dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
			 pci->num_ob_windows);

	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ib_windows <= i)
			break;

		ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM,
					       entry->res->start,
					       entry->res->start - entry->offset,
					       resource_size(entry->res));
		if (ret) {
			dev_err(pci->dev, "Failed to set DMA range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pci->num_ib_windows <= i)
		dev_warn(pci->dev, "Dma-ranges exceed inbound iATU size (%u)\n",
			 pci->num_ib_windows);

	return 0;
}

int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u32 val, ctrl, num_ctrls;
	int ret;

	/*
	 * Enable DBI read-only registers for writing/updating configuration.
	 * Write permission gets disabled towards the end of this function.
	 */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_setup(pci);

	if (pp->has_msi_ctrl) {
		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

		/* Initialize IRQ Status array */
		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    pp->irq_mask[ctrl]);
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    ~0);
		}
	}

	dw_pcie_msi_init(pp);

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
	       PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides its own child bus config accesses, it means
	 * the platform uses its own address translation component rather than
	 * ATU, so we should not program the ATU here.
	 */
	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
		ret = dw_pcie_iatu_setup(pp);
		if (ret)
			return ret;
	}

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Program correct class for RC */
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);