// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_child_pcie_ops;

static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		 MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
		 MSI_FLAG_MULTI_PCI_MSI,
	.chip = &dw_pcie_msi_irq_chip,
};

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
	int i, pos;
	unsigned long val;
	u32 status, num_ctrls;
	irqreturn_t ret = IRQ_NONE;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
					   (i * MSI_REG_CTRL_BLOCK_SIZE));
		if (!status)
			continue;

		ret = IRQ_HANDLED;
		val = status;
		pos = 0;
		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			generic_handle_domain_irq(pp->irq_domain,
						  (i * MAX_MSI_IRQS_PER_CTRL) +
						  pos);
			pos++;
		}
	}

	return ret;
}

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct dw_pcie_rp *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}

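/*
 * Compose the MSI message for a vector: the target address is the DMA
 * address programmed into the controller (pp->msi_data) and the payload
 * is simply the hardware IRQ number.
 */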
static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}

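/*
 * The bottom irq_chip callbacks below operate on the per-controller MSI
 * mask/status registers in the DBI space. Each controller serves
 * MAX_MSI_IRQS_PER_CTRL vectors, so the hwirq number is split into a
 * controller index and a bit position within that controller.
 */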
static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};

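/*
 * Allocate a contiguous, naturally aligned block of vectors from the
 * msi_irq_in_use bitmap (as required for Multi-MSI) and bind each vector
 * to the bottom irq_chip with edge-triggered handling.
 */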
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct dw_pcie_rp *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct dw_pcie_rp *pp = domain->host_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc = dw_pcie_irq_domain_alloc,
	.free = dw_pcie_irq_domain_free,
};

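/*
 * Create the two-level IRQ domain hierarchy used for MSIs: a linear
 * domain for the native controller vectors with a PCI MSI domain
 * layered on top of it.
 */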
int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
						  &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}

static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
	u32 ctrl;

	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
							 NULL, NULL);
	}

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);
}

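/*
 * Program the MSI target address into the controller so that inbound MSI
 * writes from downstream devices are terminated there. This is a no-op
 * when MSIs are disabled or when the native MSI controller is not used.
 */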
static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target = (u64)pp->msi_data;

	if (!pci_msi_enabled() || !pp->has_msi_ctrl)
		return;

	/* Program the msi_data */
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}

static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u32 ctrl, max_vectors;
	int irq;

	/* Parse any "msiX" IRQs described in the devicetree */
	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
		char msi_name[] = "msiX";

		msi_name[3] = '0' + ctrl;
		irq = platform_get_irq_byname_optional(pdev, msi_name);
		if (irq == -ENXIO)
			break;
		if (irq < 0)
			return dev_err_probe(dev, irq,
					     "Failed to parse MSI IRQ '%s'\n",
					     msi_name);

		pp->msi_irq[ctrl] = irq;
	}

	/* If there are no "msiX" IRQs, the caller should fall back to the "msi" IRQ */
	if (ctrl == 0)
		return -ENXIO;

	max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
	if (pp->num_vectors > max_vectors) {
		dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
			 max_vectors);
		pp->num_vectors = max_vectors;
	}
	if (!pp->num_vectors)
		pp->num_vectors = max_vectors;

	return 0;
}

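/*
 * Set up the native MSI controller: discover the "msi"/"msiX" interrupts,
 * create the IRQ domains, install the chained handler(s) and allocate a
 * DMA-coherent target address for MSI writes, preferring one below 4GB.
 */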
static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u64 *msi_vaddr = NULL;
	int ret;
	u32 ctrl, num_ctrls;

	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
		pp->irq_mask[ctrl] = ~0;

	if (!pp->msi_irq[0]) {
		ret = dw_pcie_parse_split_msi_irq(pp);
		if (ret < 0 && ret != -ENXIO)
			return ret;
	}

	if (!pp->num_vectors)
		pp->num_vectors = MSI_DEF_NUM_VECTORS;
	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	if (!pp->msi_irq[0]) {
		pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
		if (pp->msi_irq[0] < 0) {
			pp->msi_irq[0] = platform_get_irq(pdev, 0);
			if (pp->msi_irq[0] < 0)
				return pp->msi_irq[0];
		}
	}

	dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);

	pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

	ret = dw_pcie_allocate_domains(pp);
	if (ret)
		return ret;

	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
							 dw_chained_msi_isr, pp);
	}

	/*
	 * Even though the iMSI-RX Module supports 64-bit addresses, some
	 * peripheral PCIe devices may lack 64-bit message support. In
	 * order not to miss MSI TLPs from those devices the MSI target
	 * address has to be within the lowest 4GB.
	 *
	 * Note until there is a better alternative found the reservation is
	 * done by allocating from the artificially limited DMA-coherent
	 * memory.
	 */
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (!ret)
		msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
						GFP_KERNEL);

	if (!msi_vaddr) {
		dev_warn(dev, "Failed to allocate 32-bit MSI address\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
		msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
						GFP_KERNEL);
		if (!msi_vaddr) {
			dev_err(dev, "Failed to allocate MSI address\n");
			dw_pcie_free_msi(pp);
			return -ENOMEM;
		}
	}

	return 0;
}

static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct resource_entry *win;
	struct resource *res;

	win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
	if (win) {
		res = devm_kzalloc(pci->dev, sizeof(*res), GFP_KERNEL);
		if (!res)
			return;

		/*
		 * Allocate MSG TLP region of size 'region_align' at the end of
		 * the host bridge window.
		 */
		res->start = win->res->end - pci->region_align + 1;
		res->end = win->res->end;
		res->name = "msg";
		res->flags = win->res->flags | IORESOURCE_BUSY;

		if (!devm_request_resource(pci->dev, win->res, res))
			pp->msg_res = res;
	}
}

int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win;
	struct pci_host_bridge *bridge;
	struct resource *res;
	int ret;

	raw_spin_lock_init(&pp->lock);

	ret = dw_pcie_get_resources(pci);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (res) {
		pp->cfg0_size = resource_size(res);
		pp->cfg0_base = res->start;

		pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
		if (IS_ERR(pp->va_cfg0_base))
			return PTR_ERR(pp->va_cfg0_base);
	} else {
		dev_err(dev, "Missing *config* reg space\n");
		return -ENODEV;
	}

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	pp->bridge = bridge;

	/* Get the I/O range from DT */
	win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
	if (win) {
		pp->io_size = resource_size(win->res);
		pp->io_bus_addr = win->res->start - win->offset;
		pp->io_base = pci_pio_to_address(win->res->start);
	}

	/* Set default bus ops */
	bridge->ops = &dw_pcie_ops;
	bridge->child_ops = &dw_child_pcie_ops;

	if (pp->ops->init) {
		ret = pp->ops->init(pp);
		if (ret)
			return ret;
	}

	if (pci_msi_enabled()) {
		pp->has_msi_ctrl = !(pp->ops->msi_init ||
				     of_property_read_bool(np, "msi-parent") ||
				     of_property_read_bool(np, "msi-map"));

		/*
		 * For the has_msi_ctrl case the default assignment is handled
		 * in the dw_pcie_msi_host_init().
		 */
		if (!pp->has_msi_ctrl && !pp->num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else if (pp->num_vectors > MAX_MSI_IRQS) {
			dev_err(dev, "Invalid number of vectors\n");
			ret = -EINVAL;
			goto err_deinit_host;
		}

		if (pp->ops->msi_init) {
			ret = pp->ops->msi_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		} else if (pp->has_msi_ctrl) {
			ret = dw_pcie_msi_host_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		}
	}

	dw_pcie_version_detect(pci);

	dw_pcie_iatu_detect(pci);

	/*
	 * Allocate the resource for MSG TLP before programming the iATU
	 * outbound window in dw_pcie_setup_rc(). Since the allocation depends
	 * on the value of 'region_align', this has to be done after
	 * dw_pcie_iatu_detect().
	 *
	 * Glue drivers need to set 'use_atu_msg' before dw_pcie_host_init() to
	 * make use of the generic MSG TLP implementation.
	 */
	if (pp->use_atu_msg)
		dw_pcie_host_request_msg_tlp_res(pp);

	ret = dw_pcie_edma_detect(pci);
	if (ret)
		goto err_free_msi;

	ret = dw_pcie_setup_rc(pp);
	if (ret)
		goto err_remove_edma;

	if (!dw_pcie_link_up(pci)) {
		ret = dw_pcie_start_link(pci);
		if (ret)
			goto err_remove_edma;
	}

	/* Ignore errors, the link may come up later */
	dw_pcie_wait_for_link(pci);

	bridge->sysdata = pp;

	ret = pci_host_probe(bridge);
	if (ret)
		goto err_stop_link;

	if (pp->ops->post_init)
		pp->ops->post_init(pp);

	return 0;

err_stop_link:
	dw_pcie_stop_link(pci);

err_remove_edma:
	dw_pcie_edma_remove(pci);

err_free_msi:
	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

err_deinit_host:
	if (pp->ops->deinit)
		pp->ops->deinit(pp);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);

void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	pci_stop_root_bus(pp->bridge->bus);
	pci_remove_root_bus(pp->bridge->bus);

	dw_pcie_stop_link(pci);

	dw_pcie_edma_remove(pci);

	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

	if (pp->ops->deinit)
		pp->ops->deinit(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);

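/*
 * Config accesses to devices below the root bus go through the CFG0/CFG1
 * outbound iATU window: map_bus() retargets the window at the requested
 * bus/device/function and returns the matching offset into the 'config'
 * space mapping.
 */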
static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	int type, ret;
	u32 busdev;

	/*
	 * Checking whether the link is up here is a last line of defense
	 * against platforms that forward errors on the system bus as
	 * SError upon PCI configuration transactions issued when the link
	 * is down. This check is racy by definition and does not stop
	 * the system from triggering an SError if the link goes down
	 * after this check is performed.
	 */
	if (!dw_pcie_link_up(pci))
		return NULL;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (pci_is_root_bus(bus->parent))
		type = PCIE_ATU_TYPE_CFG0;
	else
		type = PCIE_ATU_TYPE_CFG1;

	atu.type = type;
	atu.cpu_addr = pp->cfg0_base;
	atu.pci_addr = busdev;
	atu.size = pp->cfg0_size;

	ret = dw_pcie_prog_outbound_atu(pci, &atu);
	if (ret)
		return NULL;

	return pp->va_cfg0_base + where;
}

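/*
 * After a child config access, the read/write accessors below restore the
 * outbound window to I/O mode when it is shared with the I/O space
 * (cfg0_io_shared), since map_bus() retargeted it for the config cycle.
 */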
static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 *val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	int ret;

	ret = pci_generic_config_read(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	if (pp->cfg0_io_shared) {
		atu.type = PCIE_ATU_TYPE_IO;
		atu.cpu_addr = pp->io_base;
		atu.pci_addr = pp->io_bus_addr;
		atu.size = pp->io_size;

		ret = dw_pcie_prog_outbound_atu(pci, &atu);
		if (ret)
			return PCIBIOS_SET_FAILED;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	int ret;

	ret = pci_generic_config_write(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	if (pp->cfg0_io_shared) {
		atu.type = PCIE_ATU_TYPE_IO;
		atu.cpu_addr = pp->io_base;
		atu.pci_addr = pp->io_bus_addr;
		atu.size = pp->io_size;

		ret = dw_pcie_prog_outbound_atu(pci, &atu);
		if (ret)
			return PCIBIOS_SET_FAILED;
	}

	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops dw_child_pcie_ops = {
	.map_bus = dw_pcie_other_conf_map_bus,
	.read = dw_pcie_rd_other_conf,
	.write = dw_pcie_wr_other_conf,
};

void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (PCI_SLOT(devfn) > 0)
		return NULL;

	return pci->dbi_base + where;
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);

static struct pci_ops dw_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

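/*
 * Disable all iATU windows, then program the outbound windows for the
 * bridge MEM/IO ranges and the inbound windows for any dma-ranges. The
 * very first outbound window is left for config accesses; if the windows
 * run out, the I/O range falls back to sharing the CFG0 window.
 */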
static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	struct resource_entry *entry;
	int i, ret;

	/* Note the very first outbound ATU is used for CFG IOs */
	if (!pci->num_ob_windows) {
		dev_err(pci->dev, "No outbound iATU found\n");
		return -EINVAL;
	}

	/*
	 * Ensure all out/inbound windows are disabled before proceeding with
	 * the MEM/IO (dma-)ranges setups.
	 */
	for (i = 0; i < pci->num_ob_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);

	for (i = 0; i < pci->num_ib_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i);

	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->windows) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ob_windows <= ++i)
			break;

		atu.index = i;
		atu.type = PCIE_ATU_TYPE_MEM;
		atu.cpu_addr = entry->res->start;
		atu.pci_addr = entry->res->start - entry->offset;

		/* Adjust iATU size if MSG TLP region was allocated before */
		if (pp->msg_res && pp->msg_res->parent == entry->res)
			atu.size = resource_size(entry->res) -
				   resource_size(pp->msg_res);
		else
			atu.size = resource_size(entry->res);

		ret = dw_pcie_prog_outbound_atu(pci, &atu);
		if (ret) {
			dev_err(pci->dev, "Failed to set MEM range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pp->io_size) {
		if (pci->num_ob_windows > ++i) {
			atu.index = i;
			atu.type = PCIE_ATU_TYPE_IO;
			atu.cpu_addr = pp->io_base;
			atu.pci_addr = pp->io_bus_addr;
			atu.size = pp->io_size;

			ret = dw_pcie_prog_outbound_atu(pci, &atu);
			if (ret) {
				dev_err(pci->dev, "Failed to set IO range %pr\n",
					entry->res);
				return ret;
			}
		} else {
			pp->cfg0_io_shared = true;
		}
	}

	if (pci->num_ob_windows <= i)
		dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
			 pci->num_ob_windows);

	pp->msg_atu_index = i;

	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ib_windows <= i)
			break;

		ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM,
					       entry->res->start,
					       entry->res->start - entry->offset,
					       resource_size(entry->res));
		if (ret) {
			dev_err(pci->dev, "Failed to set DMA range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pci->num_ib_windows <= i)
		dev_warn(pci->dev, "Dma-ranges exceed inbound iATU size (%u)\n",
			 pci->num_ib_windows);

	return 0;
}

int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u32 val, ctrl, num_ctrls;
	int ret;

	/*
	 * Enable DBI read-only registers for writing/updating configuration.
	 * Write permission gets disabled towards the end of this function.
	 */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_setup(pci);

	if (pp->has_msi_ctrl) {
		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

		/* Initialize IRQ Status array */
		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
					   (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					   pp->irq_mask[ctrl]);
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
					   (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					   ~0);
		}
	}

	dw_pcie_msi_init(pp);

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
	       PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides its own child bus config accesses, it means
	 * the platform uses its own address translation component rather than
	 * ATU, so we should not program the ATU here.
	 */
	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
		ret = dw_pcie_iatu_setup(pp);
		if (ret)
			return ret;
	}

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Program correct class for RC */
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);

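/*
 * Send a PME_Turn_Off message by retargeting the reserved MSG TLP iATU
 * window: a dummy write to the remapped region is converted by the
 * controller into the broadcast Msg TLP.
 */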
static int dw_pcie_pme_turn_off(struct dw_pcie *pci)
{
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	void __iomem *mem;
	int ret;

	if (pci->num_ob_windows <= pci->pp.msg_atu_index)
		return -ENOSPC;

	if (!pci->pp.msg_res)
		return -ENOSPC;

	atu.code = PCIE_MSG_CODE_PME_TURN_OFF;
	atu.routing = PCIE_MSG_TYPE_R_BC;
	atu.type = PCIE_ATU_TYPE_MSG;
	atu.size = resource_size(pci->pp.msg_res);
	atu.index = pci->pp.msg_atu_index;

	atu.cpu_addr = pci->pp.msg_res->start;

	ret = dw_pcie_prog_outbound_atu(pci, &atu);
	if (ret)
		return ret;

	mem = ioremap(atu.cpu_addr, pci->region_align);
	if (!mem)
		return -ENOMEM;

	/* A dummy write is converted to a Msg TLP */
	writel(0, mem);

	iounmap(mem);

	return 0;
}

int dw_pcie_suspend_noirq(struct dw_pcie *pci)
{
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int ret = 0;

	/*
	 * If L1SS is supported, then do not put the link into L2 as some
	 * devices such as NVMe expect low resume latency.
	 */
	if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
		return 0;

	if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT)
		return 0;

	if (pci->pp.ops->pme_turn_off)
		pci->pp.ops->pme_turn_off(&pci->pp);
	else
		ret = dw_pcie_pme_turn_off(pci);

	if (ret)
		return ret;

	ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE,
				PCIE_PME_TO_L2_TIMEOUT_US/10,
				PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
	if (ret) {
		dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
		return ret;
	}

	if (pci->pp.ops->deinit)
		pci->pp.ops->deinit(&pci->pp);

	pci->suspended = true;

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_suspend_noirq);

int dw_pcie_resume_noirq(struct dw_pcie *pci)
{
	int ret;

	if (!pci->suspended)
		return 0;

	pci->suspended = false;

	if (pci->pp.ops->init) {
		ret = pci->pp.ops->init(&pci->pp);
		if (ret) {
			dev_err(pci->dev, "Host init failed: %d\n", ret);
			return ret;
		}
	}

	dw_pcie_setup_rc(&pci->pp);

	ret = dw_pcie_start_link(pci);
	if (ret)
		return ret;

	ret = dw_pcie_wait_for_link(pci);
	if (ret)
		return ret;

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_resume_noirq);