// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_child_pcie_ops;

static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &dw_pcie_msi_irq_chip,
};

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
	int i, pos;
	unsigned long val;
	u32 status, num_ctrls;
	irqreturn_t ret = IRQ_NONE;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
					   (i * MSI_REG_CTRL_BLOCK_SIZE));
		if (!status)
			continue;

		ret = IRQ_HANDLED;
		val = status;
		pos = 0;
		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			generic_handle_domain_irq(pp->irq_domain,
						  (i * MAX_MSI_IRQS_PER_CTRL) +
						  pos);
			pos++;
		}
	}

	return ret;
}
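
/*
 * Each MSI controller above covers MAX_MSI_IRQS_PER_CTRL (32) vectors and
 * has its own status register at PCIE_MSI_INTR0_STATUS plus a
 * MSI_REG_CTRL_BLOCK_SIZE stride. The hwirq handed to the IRQ domain is
 * therefore ctrl * 32 + bit; for example, bit 3 pending in controller 1
 * resolves to hwirq 35.
 */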

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct dw_pcie_rp *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}

static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}

static int dw_pci_msi_set_affinity(struct irq_data *d,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};

static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct dw_pcie_rp *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct dw_pcie_rp *pp = domain->host_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};

int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
					       &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}
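
/*
 * dw_pcie_allocate_domains() builds a two-level hierarchy: pp->irq_domain
 * is the controller-level domain backed by dw_pcie_msi_domain_ops and
 * pp->msi_irq_chip, while pp->msi_domain is the generic PCI/MSI layer that
 * pci_msi_create_irq_domain() stacks on top of it and from which endpoint
 * drivers ultimately allocate their vectors.
 */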

static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
	u32 ctrl;

	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
							 NULL, NULL);
	}

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);
}

static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target = (u64)pp->msi_data;

	if (!pci_msi_enabled() || !pp->has_msi_ctrl)
		return;

	/* Program the msi_data */
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}

static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u32 ctrl, max_vectors;
	int irq;

	/* Parse any "msiX" IRQs described in the devicetree */
	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
		char msi_name[] = "msiX";

		msi_name[3] = '0' + ctrl;
		irq = platform_get_irq_byname_optional(pdev, msi_name);
		if (irq == -ENXIO)
			break;
		if (irq < 0)
			return dev_err_probe(dev, irq,
					     "Failed to parse MSI IRQ '%s'\n",
					     msi_name);

		pp->msi_irq[ctrl] = irq;
	}

	/* If no "msiX" IRQs, the caller should fall back to the "msi" IRQ */
	if (ctrl == 0)
		return -ENXIO;

	max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
	if (pp->num_vectors > max_vectors) {
		dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
			 max_vectors);
		pp->num_vectors = max_vectors;
	}
	if (!pp->num_vectors)
		pp->num_vectors = max_vectors;

	return 0;
}
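
/*
 * Each "msiN" interrupt name corresponds to one MSI controller, i.e. to
 * MAX_MSI_IRQS_PER_CTRL (32) vectors; a devicetree describing "msi0" and
 * "msi1" therefore yields max_vectors = 64. When none of the "msiN" names
 * are present, the -ENXIO return tells the caller to fall back to the
 * single "msi" interrupt.
 */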

static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u64 *msi_vaddr;
	int ret;
	u32 ctrl, num_ctrls;

	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
		pp->irq_mask[ctrl] = ~0;

	if (!pp->msi_irq[0]) {
		ret = dw_pcie_parse_split_msi_irq(pp);
		if (ret < 0 && ret != -ENXIO)
			return ret;
	}

	if (!pp->num_vectors)
		pp->num_vectors = MSI_DEF_NUM_VECTORS;
	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	if (!pp->msi_irq[0]) {
		pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
		if (pp->msi_irq[0] < 0) {
			pp->msi_irq[0] = platform_get_irq(pdev, 0);
			if (pp->msi_irq[0] < 0)
				return pp->msi_irq[0];
		}
	}

	dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);

	pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

	ret = dw_pcie_allocate_domains(pp);
	if (ret)
		return ret;

	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
						    dw_chained_msi_isr, pp);
	}

	/*
	 * Even though the iMSI-RX module supports 64-bit addresses, some
	 * peripheral PCIe devices may lack 64-bit message support. In
	 * order not to miss MSI TLPs from those devices, the MSI target
	 * address has to be within the lowest 4GB.
	 *
	 * Note: until a better alternative is found, the reservation is
	 * done by allocating from the artificially limited DMA-coherent
	 * memory.
	 */
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (ret)
		dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");

	msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
					GFP_KERNEL);
	if (!msi_vaddr) {
		dev_err(dev, "Failed to alloc and map MSI data\n");
		dw_pcie_free_msi(pp);
		return -ENOMEM;
	}

	return 0;
}

int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win;
	struct pci_host_bridge *bridge;
	struct resource *res;
	int ret;

	raw_spin_lock_init(&pp->lock);

	ret = dw_pcie_get_resources(pci);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (res) {
		pp->cfg0_size = resource_size(res);
		pp->cfg0_base = res->start;

		pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
		if (IS_ERR(pp->va_cfg0_base))
			return PTR_ERR(pp->va_cfg0_base);
	} else {
		dev_err(dev, "Missing *config* reg space\n");
		return -ENODEV;
	}

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	pp->bridge = bridge;

	/* Get the I/O range from DT */
	win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
	if (win) {
		pp->io_size = resource_size(win->res);
		pp->io_bus_addr = win->res->start - win->offset;
		pp->io_base = pci_pio_to_address(win->res->start);
	}

	/* Set default bus ops */
	bridge->ops = &dw_pcie_ops;
	bridge->child_ops = &dw_child_pcie_ops;

	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			return ret;
	}

	if (pci_msi_enabled()) {
		pp->has_msi_ctrl = !(pp->ops->msi_host_init ||
				     of_property_read_bool(np, "msi-parent") ||
				     of_property_read_bool(np, "msi-map"));

		/*
		 * For the has_msi_ctrl case, the default assignment is
		 * handled in dw_pcie_msi_host_init().
		 */
		if (!pp->has_msi_ctrl && !pp->num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else if (pp->num_vectors > MAX_MSI_IRQS) {
			dev_err(dev, "Invalid number of vectors\n");
			ret = -EINVAL;
			goto err_deinit_host;
		}

		if (pp->ops->msi_host_init) {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		} else if (pp->has_msi_ctrl) {
			ret = dw_pcie_msi_host_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		}
	}

	dw_pcie_version_detect(pci);

	dw_pcie_iatu_detect(pci);

	ret = dw_pcie_edma_detect(pci);
	if (ret)
		goto err_free_msi;

	ret = dw_pcie_setup_rc(pp);
	if (ret)
		goto err_remove_edma;

	if (dw_pcie_link_up(pci)) {
		dw_pcie_print_link_status(pci);
	} else {
		ret = dw_pcie_start_link(pci);
		if (ret)
			goto err_remove_edma;

		if (pci->ops && pci->ops->start_link) {
			ret = dw_pcie_wait_for_link(pci);
			if (ret)
				goto err_stop_link;
		}
	}

	bridge->sysdata = pp;

	ret = pci_host_probe(bridge);
	if (ret)
		goto err_stop_link;

	return 0;

err_stop_link:
	dw_pcie_stop_link(pci);

err_remove_edma:
	dw_pcie_edma_remove(pci);

err_free_msi:
	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

err_deinit_host:
	if (pp->ops->host_deinit)
		pp->ops->host_deinit(pp);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);
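
/*
 * Illustrative sketch (not part of this driver): a platform "glue" driver
 * typically embeds struct dw_pcie, fills in the device pointer and its
 * dw_pcie_host_ops, and calls dw_pcie_host_init() from probe; the DBI
 * resources are then picked up via dw_pcie_get_resources() and the
 * "config" window via platform_get_resource_byname() above. The my_pcie_*
 * names below are hypothetical:
 *
 *	static int my_pcie_probe(struct platform_device *pdev)
 *	{
 *		struct dw_pcie *pci;
 *
 *		pci = devm_kzalloc(&pdev->dev, sizeof(*pci), GFP_KERNEL);
 *		if (!pci)
 *			return -ENOMEM;
 *
 *		pci->dev = &pdev->dev;
 *		pci->pp.ops = &my_pcie_host_ops;
 *
 *		return dw_pcie_host_init(&pci->pp);
 *	}
 */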

void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	pci_stop_root_bus(pp->bridge->bus);
	pci_remove_root_bus(pp->bridge->bus);

	dw_pcie_stop_link(pci);

	dw_pcie_edma_remove(pci);

	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

	if (pp->ops->host_deinit)
		pp->ops->host_deinit(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);

static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int type, ret;
	u32 busdev;

	/*
	 * Checking whether the link is up here is a last line of defense
	 * against platforms that forward errors on the system bus as
	 * SError upon PCI configuration transactions issued when the link
	 * is down. This check is racy by definition and does not stop
	 * the system from triggering an SError if the link goes down
	 * after this check is performed.
	 */
	if (!dw_pcie_link_up(pci))
		return NULL;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (pci_is_root_bus(bus->parent))
		type = PCIE_ATU_TYPE_CFG0;
	else
		type = PCIE_ATU_TYPE_CFG1;

	ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
					pp->cfg0_size);
	if (ret)
		return NULL;

	return pp->va_cfg0_base + where;
}
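
/*
 * Example of the routing above: an access to a device on the Root Port's
 * secondary bus (bus->parent is the root bus) goes out as a Type 0 (CFG0)
 * configuration request, anything further downstream as Type 1 (CFG1). In
 * both cases the single "config" window at pp->cfg0_base is re-programmed
 * on the fly with the target bus/device/function encoded via
 * PCIE_ATU_BUS/DEV/FUNC before the access is issued.
 */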

static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 *val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int ret;

	ret = pci_generic_config_read(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	if (pp->cfg0_io_shared) {
		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
						pp->io_base, pp->io_bus_addr,
						pp->io_size);
		if (ret)
			return PCIBIOS_SET_FAILED;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int ret;

	ret = pci_generic_config_write(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	if (pp->cfg0_io_shared) {
		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
						pp->io_base, pp->io_bus_addr,
						pp->io_size);
		if (ret)
			return PCIBIOS_SET_FAILED;
	}

	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops dw_child_pcie_ops = {
	.map_bus = dw_pcie_other_conf_map_bus,
	.read = dw_pcie_rd_other_conf,
	.write = dw_pcie_wr_other_conf,
};

void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (PCI_SLOT(devfn) > 0)
		return NULL;

	return pci->dbi_base + where;
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);

static struct pci_ops dw_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct resource_entry *entry;
	int i, ret;

	/* Note the very first outbound ATU is used for CFG IOs */
	if (!pci->num_ob_windows) {
		dev_err(pci->dev, "No outbound iATU found\n");
		return -EINVAL;
	}

	/*
	 * Ensure all out/inbound windows are disabled before proceeding with
	 * the MEM/IO (dma-)ranges setups.
	 */
	for (i = 0; i < pci->num_ob_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);

	for (i = 0; i < pci->num_ib_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i);

	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->windows) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ob_windows <= ++i)
			break;

		ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
						entry->res->start,
						entry->res->start - entry->offset,
						resource_size(entry->res));
		if (ret) {
			dev_err(pci->dev, "Failed to set MEM range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pp->io_size) {
		if (pci->num_ob_windows > ++i) {
			ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
							pp->io_base,
							pp->io_bus_addr,
							pp->io_size);
			if (ret) {
				dev_err(pci->dev, "Failed to set IO range %pr\n",
					entry->res);
				return ret;
			}
		} else {
			pp->cfg0_io_shared = true;
		}
	}

	if (pci->num_ob_windows <= i)
		dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
			 pci->num_ob_windows);

	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ib_windows <= i)
			break;

		ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM,
					       entry->res->start,
					       entry->res->start - entry->offset,
					       resource_size(entry->res));
		if (ret) {
			dev_err(pci->dev, "Failed to set DMA range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pci->num_ib_windows <= i)
		dev_warn(pci->dev, "Dma-ranges exceed inbound iATU size (%u)\n",
			 pci->num_ib_windows);

	return 0;
}
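
/*
 * Outbound window usage resulting from the setup above: window 0 stays
 * reserved for the config accesses programmed on the fly by
 * dw_pcie_other_conf_map_bus(), the following windows carry the bridge MEM
 * ranges, and the next free one (if any) carries the IO range. When no
 * window is left for IO, cfg0_io_shared is set and window 0 is time-shared
 * between config and IO accesses in dw_pcie_rd/wr_other_conf().
 */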

int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u32 val, ctrl, num_ctrls;
	int ret;

	/*
	 * Enable DBI read-only registers for writing/updating configuration.
	 * Write permission gets disabled towards the end of this function.
	 */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_setup(pci);

	if (pp->has_msi_ctrl) {
		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

		/* Initialize IRQ Status array */
		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    pp->irq_mask[ctrl]);
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    ~0);
		}
	}

	dw_pcie_msi_init(pp);

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides its own child bus config accesses, it means
	 * the platform uses its own address translation component rather than
	 * ATU, so we should not program the ATU here.
	 */
	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
		ret = dw_pcie_iatu_setup(pp);
		if (ret)
			return ret;
	}

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Program correct class for RC */
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);

int dw_pcie_suspend_noirq(struct dw_pcie *pci)
{
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int ret;

	/*
	 * If L1SS is supported, then do not put the link into L2 as some
	 * devices such as NVMe expect low resume latency.
	 */
	if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
		return 0;

	if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT)
		return 0;

	if (!pci->pp.ops->pme_turn_off)
		return 0;

	pci->pp.ops->pme_turn_off(&pci->pp);

	ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE,
				PCIE_PME_TO_L2_TIMEOUT_US/10,
				PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
	if (ret) {
		dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
		return ret;
	}

	if (pci->pp.ops->host_deinit)
		pci->pp.ops->host_deinit(&pci->pp);

	pci->suspended = true;

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_suspend_noirq);
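
/*
 * Illustrative sketch (not part of this driver): a glue driver would
 * normally wire dw_pcie_suspend_noirq() above and dw_pcie_resume_noirq()
 * below into the noirq phase of its dev_pm_ops, e.g. (the my_pcie_* names
 * and struct my_pcie are hypothetical):
 *
 *	static int my_pcie_suspend_noirq(struct device *dev)
 *	{
 *		struct my_pcie *priv = dev_get_drvdata(dev);
 *
 *		return dw_pcie_suspend_noirq(&priv->pci);
 *	}
 *
 *	static int my_pcie_resume_noirq(struct device *dev)
 *	{
 *		struct my_pcie *priv = dev_get_drvdata(dev);
 *
 *		return dw_pcie_resume_noirq(&priv->pci);
 *	}
 *
 *	static const struct dev_pm_ops my_pcie_pm_ops = {
 *		NOIRQ_SYSTEM_SLEEP_PM_OPS(my_pcie_suspend_noirq,
 *					  my_pcie_resume_noirq)
 *	};
 */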

int dw_pcie_resume_noirq(struct dw_pcie *pci)
{
	int ret;

	if (!pci->suspended)
		return 0;

	pci->suspended = false;

	if (pci->pp.ops->host_init) {
		ret = pci->pp.ops->host_init(&pci->pp);
		if (ret) {
			dev_err(pci->dev, "Host init failed: %d\n", ret);
			return ret;
		}
	}

	dw_pcie_setup_rc(&pci->pp);

	ret = dw_pcie_start_link(pci);
	if (ret)
		return ret;

	ret = dw_pcie_wait_for_link(pci);
	if (ret)
		return ret;

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_resume_noirq);
886