xref: /linux/drivers/pci/controller/pci-tegra.c (revision 4e95bc268b915c3a19ec8b9110f61e4ea41a1ed0)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * PCIe host controller driver for Tegra SoCs
4  *
5  * Copyright (c) 2010, CompuLab, Ltd.
6  * Author: Mike Rapoport <mike@compulab.co.il>
7  *
8  * Based on NVIDIA PCIe driver
9  * Copyright (c) 2008-2009, NVIDIA Corporation.
10  *
11  * Bits taken from arch/arm/mach-dove/pcie.c
12  *
13  * Author: Thierry Reding <treding@nvidia.com>
14  */
15 
16 #include <linux/clk.h>
17 #include <linux/debugfs.h>
18 #include <linux/delay.h>
19 #include <linux/export.h>
20 #include <linux/interrupt.h>
21 #include <linux/iopoll.h>
22 #include <linux/irq.h>
23 #include <linux/irqdomain.h>
24 #include <linux/kernel.h>
25 #include <linux/init.h>
26 #include <linux/module.h>
27 #include <linux/msi.h>
28 #include <linux/of_address.h>
29 #include <linux/of_pci.h>
30 #include <linux/of_platform.h>
31 #include <linux/pci.h>
32 #include <linux/phy/phy.h>
33 #include <linux/platform_device.h>
34 #include <linux/reset.h>
35 #include <linux/sizes.h>
36 #include <linux/slab.h>
37 #include <linux/vmalloc.h>
38 #include <linux/regulator/consumer.h>
39 
40 #include <soc/tegra/cpuidle.h>
41 #include <soc/tegra/pmc.h>
42 
43 #include "../pci.h"
44 
45 #define INT_PCI_MSI_NR (8 * 32)
46 
47 /* register definitions */
48 
49 #define AFI_AXI_BAR0_SZ	0x00
50 #define AFI_AXI_BAR1_SZ	0x04
51 #define AFI_AXI_BAR2_SZ	0x08
52 #define AFI_AXI_BAR3_SZ	0x0c
53 #define AFI_AXI_BAR4_SZ	0x10
54 #define AFI_AXI_BAR5_SZ	0x14
55 
56 #define AFI_AXI_BAR0_START	0x18
57 #define AFI_AXI_BAR1_START	0x1c
58 #define AFI_AXI_BAR2_START	0x20
59 #define AFI_AXI_BAR3_START	0x24
60 #define AFI_AXI_BAR4_START	0x28
61 #define AFI_AXI_BAR5_START	0x2c
62 
63 #define AFI_FPCI_BAR0	0x30
64 #define AFI_FPCI_BAR1	0x34
65 #define AFI_FPCI_BAR2	0x38
66 #define AFI_FPCI_BAR3	0x3c
67 #define AFI_FPCI_BAR4	0x40
68 #define AFI_FPCI_BAR5	0x44
69 
70 #define AFI_CACHE_BAR0_SZ	0x48
71 #define AFI_CACHE_BAR0_ST	0x4c
72 #define AFI_CACHE_BAR1_SZ	0x50
73 #define AFI_CACHE_BAR1_ST	0x54
74 
75 #define AFI_MSI_BAR_SZ		0x60
76 #define AFI_MSI_FPCI_BAR_ST	0x64
77 #define AFI_MSI_AXI_BAR_ST	0x68
78 
79 #define AFI_MSI_VEC0		0x6c
80 #define AFI_MSI_VEC1		0x70
81 #define AFI_MSI_VEC2		0x74
82 #define AFI_MSI_VEC3		0x78
83 #define AFI_MSI_VEC4		0x7c
84 #define AFI_MSI_VEC5		0x80
85 #define AFI_MSI_VEC6		0x84
86 #define AFI_MSI_VEC7		0x88
87 
88 #define AFI_MSI_EN_VEC0		0x8c
89 #define AFI_MSI_EN_VEC1		0x90
90 #define AFI_MSI_EN_VEC2		0x94
91 #define AFI_MSI_EN_VEC3		0x98
92 #define AFI_MSI_EN_VEC4		0x9c
93 #define AFI_MSI_EN_VEC5		0xa0
94 #define AFI_MSI_EN_VEC6		0xa4
95 #define AFI_MSI_EN_VEC7		0xa8
96 
97 #define AFI_CONFIGURATION		0xac
98 #define  AFI_CONFIGURATION_EN_FPCI	(1 << 0)
99 
100 #define AFI_FPCI_ERROR_MASKS	0xb0
101 
102 #define AFI_INTR_MASK		0xb4
103 #define  AFI_INTR_MASK_INT_MASK	(1 << 0)
104 #define  AFI_INTR_MASK_MSI_MASK	(1 << 8)
105 
106 #define AFI_INTR_CODE			0xb8
107 #define  AFI_INTR_CODE_MASK		0xf
108 #define  AFI_INTR_INI_SLAVE_ERROR	1
109 #define  AFI_INTR_INI_DECODE_ERROR	2
110 #define  AFI_INTR_TARGET_ABORT		3
111 #define  AFI_INTR_MASTER_ABORT		4
112 #define  AFI_INTR_INVALID_WRITE		5
113 #define  AFI_INTR_LEGACY		6
114 #define  AFI_INTR_FPCI_DECODE_ERROR	7
115 #define  AFI_INTR_AXI_DECODE_ERROR	8
116 #define  AFI_INTR_FPCI_TIMEOUT		9
117 #define  AFI_INTR_PE_PRSNT_SENSE	10
118 #define  AFI_INTR_PE_CLKREQ_SENSE	11
119 #define  AFI_INTR_CLKCLAMP_SENSE	12
120 #define  AFI_INTR_RDY4PD_SENSE		13
121 #define  AFI_INTR_P2P_ERROR		14
122 
123 #define AFI_INTR_SIGNATURE	0xbc
124 #define AFI_UPPER_FPCI_ADDRESS	0xc0
125 #define AFI_SM_INTR_ENABLE	0xc4
126 #define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
127 #define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
128 #define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
129 #define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
130 #define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
131 #define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
132 #define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
133 #define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)
134 
135 #define AFI_AFI_INTR_ENABLE		0xc8
136 #define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
137 #define  AFI_INTR_EN_INI_DECERR		(1 << 1)
138 #define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
139 #define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
140 #define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
141 #define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
142 #define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
143 #define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
144 #define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)
145 
146 #define AFI_PCIE_PME		0xf0
147 
148 #define AFI_PCIE_CONFIG					0x0f8
149 #define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
150 #define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
151 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
152 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
153 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
154 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
155 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401	(0x0 << 20)
156 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
157 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
158 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
159 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211	(0x1 << 20)
160 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)
161 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111	(0x2 << 20)
162 
163 #define AFI_FUSE			0x104
164 #define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)
165 
166 #define AFI_PEX0_CTRL			0x110
167 #define AFI_PEX1_CTRL			0x118
168 #define AFI_PEX2_CTRL			0x128
169 #define  AFI_PEX_CTRL_RST		(1 << 0)
170 #define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
171 #define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
172 #define  AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)
173 
174 #define AFI_PLLE_CONTROL		0x160
175 #define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
176 #define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
177 
178 #define AFI_PEXBIAS_CTRL_0		0x168
179 
180 #define RP_VEND_XP	0x00000f00
181 #define  RP_VEND_XP_DL_UP	(1 << 30)
182 
183 #define RP_VEND_CTL2 0x00000fa8
184 #define  RP_VEND_CTL2_PCA_ENABLE (1 << 7)
185 
186 #define RP_PRIV_MISC	0x00000fe0
187 #define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
188 #define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)
189 
190 #define RP_LINK_CONTROL_STATUS			0x00000090
191 #define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
192 #define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000
193 
194 #define PADS_CTL_SEL		0x0000009c
195 
196 #define PADS_CTL		0x000000a0
197 #define  PADS_CTL_IDDQ_1L	(1 << 0)
198 #define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
199 #define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)
200 
201 #define PADS_PLL_CTL_TEGRA20			0x000000b8
202 #define PADS_PLL_CTL_TEGRA30			0x000000b4
203 #define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
204 #define  PADS_PLL_CTL_LOCKDET			(1 << 8)
205 #define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
206 #define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
207 #define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
208 #define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
209 #define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
210 #define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
211 #define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
212 #define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)
213 
214 #define PADS_REFCLK_CFG0			0x000000c8
215 #define PADS_REFCLK_CFG1			0x000000cc
216 #define PADS_REFCLK_BIAS			0x000000d0
217 
218 /*
219  * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
220  * entries, one entry per PCIe port. These field definitions and desired
221  * values aren't in the TRM, but do come from NVIDIA.
222  */
223 #define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
224 #define PADS_REFCLK_CFG_E_TERM_SHIFT		7
225 #define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
226 #define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */
227 
228 #define PME_ACK_TIMEOUT 10000
229 
/*
 * Per-controller MSI state.  Up to INT_PCI_MSI_NR vectors are tracked in
 * the "used" bitmap; "virt"/"phys" are presumably the CPU/bus addresses of
 * the MSI target page (the programming is not visible in this chunk --
 * confirm against the MSI setup code).
 */
struct tegra_msi {
	struct msi_controller chip;
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);	/* allocated MSI vectors */
	struct irq_domain *domain;		/* IRQ domain for MSI vectors */
	struct mutex lock;			/* NOTE(review): likely protects "used" -- confirm */
	void *virt;				/* CPU address of MSI target page */
	dma_addr_t phys;			/* bus address of MSI target page */
	int irq;				/* parent interrupt for all MSIs */
};
239 
/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_port_soc {
	struct {
		/* bit positions for PME turnoff/ack; usage is outside this
		 * chunk (AFI_PCIE_PME handling) -- confirm against it */
		u8 turnoff_bit;
		u8 ack_bit;
	} pme;
};
247 
/*
 * Per-SoC configuration data, selected at probe time.
 */
struct tegra_pcie_soc {
	unsigned int num_ports;		/* number of root ports on this SoC */
	const struct tegra_pcie_port_soc *ports;
	unsigned int msi_base_shift;	/* not used in this chunk -- see MSI setup */
	u32 pads_pll_ctl;		/* offset of the PADS PLL control register */
	u32 tx_ref_sel;			/* PADS_PLL_CTL_TXCLKREF_* selection */
	u32 pads_refclk_cfg0;		/* value written to PADS_REFCLK_CFG0 */
	u32 pads_refclk_cfg1;		/* written to PADS_REFCLK_CFG1 when num_ports > 2 */
	bool has_pex_clkreq_en;		/* AFI_PEX_CTRL_CLKREQ_EN is supported */
	bool has_pex_bias_ctrl;		/* AFI_PEXBIAS_CTRL_0 register exists */
	bool has_intr_prsnt_sense;	/* enable AFI_INTR_EN_PRSNT_SENSE */
	bool has_cml_clk;		/* SoC requires the CML clock */
	bool has_gen2;			/* Gen2 allowed (clears AFI_FUSE_PCIE_T0_GEN2_DIS) */
	bool force_pca_enable;		/* set RP_VEND_CTL2_PCA_ENABLE at port enable */
	bool program_uphy;		/* driver must power the PHYs on/off itself */
};
264 
/* Convert an msi_controller pointer back to its enclosing tegra_msi. */
static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
{
	return container_of(chip, struct tegra_msi, chip);
}
269 
/*
 * Driver state for one Tegra PCIe controller instance.
 */
struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;	/* PADS (PHY control) register block */
	void __iomem *afi;	/* AFI (AXI-to-FPCI bridge) register block */
	void __iomem *cfg;	/* 4 KiB configuration space window */
	int irq;		/* controller interrupt; also INTx fallback */

	struct resource cs;		/* configuration space aperture */
	struct resource io;		/* physical I/O aperture */
	struct resource pio;		/* logical I/O window remapped onto io */
	struct resource mem;		/* non-prefetchable memory aperture */
	struct resource prefetch;	/* prefetchable memory aperture */
	struct resource busn;		/* bus number range */

	struct {
		resource_size_t mem;	/* CPU-to-PCI offset for memory windows */
		resource_size_t io;	/* CPU-to-PCI offset for the I/O window */
	} offset;

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;	/* only present when soc->has_cml_clk */

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	bool legacy_phy;	/* use the single legacy PHY path */
	struct phy *phy;	/* legacy PHY handle; may be NULL (PADS fallback) */

	struct tegra_msi msi;

	struct list_head ports;	/* list of struct tegra_pcie_port */
	u32 xbar_config;	/* lane crossbar value OR'ed into AFI_PCIE_CONFIG */

	struct regulator_bulk_data *supplies;
	unsigned int num_supplies;

	const struct tegra_pcie_soc *soc;
	struct dentry *debugfs;
};
313 
/*
 * State for one root port of the controller.
 */
struct tegra_pcie_port {
	struct tegra_pcie *pcie;	/* owning controller */
	struct device_node *np;		/* port device tree node */
	struct list_head list;		/* entry in pcie->ports */
	struct resource regs;		/* root port register aperture */
	void __iomem *base;		/* mapped root port registers */
	unsigned int index;		/* port number; also selects AFI_PEXn_CTRL */
	unsigned int lanes;		/* number of lanes (one PHY each) */

	struct phy **phys;		/* per-lane PHYs, port->lanes entries */
};
325 
/* Bookkeeping entry for a bus; "nr" is presumably the bus number (usage
 * is outside this chunk -- confirm). */
struct tegra_pcie_bus {
	struct list_head list;
	unsigned int nr;
};
330 
/* Write a 32-bit value to an AFI register. */
static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}
336 
/* Read a 32-bit value from an AFI register. */
static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}
341 
/* Write a 32-bit value to a PADS register. */
static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}
347 
/* Read a 32-bit value from a PADS register. */
static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}
352 
353 /*
354  * The configuration space mapping on Tegra is somewhat similar to the ECAM
355  * defined by PCIe. However it deviates a bit in how the 4 bits for extended
356  * register accesses are mapped:
357  *
358  *    [27:24] extended register number
359  *    [23:16] bus number
360  *    [15:11] device number
361  *    [10: 8] function number
362  *    [ 7: 0] register number
363  *
364  * Mapping the whole extended configuration space would require 256 MiB of
365  * virtual address space, only a small part of which will actually be used.
366  *
367  * To work around this, a 4 KiB region is used to generate the required
368  * configuration transaction with relevant B:D:F and register offset values.
369  * This is achieved by dynamically programming base address and size of
370  * AFI_AXI_BAR used for end point config space mapping to make sure that the
371  * address (access to which generates correct config transaction) falls in
372  * this 4 KiB region.
373  */
374 static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
375 					   unsigned int where)
376 {
377 	return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
378 	       (PCI_FUNC(devfn) << 8) | (where & 0xff);
379 }
380 
381 static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
382 					unsigned int devfn,
383 					int where)
384 {
385 	struct tegra_pcie *pcie = bus->sysdata;
386 	void __iomem *addr = NULL;
387 
388 	if (bus->number == 0) {
389 		unsigned int slot = PCI_SLOT(devfn);
390 		struct tegra_pcie_port *port;
391 
392 		list_for_each_entry(port, &pcie->ports, list) {
393 			if (port->index + 1 == slot) {
394 				addr = port->base + (where & ~3);
395 				break;
396 			}
397 		}
398 	} else {
399 		unsigned int offset;
400 		u32 base;
401 
402 		offset = tegra_pcie_conf_offset(bus->number, devfn, where);
403 
404 		/* move 4 KiB window to offset within the FPCI region */
405 		base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
406 		afi_writel(pcie, base, AFI_FPCI_BAR0);
407 
408 		/* move to correct offset within the 4 KiB page */
409 		addr = pcie->cfg + (offset & (SZ_4K - 1));
410 	}
411 
412 	return addr;
413 }
414 
415 static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
416 				  int where, int size, u32 *value)
417 {
418 	if (bus->number == 0)
419 		return pci_generic_config_read32(bus, devfn, where, size,
420 						 value);
421 
422 	return pci_generic_config_read(bus, devfn, where, size, value);
423 }
424 
425 static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
426 				   int where, int size, u32 value)
427 {
428 	if (bus->number == 0)
429 		return pci_generic_config_write32(bus, devfn, where, size,
430 						  value);
431 
432 	return pci_generic_config_write(bus, devfn, where, size, value);
433 }
434 
/* Configuration space accessors wired into the PCI core. */
static struct pci_ops tegra_pcie_ops = {
	.map_bus = tegra_pcie_map_bus,
	.read = tegra_pcie_config_read,
	.write = tegra_pcie_config_write,
};
440 
441 static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
442 {
443 	unsigned long ret = 0;
444 
445 	switch (port->index) {
446 	case 0:
447 		ret = AFI_PEX0_CTRL;
448 		break;
449 
450 	case 1:
451 		ret = AFI_PEX1_CTRL;
452 		break;
453 
454 	case 2:
455 		ret = AFI_PEX2_CTRL;
456 		break;
457 	}
458 
459 	return ret;
460 }
461 
/*
 * Pulse the port's fundamental reset: AFI_PEX_CTRL_RST is cleared to
 * assert reset (active low, see tegra_pcie_port_disable()), held for
 * 1-2 ms, then set again to release it.
 */
static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	usleep_range(1000, 2000);

	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);
}
478 
/*
 * Enable a root port: turn on its reference clock (and CLKREQ where
 * supported), pulse its reset, and optionally force PCA on.
 */
static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	value |= AFI_PEX_CTRL_OVERRIDE_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);

	/* some SoCs need the performance counter analysis logic forced on */
	if (soc->force_pca_enable) {
		value = readl(port->base + RP_VEND_CTL2);
		value |= RP_VEND_CTL2_PCA_ENABLE;
		writel(value, port->base + RP_VEND_CTL2);
	}
}
504 
/*
 * Disable a root port: hold it in reset, then gate its reference clock
 * (reverse order of tegra_pcie_port_enable()).
 */
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);

	if (soc->has_pex_clkreq_en)
		value &= ~AFI_PEX_CTRL_CLKREQ_EN;

	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);
}
525 
/*
 * Release a port's devm-managed resources explicitly -- presumably so a
 * port can be torn down before the device itself goes away (confirm
 * against the callers) -- and unlink it from the controller's port list.
 */
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	devm_release_mem_region(dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(dev, port);
}
537 
538 /* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	/* force the class code to PCI-to-PCI bridge */
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
/* applied to the NVIDIA root port device IDs below */
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
547 
548 /* Tegra PCIE requires relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	/* set the Relaxed Ordering enable bit in the Device Control register */
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
554 
555 static int tegra_pcie_request_resources(struct tegra_pcie *pcie)
556 {
557 	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
558 	struct list_head *windows = &host->windows;
559 	struct device *dev = pcie->dev;
560 	int err;
561 
562 	pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
563 	pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
564 	pci_add_resource_offset(windows, &pcie->prefetch, pcie->offset.mem);
565 	pci_add_resource(windows, &pcie->busn);
566 
567 	err = devm_request_pci_bus_resources(dev, windows);
568 	if (err < 0) {
569 		pci_free_resource_list(windows);
570 		return err;
571 	}
572 
573 	pci_remap_iospace(&pcie->pio, pcie->io.start);
574 
575 	return 0;
576 }
577 
/*
 * Undo tegra_pcie_request_resources(): unmap the I/O window and drop the
 * host bridge's window list.
 */
static void tegra_pcie_free_resources(struct tegra_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct list_head *windows = &host->windows;

	pci_unmap_iospace(&pcie->pio);
	pci_free_resource_list(windows);
}
586 
587 static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
588 {
589 	struct tegra_pcie *pcie = pdev->bus->sysdata;
590 	int irq;
591 
592 	tegra_cpuidle_pcie_irqs_in_use();
593 
594 	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
595 	if (!irq)
596 		irq = pcie->irq;
597 
598 	return irq;
599 }
600 
601 static irqreturn_t tegra_pcie_isr(int irq, void *arg)
602 {
603 	const char *err_msg[] = {
604 		"Unknown",
605 		"AXI slave error",
606 		"AXI decode error",
607 		"Target abort",
608 		"Master abort",
609 		"Invalid write",
610 		"Legacy interrupt",
611 		"Response decoding error",
612 		"AXI response decoding error",
613 		"Transaction timeout",
614 		"Slot present pin change",
615 		"Slot clock request change",
616 		"TMS clock ramp change",
617 		"TMS ready for power down",
618 		"Peer2Peer error",
619 	};
620 	struct tegra_pcie *pcie = arg;
621 	struct device *dev = pcie->dev;
622 	u32 code, signature;
623 
624 	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
625 	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
626 	afi_writel(pcie, 0, AFI_INTR_CODE);
627 
628 	if (code == AFI_INTR_LEGACY)
629 		return IRQ_NONE;
630 
631 	if (code >= ARRAY_SIZE(err_msg))
632 		code = 0;
633 
634 	/*
635 	 * do not pollute kernel log with master abort reports since they
636 	 * happen a lot during enumeration
637 	 */
638 	if (code == AFI_INTR_MASTER_ABORT)
639 		dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
640 	else
641 		dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
642 
643 	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
644 	    code == AFI_INTR_FPCI_DECODE_ERROR) {
645 		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
646 		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
647 
648 		if (code == AFI_INTR_MASTER_ABORT)
649 			dev_dbg(dev, "  FPCI address: %10llx\n", address);
650 		else
651 			dev_err(dev, "  FPCI address: %10llx\n", address);
652 	}
653 
654 	return IRQ_HANDLED;
655 }
656 
657 /*
658  * FPCI map is as follows:
659  * - 0xfdfc000000: I/O space
660  * - 0xfdfe000000: type 0 configuration space
661  * - 0xfdff000000: type 1 configuration space
662  * - 0xfe00000000: type 0 extended configuration space
663  * - 0xfe10000000: type 1 extended configuration space
664  */
/* Program the AFI BARs that translate AXI accesses into FPCI transactions. */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 fpci_bar, size, axi_address;

	/* Bar 0: type 1 extended configuration space */
	size = resource_size(&pcie->cs);
	afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);

	/* Bar 1: downstream IO bar */
	fpci_bar = 0xfdfc0000;
	size = resource_size(&pcie->io);
	axi_address = pcie->io.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: prefetchable memory BAR */
	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->prefetch);
	axi_address = pcie->prefetch.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: non prefetchable memory BAR */
	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->mem);
	axi_address = pcie->mem.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	/* map all upstream transactions as uncached */
	afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);

	/* MSI translations are setup only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	/*
	 * NOTE(review): AFI_MSI_BAR_SZ is cleared a second time here; the
	 * duplicate write is harmless but looks unintentional -- confirm
	 * whether another register was meant.
	 */
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}
719 
720 static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
721 {
722 	const struct tegra_pcie_soc *soc = pcie->soc;
723 	u32 value;
724 
725 	timeout = jiffies + msecs_to_jiffies(timeout);
726 
727 	while (time_before(jiffies, timeout)) {
728 		value = pads_readl(pcie, soc->pads_pll_ctl);
729 		if (value & PADS_PLL_CTL_LOCKDET)
730 			return 0;
731 	}
732 
733 	return -ETIMEDOUT;
734 }
735 
/*
 * Power up the PCIe PHY by programming the PADS registers directly.
 * Used when no PHY framework handle is available on the legacy PHY path
 * (see tegra_pcie_phy_power_on()).  Returns 0 or a negative error if the
 * PLL fails to lock.
 */
static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;
	int err;

	/* initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs select PLLE output as refclock,
	 * set TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	/* take PLL out of reset  */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* wait for the PLL to lock */
	err = tegra_pcie_pll_wait(pcie, 500);
	if (err < 0) {
		dev_err(dev, "PLL failed to lock: %d\n", err);
		return err;
	}

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	return 0;
}
791 
/*
 * Reverse of tegra_pcie_phy_enable(): disable the data paths, re-assert
 * the IDDQ override and put the PLL back into reset.  Always returns 0.
 */
static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	/* disable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
	pads_writel(pcie, value, PADS_CTL);

	/* override IDDQ */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	return 0;
}
816 
817 static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
818 {
819 	struct device *dev = port->pcie->dev;
820 	unsigned int i;
821 	int err;
822 
823 	for (i = 0; i < port->lanes; i++) {
824 		err = phy_power_on(port->phys[i]);
825 		if (err < 0) {
826 			dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
827 			return err;
828 		}
829 	}
830 
831 	return 0;
832 }
833 
834 static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
835 {
836 	struct device *dev = port->pcie->dev;
837 	unsigned int i;
838 	int err;
839 
840 	for (i = 0; i < port->lanes; i++) {
841 		err = phy_power_off(port->phys[i]);
842 		if (err < 0) {
843 			dev_err(dev, "failed to power off PHY#%u: %d\n", i,
844 				err);
845 			return err;
846 		}
847 	}
848 
849 	return 0;
850 }
851 
/*
 * Power on the PCIe PHY(s).  On the legacy path, either the PHY framework
 * handle is used (when present) or the PADS registers are programmed
 * directly; otherwise each port's per-lane PHYs are powered on and the
 * reference clock pad driver is configured.
 */
static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_on(pcie->phy);
		else
			err = tegra_pcie_phy_enable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power on PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_on(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power on PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	/* Configure the reference clock driver */
	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);

	if (soc->num_ports > 2)
		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);

	return 0;
}
889 
/*
 * Power off the PCIe PHY(s); mirror image of tegra_pcie_phy_power_on()
 * minus the refclk pad configuration.
 */
static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_off(pcie->phy);
		else
			err = tegra_pcie_phy_disable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power off PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_off(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power off PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}
920 
/*
 * Bring up the AFI/PCIe controller: program the lane crossbar, enable the
 * ports that have been registered, optionally power the PHYs, release the
 * PCIe reset and enable error interrupts.  The register sequence is
 * order-sensitive; do not reorder.
 */
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	unsigned long value;
	int err;

	/* enable PLL power down */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

	/* ...then re-enable only the ports that are actually in use */
	list_for_each_entry(port, &pcie->ports, list)
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	/* allow or forbid Gen2 link speed via the fuse override */
	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	if (soc->program_uphy) {
		err = tegra_pcie_phy_power_on(pcie);
		if (err < 0) {
			dev_err(dev, "failed to power on PHY(s): %d\n", err);
			return err;
		}
	}

	/* take the PCIe interface module out of reset */
	reset_control_deassert(pcie->pcie_xrst);

	/* finally enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);

	return 0;
}
995 
/* Put the PCIe interface back into reset and power off the PHYs. */
static void tegra_pcie_disable_controller(struct tegra_pcie *pcie)
{
	int err;

	reset_control_assert(pcie->pcie_xrst);

	if (pcie->soc->program_uphy) {
		err = tegra_pcie_phy_power_off(pcie);
		if (err < 0)
			dev_err(pcie->dev, "failed to power off PHY(s): %d\n",
				err);
	}
}
1009 
/*
 * Reverse of tegra_pcie_power_on(): assert the remaining resets, gate the
 * clocks, power down the PCIe partition (unless a PM domain manages it)
 * and disable the supplies.
 */
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	clk_disable_unprepare(pcie->pll_e);
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
	clk_disable_unprepare(pcie->afi_clk);
	clk_disable_unprepare(pcie->pex_clk);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(dev, "failed to disable regulators: %d\n", err);
}
1032 
/*
 * Power up the controller: resets asserted, supplies enabled, partition
 * ungated (either via a PM domain or the legacy powergate sequence), then
 * the AFI/CML/PLLE clocks brought up.  Order is significant.
 */
static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	/* NOTE(review): a regulator failure is only logged, not fatal --
	 * confirm this best-effort behavior is intentional */
	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_err(dev, "failed to enable regulators: %d\n", err);

	if (dev->pm_domain) {
		err = clk_prepare_enable(pcie->pex_clk);
		if (err) {
			dev_err(dev, "failed to enable PEX clock: %d\n", err);
			return err;
		}
		reset_control_deassert(pcie->pex_rst);
	} else {
		err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
							pcie->pex_clk,
							pcie->pex_rst);
		if (err) {
			dev_err(dev, "powerup sequence failed: %d\n", err);
			return err;
		}
	}

	reset_control_deassert(pcie->afi_rst);

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(dev, "failed to enable AFI clock: %d\n", err);
		return err;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(dev, "failed to enable CML clock: %d\n", err);
			return err;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
		return err;
	}

	return 0;
}
1092 
1093 static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
1094 {
1095 	struct device *dev = pcie->dev;
1096 	const struct tegra_pcie_soc *soc = pcie->soc;
1097 
1098 	pcie->pex_clk = devm_clk_get(dev, "pex");
1099 	if (IS_ERR(pcie->pex_clk))
1100 		return PTR_ERR(pcie->pex_clk);
1101 
1102 	pcie->afi_clk = devm_clk_get(dev, "afi");
1103 	if (IS_ERR(pcie->afi_clk))
1104 		return PTR_ERR(pcie->afi_clk);
1105 
1106 	pcie->pll_e = devm_clk_get(dev, "pll_e");
1107 	if (IS_ERR(pcie->pll_e))
1108 		return PTR_ERR(pcie->pll_e);
1109 
1110 	if (soc->has_cml_clk) {
1111 		pcie->cml_clk = devm_clk_get(dev, "cml");
1112 		if (IS_ERR(pcie->cml_clk))
1113 			return PTR_ERR(pcie->cml_clk);
1114 	}
1115 
1116 	return 0;
1117 }
1118 
1119 static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
1120 {
1121 	struct device *dev = pcie->dev;
1122 
1123 	pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
1124 	if (IS_ERR(pcie->pex_rst))
1125 		return PTR_ERR(pcie->pex_rst);
1126 
1127 	pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
1128 	if (IS_ERR(pcie->afi_rst))
1129 		return PTR_ERR(pcie->afi_rst);
1130 
1131 	pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
1132 	if (IS_ERR(pcie->pcie_xrst))
1133 		return PTR_ERR(pcie->pcie_xrst);
1134 
1135 	return 0;
1136 }
1137 
/*
 * Acquire and initialize the single controller-wide PHY used by the old
 * device tree binding, and mark the driver as operating in legacy PHY
 * mode so teardown takes the matching path.
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	/* optional: a missing "pcie" PHY yields a NULL stub, not an error */
	pcie->phy = devm_phy_optional_get(dev, "pcie");
	if (IS_ERR(pcie->phy)) {
		err = PTR_ERR(pcie->phy);
		dev_err(dev, "failed to get PHY: %d\n", err);
		return err;
	}

	err = phy_init(pcie->phy);
	if (err < 0) {
		dev_err(dev, "failed to initialize PHY: %d\n", err);
		return err;
	}

	pcie->legacy_phy = true;

	return 0;
}
1160 
1161 static struct phy *devm_of_phy_optional_get_index(struct device *dev,
1162 						  struct device_node *np,
1163 						  const char *consumer,
1164 						  unsigned int index)
1165 {
1166 	struct phy *phy;
1167 	char *name;
1168 
1169 	name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
1170 	if (!name)
1171 		return ERR_PTR(-ENOMEM);
1172 
1173 	phy = devm_of_phy_get(dev, np, name);
1174 	kfree(name);
1175 
1176 	if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
1177 		phy = NULL;
1178 
1179 	return phy;
1180 }
1181 
1182 static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
1183 {
1184 	struct device *dev = port->pcie->dev;
1185 	struct phy *phy;
1186 	unsigned int i;
1187 	int err;
1188 
1189 	port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
1190 	if (!port->phys)
1191 		return -ENOMEM;
1192 
1193 	for (i = 0; i < port->lanes; i++) {
1194 		phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
1195 		if (IS_ERR(phy)) {
1196 			dev_err(dev, "failed to get PHY#%u: %ld\n", i,
1197 				PTR_ERR(phy));
1198 			return PTR_ERR(phy);
1199 		}
1200 
1201 		err = phy_init(phy);
1202 		if (err < 0) {
1203 			dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
1204 				err);
1205 			return err;
1206 		}
1207 
1208 		port->phys[i] = phy;
1209 	}
1210 
1211 	return 0;
1212 }
1213 
/*
 * Acquire PHYs according to the binding in use: the legacy single-PHY
 * binding (pre-Gen2 SoCs, or a "phys" property on the controller node)
 * or the per-lane, per-port binding.
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct device_node *np = pcie->dev->of_node;
	struct tegra_pcie_port *port;
	int err;

	if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
		return tegra_pcie_phys_get_legacy(pcie);

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_get_phys(port);
		if (err < 0)
			return err;
	}

	return 0;
}
1232 
1233 static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
1234 {
1235 	struct tegra_pcie_port *port;
1236 	struct device *dev = pcie->dev;
1237 	int err, i;
1238 
1239 	if (pcie->legacy_phy) {
1240 		err = phy_exit(pcie->phy);
1241 		if (err < 0)
1242 			dev_err(dev, "failed to teardown PHY: %d\n", err);
1243 		return;
1244 	}
1245 
1246 	list_for_each_entry(port, &pcie->ports, list) {
1247 		for (i = 0; i < port->lanes; i++) {
1248 			err = phy_exit(port->phys[i]);
1249 			if (err < 0)
1250 				dev_err(dev, "failed to teardown PHY#%u: %d\n",
1251 					i, err);
1252 		}
1253 	}
1254 }
1255 
1256 
/*
 * Acquire every resource the controller needs: clocks, resets, PHYs (on
 * SoCs where the driver programs the UPHY), the "pads" and "afi" register
 * windows, the configuration space aperture and the controller IRQ.
 * Resources obtained via devm_* are released automatically; only the PHYs
 * need explicit unwinding on failure.
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *pads, *afi, *res;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(dev, "failed to get resets: %d\n", err);
		return err;
	}

	if (soc->program_uphy) {
		err = tegra_pcie_phys_get(pcie);
		if (err < 0) {
			dev_err(dev, "failed to get PHYs: %d\n", err);
			return err;
		}
	}

	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
	pcie->pads = devm_ioremap_resource(dev, pads);
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto phys_put;
	}

	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
	pcie->afi = devm_ioremap_resource(dev, afi);
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto phys_put;
	}

	/* request configuration space, but remap later, on demand */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto phys_put;
	}

	pcie->cs = *res;

	/* constrain configuration space to 4 KiB */
	pcie->cs.end = pcie->cs.start + SZ_4K - 1;

	pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
	if (IS_ERR(pcie->cfg)) {
		err = PTR_ERR(pcie->cfg);
		goto phys_put;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0) {
		dev_err(dev, "failed to get IRQ: %d\n", err);
		goto phys_put;
	}

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(dev, "failed to register IRQ: %d\n", err);
		goto phys_put;
	}

	return 0;

phys_put:
	/* PHYs were phy_init()ed in tegra_pcie_phys_get(); undo that */
	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);
	return err;
}
1339 
/*
 * Release the resources acquired in tegra_pcie_get_resources() that are
 * not managed by devm: the controller IRQ and the PHYs.
 *
 * Always returns 0.
 */
static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;

	if (pcie->irq > 0)
		free_irq(pcie->irq, pcie);

	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);

	return 0;
}
1352 
/*
 * Perform the PME turn-off handshake for @port: raise the port's turnoff
 * bit in AFI_PCIE_PME, poll for the corresponding ack bit, wait for the
 * link to settle, then clear the turnoff bit again. A missing ack is
 * logged but the sequence continues regardless.
 */
static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;
	u32 val;
	u8 ack_bit;

	val = afi_readl(pcie, AFI_PCIE_PME);
	val |= (0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);

	ack_bit = soc->ports[port->index].pme.ack_bit;
	err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val,
				 val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT);
	if (err)
		dev_err(pcie->dev, "PME Ack is not received on port: %d\n",
			port->index);

	/* give the link time to enter L2 before deasserting turnoff */
	usleep_range(10000, 11000);

	val = afi_readl(pcie, AFI_PCIE_PME);
	val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);
}
1378 
1379 static int tegra_msi_alloc(struct tegra_msi *chip)
1380 {
1381 	int msi;
1382 
1383 	mutex_lock(&chip->lock);
1384 
1385 	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1386 	if (msi < INT_PCI_MSI_NR)
1387 		set_bit(msi, chip->used);
1388 	else
1389 		msi = -ENOSPC;
1390 
1391 	mutex_unlock(&chip->lock);
1392 
1393 	return msi;
1394 }
1395 
1396 static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1397 {
1398 	struct device *dev = chip->chip.dev;
1399 
1400 	mutex_lock(&chip->lock);
1401 
1402 	if (!test_bit(irq, chip->used))
1403 		dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1404 	else
1405 		clear_bit(irq, chip->used);
1406 
1407 	mutex_unlock(&chip->lock);
1408 }
1409 
/*
 * Chained handler for the controller's MSI interrupt: scan the eight
 * AFI_MSI_VEC registers, acknowledge each pending vector and dispatch
 * the mapped Linux IRQ. Vectors without a mapping or not marked as used
 * are acknowledged and logged only.
 */
static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
	struct tegra_pcie *pcie = data;
	struct device *dev = pcie->dev;
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, processed = 0;

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			unsigned int index = i * 32 + offset;
			unsigned int irq;

			/* clear the interrupt */
			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);

			irq = irq_find_mapping(msi->domain, index);
			if (irq) {
				if (test_bit(index, msi->used))
					generic_handle_irq(irq);
				else
					dev_info(dev, "unhandled MSI\n");
			} else {
				/*
				 * that's weird who triggered this?
				 * just clear it
				 */
				dev_info(dev, "unexpected MSI\n");
			}

			/* see if there's any more pending in this vector */
			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

			processed++;
		}
	}

	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
}
1451 
/*
 * msi_controller .setup_irq callback: allocate a hardware MSI vector,
 * map it into the MSI IRQ domain, attach the descriptor and program the
 * message so the endpoint writes the vector number to msi->phys.
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_msi_setup_irq(struct msi_controller *chip,
			       struct pci_dev *pdev, struct msi_desc *desc)
{
	struct tegra_msi *msi = to_tegra_msi(chip);
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;

	hwirq = tegra_msi_alloc(msi);
	if (hwirq < 0)
		return hwirq;

	irq = irq_create_mapping(msi->domain, hwirq);
	if (!irq) {
		/* release the vector again if no Linux IRQ could be mapped */
		tegra_msi_free(msi, hwirq);
		return -EINVAL;
	}

	irq_set_msi_desc(irq, desc);

	/* the MSI target address is the DMA address of the msi->virt page */
	msg.address_lo = lower_32_bits(msi->phys);
	msg.address_hi = upper_32_bits(msi->phys);
	msg.data = hwirq;

	pci_write_msi_msg(irq, &msg);

	return 0;
}
1480 
/*
 * msi_controller .teardown_irq callback: dispose the IRQ domain mapping
 * and hand the hardware vector back to the allocator.
 */
static void tegra_msi_teardown_irq(struct msi_controller *chip,
				   unsigned int irq)
{
	struct tegra_msi *msi = to_tegra_msi(chip);
	struct irq_data *d = irq_get_irq_data(irq);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	/* fetch hwirq before the mapping is destroyed */
	irq_dispose_mapping(irq);
	tegra_msi_free(msi, hwirq);
}
1491 
/* irq_chip for individual MSI vectors; masking is done via the PCI MSI caps */
static struct irq_chip tegra_msi_irq_chip = {
	.name = "Tegra PCIe MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
1499 
/*
 * IRQ domain .map callback: wire a newly created mapping to the MSI
 * irq_chip and note that PCIe interrupts are in use (cpuidle constraint).
 */
static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	tegra_cpuidle_pcie_irqs_in_use();

	return 0;
}
1510 
/* linear MSI IRQ domain; only .map is needed, unmap uses the default */
static const struct irq_domain_ops msi_domain_ops = {
	.map = tegra_msi_map,
};
1514 
/*
 * Set up MSI support: create the IRQ domain, request the "msi" interrupt,
 * allocate the DMA page endpoints write MSI messages to, and register the
 * msi_controller with the host bridge.
 *
 * Returns 0 on success or a negative error code (unwinding IRQ/domain).
 */
static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct tegra_msi *msi = &pcie->msi;
	struct device *dev = pcie->dev;
	int err;

	mutex_init(&msi->lock);

	msi->chip.dev = dev;
	msi->chip.setup_irq = tegra_msi_setup_irq;
	msi->chip.teardown_irq = tegra_msi_teardown_irq;

	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
					    &msi_domain_ops, &msi->chip);
	if (!msi->domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	err = platform_get_irq_byname(pdev, "msi");
	if (err < 0) {
		dev_err(dev, "failed to get IRQ: %d\n", err);
		goto free_irq_domain;
	}

	msi->irq = err;

	err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
			  tegra_msi_irq_chip.name, pcie);
	if (err < 0) {
		dev_err(dev, "failed to request IRQ: %d\n", err);
		goto free_irq_domain;
	}

	/* Though the PCIe controller can address >32-bit address space, to
	 * facilitate endpoints that support only 32-bit MSI target address,
	 * the mask is set to 32-bit to make sure that MSI target address is
	 * always a 32-bit address
	 */
	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (err < 0) {
		dev_err(dev, "failed to set DMA coherent mask: %d\n", err);
		goto free_irq;
	}

	/* no kernel mapping needed: the page is only a write target for MSIs */
	msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
				    DMA_ATTR_NO_KERNEL_MAPPING);
	if (!msi->virt) {
		dev_err(dev, "failed to allocate DMA memory for MSI\n");
		err = -ENOMEM;
		goto free_irq;
	}

	host->msi = &msi->chip;

	return 0;

free_irq:
	free_irq(msi->irq, pcie);
free_irq_domain:
	irq_domain_remove(msi->domain);
	return err;
}
1580 
/*
 * Program the AFI so MSI writes to msi->phys are captured: set up the
 * FPCI/AXI MSI BAR, enable all 256 vectors and unmask the MSI interrupt.
 */
static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_msi *msi = &pcie->msi;
	u32 reg;

	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
	/* this register is in 4K increments */
	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);

	/* enable all MSI vectors */
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);

	/* and unmask the MSI interrupt */
	reg = afi_readl(pcie, AFI_INTR_MASK);
	reg |= AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, reg, AFI_INTR_MASK);
}
1607 
/*
 * Undo tegra_pcie_msi_setup(): free the MSI target page, release the
 * "msi" interrupt, dispose all remaining vector mappings and remove the
 * IRQ domain.
 */
static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
{
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, irq;

	dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
		       DMA_ATTR_NO_KERNEL_MAPPING);

	if (msi->irq > 0)
		free_irq(msi->irq, pcie);

	for (i = 0; i < INT_PCI_MSI_NR; i++) {
		irq = irq_find_mapping(msi->domain, i);
		if (irq > 0)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(msi->domain);
}
1627 
/*
 * Mask the MSI interrupt at the AFI and disable all 256 MSI vectors.
 *
 * Always returns 0.
 */
static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
{
	u32 value;

	/* mask the MSI interrupt */
	value = afi_readl(pcie, AFI_INTR_MASK);
	value &= ~AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, value, AFI_INTR_MASK);

	/* disable all MSI vectors */
	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);

	return 0;
}
1649 
/*
 * Translate the per-port lane counts packed into @lanes (one byte per
 * root port, port 0 in the lowest byte) into the SoC-specific lane
 * crossbar configuration value written to AFI_PCIE_CONFIG.
 *
 * Returns 0 and stores the value in *xbar, or -EINVAL for combinations
 * the given SoC does not support.
 */
static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
				      u32 *xbar)
{
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;

	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
		switch (lanes) {
		case 0x010004:
			dev_info(dev, "4x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
			return 0;

		case 0x010102:
			dev_info(dev, "2x1, 1X1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
			return 0;

		case 0x010101:
			dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
			return 0;

		default:
			/* Tegra186 only: fall back instead of failing */
			dev_info(dev, "wrong configuration updated in DT, "
				 "switching to default 2x1, 1x1, 1x1 "
				 "configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
		   of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
		switch (lanes) {
		case 0x0000104:
			dev_info(dev, "4x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
			return 0;

		case 0x0000102:
			dev_info(dev, "2x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
		switch (lanes) {
		case 0x00000204:
			dev_info(dev, "4x1, 2x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
			return 0;

		case 0x00020202:
			dev_info(dev, "2x3 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
			return 0;

		case 0x00010104:
			dev_info(dev, "4x1, 1x2 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
		switch (lanes) {
		case 0x00000004:
			dev_info(dev, "single-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
			return 0;

		case 0x00000202:
			dev_info(dev, "dual-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
			return 0;
		}
	}

	return -EINVAL;
}
1726 
1727 /*
1728  * Check whether a given set of supplies is available in a device tree node.
1729  * This is used to check whether the new or the legacy device tree bindings
1730  * should be used.
1731  */
1732 static bool of_regulator_bulk_available(struct device_node *np,
1733 					struct regulator_bulk_data *supplies,
1734 					unsigned int num_supplies)
1735 {
1736 	char property[32];
1737 	unsigned int i;
1738 
1739 	for (i = 0; i < num_supplies; i++) {
1740 		snprintf(property, 32, "%s-supply", supplies[i].supply);
1741 
1742 		if (of_find_property(np, property, NULL) == NULL)
1743 			return false;
1744 	}
1745 
1746 	return true;
1747 }
1748 
1749 /*
1750  * Old versions of the device tree binding for this device used a set of power
1751  * supplies that didn't match the hardware inputs. This happened to work for a
1752  * number of cases but is not future proof. However to preserve backwards-
1753  * compatibility with old device trees, this function will try to use the old
1754  * set of supplies.
1755  */
1756 static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1757 {
1758 	struct device *dev = pcie->dev;
1759 	struct device_node *np = dev->of_node;
1760 
1761 	if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1762 		pcie->num_supplies = 3;
1763 	else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1764 		pcie->num_supplies = 2;
1765 
1766 	if (pcie->num_supplies == 0) {
1767 		dev_err(dev, "device %pOF not supported in legacy mode\n", np);
1768 		return -ENODEV;
1769 	}
1770 
1771 	pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1772 				      sizeof(*pcie->supplies),
1773 				      GFP_KERNEL);
1774 	if (!pcie->supplies)
1775 		return -ENOMEM;
1776 
1777 	pcie->supplies[0].supply = "pex-clk";
1778 	pcie->supplies[1].supply = "vdd";
1779 
1780 	if (pcie->num_supplies > 2)
1781 		pcie->supplies[2].supply = "avdd";
1782 
1783 	return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
1784 }
1785 
1786 /*
1787  * Obtains the list of regulators required for a particular generation of the
1788  * IP block.
1789  *
1790  * This would've been nice to do simply by providing static tables for use
1791  * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
1792  * in that it has two pairs or AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
1793  * and either seems to be optional depending on which ports are being used.
1794  */
1795 static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
1796 {
1797 	struct device *dev = pcie->dev;
1798 	struct device_node *np = dev->of_node;
1799 	unsigned int i = 0;
1800 
1801 	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
1802 		pcie->num_supplies = 4;
1803 
1804 		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1805 					      sizeof(*pcie->supplies),
1806 					      GFP_KERNEL);
1807 		if (!pcie->supplies)
1808 			return -ENOMEM;
1809 
1810 		pcie->supplies[i++].supply = "dvdd-pex";
1811 		pcie->supplies[i++].supply = "hvdd-pex-pll";
1812 		pcie->supplies[i++].supply = "hvdd-pex";
1813 		pcie->supplies[i++].supply = "vddio-pexctl-aud";
1814 	} else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
1815 		pcie->num_supplies = 6;
1816 
1817 		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1818 					      sizeof(*pcie->supplies),
1819 					      GFP_KERNEL);
1820 		if (!pcie->supplies)
1821 			return -ENOMEM;
1822 
1823 		pcie->supplies[i++].supply = "avdd-pll-uerefe";
1824 		pcie->supplies[i++].supply = "hvddio-pex";
1825 		pcie->supplies[i++].supply = "dvddio-pex";
1826 		pcie->supplies[i++].supply = "dvdd-pex-pll";
1827 		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
1828 		pcie->supplies[i++].supply = "vddio-pex-ctl";
1829 	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1830 		pcie->num_supplies = 7;
1831 
1832 		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1833 					      sizeof(*pcie->supplies),
1834 					      GFP_KERNEL);
1835 		if (!pcie->supplies)
1836 			return -ENOMEM;
1837 
1838 		pcie->supplies[i++].supply = "avddio-pex";
1839 		pcie->supplies[i++].supply = "dvddio-pex";
1840 		pcie->supplies[i++].supply = "avdd-pex-pll";
1841 		pcie->supplies[i++].supply = "hvdd-pex";
1842 		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
1843 		pcie->supplies[i++].supply = "vddio-pex-ctl";
1844 		pcie->supplies[i++].supply = "avdd-pll-erefe";
1845 	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1846 		bool need_pexa = false, need_pexb = false;
1847 
1848 		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
1849 		if (lane_mask & 0x0f)
1850 			need_pexa = true;
1851 
1852 		/* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
1853 		if (lane_mask & 0x30)
1854 			need_pexb = true;
1855 
1856 		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
1857 					 (need_pexb ? 2 : 0);
1858 
1859 		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1860 					      sizeof(*pcie->supplies),
1861 					      GFP_KERNEL);
1862 		if (!pcie->supplies)
1863 			return -ENOMEM;
1864 
1865 		pcie->supplies[i++].supply = "avdd-pex-pll";
1866 		pcie->supplies[i++].supply = "hvdd-pex";
1867 		pcie->supplies[i++].supply = "vddio-pex-ctl";
1868 		pcie->supplies[i++].supply = "avdd-plle";
1869 
1870 		if (need_pexa) {
1871 			pcie->supplies[i++].supply = "avdd-pexa";
1872 			pcie->supplies[i++].supply = "vdd-pexa";
1873 		}
1874 
1875 		if (need_pexb) {
1876 			pcie->supplies[i++].supply = "avdd-pexb";
1877 			pcie->supplies[i++].supply = "vdd-pexb";
1878 		}
1879 	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1880 		pcie->num_supplies = 5;
1881 
1882 		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1883 					      sizeof(*pcie->supplies),
1884 					      GFP_KERNEL);
1885 		if (!pcie->supplies)
1886 			return -ENOMEM;
1887 
1888 		pcie->supplies[0].supply = "avdd-pex";
1889 		pcie->supplies[1].supply = "vdd-pex";
1890 		pcie->supplies[2].supply = "avdd-pex-pll";
1891 		pcie->supplies[3].supply = "avdd-plle";
1892 		pcie->supplies[4].supply = "vddio-pex-clk";
1893 	}
1894 
1895 	if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
1896 					pcie->num_supplies))
1897 		return devm_regulator_bulk_get(dev, pcie->num_supplies,
1898 					       pcie->supplies);
1899 
1900 	/*
1901 	 * If not all regulators are available for this new scheme, assume
1902 	 * that the device tree complies with an older version of the device
1903 	 * tree binding.
1904 	 */
1905 	dev_info(dev, "using legacy DT binding for power supplies\n");
1906 
1907 	devm_kfree(dev, pcie->supplies);
1908 	pcie->num_supplies = 0;
1909 
1910 	return tegra_pcie_get_legacy_regulators(pcie);
1911 }
1912 
1913 static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
1914 {
1915 	struct device *dev = pcie->dev;
1916 	struct device_node *np = dev->of_node, *port;
1917 	const struct tegra_pcie_soc *soc = pcie->soc;
1918 	struct of_pci_range_parser parser;
1919 	struct of_pci_range range;
1920 	u32 lanes = 0, mask = 0;
1921 	unsigned int lane = 0;
1922 	struct resource res;
1923 	int err;
1924 
1925 	if (of_pci_range_parser_init(&parser, np)) {
1926 		dev_err(dev, "missing \"ranges\" property\n");
1927 		return -EINVAL;
1928 	}
1929 
1930 	for_each_of_pci_range(&parser, &range) {
1931 		err = of_pci_range_to_resource(&range, np, &res);
1932 		if (err < 0)
1933 			return err;
1934 
1935 		switch (res.flags & IORESOURCE_TYPE_BITS) {
1936 		case IORESOURCE_IO:
1937 			/* Track the bus -> CPU I/O mapping offset. */
1938 			pcie->offset.io = res.start - range.pci_addr;
1939 
1940 			memcpy(&pcie->pio, &res, sizeof(res));
1941 			pcie->pio.name = np->full_name;
1942 
1943 			/*
1944 			 * The Tegra PCIe host bridge uses this to program the
1945 			 * mapping of the I/O space to the physical address,
1946 			 * so we override the .start and .end fields here that
1947 			 * of_pci_range_to_resource() converted to I/O space.
1948 			 * We also set the IORESOURCE_MEM type to clarify that
1949 			 * the resource is in the physical memory space.
1950 			 */
1951 			pcie->io.start = range.cpu_addr;
1952 			pcie->io.end = range.cpu_addr + range.size - 1;
1953 			pcie->io.flags = IORESOURCE_MEM;
1954 			pcie->io.name = "I/O";
1955 
1956 			memcpy(&res, &pcie->io, sizeof(res));
1957 			break;
1958 
1959 		case IORESOURCE_MEM:
1960 			/*
1961 			 * Track the bus -> CPU memory mapping offset. This
1962 			 * assumes that the prefetchable and non-prefetchable
1963 			 * regions will be the last of type IORESOURCE_MEM in
1964 			 * the ranges property.
1965 			 * */
1966 			pcie->offset.mem = res.start - range.pci_addr;
1967 
1968 			if (res.flags & IORESOURCE_PREFETCH) {
1969 				memcpy(&pcie->prefetch, &res, sizeof(res));
1970 				pcie->prefetch.name = "prefetchable";
1971 			} else {
1972 				memcpy(&pcie->mem, &res, sizeof(res));
1973 				pcie->mem.name = "non-prefetchable";
1974 			}
1975 			break;
1976 		}
1977 	}
1978 
1979 	err = of_pci_parse_bus_range(np, &pcie->busn);
1980 	if (err < 0) {
1981 		dev_err(dev, "failed to parse ranges property: %d\n", err);
1982 		pcie->busn.name = np->name;
1983 		pcie->busn.start = 0;
1984 		pcie->busn.end = 0xff;
1985 		pcie->busn.flags = IORESOURCE_BUS;
1986 	}
1987 
1988 	/* parse root ports */
1989 	for_each_child_of_node(np, port) {
1990 		struct tegra_pcie_port *rp;
1991 		unsigned int index;
1992 		u32 value;
1993 
1994 		err = of_pci_get_devfn(port);
1995 		if (err < 0) {
1996 			dev_err(dev, "failed to parse address: %d\n", err);
1997 			return err;
1998 		}
1999 
2000 		index = PCI_SLOT(err);
2001 
2002 		if (index < 1 || index > soc->num_ports) {
2003 			dev_err(dev, "invalid port number: %d\n", index);
2004 			return -EINVAL;
2005 		}
2006 
2007 		index--;
2008 
2009 		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
2010 		if (err < 0) {
2011 			dev_err(dev, "failed to parse # of lanes: %d\n",
2012 				err);
2013 			return err;
2014 		}
2015 
2016 		if (value > 16) {
2017 			dev_err(dev, "invalid # of lanes: %u\n", value);
2018 			return -EINVAL;
2019 		}
2020 
2021 		lanes |= value << (index << 3);
2022 
2023 		if (!of_device_is_available(port)) {
2024 			lane += value;
2025 			continue;
2026 		}
2027 
2028 		mask |= ((1 << value) - 1) << lane;
2029 		lane += value;
2030 
2031 		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
2032 		if (!rp)
2033 			return -ENOMEM;
2034 
2035 		err = of_address_to_resource(port, 0, &rp->regs);
2036 		if (err < 0) {
2037 			dev_err(dev, "failed to parse address: %d\n", err);
2038 			return err;
2039 		}
2040 
2041 		INIT_LIST_HEAD(&rp->list);
2042 		rp->index = index;
2043 		rp->lanes = value;
2044 		rp->pcie = pcie;
2045 		rp->np = port;
2046 
2047 		rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
2048 		if (IS_ERR(rp->base))
2049 			return PTR_ERR(rp->base);
2050 
2051 		list_add_tail(&rp->list, &pcie->ports);
2052 	}
2053 
2054 	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
2055 	if (err < 0) {
2056 		dev_err(dev, "invalid lane configuration\n");
2057 		return err;
2058 	}
2059 
2060 	err = tegra_pcie_get_regulators(pcie, mask);
2061 	if (err < 0)
2062 		return err;
2063 
2064 	return 0;
2065 }
2066 
2067 /*
2068  * FIXME: If there are no PCIe cards attached, then calling this function
2069  * can result in the increase of the bootup time as there are big timeout
2070  * loops.
2071  */
2072 #define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
/*
 * Wait for the link on @port to come up: poll for data-link up and then
 * for DL link active, resetting the port and retrying up to three times.
 *
 * Returns true when the link is active, false when all retries failed.
 */
static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int retries = 3;
	unsigned long value;

	/* override presence detection */
	value = readl(port->base + RP_PRIV_MISC);
	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
	writel(value, port->base + RP_PRIV_MISC);

	do {
		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		/* first wait for the data link layer to come up */
		do {
			value = readl(port->base + RP_VEND_XP);

			if (value & RP_VEND_XP_DL_UP)
				break;

			usleep_range(1000, 2000);
		} while (--timeout);

		if (!timeout) {
			dev_err(dev, "link %u down, retrying\n", port->index);
			goto retry;
		}

		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		/* then wait for the link to report active */
		do {
			value = readl(port->base + RP_LINK_CONTROL_STATUS);

			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
				return true;

			usleep_range(1000, 2000);
		} while (--timeout);

retry:
		tegra_pcie_port_reset(port);
	} while (--retries);

	return false;
}
2119 
2120 static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
2121 {
2122 	struct device *dev = pcie->dev;
2123 	struct tegra_pcie_port *port, *tmp;
2124 
2125 	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2126 		dev_info(dev, "probing port %u, using %u lanes\n",
2127 			 port->index, port->lanes);
2128 
2129 		tegra_pcie_port_enable(port);
2130 
2131 		if (tegra_pcie_port_check_link(port))
2132 			continue;
2133 
2134 		dev_info(dev, "link %u down, ignoring\n", port->index);
2135 
2136 		tegra_pcie_port_disable(port);
2137 		tegra_pcie_port_free(port);
2138 	}
2139 }
2140 
2141 static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
2142 {
2143 	struct tegra_pcie_port *port, *tmp;
2144 
2145 	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2146 		tegra_pcie_port_disable(port);
2147 }
2148 
/* per-port PME turn-off request/acknowledge bit positions on Tegra20 */
static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = {
	{ .pme.turnoff_bit = 0, .pme.ack_bit =  5 },
	{ .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
};

/* SoC-specific configuration for the Tegra20 PCIe controller */
static const struct tegra_pcie_soc tegra20_pcie = {
	.num_ports = 2,
	.ports = tegra20_pcie_ports,
	.msi_base_shift = 0,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
	.pads_refclk_cfg0 = 0xfa5cfa5c,
	.has_pex_clkreq_en = false,
	.has_pex_bias_ctrl = false,
	.has_intr_prsnt_sense = false,
	.has_cml_clk = false,
	.has_gen2 = false,
	.force_pca_enable = false,
	.program_uphy = true,
};
2169 
/* per-port PME turn-off request/acknowledge bit positions on Tegra30 */
static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
	{ .pme.turnoff_bit = 16, .pme.ack_bit = 18 },
};

/* SoC-specific configuration for the Tegra30 PCIe controller */
static const struct tegra_pcie_soc tegra30_pcie = {
	.num_ports = 3,
	.ports = tegra30_pcie_ports,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0xfa5cfa5c,
	.pads_refclk_cfg1 = 0xfa5cfa5c,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = false,
	.force_pca_enable = false,
	.program_uphy = true,
};
2192 
/* Tegra124: reuses the two-port Tegra20 PME bit layout, adds Gen2 support */
static const struct tegra_pcie_soc tegra124_pcie = {
	.num_ports = 2,
	.ports = tegra20_pcie_ports,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0x44ac44ac,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = true,
	.force_pca_enable = false,
	.program_uphy = true,
};
2208 
/* Tegra210: like Tegra124 but requires PCA (performance counter) enabling */
static const struct tegra_pcie_soc tegra210_pcie = {
	.num_ports = 2,
	.ports = tegra20_pcie_ports,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0x90b890b8,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = true,
	.force_pca_enable = true,
	.program_uphy = true,
};
2224 
/* per-port PME turn-off request/acknowledge bit positions on Tegra186 */
static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
	{ .pme.turnoff_bit = 12, .pme.ack_bit = 14 },
};

/* Tegra186: UPHY is programmed externally, hence .program_uphy = false */
static const struct tegra_pcie_soc tegra186_pcie = {
	.num_ports = 3,
	.ports = tegra186_pcie_ports,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0x80b880b8,
	.pads_refclk_cfg1 = 0x000480b8,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = false,
	.has_gen2 = true,
	.force_pca_enable = false,
	.program_uphy = false,
};
2247 
/* device-tree compatible strings, newest SoC generations listed first */
static const struct of_device_id tegra_pcie_of_match[] = {
	{ .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
	{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
	{ },
};
2256 
2257 static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
2258 {
2259 	struct tegra_pcie *pcie = s->private;
2260 
2261 	if (list_empty(&pcie->ports))
2262 		return NULL;
2263 
2264 	seq_printf(s, "Index  Status\n");
2265 
2266 	return seq_list_start(&pcie->ports, *pos);
2267 }
2268 
/* seq_file next callback: advance to the next root port in the list. */
static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct tegra_pcie *pcie = s->private;

	return seq_list_next(v, &pcie->ports, pos);
}
2275 
/* seq_file stop callback: no locking was taken in start, nothing to undo. */
static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
{
}
2279 
2280 static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
2281 {
2282 	bool up = false, active = false;
2283 	struct tegra_pcie_port *port;
2284 	unsigned int value;
2285 
2286 	port = list_entry(v, struct tegra_pcie_port, list);
2287 
2288 	value = readl(port->base + RP_VEND_XP);
2289 
2290 	if (value & RP_VEND_XP_DL_UP)
2291 		up = true;
2292 
2293 	value = readl(port->base + RP_LINK_CONTROL_STATUS);
2294 
2295 	if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2296 		active = true;
2297 
2298 	seq_printf(s, "%2u     ", port->index);
2299 
2300 	if (up)
2301 		seq_printf(s, "up");
2302 
2303 	if (active) {
2304 		if (up)
2305 			seq_printf(s, ", ");
2306 
2307 		seq_printf(s, "active");
2308 	}
2309 
2310 	seq_printf(s, "\n");
2311 	return 0;
2312 }
2313 
/* seq_file iterator over the controller's root ports (debugfs "ports") */
static const struct seq_operations tegra_pcie_ports_seq_ops = {
	.start = tegra_pcie_ports_seq_start,
	.next = tegra_pcie_ports_seq_next,
	.stop = tegra_pcie_ports_seq_stop,
	.show = tegra_pcie_ports_seq_show,
};
2320 
2321 static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
2322 {
2323 	struct tegra_pcie *pcie = inode->i_private;
2324 	struct seq_file *s;
2325 	int err;
2326 
2327 	err = seq_open(file, &tegra_pcie_ports_seq_ops);
2328 	if (err)
2329 		return err;
2330 
2331 	s = file->private_data;
2332 	s->private = pcie;
2333 
2334 	return 0;
2335 }
2336 
/* file operations for the debugfs "ports" file (read-only, seq_file based) */
static const struct file_operations tegra_pcie_ports_ops = {
	.owner = THIS_MODULE,
	.open = tegra_pcie_ports_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
2344 
/* Tear down the driver's debugfs directory and everything beneath it. */
static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
{
	debugfs_remove_recursive(pcie->debugfs);
	/* clear the pointer so a later exit call is a harmless no-op */
	pcie->debugfs = NULL;
}
2350 
/*
 * Create the debugfs hierarchy: a "pcie" directory containing a read-only
 * "ports" file that reports per-port link status.
 *
 * NOTE(review): debugfs_create_dir()/debugfs_create_file() return ERR_PTR
 * values (not NULL) on failure, so these NULL checks likely never trigger
 * on error — verify against the debugfs API for this kernel version.
 */
static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
{
	struct dentry *file;

	pcie->debugfs = debugfs_create_dir("pcie", NULL);
	if (!pcie->debugfs)
		return -ENOMEM;

	file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
				   pcie, &tegra_pcie_ports_ops);
	if (!file)
		goto remove;

	return 0;

remove:
	tegra_pcie_debugfs_exit(pcie);
	return -ENOMEM;
}
2370 
2371 static int tegra_pcie_probe(struct platform_device *pdev)
2372 {
2373 	struct device *dev = &pdev->dev;
2374 	struct pci_host_bridge *host;
2375 	struct tegra_pcie *pcie;
2376 	struct pci_bus *child;
2377 	int err;
2378 
2379 	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
2380 	if (!host)
2381 		return -ENOMEM;
2382 
2383 	pcie = pci_host_bridge_priv(host);
2384 	host->sysdata = pcie;
2385 	platform_set_drvdata(pdev, pcie);
2386 
2387 	pcie->soc = of_device_get_match_data(dev);
2388 	INIT_LIST_HEAD(&pcie->ports);
2389 	pcie->dev = dev;
2390 
2391 	err = tegra_pcie_parse_dt(pcie);
2392 	if (err < 0)
2393 		return err;
2394 
2395 	err = tegra_pcie_get_resources(pcie);
2396 	if (err < 0) {
2397 		dev_err(dev, "failed to request resources: %d\n", err);
2398 		return err;
2399 	}
2400 
2401 	err = tegra_pcie_msi_setup(pcie);
2402 	if (err < 0) {
2403 		dev_err(dev, "failed to enable MSI support: %d\n", err);
2404 		goto put_resources;
2405 	}
2406 
2407 	pm_runtime_enable(pcie->dev);
2408 	err = pm_runtime_get_sync(pcie->dev);
2409 	if (err) {
2410 		dev_err(dev, "fail to enable pcie controller: %d\n", err);
2411 		goto teardown_msi;
2412 	}
2413 
2414 	err = tegra_pcie_request_resources(pcie);
2415 	if (err)
2416 		goto pm_runtime_put;
2417 
2418 	host->busnr = pcie->busn.start;
2419 	host->dev.parent = &pdev->dev;
2420 	host->ops = &tegra_pcie_ops;
2421 	host->map_irq = tegra_pcie_map_irq;
2422 	host->swizzle_irq = pci_common_swizzle;
2423 
2424 	err = pci_scan_root_bus_bridge(host);
2425 	if (err < 0) {
2426 		dev_err(dev, "failed to register host: %d\n", err);
2427 		goto free_resources;
2428 	}
2429 
2430 	pci_bus_size_bridges(host->bus);
2431 	pci_bus_assign_resources(host->bus);
2432 
2433 	list_for_each_entry(child, &host->bus->children, node)
2434 		pcie_bus_configure_settings(child);
2435 
2436 	pci_bus_add_devices(host->bus);
2437 
2438 	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2439 		err = tegra_pcie_debugfs_init(pcie);
2440 		if (err < 0)
2441 			dev_err(dev, "failed to setup debugfs: %d\n", err);
2442 	}
2443 
2444 	return 0;
2445 
2446 free_resources:
2447 	tegra_pcie_free_resources(pcie);
2448 pm_runtime_put:
2449 	pm_runtime_put_sync(pcie->dev);
2450 	pm_runtime_disable(pcie->dev);
2451 teardown_msi:
2452 	tegra_pcie_msi_teardown(pcie);
2453 put_resources:
2454 	tegra_pcie_put_resources(pcie);
2455 	return err;
2456 }
2457 
/*
 * Remove the controller: tear down debugfs, detach and remove the PCI
 * root bus, release resources, drop the runtime PM reference taken at
 * probe, undo MSI setup and finally free the remaining port structures.
 * The teardown order mirrors probe in reverse.
 */
static int tegra_pcie_remove(struct platform_device *pdev)
{
	struct tegra_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct tegra_pcie_port *port, *tmp;

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_pcie_debugfs_exit(pcie);

	/* stop devices before ripping the bus out from under them */
	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);
	tegra_pcie_free_resources(pcie);
	pm_runtime_put_sync(pcie->dev);
	pm_runtime_disable(pcie->dev);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_msi_teardown(pcie);

	tegra_pcie_put_resources(pcie);

	/* _safe iteration: tegra_pcie_port_free() removes entries */
	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
		tegra_pcie_port_free(port);

	return 0;
}
2483 
/*
 * Runtime/system suspend: request PME turn-off for each port, disable
 * the ports, MSI and the controller, then cut power. The sequence order
 * matters: links must be quiesced before the controller is powered off.
 */
static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)
{
	struct tegra_pcie *pcie = dev_get_drvdata(dev);
	struct tegra_pcie_port *port;

	list_for_each_entry(port, &pcie->ports, list)
		tegra_pcie_pme_turnoff(port);

	tegra_pcie_disable_ports(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_disable_msi(pcie);

	tegra_pcie_disable_controller(pcie);
	tegra_pcie_power_off(pcie);

	return 0;
}
2502 
/*
 * Runtime/system resume: power the controller back up, re-enable it,
 * reprogram the address translations lost over suspend, restore MSI and
 * finally bring the root ports (and their links) back up. Powers off
 * again if the controller cannot be re-enabled.
 */
static int __maybe_unused tegra_pcie_pm_resume(struct device *dev)
{
	struct tegra_pcie *pcie = dev_get_drvdata(dev);
	int err;

	err = tegra_pcie_power_on(pcie);
	if (err) {
		dev_err(dev, "tegra pcie power on fail: %d\n", err);
		return err;
	}
	err = tegra_pcie_enable_controller(pcie);
	if (err) {
		dev_err(dev, "tegra pcie controller enable fail: %d\n", err);
		goto poweroff;
	}
	tegra_pcie_setup_translations(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_enable_msi(pcie);

	tegra_pcie_enable_ports(pcie);

	return 0;

poweroff:
	tegra_pcie_power_off(pcie);

	return err;
}
2532 
/* same suspend/resume handlers serve both runtime PM and system sleep */
static const struct dev_pm_ops tegra_pcie_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend,
				      tegra_pcie_pm_resume)
};
2538 
static struct platform_driver tegra_pcie_driver = {
	.driver = {
		.name = "tegra-pcie",
		.of_match_table = tegra_pcie_of_match,
		/* unbinding a live PCIe host bridge is not supported */
		.suppress_bind_attrs = true,
		.pm = &tegra_pcie_pm_ops,
	},
	.probe = tegra_pcie_probe,
	.remove = tegra_pcie_remove,
};
module_platform_driver(tegra_pcie_driver);
MODULE_LICENSE("GPL");
2551