// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Tegra SoCs
 *
 * Copyright (c) 2010, CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Based on NVIDIA PCIe driver
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * Bits taken from arch/arm/mach-dove/pcie.c
 *
 * Author: Thierry Reding <treding@nvidia.com>
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>

#include <soc/tegra/cpuidle.h>
#include <soc/tegra/pmc.h>

#include "../pci.h"

#define INT_PCI_MSI_NR (8 * 32)
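/* i.e. eight 32-bit AFI_MSI_VEC registers, for 256 MSI vectors in total */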

/* register definitions */

#define AFI_AXI_BAR0_SZ	0x00
#define AFI_AXI_BAR1_SZ	0x04
#define AFI_AXI_BAR2_SZ	0x08
#define AFI_AXI_BAR3_SZ	0x0c
#define AFI_AXI_BAR4_SZ	0x10
#define AFI_AXI_BAR5_SZ	0x14

#define AFI_AXI_BAR0_START	0x18
#define AFI_AXI_BAR1_START	0x1c
#define AFI_AXI_BAR2_START	0x20
#define AFI_AXI_BAR3_START	0x24
#define AFI_AXI_BAR4_START	0x28
#define AFI_AXI_BAR5_START	0x2c

#define AFI_FPCI_BAR0	0x30
#define AFI_FPCI_BAR1	0x34
#define AFI_FPCI_BAR2	0x38
#define AFI_FPCI_BAR3	0x3c
#define AFI_FPCI_BAR4	0x40
#define AFI_FPCI_BAR5	0x44

#define AFI_CACHE_BAR0_SZ	0x48
#define AFI_CACHE_BAR0_ST	0x4c
#define AFI_CACHE_BAR1_SZ	0x50
#define AFI_CACHE_BAR1_ST	0x54

#define AFI_MSI_BAR_SZ		0x60
#define AFI_MSI_FPCI_BAR_ST	0x64
#define AFI_MSI_AXI_BAR_ST	0x68

#define AFI_MSI_VEC(x)		(0x6c + ((x) * 4))
#define AFI_MSI_EN_VEC(x)	(0x8c + ((x) * 4))

#define AFI_CONFIGURATION		0xac
#define  AFI_CONFIGURATION_EN_FPCI		(1 << 0)
#define  AFI_CONFIGURATION_CLKEN_OVERRIDE	(1 << 31)

#define AFI_FPCI_ERROR_MASKS	0xb0

#define AFI_INTR_MASK		0xb4
#define  AFI_INTR_MASK_INT_MASK	(1 << 0)
#define  AFI_INTR_MASK_MSI_MASK	(1 << 8)

#define AFI_INTR_CODE			0xb8
#define  AFI_INTR_CODE_MASK		0xf
#define  AFI_INTR_INI_SLAVE_ERROR	1
#define  AFI_INTR_INI_DECODE_ERROR	2
#define  AFI_INTR_TARGET_ABORT		3
#define  AFI_INTR_MASTER_ABORT		4
#define  AFI_INTR_INVALID_WRITE		5
#define  AFI_INTR_LEGACY		6
#define  AFI_INTR_FPCI_DECODE_ERROR	7
#define  AFI_INTR_AXI_DECODE_ERROR	8
#define  AFI_INTR_FPCI_TIMEOUT		9
#define  AFI_INTR_PE_PRSNT_SENSE	10
#define  AFI_INTR_PE_CLKREQ_SENSE	11
#define  AFI_INTR_CLKCLAMP_SENSE	12
#define  AFI_INTR_RDY4PD_SENSE		13
#define  AFI_INTR_P2P_ERROR		14

#define AFI_INTR_SIGNATURE	0xbc
#define AFI_UPPER_FPCI_ADDRESS	0xc0
#define AFI_SM_INTR_ENABLE	0xc4
#define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
#define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
#define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
#define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
#define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
#define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
#define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
#define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)

#define AFI_AFI_INTR_ENABLE		0xc8
#define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
#define  AFI_INTR_EN_INI_DECERR		(1 << 1)
#define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
#define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
#define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
#define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
#define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
#define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
#define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)

#define AFI_PCIE_PME		0xf0

#define AFI_PCIE_CONFIG					0x0f8
#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111	(0x2 << 20)
#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x)		(1 << ((x) + 29))
#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL		(0x7 << 29)

#define AFI_FUSE			0x104
#define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)

#define AFI_PEX0_CTRL			0x110
#define AFI_PEX1_CTRL			0x118
#define  AFI_PEX_CTRL_RST		(1 << 0)
#define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
#define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
#define  AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)

#define AFI_PLLE_CONTROL		0x160
#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)

#define AFI_PEXBIAS_CTRL_0		0x168

#define RP_ECTL_2_R1	0x00000e84
#define  RP_ECTL_2_R1_RX_CTLE_1C_MASK		0xffff

#define RP_ECTL_4_R1	0x00000e8c
#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT	16

#define RP_ECTL_5_R1	0x00000e90
#define  RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK	0xffffffff

#define RP_ECTL_6_R1	0x00000e94
#define  RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK	0xffffffff

#define RP_ECTL_2_R2	0x00000ea4
#define  RP_ECTL_2_R2_RX_CTLE_1C_MASK	0xffff

#define RP_ECTL_4_R2	0x00000eac
#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT	16

#define RP_ECTL_5_R2	0x00000eb0
#define  RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK	0xffffffff

#define RP_ECTL_6_R2	0x00000eb4
#define  RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK	0xffffffff

#define RP_VEND_XP	0x00000f00
#define  RP_VEND_XP_DL_UP			(1 << 30)
#define  RP_VEND_XP_OPPORTUNISTIC_ACK		(1 << 27)
#define  RP_VEND_XP_OPPORTUNISTIC_UPDATEFC	(1 << 28)
#define  RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK	(0xff << 18)

#define RP_VEND_CTL0	0x00000f44
#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK	(0xf << 12)
#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH	(0x9 << 12)

#define RP_VEND_CTL1	0x00000f48
#define  RP_VEND_CTL1_ERPT	(1 << 13)

#define RP_VEND_XP_BIST	0x00000f4c
#define  RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE	(1 << 28)

#define RP_VEND_CTL2 0x00000fa8
#define  RP_VEND_CTL2_PCA_ENABLE (1 << 7)

#define RP_PRIV_MISC	0x00000fe0
#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT		(0xe << 0)
#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT		(0xf << 0)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 16)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD		(0xf << 16)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE		(1 << 23)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 24)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD		(0xf << 24)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE		(1 << 31)

#define RP_LINK_CONTROL_STATUS			0x00000090
#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000

#define RP_LINK_CONTROL_STATUS_2		0x000000b0

#define PADS_CTL_SEL		0x0000009c

#define PADS_CTL		0x000000a0
#define  PADS_CTL_IDDQ_1L	(1 << 0)
#define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
#define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)

#define PADS_PLL_CTL_TEGRA20			0x000000b8
#define PADS_PLL_CTL_TEGRA30			0x000000b4
#define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
#define  PADS_PLL_CTL_LOCKDET			(1 << 8)
#define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
#define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
#define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)

#define PADS_REFCLK_CFG0			0x000000c8
#define PADS_REFCLK_CFG1			0x000000cc
#define PADS_REFCLK_BIAS			0x000000d0

/*
 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 * entries, one entry per PCIe port. These field definitions and desired
 * values aren't in the TRM, but do come from NVIDIA.
 */
#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */
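/*
 * Note: PADS_REFCLK_CFG0 holds the 16-bit entries for ports 0 and 1, while
 * PADS_REFCLK_CFG1 holds the entry for port 2 and is only written on SoCs
 * with more than two ports (see tegra_pcie_apply_pad_settings()).
 */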

#define PME_ACK_TIMEOUT 10000 /* in usec */
#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */

struct tegra_msi {
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
	struct mutex map_lock;
	spinlock_t mask_lock;
	void *virt;
	dma_addr_t phys;
	int irq;
};

/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_port_soc {
	struct {
		u8 turnoff_bit;
		u8 ack_bit;
	} pme;
};

struct tegra_pcie_soc {
	unsigned int num_ports;
	const struct tegra_pcie_port_soc *ports;
	unsigned int msi_base_shift;
	unsigned long afi_pex2_ctrl;
	u32 pads_pll_ctl;
	u32 tx_ref_sel;
	u32 pads_refclk_cfg0;
	u32 pads_refclk_cfg1;
	u32 update_fc_threshold;
	bool has_pex_clkreq_en;
	bool has_pex_bias_ctrl;
	bool has_intr_prsnt_sense;
	bool has_cml_clk;
	bool has_gen2;
	bool force_pca_enable;
	bool program_uphy;
	bool update_clamp_threshold;
	bool program_deskew_time;
	bool update_fc_timer;
	bool has_cache_bars;
	struct {
		struct {
			u32 rp_ectl_2_r1;
			u32 rp_ectl_4_r1;
			u32 rp_ectl_5_r1;
			u32 rp_ectl_6_r1;
			u32 rp_ectl_2_r2;
			u32 rp_ectl_4_r2;
			u32 rp_ectl_5_r2;
			u32 rp_ectl_6_r2;
		} regs;
		bool enable;
	} ectl;
};

struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;
	void __iomem *afi;
	void __iomem *cfg;
	int irq;

	struct resource cs;

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	bool legacy_phy;
	struct phy *phy;

	struct tegra_msi msi;

	struct list_head ports;
	u32 xbar_config;

	struct regulator_bulk_data *supplies;
	unsigned int num_supplies;

	const struct tegra_pcie_soc *soc;
	struct dentry *debugfs;
};

static inline struct tegra_pcie *msi_to_pcie(struct tegra_msi *msi)
{
	return container_of(msi, struct tegra_pcie, msi);
}

struct tegra_pcie_port {
	struct tegra_pcie *pcie;
	struct device_node *np;
	struct list_head list;
	struct resource regs;
	void __iomem *base;
	unsigned int index;
	unsigned int lanes;

	struct phy **phys;

	struct gpio_desc *reset_gpio;
};

struct tegra_pcie_bus {
	struct list_head list;
	unsigned int nr;
};

static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}

static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}

static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}

static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}

/*
 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 * defined by PCIe. However, it deviates a bit in how the 4 bits for extended
 * register accesses are mapped:
 *
 *    [27:24] extended register number
 *    [23:16] bus number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * Mapping the whole extended configuration space would require 256 MiB of
 * virtual address space, only a small part of which will actually be used.
 *
 * To work around this, a 4 KiB region is used to generate the required
 * configuration transaction with relevant B:D:F and register offset values.
 * This is achieved by dynamically programming the base address and size of
 * the AFI_AXI_BAR used for endpoint config space mapping, so that the
 * address whose access generates the desired config transaction falls
 * within this 4 KiB region.
 */
static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
					   unsigned int where)
{
	return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
	       (PCI_FUNC(devfn) << 8) | (where & 0xff);
}
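
/*
 * For illustration (example values, not from the TRM): a type 1 extended
 * configuration access to register 0x100 on bus 1, device 0, function 0
 * yields
 *
 *   offset = ((0x100 & 0xf00) << 16) | (1 << 16) | (0 << 11) | (0 << 8) | 0x00
 *          = 0x01000000 | 0x00010000
 *          = 0x01010000
 *
 * i.e. bits [11:8] of "where" end up in bits [27:24] of the offset.
 */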

static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
					unsigned int devfn,
					int where)
{
	struct tegra_pcie *pcie = bus->sysdata;
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		unsigned int offset;
		u32 base;

		offset = tegra_pcie_conf_offset(bus->number, devfn, where);

		/* move 4 KiB window to offset within the FPCI region */
		base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
		afi_writel(pcie, base, AFI_FPCI_BAR0);

		/* move to correct offset within the 4 KiB page */
		addr = pcie->cfg + (offset & (SZ_4K - 1));
	}

	return addr;
}
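
/*
 * Note that the AFI_FPCI_BAR0 window is moved on every downstream access:
 * the base value 0xfe100000 is the FPCI address 0xfe10000000 (type 1
 * extended configuration space, see the FPCI map further down) shifted
 * right by 8 bits. Reprogramming the window per access relies on the PCI
 * core serializing configuration accesses.
 */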

static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *value)
{
	if (bus->number == 0)
		return pci_generic_config_read32(bus, devfn, where, size,
						 value);

	return pci_generic_config_read(bus, devfn, where, size, value);
}

static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 value)
{
	if (bus->number == 0)
		return pci_generic_config_write32(bus, devfn, where, size,
						  value);

	return pci_generic_config_write(bus, devfn, where, size, value);
}

static struct pci_ops tegra_pcie_ops = {
	.map_bus = tegra_pcie_map_bus,
	.read = tegra_pcie_config_read,
	.write = tegra_pcie_config_write,
};

static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long ret = 0;

	switch (port->index) {
	case 0:
		ret = AFI_PEX0_CTRL;
		break;

	case 1:
		ret = AFI_PEX1_CTRL;
		break;

	case 2:
		ret = soc->afi_pex2_ctrl;
		break;
	}

	return ret;
}

static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	if (port->reset_gpio) {
		gpiod_set_value(port->reset_gpio, 1);
	} else {
		value = afi_readl(port->pcie, ctrl);
		value &= ~AFI_PEX_CTRL_RST;
		afi_writel(port->pcie, value, ctrl);
	}

	usleep_range(1000, 2000);

	if (port->reset_gpio) {
		gpiod_set_value(port->reset_gpio, 0);
	} else {
		value = afi_readl(port->pcie, ctrl);
		value |= AFI_PEX_CTRL_RST;
		afi_writel(port->pcie, value, ctrl);
	}
}

static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	/* Enable AER capability */
	value = readl(port->base + RP_VEND_CTL1);
	value |= RP_VEND_CTL1_ERPT;
	writel(value, port->base + RP_VEND_CTL1);

	/* Optimal settings to enhance bandwidth */
	value = readl(port->base + RP_VEND_XP);
	value |= RP_VEND_XP_OPPORTUNISTIC_ACK;
	value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC;
	writel(value, port->base + RP_VEND_XP);

	/*
	 * LTSSM will wait for DLLP to finish before entering L1 or L2,
	 * to avoid truncation of PM messages which results in receiver errors
	 */
	value = readl(port->base + RP_VEND_XP_BIST);
	value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE;
	writel(value, port->base + RP_VEND_XP_BIST);

	value = readl(port->base + RP_PRIV_MISC);
	value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE;
	value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE;

	if (soc->update_clamp_threshold) {
		value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK |
				RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK);
		value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD |
			RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD;
	}

	writel(value, port->base + RP_PRIV_MISC);
}

static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	value = readl(port->base + RP_ECTL_2_R1);
	value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_2_r1;
	writel(value, port->base + RP_ECTL_2_R1);

	value = readl(port->base + RP_ECTL_4_R1);
	value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_4_r1 <<
				RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT;
	writel(value, port->base + RP_ECTL_4_R1);

	value = readl(port->base + RP_ECTL_5_R1);
	value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_5_r1;
	writel(value, port->base + RP_ECTL_5_R1);

	value = readl(port->base + RP_ECTL_6_R1);
	value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_6_r1;
	writel(value, port->base + RP_ECTL_6_R1);

	value = readl(port->base + RP_ECTL_2_R2);
	value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_2_r2;
	writel(value, port->base + RP_ECTL_2_R2);

	value = readl(port->base + RP_ECTL_4_R2);
	value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_4_r2 <<
				RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT;
	writel(value, port->base + RP_ECTL_4_R2);

	value = readl(port->base + RP_ECTL_5_R2);
	value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_5_r2;
	writel(value, port->base + RP_ECTL_5_R2);

	value = readl(port->base + RP_ECTL_6_R2);
	value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_6_r2;
	writel(value, port->base + RP_ECTL_6_R2);
}

static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	/*
	 * Sometimes link speed change from Gen2 to Gen1 fails due to
	 * instability in deskew logic on lane-0. Increase the deskew
	 * retry time to resolve this issue.
	 */
	if (soc->program_deskew_time) {
		value = readl(port->base + RP_VEND_CTL0);
		value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK;
		value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH;
		writel(value, port->base + RP_VEND_CTL0);
	}

	if (soc->update_fc_timer) {
		value = readl(port->base + RP_VEND_XP);
		value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
		value |= soc->update_fc_threshold;
		writel(value, port->base + RP_VEND_XP);
	}

	/*
	 * The PCIe link doesn't come up with a few legacy PCIe endpoints if
	 * the root port advertises both Gen-1 and Gen-2 speeds in Tegra.
	 * Hence, the strategy followed here is to initially advertise only
	 * Gen-1 and, after the link is up, retrain the link to Gen-2 speed.
	 */
	value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
	value &= ~PCI_EXP_LNKSTA_CLS;
	value |= PCI_EXP_LNKSTA_CLS_2_5GB;
	writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
}
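
/*
 * Note: the retrain to Gen-2 speed mentioned above is performed after
 * link-up, in the link speed change path elsewhere in this driver.
 */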

static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	value |= AFI_PEX_CTRL_OVERRIDE_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);

	if (soc->force_pca_enable) {
		value = readl(port->base + RP_VEND_CTL2);
		value |= RP_VEND_CTL2_PCA_ENABLE;
		writel(value, port->base + RP_VEND_CTL2);
	}

	tegra_pcie_enable_rp_features(port);

	if (soc->ectl.enable)
		tegra_pcie_program_ectl_settings(port);

	tegra_pcie_apply_sw_fixup(port);
}

static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);

	if (soc->has_pex_clkreq_en)
		value &= ~AFI_PEX_CTRL_CLKREQ_EN;

	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);

	/* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */
	value = afi_readl(port->pcie, AFI_PCIE_CONFIG);
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
	afi_writel(port->pcie, value, AFI_PCIE_CONFIG);
}

static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	devm_release_mem_region(dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(dev, port);
}

/* The Tegra PCIe root complex wrongly reports its device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);

/* Tegra20 and Tegra30 PCIe require relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);

static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
	struct tegra_pcie *pcie = pdev->bus->sysdata;
	int irq;

	tegra_cpuidle_pcie_irqs_in_use();

	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
	if (!irq)
		irq = pcie->irq;

	return irq;
}

static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
	static const char * const err_msg[] = {
		"Unknown",
		"AXI slave error",
		"AXI decode error",
		"Target abort",
		"Master abort",
		"Invalid write",
		"Legacy interrupt",
		"Response decoding error",
		"AXI response decoding error",
		"Transaction timeout",
		"Slot present pin change",
		"Slot clock request change",
		"TMS clock ramp change",
		"TMS ready for power down",
		"Peer2Peer error",
	};
	struct tegra_pcie *pcie = arg;
	struct device *dev = pcie->dev;
	u32 code, signature;

	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
	afi_writel(pcie, 0, AFI_INTR_CODE);

	if (code == AFI_INTR_LEGACY)
		return IRQ_NONE;

	if (code >= ARRAY_SIZE(err_msg))
		code = 0;

	/*
	 * do not pollute the kernel log with master abort reports since they
	 * happen a lot during enumeration
	 */
	if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE)
		dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
	else
		dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);

	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
	    code == AFI_INTR_FPCI_DECODE_ERROR) {
		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);

		if (code == AFI_INTR_MASTER_ABORT)
			dev_dbg(dev, "  FPCI address: %10llx\n", address);
		else
			dev_err(dev, "  FPCI address: %10llx\n", address);
	}

	return IRQ_HANDLED;
}

/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 size;
	struct resource_entry *entry;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);

	/* Bar 0: type 1 extended configuration space */
	size = resource_size(&pcie->cs);
	afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);

	resource_list_for_each_entry(entry, &bridge->windows) {
		u32 fpci_bar, axi_address;
		struct resource *res = entry->res;

		size = resource_size(res);

		switch (resource_type(res)) {
		case IORESOURCE_IO:
			/* Bar 1: downstream IO bar */
			fpci_bar = 0xfdfc0000;
			axi_address = pci_pio_to_address(res->start);
			afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
			afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
			afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
			break;
		case IORESOURCE_MEM:
			fpci_bar = (((res->start >> 12) & 0x0fffffff) << 4) | 0x1;
			axi_address = res->start;

			if (res->flags & IORESOURCE_PREFETCH) {
				/* Bar 2: prefetchable memory BAR */
				afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
				afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

			} else {
				/* Bar 3: non prefetchable memory BAR */
				afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
				afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
			}
			break;
		}
	}

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	if (pcie->soc->has_cache_bars) {
		/* map all upstream transactions as uncached */
		afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
		afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
		afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
		afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
	}

	/* MSI translations are set up only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}
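
/*
 * For illustration (hypothetical window, not from a real DT): a memory
 * window starting at AXI address 0x20000000 would be encoded as
 *
 *   fpci_bar = (((0x20000000 >> 12) & 0x0fffffff) << 4) | 0x1
 *            = (0x20000 << 4) | 0x1
 *            = 0x00200001
 *
 * i.e. the window's start address shifted right by 8 bits, with bit 0 set.
 */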

static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	timeout = jiffies + msecs_to_jiffies(timeout);

	while (time_before(jiffies, timeout)) {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		if (value & PADS_PLL_CTL_LOCKDET)
			return 0;
	}

	return -ETIMEDOUT;
}

static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;
	int err;

	/* initialize internal PHY, enable up to 16 PCIe lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs: select PLLE output as refclock and
	 * set TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	/* take PLL out of reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* wait for the PLL to lock */
	err = tegra_pcie_pll_wait(pcie, 500);
	if (err < 0) {
		dev_err(dev, "PLL failed to lock: %d\n", err);
		return err;
	}

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	return 0;
}

static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	/* disable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
	pads_writel(pcie, value, PADS_CTL);

	/* override IDDQ */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	return 0;
}

static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_on(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_off(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power off PHY#%u: %d\n", i,
				err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_on(pcie->phy);
		else
			err = tegra_pcie_phy_enable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power on PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_on(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power on PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_off(pcie->phy);
		else
			err = tegra_pcie_phy_disable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power off PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_off(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power off PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}

static void tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	unsigned long value;

	/* enable PLL power down */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL;

	list_for_each_entry(port, &pcie->ports, list) {
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
		value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
	}

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	/* Disable AFI dynamic clock gating and enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	value |= AFI_CONFIGURATION_CLKEN_OVERRIDE;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
}

static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->afi_rst);

	clk_disable_unprepare(pcie->pll_e);
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
	clk_disable_unprepare(pcie->afi_clk);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(dev, "failed to disable regulators: %d\n", err);
}

static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_err(dev, "failed to enable regulators: %d\n", err);

	if (!dev->pm_domain) {
		err = tegra_powergate_power_on(TEGRA_POWERGATE_PCIE);
		if (err) {
			dev_err(dev, "failed to power ungate: %d\n", err);
			goto regulator_disable;
		}
		err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE);
		if (err) {
			dev_err(dev, "failed to remove clamp: %d\n", err);
			goto powergate;
		}
	}

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(dev, "failed to enable AFI clock: %d\n", err);
		goto powergate;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(dev, "failed to enable CML clock: %d\n", err);
			goto disable_afi_clk;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
		goto disable_cml_clk;
	}

	reset_control_deassert(pcie->afi_rst);

	return 0;

disable_cml_clk:
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
disable_afi_clk:
	clk_disable_unprepare(pcie->afi_clk);
powergate:
	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
regulator_disable:
	regulator_bulk_disable(pcie->num_supplies, pcie->supplies);

	return err;
}

static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;

	/* Configure the reference clock driver */
	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);

	if (soc->num_ports > 2)
		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
}

static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;

	pcie->pex_clk = devm_clk_get(dev, "pex");
	if (IS_ERR(pcie->pex_clk))
		return PTR_ERR(pcie->pex_clk);

	pcie->afi_clk = devm_clk_get(dev, "afi");
	if (IS_ERR(pcie->afi_clk))
		return PTR_ERR(pcie->afi_clk);

	pcie->pll_e = devm_clk_get(dev, "pll_e");
	if (IS_ERR(pcie->pll_e))
		return PTR_ERR(pcie->pll_e);

	if (soc->has_cml_clk) {
		pcie->cml_clk = devm_clk_get(dev, "cml");
		if (IS_ERR(pcie->cml_clk))
			return PTR_ERR(pcie->cml_clk);
	}

	return 0;
}

static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;

	pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
	if (IS_ERR(pcie->pex_rst))
		return PTR_ERR(pcie->pex_rst);

	pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
	if (IS_ERR(pcie->afi_rst))
		return PTR_ERR(pcie->afi_rst);

	pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
	if (IS_ERR(pcie->pcie_xrst))
		return PTR_ERR(pcie->pcie_xrst);

	return 0;
}

static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	pcie->phy = devm_phy_optional_get(dev, "pcie");
	if (IS_ERR(pcie->phy)) {
		err = PTR_ERR(pcie->phy);
		dev_err(dev, "failed to get PHY: %d\n", err);
		return err;
	}

	err = phy_init(pcie->phy);
	if (err < 0) {
		dev_err(dev, "failed to initialize PHY: %d\n", err);
		return err;
	}

	pcie->legacy_phy = true;

	return 0;
}

static struct phy *devm_of_phy_optional_get_index(struct device *dev,
						  struct device_node *np,
						  const char *consumer,
						  unsigned int index)
{
	struct phy *phy;
	char *name;

	name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
	if (!name)
		return ERR_PTR(-ENOMEM);

	phy = devm_of_phy_get(dev, np, name);
	kfree(name);

	if (PTR_ERR(phy) == -ENODEV)
		phy = NULL;

	return phy;
}
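
/*
 * devm_of_phy_optional_get_index() treats a missing PHY (-ENODEV) as
 * optional and returns NULL in that case, so callers only need to treat
 * other errors as fatal.
 */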

static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	struct phy *phy;
	unsigned int i;
	int err;

	port->phys = devm_kcalloc(dev, port->lanes, sizeof(*port->phys),
				  GFP_KERNEL);
	if (!port->phys)
		return -ENOMEM;

	for (i = 0; i < port->lanes; i++) {
		phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
		if (IS_ERR(phy)) {
			dev_err(dev, "failed to get PHY#%u: %ld\n", i,
				PTR_ERR(phy));
			return PTR_ERR(phy);
		}

		err = phy_init(phy);
		if (err < 0) {
			dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
				err);
			return err;
		}

		port->phys[i] = phy;
	}

	return 0;
}

static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct device_node *np = pcie->dev->of_node;
	struct tegra_pcie_port *port;
	int err;

	if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
		return tegra_pcie_phys_get_legacy(pcie);

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_get_phys(port);
		if (err < 0)
			return err;
	}

	return 0;
}

static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
{
	struct tegra_pcie_port *port;
	struct device *dev = pcie->dev;
	int err, i;

	if (pcie->legacy_phy) {
		err = phy_exit(pcie->phy);
		if (err < 0)
			dev_err(dev, "failed to teardown PHY: %d\n", err);
		return;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		for (i = 0; i < port->lanes; i++) {
			err = phy_exit(port->phys[i]);
			if (err < 0)
				dev_err(dev, "failed to teardown PHY#%u: %d\n",
					i, err);
		}
	}
}

static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(dev, "failed to get resets: %d\n", err);
		return err;
	}

	if (soc->program_uphy) {
		err = tegra_pcie_phys_get(pcie);
		if (err < 0) {
			dev_err(dev, "failed to get PHYs: %d\n", err);
			return err;
		}
	}

	pcie->pads = devm_platform_ioremap_resource_byname(pdev, "pads");
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto phys_put;
	}

	pcie->afi = devm_platform_ioremap_resource_byname(pdev, "afi");
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto phys_put;
	}

	/* request configuration space, but remap later, on demand */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto phys_put;
	}

	pcie->cs = *res;

	/* constrain configuration space to 4 KiB */
	pcie->cs.end = pcie->cs.start + SZ_4K - 1;

	pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
	if (IS_ERR(pcie->cfg)) {
		err = PTR_ERR(pcie->cfg);
		goto phys_put;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0)
		goto phys_put;

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(dev, "failed to register IRQ: %d\n", err);
		goto phys_put;
	}

	return 0;

phys_put:
	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);

	return err;
}

static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;

	if (pcie->irq > 0)
		free_irq(pcie->irq, pcie);

	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);

	return 0;
}

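/*
 * Broadcast PME_Turn_Off on a port: set the port's turnoff bit in
 * AFI_PCIE_PME, poll for the corresponding ack bit, give the link roughly
 * 10 ms to settle, then clear the turnoff bit again.
 */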
static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;
	u32 val;
	u8 ack_bit;

	val = afi_readl(pcie, AFI_PCIE_PME);
	val |= (0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);

	ack_bit = soc->ports[port->index].pme.ack_bit;
	err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val,
				 val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT);
	if (err)
		dev_err(pcie->dev, "PME Ack is not received on port: %d\n",
			port->index);

	usleep_range(10000, 11000);

	val = afi_readl(pcie, AFI_PCIE_PME);
	val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);
}

static void tegra_pcie_msi_irq(struct irq_desc *desc)
{
	struct tegra_pcie *pcie = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct tegra_msi *msi = &pcie->msi;
	struct device *dev = pcie->dev;
	unsigned int i;

	chained_irq_enter(chip, desc);

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC(i));

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			unsigned int index = i * 32 + offset;
			unsigned int irq;

			irq = irq_find_mapping(msi->domain->parent, index);
			if (irq) {
				generic_handle_irq(irq);
			} else {
				/*
				 * That's weird: who triggered this?
				 * Just clear it.
				 */
				dev_info(dev, "unexpected MSI\n");
				afi_writel(pcie, BIT(index % 32), AFI_MSI_VEC(i));
			}

			/* see if there's any more pending in this vector */
			reg = afi_readl(pcie, AFI_MSI_VEC(i));
		}
	}

	chained_irq_exit(chip, desc);
}

static void tegra_msi_top_irq_ack(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void tegra_msi_top_irq_mask(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void tegra_msi_top_irq_unmask(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip tegra_msi_top_chip = {
	.name		= "Tegra PCIe MSI",
	.irq_ack	= tegra_msi_top_irq_ack,
	.irq_mask	= tegra_msi_top_irq_mask,
	.irq_unmask	= tegra_msi_top_irq_unmask,
};

static void tegra_msi_irq_ack(struct irq_data *d)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	unsigned int index = d->hwirq / 32;

	/* clear the interrupt */
	afi_writel(pcie, BIT(d->hwirq % 32), AFI_MSI_VEC(index));
}

static void tegra_msi_irq_mask(struct irq_data *d)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	unsigned int index = d->hwirq / 32;
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&msi->mask_lock, flags);
	value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
	value &= ~BIT(d->hwirq % 32);
	afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
	spin_unlock_irqrestore(&msi->mask_lock, flags);
}

static void tegra_msi_irq_unmask(struct irq_data *d)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	unsigned int index = d->hwirq / 32;
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&msi->mask_lock, flags);
	value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
	value |= BIT(d->hwirq % 32);
	afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
	spin_unlock_irqrestore(&msi->mask_lock, flags);
}

static int tegra_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(data);

	msg->address_lo = lower_32_bits(msi->phys);
	msg->address_hi = upper_32_bits(msi->phys);
	msg->data = data->hwirq;
}
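
/*
 * All MSI vectors share a single target address (msi->phys); the AFI tells
 * them apart by the 32-bit message data, which carries the hwirq number.
 */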

static struct irq_chip tegra_msi_bottom_chip = {
	.name			= "Tegra MSI",
	.irq_ack		= tegra_msi_irq_ack,
	.irq_mask		= tegra_msi_irq_mask,
	.irq_unmask		= tegra_msi_irq_unmask,
	.irq_set_affinity	= tegra_msi_set_affinity,
	.irq_compose_msi_msg	= tegra_compose_msi_msg,
};

static int tegra_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *args)
{
	struct tegra_msi *msi = domain->host_data;
	unsigned int i;
	int hwirq;

	mutex_lock(&msi->map_lock);

	hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));

	mutex_unlock(&msi->map_lock);

	if (hwirq < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &tegra_msi_bottom_chip, domain->host_data,
				    handle_edge_irq, NULL, NULL);

	tegra_cpuidle_pcie_irqs_in_use();

	return 0;
}

static void tegra_msi_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct tegra_msi *msi = domain->host_data;

	mutex_lock(&msi->map_lock);

	bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));

	mutex_unlock(&msi->map_lock);
}

static const struct irq_domain_ops tegra_msi_domain_ops = {
	.alloc = tegra_msi_domain_alloc,
	.free = tegra_msi_domain_free,
};

static struct msi_domain_info tegra_msi_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX),
	.chip	= &tegra_msi_top_chip,
};

static int tegra_allocate_domains(struct tegra_msi *msi)
{
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
	struct irq_domain *parent;

	parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
					  &tegra_msi_domain_ops, msi);
	if (!parent) {
		dev_err(pcie->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}
	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);

	msi->domain = pci_msi_create_irq_domain(fwnode, &tegra_msi_info, parent);
	if (!msi->domain) {
		dev_err(pcie->dev, "failed to create MSI domain\n");
		irq_domain_remove(parent);
		return -ENOMEM;
	}

	return 0;
}
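
/*
 * MSIs are modeled as two stacked IRQ domains: a linear parent domain that
 * allocates hwirqs from the bitmap and drives the AFI registers, with the
 * generic PCI/MSI domain layered on top of it.
 */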

static void tegra_free_domains(struct tegra_msi *msi)
{
	struct irq_domain *parent = msi->domain->parent;

	irq_domain_remove(msi->domain);
	irq_domain_remove(parent);
}

static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct tegra_msi *msi = &pcie->msi;
	struct device *dev = pcie->dev;
	int err;

	mutex_init(&msi->map_lock);
	spin_lock_init(&msi->mask_lock);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = tegra_allocate_domains(msi);
		if (err)
			return err;
	}

	err = platform_get_irq_byname(pdev, "msi");
	if (err < 0)
		goto free_irq_domain;

	msi->irq = err;

	irq_set_chained_handler_and_data(msi->irq, tegra_pcie_msi_irq, pcie);

	/*
	 * Though the PCIe controller can address a >32-bit address space, to
	 * facilitate endpoints that support only a 32-bit MSI target address,
	 * the mask is set to 32-bit to make sure that the MSI target address
	 * is always a 32-bit address.
	 */
	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (err < 0) {
		dev_err(dev, "failed to set DMA coherent mask: %d\n", err);
		goto free_irq;
	}

	msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
				    DMA_ATTR_NO_KERNEL_MAPPING);
	if (!msi->virt) {
		dev_err(dev, "failed to allocate DMA memory for MSI\n");
		err = -ENOMEM;
		goto free_irq;
	}

	return 0;

free_irq:
	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
free_irq_domain:
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_free_domains(msi);

	return err;
}
1800 
1801 static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1802 {
1803 	const struct tegra_pcie_soc *soc = pcie->soc;
1804 	struct tegra_msi *msi = &pcie->msi;
1805 	u32 reg, msi_state[INT_PCI_MSI_NR / 32];
1806 	int i;
1807 
1808 	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1809 	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
1810 	/* this register is in 4K increments */
1811 	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1812 
1813 	/* Restore the MSI allocation state */
1814 	bitmap_to_arr32(msi_state, msi->used, INT_PCI_MSI_NR);
1815 	for (i = 0; i < ARRAY_SIZE(msi_state); i++)
1816 		afi_writel(pcie, msi_state[i], AFI_MSI_EN_VEC(i));
1817 
1818 	/* and unmask the MSI interrupt */
1819 	reg = afi_readl(pcie, AFI_INTR_MASK);
1820 	reg |= AFI_INTR_MASK_MSI_MASK;
1821 	afi_writel(pcie, reg, AFI_INTR_MASK);
1822 }
1823 
1824 static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
1825 {
1826 	struct tegra_msi *msi = &pcie->msi;
1827 	unsigned int i, irq;
1828 
1829 	dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
1830 		       DMA_ATTR_NO_KERNEL_MAPPING);
1831 
1832 	for (i = 0; i < INT_PCI_MSI_NR; i++) {
1833 		irq = irq_find_mapping(msi->domain, i);
1834 		if (irq > 0)
1835 			irq_domain_free_irqs(irq, 1);
1836 	}
1837 
1838 	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
1839 
1840 	if (IS_ENABLED(CONFIG_PCI_MSI))
1841 		tegra_free_domains(msi);
1842 }
1843 
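/* Mask the MSI interrupt at the AFI level, leaving allocations intact. */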
1844 static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1845 {
1846 	u32 value;
1847 
1848 	/* mask the MSI interrupt */
1849 	value = afi_readl(pcie, AFI_INTR_MASK);
1850 	value &= ~AFI_INTR_MASK_MSI_MASK;
1851 	afi_writel(pcie, value, AFI_INTR_MASK);
1852 
1853 	return 0;
1854 }
1855 
1856 static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie)
1857 {
1858 	u32 value;
1859 
1860 	value = afi_readl(pcie, AFI_INTR_MASK);
1861 	value &= ~AFI_INTR_MASK_INT_MASK;
1862 	afi_writel(pcie, value, AFI_INTR_MASK);
1863 }
1864 
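/*
 * Map the lane configuration parsed from the device tree to one of the
 * crossbar configurations supported by the given SoC generation. The "lanes"
 * argument packs the nvidia,num-lanes value of root port N into byte N, so,
 * for example, an x4 port 0 plus an x1 port 1 on Tegra124 yields 0x0000104
 * and selects the X4_X1 crossbar configuration.
 */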
1865 static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1866 				      u32 *xbar)
1867 {
1868 	struct device *dev = pcie->dev;
1869 	struct device_node *np = dev->of_node;
1870 
1871 	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
1872 		switch (lanes) {
1873 		case 0x010004:
1874 			dev_info(dev, "4x1, 1x1 configuration\n");
1875 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
1876 			return 0;
1877 
1878 		case 0x010102:
			dev_info(dev, "2x1, 1x1, 1x1 configuration\n");
1880 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1881 			return 0;
1882 
1883 		case 0x010101:
1884 			dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
1885 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
1886 			return 0;
1887 
1888 		default:
			dev_info(dev, "invalid lane configuration in DT, using default 2x1, 1x1, 1x1 configuration\n");
1892 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1893 			return 0;
1894 		}
1895 	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
1896 		   of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
1897 		switch (lanes) {
1898 		case 0x0000104:
1899 			dev_info(dev, "4x1, 1x1 configuration\n");
1900 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1901 			return 0;
1902 
1903 		case 0x0000102:
1904 			dev_info(dev, "2x1, 1x1 configuration\n");
1905 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1906 			return 0;
1907 		}
1908 	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1909 		switch (lanes) {
1910 		case 0x00000204:
1911 			dev_info(dev, "4x1, 2x1 configuration\n");
1912 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1913 			return 0;
1914 
1915 		case 0x00020202:
1916 			dev_info(dev, "2x3 configuration\n");
1917 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1918 			return 0;
1919 
1920 		case 0x00010104:
1921 			dev_info(dev, "4x1, 1x2 configuration\n");
1922 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1923 			return 0;
1924 		}
1925 	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1926 		switch (lanes) {
1927 		case 0x00000004:
1928 			dev_info(dev, "single-mode configuration\n");
1929 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1930 			return 0;
1931 
1932 		case 0x00000202:
1933 			dev_info(dev, "dual-mode configuration\n");
1934 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1935 			return 0;
1936 		}
1937 	}
1938 
1939 	return -EINVAL;
1940 }
1941 
1942 /*
1943  * Check whether a given set of supplies is available in a device tree node.
1944  * This is used to check whether the new or the legacy device tree bindings
1945  * should be used.
1946  */
1947 static bool of_regulator_bulk_available(struct device_node *np,
1948 					struct regulator_bulk_data *supplies,
1949 					unsigned int num_supplies)
1950 {
1951 	char property[32];
1952 	unsigned int i;
1953 
1954 	for (i = 0; i < num_supplies; i++) {
		snprintf(property, sizeof(property), "%s-supply",
			 supplies[i].supply);
1956 
1957 		if (of_find_property(np, property, NULL) == NULL)
1958 			return false;
1959 	}
1960 
1961 	return true;
1962 }
1963 
1964 /*
1965  * Old versions of the device tree binding for this device used a set of power
1966  * supplies that didn't match the hardware inputs. This happened to work for a
 * number of cases but is not future-proof. However, to preserve backwards
 * compatibility with old device trees, this function will try to use the old
 * set of supplies.
1970  */
1971 static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1972 {
1973 	struct device *dev = pcie->dev;
1974 	struct device_node *np = dev->of_node;
1975 
1976 	if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1977 		pcie->num_supplies = 3;
1978 	else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1979 		pcie->num_supplies = 2;
1980 
1981 	if (pcie->num_supplies == 0) {
1982 		dev_err(dev, "device %pOF not supported in legacy mode\n", np);
1983 		return -ENODEV;
1984 	}
1985 
1986 	pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1987 				      sizeof(*pcie->supplies),
1988 				      GFP_KERNEL);
1989 	if (!pcie->supplies)
1990 		return -ENOMEM;
1991 
1992 	pcie->supplies[0].supply = "pex-clk";
1993 	pcie->supplies[1].supply = "vdd";
1994 
1995 	if (pcie->num_supplies > 2)
1996 		pcie->supplies[2].supply = "avdd";
1997 
1998 	return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
1999 }
2000 
2001 /*
2002  * Obtains the list of regulators required for a particular generation of the
2003  * IP block.
2004  *
2005  * This would've been nice to do simply by providing static tables for use
2006  * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
 * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
 * and either pair is optional depending on which ports are being used.
2009  */
2010 static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
2011 {
2012 	struct device *dev = pcie->dev;
2013 	struct device_node *np = dev->of_node;
2014 	unsigned int i = 0;
2015 
2016 	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
2017 		pcie->num_supplies = 4;
2018 
2019 		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
2020 					      sizeof(*pcie->supplies),
2021 					      GFP_KERNEL);
2022 		if (!pcie->supplies)
2023 			return -ENOMEM;
2024 
2025 		pcie->supplies[i++].supply = "dvdd-pex";
2026 		pcie->supplies[i++].supply = "hvdd-pex-pll";
2027 		pcie->supplies[i++].supply = "hvdd-pex";
2028 		pcie->supplies[i++].supply = "vddio-pexctl-aud";
2029 	} else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
2030 		pcie->num_supplies = 3;
2031 
2032 		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
2033 					      sizeof(*pcie->supplies),
2034 					      GFP_KERNEL);
2035 		if (!pcie->supplies)
2036 			return -ENOMEM;
2037 
2038 		pcie->supplies[i++].supply = "hvddio-pex";
2039 		pcie->supplies[i++].supply = "dvddio-pex";
2040 		pcie->supplies[i++].supply = "vddio-pex-ctl";
2041 	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
2042 		pcie->num_supplies = 4;
2043 
2044 		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2045 					      sizeof(*pcie->supplies),
2046 					      GFP_KERNEL);
2047 		if (!pcie->supplies)
2048 			return -ENOMEM;
2049 
2050 		pcie->supplies[i++].supply = "avddio-pex";
2051 		pcie->supplies[i++].supply = "dvddio-pex";
2052 		pcie->supplies[i++].supply = "hvdd-pex";
2053 		pcie->supplies[i++].supply = "vddio-pex-ctl";
2054 	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
2055 		bool need_pexa = false, need_pexb = false;
2056 
2057 		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
2058 		if (lane_mask & 0x0f)
2059 			need_pexa = true;
2060 
2061 		/* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
2062 		if (lane_mask & 0x30)
2063 			need_pexb = true;
2064 
2065 		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
2066 					 (need_pexb ? 2 : 0);
2067 
2068 		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2069 					      sizeof(*pcie->supplies),
2070 					      GFP_KERNEL);
2071 		if (!pcie->supplies)
2072 			return -ENOMEM;
2073 
2074 		pcie->supplies[i++].supply = "avdd-pex-pll";
2075 		pcie->supplies[i++].supply = "hvdd-pex";
2076 		pcie->supplies[i++].supply = "vddio-pex-ctl";
2077 		pcie->supplies[i++].supply = "avdd-plle";
2078 
2079 		if (need_pexa) {
2080 			pcie->supplies[i++].supply = "avdd-pexa";
2081 			pcie->supplies[i++].supply = "vdd-pexa";
2082 		}
2083 
2084 		if (need_pexb) {
2085 			pcie->supplies[i++].supply = "avdd-pexb";
2086 			pcie->supplies[i++].supply = "vdd-pexb";
2087 		}
2088 	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
2089 		pcie->num_supplies = 5;
2090 
2091 		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2092 					      sizeof(*pcie->supplies),
2093 					      GFP_KERNEL);
2094 		if (!pcie->supplies)
2095 			return -ENOMEM;
2096 
2097 		pcie->supplies[0].supply = "avdd-pex";
2098 		pcie->supplies[1].supply = "vdd-pex";
2099 		pcie->supplies[2].supply = "avdd-pex-pll";
2100 		pcie->supplies[3].supply = "avdd-plle";
2101 		pcie->supplies[4].supply = "vddio-pex-clk";
2102 	}
2103 
2104 	if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
2105 					pcie->num_supplies))
2106 		return devm_regulator_bulk_get(dev, pcie->num_supplies,
2107 					       pcie->supplies);
2108 
2109 	/*
2110 	 * If not all regulators are available for this new scheme, assume
2111 	 * that the device tree complies with an older version of the device
2112 	 * tree binding.
2113 	 */
2114 	dev_info(dev, "using legacy DT binding for power supplies\n");
2115 
2116 	devm_kfree(dev, pcie->supplies);
2117 	pcie->num_supplies = 0;
2118 
2119 	return tegra_pcie_get_legacy_regulators(pcie);
2120 }
2121 
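/*
 * Walk the root port child nodes of the controller's device tree node,
 * allocate a tegra_pcie_port for each enabled port, derive the lane
 * crossbar configuration from the per-port nvidia,num-lanes values and
 * acquire the regulators needed for the enabled lanes.
 */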
2122 static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
2123 {
2124 	struct device *dev = pcie->dev;
2125 	struct device_node *np = dev->of_node, *port;
2126 	const struct tegra_pcie_soc *soc = pcie->soc;
2127 	u32 lanes = 0, mask = 0;
2128 	unsigned int lane = 0;
2129 	int err;
2130 
2131 	/* parse root ports */
2132 	for_each_child_of_node(np, port) {
2133 		struct tegra_pcie_port *rp;
2134 		unsigned int index;
2135 		u32 value;
2136 		char *label;
2137 
2138 		err = of_pci_get_devfn(port);
2139 		if (err < 0) {
2140 			dev_err(dev, "failed to parse address: %d\n", err);
2141 			goto err_node_put;
2142 		}
2143 
2144 		index = PCI_SLOT(err);
2145 
2146 		if (index < 1 || index > soc->num_ports) {
2147 			dev_err(dev, "invalid port number: %d\n", index);
2148 			err = -EINVAL;
2149 			goto err_node_put;
2150 		}
2151 
2152 		index--;
2153 
2154 		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
2155 		if (err < 0) {
2156 			dev_err(dev, "failed to parse # of lanes: %d\n",
2157 				err);
2158 			goto err_node_put;
2159 		}
2160 
2161 		if (value > 16) {
2162 			dev_err(dev, "invalid # of lanes: %u\n", value);
2163 			err = -EINVAL;
2164 			goto err_node_put;
2165 		}
2166 
2167 		lanes |= value << (index << 3);
2168 
2169 		if (!of_device_is_available(port)) {
2170 			lane += value;
2171 			continue;
2172 		}
2173 
2174 		mask |= ((1 << value) - 1) << lane;
2175 		lane += value;
2176 
2177 		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
2178 		if (!rp) {
2179 			err = -ENOMEM;
2180 			goto err_node_put;
2181 		}
2182 
2183 		err = of_address_to_resource(port, 0, &rp->regs);
2184 		if (err < 0) {
2185 			dev_err(dev, "failed to parse address: %d\n", err);
2186 			goto err_node_put;
2187 		}
2188 
2189 		INIT_LIST_HEAD(&rp->list);
2190 		rp->index = index;
2191 		rp->lanes = value;
2192 		rp->pcie = pcie;
2193 		rp->np = port;
2194 
		rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
		if (IS_ERR(rp->base)) {
			err = PTR_ERR(rp->base);
			goto err_node_put;
		}
2198 
2199 		label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
2200 		if (!label) {
2201 			dev_err(dev, "failed to create reset GPIO label\n");
			err = -ENOMEM;
			goto err_node_put;
2203 		}
2204 
2205 		/*
2206 		 * Returns -ENOENT if reset-gpios property is not populated
2207 		 * and in this case fall back to using AFI per port register
2208 		 * to toggle PERST# SFIO line.
2209 		 */
2210 		rp->reset_gpio = devm_gpiod_get_from_of_node(dev, port,
2211 							     "reset-gpios", 0,
2212 							     GPIOD_OUT_LOW,
2213 							     label);
2214 		if (IS_ERR(rp->reset_gpio)) {
2215 			if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
2216 				rp->reset_gpio = NULL;
2217 			} else {
2218 				dev_err(dev, "failed to get reset GPIO: %ld\n",
2219 					PTR_ERR(rp->reset_gpio));
				err = PTR_ERR(rp->reset_gpio);
				goto err_node_put;
2221 			}
2222 		}
2223 
2224 		list_add_tail(&rp->list, &pcie->ports);
2225 	}
2226 
2227 	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
2228 	if (err < 0) {
2229 		dev_err(dev, "invalid lane configuration\n");
2230 		return err;
2231 	}
2232 
2233 	err = tegra_pcie_get_regulators(pcie, mask);
2234 	if (err < 0)
2235 		return err;
2236 
2237 	return 0;
2238 
2239 err_node_put:
2240 	of_node_put(port);
2241 	return err;
2242 }
2243 
2244 /*
 * FIXME: If no PCIe cards are attached, calling this function can
 * significantly increase boot time because of its long timeout loops.
2248  */
2249 #define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
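/*
 * Check whether the link of a port comes up: wait for the data link layer
 * to report up, then for the link to become active, resetting the port and
 * retrying up to three times on failure.
 */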
2250 static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
2251 {
2252 	struct device *dev = port->pcie->dev;
2253 	unsigned int retries = 3;
2254 	unsigned long value;
2255 
2256 	/* override presence detection */
2257 	value = readl(port->base + RP_PRIV_MISC);
2258 	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
2259 	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
2260 	writel(value, port->base + RP_PRIV_MISC);
2261 
2262 	do {
2263 		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2264 
2265 		do {
2266 			value = readl(port->base + RP_VEND_XP);
2267 
2268 			if (value & RP_VEND_XP_DL_UP)
2269 				break;
2270 
2271 			usleep_range(1000, 2000);
2272 		} while (--timeout);
2273 
2274 		if (!timeout) {
2275 			dev_dbg(dev, "link %u down, retrying\n", port->index);
2276 			goto retry;
2277 		}
2278 
2279 		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2280 
2281 		do {
2282 			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2283 
2284 			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2285 				return true;
2286 
2287 			usleep_range(1000, 2000);
2288 		} while (--timeout);
2289 
2290 retry:
2291 		tegra_pcie_port_reset(port);
2292 	} while (--retries);
2293 
2294 	return false;
2295 }
2296 
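/*
 * Switch all remaining ports to Gen2 (5.0 GT/s): program the target link
 * speed into the root port, wait for any link training in progress to
 * finish, then trigger a retrain and wait for it to complete.
 */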
2297 static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie)
2298 {
2299 	struct device *dev = pcie->dev;
2300 	struct tegra_pcie_port *port;
2301 	ktime_t deadline;
2302 	u32 value;
2303 
2304 	list_for_each_entry(port, &pcie->ports, list) {
2305 		/*
2306 		 * "Supported Link Speeds Vector" in "Link Capabilities 2"
2307 		 * is not supported by Tegra. tegra_pcie_change_link_speed()
		 * is called only for Tegra chips which support Gen2, so there
		 * is no harm in not verifying the supported link speed.
2310 		 */
2311 		value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
2312 		value &= ~PCI_EXP_LNKSTA_CLS;
2313 		value |= PCI_EXP_LNKSTA_CLS_5_0GB;
2314 		writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
2315 
2316 		/*
2317 		 * Poll until link comes back from recovery to avoid race
2318 		 * condition.
2319 		 */
2320 		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2321 
2322 		while (ktime_before(ktime_get(), deadline)) {
2323 			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2324 			if ((value & PCI_EXP_LNKSTA_LT) == 0)
2325 				break;
2326 
2327 			usleep_range(2000, 3000);
2328 		}
2329 
2330 		if (value & PCI_EXP_LNKSTA_LT)
2331 			dev_warn(dev, "PCIe port %u link is in recovery\n",
2332 				 port->index);
2333 
2334 		/* Retrain the link */
2335 		value = readl(port->base + RP_LINK_CONTROL_STATUS);
2336 		value |= PCI_EXP_LNKCTL_RL;
2337 		writel(value, port->base + RP_LINK_CONTROL_STATUS);
2338 
2339 		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2340 
2341 		while (ktime_before(ktime_get(), deadline)) {
2342 			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2343 			if ((value & PCI_EXP_LNKSTA_LT) == 0)
2344 				break;
2345 
2346 			usleep_range(2000, 3000);
2347 		}
2348 
2349 		if (value & PCI_EXP_LNKSTA_LT)
2350 			dev_err(dev, "failed to retrain link of port %u\n",
2351 				port->index);
2352 	}
2353 }
2354 
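/*
 * Enable all root ports, release the LTSSM from reset and prune ports on
 * which no link comes up, so that only live ports remain on the list.
 */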
2355 static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
2356 {
2357 	struct device *dev = pcie->dev;
2358 	struct tegra_pcie_port *port, *tmp;
2359 
2360 	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2361 		dev_info(dev, "probing port %u, using %u lanes\n",
2362 			 port->index, port->lanes);
2363 
2364 		tegra_pcie_port_enable(port);
2365 	}
2366 
2367 	/* Start LTSSM from Tegra side */
2368 	reset_control_deassert(pcie->pcie_xrst);
2369 
2370 	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2371 		if (tegra_pcie_port_check_link(port))
2372 			continue;
2373 
2374 		dev_info(dev, "link %u down, ignoring\n", port->index);
2375 
2376 		tegra_pcie_port_disable(port);
2377 		tegra_pcie_port_free(port);
2378 	}
2379 
2380 	if (pcie->soc->has_gen2)
2381 		tegra_pcie_change_link_speed(pcie);
2382 }
2383 
2384 static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
2385 {
2386 	struct tegra_pcie_port *port, *tmp;
2387 
2388 	reset_control_assert(pcie->pcie_xrst);
2389 
2390 	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2391 		tegra_pcie_port_disable(port);
2392 }
2393 
2394 static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = {
2395 	{ .pme.turnoff_bit = 0, .pme.ack_bit =  5 },
2396 	{ .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
2397 };
2398 
2399 static const struct tegra_pcie_soc tegra20_pcie = {
2400 	.num_ports = 2,
2401 	.ports = tegra20_pcie_ports,
2402 	.msi_base_shift = 0,
2403 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
2404 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
2405 	.pads_refclk_cfg0 = 0xfa5cfa5c,
2406 	.has_pex_clkreq_en = false,
2407 	.has_pex_bias_ctrl = false,
2408 	.has_intr_prsnt_sense = false,
2409 	.has_cml_clk = false,
2410 	.has_gen2 = false,
2411 	.force_pca_enable = false,
2412 	.program_uphy = true,
2413 	.update_clamp_threshold = false,
2414 	.program_deskew_time = false,
2415 	.update_fc_timer = false,
2416 	.has_cache_bars = true,
2417 	.ectl.enable = false,
2418 };
2419 
2420 static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
2421 	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
2422 	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
2423 	{ .pme.turnoff_bit = 16, .pme.ack_bit = 18 },
2424 };
2425 
2426 static const struct tegra_pcie_soc tegra30_pcie = {
2427 	.num_ports = 3,
2428 	.ports = tegra30_pcie_ports,
2429 	.msi_base_shift = 8,
2430 	.afi_pex2_ctrl = 0x128,
2431 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2432 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2433 	.pads_refclk_cfg0 = 0xfa5cfa5c,
2434 	.pads_refclk_cfg1 = 0xfa5cfa5c,
2435 	.has_pex_clkreq_en = true,
2436 	.has_pex_bias_ctrl = true,
2437 	.has_intr_prsnt_sense = true,
2438 	.has_cml_clk = true,
2439 	.has_gen2 = false,
2440 	.force_pca_enable = false,
2441 	.program_uphy = true,
2442 	.update_clamp_threshold = false,
2443 	.program_deskew_time = false,
2444 	.update_fc_timer = false,
2445 	.has_cache_bars = false,
2446 	.ectl.enable = false,
2447 };
2448 
2449 static const struct tegra_pcie_soc tegra124_pcie = {
2450 	.num_ports = 2,
2451 	.ports = tegra20_pcie_ports,
2452 	.msi_base_shift = 8,
2453 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2454 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2455 	.pads_refclk_cfg0 = 0x44ac44ac,
2456 	.has_pex_clkreq_en = true,
2457 	.has_pex_bias_ctrl = true,
2458 	.has_intr_prsnt_sense = true,
2459 	.has_cml_clk = true,
2460 	.has_gen2 = true,
2461 	.force_pca_enable = false,
2462 	.program_uphy = true,
2463 	.update_clamp_threshold = true,
2464 	.program_deskew_time = false,
2465 	.update_fc_timer = false,
2466 	.has_cache_bars = false,
2467 	.ectl.enable = false,
2468 };
2469 
2470 static const struct tegra_pcie_soc tegra210_pcie = {
2471 	.num_ports = 2,
2472 	.ports = tegra20_pcie_ports,
2473 	.msi_base_shift = 8,
2474 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2475 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2476 	.pads_refclk_cfg0 = 0x90b890b8,
	/* FC threshold is bits [25:18] */
2478 	.update_fc_threshold = 0x01800000,
2479 	.has_pex_clkreq_en = true,
2480 	.has_pex_bias_ctrl = true,
2481 	.has_intr_prsnt_sense = true,
2482 	.has_cml_clk = true,
2483 	.has_gen2 = true,
2484 	.force_pca_enable = true,
2485 	.program_uphy = true,
2486 	.update_clamp_threshold = true,
2487 	.program_deskew_time = true,
2488 	.update_fc_timer = true,
2489 	.has_cache_bars = false,
2490 	.ectl = {
2491 		.regs = {
2492 			.rp_ectl_2_r1 = 0x0000000f,
2493 			.rp_ectl_4_r1 = 0x00000067,
2494 			.rp_ectl_5_r1 = 0x55010000,
2495 			.rp_ectl_6_r1 = 0x00000001,
2496 			.rp_ectl_2_r2 = 0x0000008f,
2497 			.rp_ectl_4_r2 = 0x000000c7,
2498 			.rp_ectl_5_r2 = 0x55010000,
2499 			.rp_ectl_6_r2 = 0x00000001,
2500 		},
2501 		.enable = true,
2502 	},
2503 };
2504 
2505 static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
2506 	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
2507 	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
2508 	{ .pme.turnoff_bit = 12, .pme.ack_bit = 14 },
2509 };
2510 
2511 static const struct tegra_pcie_soc tegra186_pcie = {
2512 	.num_ports = 3,
2513 	.ports = tegra186_pcie_ports,
2514 	.msi_base_shift = 8,
2515 	.afi_pex2_ctrl = 0x19c,
2516 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2517 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2518 	.pads_refclk_cfg0 = 0x80b880b8,
2519 	.pads_refclk_cfg1 = 0x000480b8,
2520 	.has_pex_clkreq_en = true,
2521 	.has_pex_bias_ctrl = true,
2522 	.has_intr_prsnt_sense = true,
2523 	.has_cml_clk = false,
2524 	.has_gen2 = true,
2525 	.force_pca_enable = false,
2526 	.program_uphy = false,
2527 	.update_clamp_threshold = false,
2528 	.program_deskew_time = false,
2529 	.update_fc_timer = false,
2530 	.has_cache_bars = false,
2531 	.ectl.enable = false,
2532 };
2533 
2534 static const struct of_device_id tegra_pcie_of_match[] = {
2535 	{ .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
2536 	{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
2537 	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
2538 	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
2539 	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
2540 	{ },
2541 };
2542 
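/*
 * debugfs "ports" file: one line per remaining root port, reporting whether
 * the data link is up and whether the link is active.
 */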
2543 static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
2544 {
2545 	struct tegra_pcie *pcie = s->private;
2546 
2547 	if (list_empty(&pcie->ports))
2548 		return NULL;
2549 
	seq_puts(s, "Index  Status\n");
2551 
2552 	return seq_list_start(&pcie->ports, *pos);
2553 }
2554 
2555 static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
2556 {
2557 	struct tegra_pcie *pcie = s->private;
2558 
2559 	return seq_list_next(v, &pcie->ports, pos);
2560 }
2561 
2562 static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
2563 {
2564 }
2565 
2566 static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
2567 {
2568 	bool up = false, active = false;
2569 	struct tegra_pcie_port *port;
2570 	unsigned int value;
2571 
2572 	port = list_entry(v, struct tegra_pcie_port, list);
2573 
2574 	value = readl(port->base + RP_VEND_XP);
2575 
2576 	if (value & RP_VEND_XP_DL_UP)
2577 		up = true;
2578 
2579 	value = readl(port->base + RP_LINK_CONTROL_STATUS);
2580 
2581 	if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2582 		active = true;
2583 
2584 	seq_printf(s, "%2u     ", port->index);
2585 
	if (up)
		seq_puts(s, "up");

	if (active) {
		if (up)
			seq_puts(s, ", ");

		seq_puts(s, "active");
	}

	seq_puts(s, "\n");
2597 	return 0;
2598 }
2599 
2600 static const struct seq_operations tegra_pcie_ports_sops = {
2601 	.start = tegra_pcie_ports_seq_start,
2602 	.next = tegra_pcie_ports_seq_next,
2603 	.stop = tegra_pcie_ports_seq_stop,
2604 	.show = tegra_pcie_ports_seq_show,
2605 };
2606 
2607 DEFINE_SEQ_ATTRIBUTE(tegra_pcie_ports);
2608 
2609 static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
2610 {
2611 	debugfs_remove_recursive(pcie->debugfs);
2612 	pcie->debugfs = NULL;
2613 }
2614 
2615 static void tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2616 {
2617 	pcie->debugfs = debugfs_create_dir("pcie", NULL);
2618 
	debugfs_create_file("ports", 0444, pcie->debugfs, pcie,
			    &tegra_pcie_ports_fops);
2621 }
2622 
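/*
 * Probe: parse the device tree, claim resources, set up MSI support, power
 * up the controller via runtime PM and register the host bridge.
 */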
2623 static int tegra_pcie_probe(struct platform_device *pdev)
2624 {
2625 	struct device *dev = &pdev->dev;
2626 	struct pci_host_bridge *host;
2627 	struct tegra_pcie *pcie;
2628 	int err;
2629 
2630 	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
2631 	if (!host)
2632 		return -ENOMEM;
2633 
2634 	pcie = pci_host_bridge_priv(host);
2635 	host->sysdata = pcie;
2636 	platform_set_drvdata(pdev, pcie);
2637 
2638 	pcie->soc = of_device_get_match_data(dev);
2639 	INIT_LIST_HEAD(&pcie->ports);
2640 	pcie->dev = dev;
2641 
2642 	err = tegra_pcie_parse_dt(pcie);
2643 	if (err < 0)
2644 		return err;
2645 
2646 	err = tegra_pcie_get_resources(pcie);
2647 	if (err < 0) {
2648 		dev_err(dev, "failed to request resources: %d\n", err);
2649 		return err;
2650 	}
2651 
2652 	err = tegra_pcie_msi_setup(pcie);
2653 	if (err < 0) {
2654 		dev_err(dev, "failed to enable MSI support: %d\n", err);
2655 		goto put_resources;
2656 	}
2657 
2658 	pm_runtime_enable(pcie->dev);
2659 	err = pm_runtime_get_sync(pcie->dev);
2660 	if (err < 0) {
		dev_err(dev, "failed to enable PCIe controller: %d\n", err);
2662 		goto pm_runtime_put;
2663 	}
2664 
2665 	host->ops = &tegra_pcie_ops;
2666 	host->map_irq = tegra_pcie_map_irq;
2667 
2668 	err = pci_host_probe(host);
2669 	if (err < 0) {
2670 		dev_err(dev, "failed to register host: %d\n", err);
2671 		goto pm_runtime_put;
2672 	}
2673 
2674 	if (IS_ENABLED(CONFIG_DEBUG_FS))
2675 		tegra_pcie_debugfs_init(pcie);
2676 
2677 	return 0;
2678 
2679 pm_runtime_put:
2680 	pm_runtime_put_sync(pcie->dev);
2681 	pm_runtime_disable(pcie->dev);
2682 	tegra_pcie_msi_teardown(pcie);
2683 put_resources:
2684 	tegra_pcie_put_resources(pcie);
2685 	return err;
2686 }
2687 
2688 static int tegra_pcie_remove(struct platform_device *pdev)
2689 {
2690 	struct tegra_pcie *pcie = platform_get_drvdata(pdev);
2691 	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
2692 	struct tegra_pcie_port *port, *tmp;
2693 
2694 	if (IS_ENABLED(CONFIG_DEBUG_FS))
2695 		tegra_pcie_debugfs_exit(pcie);
2696 
2697 	pci_stop_root_bus(host->bus);
2698 	pci_remove_root_bus(host->bus);
2699 	pm_runtime_put_sync(pcie->dev);
2700 	pm_runtime_disable(pcie->dev);
2701 
2702 	if (IS_ENABLED(CONFIG_PCI_MSI))
2703 		tegra_pcie_msi_teardown(pcie);
2704 
2705 	tegra_pcie_put_resources(pcie);
2706 
2707 	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2708 		tegra_pcie_port_free(port);
2709 
2710 	return 0;
2711 }
2712 
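/*
 * Suspend: send PME_Turn_Off to all ports, disable the ports and AFI
 * interrupts, power down the PHYs and the controller and select the idle
 * pinctrl state.
 */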
2713 static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)
2714 {
2715 	struct tegra_pcie *pcie = dev_get_drvdata(dev);
2716 	struct tegra_pcie_port *port;
2717 	int err;
2718 
2719 	list_for_each_entry(port, &pcie->ports, list)
2720 		tegra_pcie_pme_turnoff(port);
2721 
2722 	tegra_pcie_disable_ports(pcie);
2723 
2724 	/*
2725 	 * AFI_INTR is unmasked in tegra_pcie_enable_controller(), mask it to
2726 	 * avoid unwanted interrupts raised by AFI after pex_rst is asserted.
2727 	 */
2728 	tegra_pcie_disable_interrupts(pcie);
2729 
2730 	if (pcie->soc->program_uphy) {
2731 		err = tegra_pcie_phy_power_off(pcie);
2732 		if (err < 0)
2733 			dev_err(dev, "failed to power off PHY(s): %d\n", err);
2734 	}
2735 
2736 	reset_control_assert(pcie->pex_rst);
2737 	clk_disable_unprepare(pcie->pex_clk);
2738 
2739 	if (IS_ENABLED(CONFIG_PCI_MSI))
2740 		tegra_pcie_disable_msi(pcie);
2741 
2742 	pinctrl_pm_select_idle_state(dev);
2743 	tegra_pcie_power_off(pcie);
2744 
2745 	return 0;
2746 }
2747 
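/*
 * Resume: the inverse of suspend, reprogramming the controller, the address
 * translations and the MSI state before re-enabling the ports.
 */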
2748 static int __maybe_unused tegra_pcie_pm_resume(struct device *dev)
2749 {
2750 	struct tegra_pcie *pcie = dev_get_drvdata(dev);
2751 	int err;
2752 
2753 	err = tegra_pcie_power_on(pcie);
2754 	if (err) {
		dev_err(dev, "failed to power on PCIe controller: %d\n", err);
2756 		return err;
2757 	}
2758 
2759 	err = pinctrl_pm_select_default_state(dev);
2760 	if (err < 0) {
2761 		dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err);
2762 		goto poweroff;
2763 	}
2764 
2765 	tegra_pcie_enable_controller(pcie);
2766 	tegra_pcie_setup_translations(pcie);
2767 
2768 	if (IS_ENABLED(CONFIG_PCI_MSI))
2769 		tegra_pcie_enable_msi(pcie);
2770 
2771 	err = clk_prepare_enable(pcie->pex_clk);
2772 	if (err) {
2773 		dev_err(dev, "failed to enable PEX clock: %d\n", err);
2774 		goto pex_dpd_enable;
2775 	}
2776 
2777 	reset_control_deassert(pcie->pex_rst);
2778 
2779 	if (pcie->soc->program_uphy) {
2780 		err = tegra_pcie_phy_power_on(pcie);
2781 		if (err < 0) {
2782 			dev_err(dev, "failed to power on PHY(s): %d\n", err);
2783 			goto disable_pex_clk;
2784 		}
2785 	}
2786 
2787 	tegra_pcie_apply_pad_settings(pcie);
2788 	tegra_pcie_enable_ports(pcie);
2789 
2790 	return 0;
2791 
2792 disable_pex_clk:
2793 	reset_control_assert(pcie->pex_rst);
2794 	clk_disable_unprepare(pcie->pex_clk);
2795 pex_dpd_enable:
2796 	pinctrl_pm_select_idle_state(dev);
2797 poweroff:
2798 	tegra_pcie_power_off(pcie);
2799 
2800 	return err;
2801 }
2802 
2803 static const struct dev_pm_ops tegra_pcie_pm_ops = {
2804 	SET_RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
2805 	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend,
2806 				      tegra_pcie_pm_resume)
2807 };
2808 
2809 static struct platform_driver tegra_pcie_driver = {
2810 	.driver = {
2811 		.name = "tegra-pcie",
2812 		.of_match_table = tegra_pcie_of_match,
2813 		.suppress_bind_attrs = true,
2814 		.pm = &tegra_pcie_pm_ops,
2815 	},
2816 	.probe = tegra_pcie_probe,
2817 	.remove = tegra_pcie_remove,
2818 };
2819 module_platform_driver(tegra_pcie_driver);
2820 MODULE_LICENSE("GPL");
2821