// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Tegra SoCs
 *
 * Copyright (c) 2010, CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Based on NVIDIA PCIe driver
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * Bits taken from arch/arm/mach-dove/pcie.c
 *
 * Author: Thierry Reding <treding@nvidia.com>
 */

#include <linux/clk.h>
#include <linux/cleanup.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>

#include <soc/tegra/cpuidle.h>
#include <soc/tegra/pmc.h>

#include "../pci.h"

#define INT_PCI_MSI_NR (8 * 32)

/* register definitions */

#define AFI_AXI_BAR0_SZ	0x00
#define AFI_AXI_BAR1_SZ	0x04
#define AFI_AXI_BAR2_SZ	0x08
#define AFI_AXI_BAR3_SZ	0x0c
#define AFI_AXI_BAR4_SZ	0x10
#define AFI_AXI_BAR5_SZ	0x14

#define AFI_AXI_BAR0_START	0x18
#define AFI_AXI_BAR1_START	0x1c
#define AFI_AXI_BAR2_START	0x20
#define AFI_AXI_BAR3_START	0x24
#define AFI_AXI_BAR4_START	0x28
#define AFI_AXI_BAR5_START	0x2c

#define AFI_FPCI_BAR0	0x30
#define AFI_FPCI_BAR1	0x34
#define AFI_FPCI_BAR2	0x38
#define AFI_FPCI_BAR3	0x3c
#define AFI_FPCI_BAR4	0x40
#define AFI_FPCI_BAR5	0x44

#define AFI_CACHE_BAR0_SZ	0x48
#define AFI_CACHE_BAR0_ST	0x4c
#define AFI_CACHE_BAR1_SZ	0x50
#define AFI_CACHE_BAR1_ST	0x54

#define AFI_MSI_BAR_SZ		0x60
#define AFI_MSI_FPCI_BAR_ST	0x64
#define AFI_MSI_AXI_BAR_ST	0x68

#define AFI_MSI_VEC(x)		(0x6c + ((x) * 4))
#define AFI_MSI_EN_VEC(x)	(0x8c + ((x) * 4))
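/*
 * Illustrative note: the eight 32-bit AFI_MSI_VEC/AFI_MSI_EN_VEC registers
 * together cover the INT_PCI_MSI_NR (8 * 32 = 256) MSI vectors; following
 * the index/bit split used by the handlers below, e.g. hwirq 70 lives in
 * bit 6 (70 % 32) of AFI_MSI_VEC(2) (70 / 32).
 */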

#define AFI_CONFIGURATION		0xac
#define  AFI_CONFIGURATION_EN_FPCI		(1 << 0)
#define  AFI_CONFIGURATION_CLKEN_OVERRIDE	(1 << 31)

#define AFI_FPCI_ERROR_MASKS	0xb0

#define AFI_INTR_MASK		0xb4
#define  AFI_INTR_MASK_INT_MASK	(1 << 0)
#define  AFI_INTR_MASK_MSI_MASK	(1 << 8)

#define AFI_INTR_CODE			0xb8
#define  AFI_INTR_CODE_MASK		0xf
#define  AFI_INTR_INI_SLAVE_ERROR	1
#define  AFI_INTR_INI_DECODE_ERROR	2
#define  AFI_INTR_TARGET_ABORT		3
#define  AFI_INTR_MASTER_ABORT		4
#define  AFI_INTR_INVALID_WRITE		5
#define  AFI_INTR_LEGACY		6
#define  AFI_INTR_FPCI_DECODE_ERROR	7
#define  AFI_INTR_AXI_DECODE_ERROR	8
#define  AFI_INTR_FPCI_TIMEOUT		9
#define  AFI_INTR_PE_PRSNT_SENSE	10
#define  AFI_INTR_PE_CLKREQ_SENSE	11
#define  AFI_INTR_CLKCLAMP_SENSE	12
#define  AFI_INTR_RDY4PD_SENSE		13
#define  AFI_INTR_P2P_ERROR		14

#define AFI_INTR_SIGNATURE	0xbc
#define AFI_UPPER_FPCI_ADDRESS	0xc0
#define AFI_SM_INTR_ENABLE	0xc4
#define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
#define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
#define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
#define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
#define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
#define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
#define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
#define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)

#define AFI_AFI_INTR_ENABLE		0xc8
#define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
#define  AFI_INTR_EN_INI_DECERR		(1 << 1)
#define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
#define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
#define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
#define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
#define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
#define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
#define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)

#define AFI_PCIE_PME		0xf0

#define AFI_PCIE_CONFIG					0x0f8
#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111	(0x2 << 20)
#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x)		(1 << ((x) + 29))
#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL		(0x7 << 29)

#define AFI_FUSE			0x104
#define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)

#define AFI_PEX0_CTRL			0x110
#define AFI_PEX1_CTRL			0x118
#define  AFI_PEX_CTRL_RST		(1 << 0)
#define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
#define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
#define  AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)

#define AFI_PLLE_CONTROL		0x160
#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)

#define AFI_PEXBIAS_CTRL_0		0x168

#define RP_ECTL_2_R1	0x00000e84
#define  RP_ECTL_2_R1_RX_CTLE_1C_MASK		0xffff

#define RP_ECTL_4_R1	0x00000e8c
#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT	16

#define RP_ECTL_5_R1	0x00000e90
#define  RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK	0xffffffff

#define RP_ECTL_6_R1	0x00000e94
#define  RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK	0xffffffff

#define RP_ECTL_2_R2	0x00000ea4
#define  RP_ECTL_2_R2_RX_CTLE_1C_MASK	0xffff

#define RP_ECTL_4_R2	0x00000eac
#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT	16

#define RP_ECTL_5_R2	0x00000eb0
#define  RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK	0xffffffff

#define RP_ECTL_6_R2	0x00000eb4
#define  RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK	0xffffffff

#define RP_VEND_XP	0x00000f00
#define  RP_VEND_XP_DL_UP			(1 << 30)
#define  RP_VEND_XP_OPPORTUNISTIC_ACK		(1 << 27)
#define  RP_VEND_XP_OPPORTUNISTIC_UPDATEFC	(1 << 28)
#define  RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK	(0xff << 18)

#define RP_VEND_CTL0	0x00000f44
#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK	(0xf << 12)
#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH	(0x9 << 12)

#define RP_VEND_CTL1	0x00000f48
#define  RP_VEND_CTL1_ERPT	(1 << 13)

#define RP_VEND_XP_BIST	0x00000f4c
#define  RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE	(1 << 28)

#define RP_VEND_CTL2 0x00000fa8
#define  RP_VEND_CTL2_PCA_ENABLE (1 << 7)

#define RP_PRIV_MISC	0x00000fe0
#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT		(0xe << 0)
#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT		(0xf << 0)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 16)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD		(0xf << 16)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE		(1 << 23)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 24)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD		(0xf << 24)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE		(1 << 31)

#define RP_LINK_CONTROL_STATUS			0x00000090
#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000

#define RP_LINK_CONTROL_STATUS_2		0x000000b0

#define PADS_CTL_SEL		0x0000009c

#define PADS_CTL		0x000000a0
#define  PADS_CTL_IDDQ_1L	(1 << 0)
#define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
#define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)

#define PADS_PLL_CTL_TEGRA20			0x000000b8
#define PADS_PLL_CTL_TEGRA30			0x000000b4
#define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
#define  PADS_PLL_CTL_LOCKDET			(1 << 8)
#define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
#define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
#define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)

#define PADS_REFCLK_CFG0			0x000000c8
#define PADS_REFCLK_CFG1			0x000000cc
#define PADS_REFCLK_BIAS			0x000000d0

/*
 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 * entries, one entry per PCIe port. These field definitions and desired
 * values aren't in the TRM, but do come from NVIDIA.
 */
#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */
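/*
 * A minimal sketch, assuming only the shifts above: each 16-bit per-port
 * entry is composed as
 *
 *   entry = (drvi << 12) | (predi << 8) | (e_term << 7) | (term << 2);
 *
 * with two ports packed per 32-bit register, i.e. pads_refclk_cfg0 would
 * hold the entries for ports 0 and 1 as (entry1 << 16) | entry0 and
 * pads_refclk_cfg1 the entry for port 2 on SoCs with more than two ports.
 */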

#define PME_ACK_TIMEOUT 10000
#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */

struct tegra_msi {
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
	struct mutex map_lock;
	raw_spinlock_t mask_lock;
	void *virt;
	dma_addr_t phys;
	int irq;
};

/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_port_soc {
	struct {
		u8 turnoff_bit;
		u8 ack_bit;
	} pme;
};

struct tegra_pcie_soc {
	unsigned int num_ports;
	const struct tegra_pcie_port_soc *ports;
	unsigned int msi_base_shift;
	unsigned long afi_pex2_ctrl;
	u32 pads_pll_ctl;
	u32 tx_ref_sel;
	u32 pads_refclk_cfg0;
	u32 pads_refclk_cfg1;
	u32 update_fc_threshold;
	bool has_pex_clkreq_en;
	bool has_pex_bias_ctrl;
	bool has_intr_prsnt_sense;
	bool has_cml_clk;
	bool has_gen2;
	bool force_pca_enable;
	bool program_uphy;
	bool update_clamp_threshold;
	bool program_deskew_time;
	bool update_fc_timer;
	bool has_cache_bars;
	struct {
		struct {
			u32 rp_ectl_2_r1;
			u32 rp_ectl_4_r1;
			u32 rp_ectl_5_r1;
			u32 rp_ectl_6_r1;
			u32 rp_ectl_2_r2;
			u32 rp_ectl_4_r2;
			u32 rp_ectl_5_r2;
			u32 rp_ectl_6_r2;
		} regs;
		bool enable;
	} ectl;
};

struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;
	void __iomem *afi;
	void __iomem *cfg;
	int irq;

	struct resource cs;

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	bool legacy_phy;
	struct phy *phy;

	struct tegra_msi msi;

	struct list_head ports;
	u32 xbar_config;

	struct regulator_bulk_data *supplies;
	unsigned int num_supplies;

	const struct tegra_pcie_soc *soc;
	struct dentry *debugfs;
};

static inline struct tegra_pcie *msi_to_pcie(struct tegra_msi *msi)
{
	return container_of(msi, struct tegra_pcie, msi);
}

struct tegra_pcie_port {
	struct tegra_pcie *pcie;
	struct device_node *np;
	struct list_head list;
	struct resource regs;
	void __iomem *base;
	unsigned int index;
	unsigned int lanes;

	struct phy **phys;

	struct gpio_desc *reset_gpio;
};

static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}

static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}

static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}

static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}

/*
 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
 * register accesses are mapped:
 *
 *    [27:24] extended register number
 *    [23:16] bus number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * Mapping the whole extended configuration space would require 256 MiB of
 * virtual address space, only a small part of which will actually be used.
 *
 * To work around this, a 4 KiB region is used to generate the required
 * configuration transaction with relevant B:D:F and register offset values.
 * This is achieved by dynamically programming base address and size of
 * AFI_AXI_BAR used for end point config space mapping to make sure that the
 * address (access to which generates correct config transaction) falls in
 * this 4 KiB region.
 */
static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
					   unsigned int where)
{
	return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
	       (PCI_FUNC(devfn) << 8) | (where & 0xff);
}
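
/*
 * Worked example (illustrative): bus 1, device 0, function 0, register
 * 0x104 yields
 *
 *   ((0x104 & 0xf00) << 16) | (1 << 16) | (0 << 11) | (0 << 8) | 0x04
 *     = 0x01000000 | 0x00010000 | 0x04 = 0x01010004
 *
 * i.e. extended register 1 in bits [27:24], bus 1 in bits [23:16] and
 * register 0x04 in bits [7:0], matching the layout described above.
 */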

static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
					unsigned int devfn,
					int where)
{
	struct tegra_pcie *pcie = bus->sysdata;
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		unsigned int offset;
		u32 base;

		offset = tegra_pcie_conf_offset(bus->number, devfn, where);

		/* move 4 KiB window to offset within the FPCI region */
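		/*
		 * Note (derived from the FPCI map described ahead of
		 * tegra_pcie_setup_translations()): AFI_FPCI_BAR0 takes
		 * FPCI address bits [39:8], so the 0xfe100000 base below
		 * corresponds to FPCI address 0xfe10000000, the type 1
		 * extended configuration space.
		 */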
		base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
		afi_writel(pcie, base, AFI_FPCI_BAR0);

		/* move to correct offset within the 4 KiB page */
		addr = pcie->cfg + (offset & (SZ_4K - 1));
	}

	return addr;
}

static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *value)
{
	if (bus->number == 0)
		return pci_generic_config_read32(bus, devfn, where, size,
						 value);

	return pci_generic_config_read(bus, devfn, where, size, value);
}

static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 value)
{
	if (bus->number == 0)
		return pci_generic_config_write32(bus, devfn, where, size,
						  value);

	return pci_generic_config_write(bus, devfn, where, size, value);
}

static struct pci_ops tegra_pcie_ops = {
	.map_bus = tegra_pcie_map_bus,
	.read = tegra_pcie_config_read,
	.write = tegra_pcie_config_write,
};

static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long ret = 0;

	switch (port->index) {
	case 0:
		ret = AFI_PEX0_CTRL;
		break;

	case 1:
		ret = AFI_PEX1_CTRL;
		break;

	case 2:
		ret = soc->afi_pex2_ctrl;
		break;
	}

	return ret;
}

static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	if (port->reset_gpio) {
		gpiod_set_value(port->reset_gpio, 1);
	} else {
		value = afi_readl(port->pcie, ctrl);
		value &= ~AFI_PEX_CTRL_RST;
		afi_writel(port->pcie, value, ctrl);
	}

	usleep_range(1000, 2000);

	if (port->reset_gpio) {
		gpiod_set_value(port->reset_gpio, 0);
	} else {
		value = afi_readl(port->pcie, ctrl);
		value |= AFI_PEX_CTRL_RST;
		afi_writel(port->pcie, value, ctrl);
	}
}

static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	/* Enable AER capability */
	value = readl(port->base + RP_VEND_CTL1);
	value |= RP_VEND_CTL1_ERPT;
	writel(value, port->base + RP_VEND_CTL1);

	/* Optimal settings to enhance bandwidth */
	value = readl(port->base + RP_VEND_XP);
	value |= RP_VEND_XP_OPPORTUNISTIC_ACK;
	value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC;
	writel(value, port->base + RP_VEND_XP);

	/*
	 * LTSSM will wait for DLLP to finish before entering L1 or L2,
	 * to avoid truncation of PM messages, which results in receiver
	 * errors.
	 */
	value = readl(port->base + RP_VEND_XP_BIST);
	value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE;
	writel(value, port->base + RP_VEND_XP_BIST);

	value = readl(port->base + RP_PRIV_MISC);
	value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE;
	value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE;

	if (soc->update_clamp_threshold) {
		value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK |
				RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK);
		value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD |
			RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD;
	}

	writel(value, port->base + RP_PRIV_MISC);
}

static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	value = readl(port->base + RP_ECTL_2_R1);
	value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_2_r1;
	writel(value, port->base + RP_ECTL_2_R1);

	value = readl(port->base + RP_ECTL_4_R1);
	value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_4_r1 <<
				RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT;
	writel(value, port->base + RP_ECTL_4_R1);

	value = readl(port->base + RP_ECTL_5_R1);
	value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_5_r1;
	writel(value, port->base + RP_ECTL_5_R1);

	value = readl(port->base + RP_ECTL_6_R1);
	value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_6_r1;
	writel(value, port->base + RP_ECTL_6_R1);

	value = readl(port->base + RP_ECTL_2_R2);
	value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_2_r2;
	writel(value, port->base + RP_ECTL_2_R2);

	value = readl(port->base + RP_ECTL_4_R2);
	value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_4_r2 <<
				RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT;
	writel(value, port->base + RP_ECTL_4_R2);

	value = readl(port->base + RP_ECTL_5_R2);
	value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_5_r2;
	writel(value, port->base + RP_ECTL_5_R2);

	value = readl(port->base + RP_ECTL_6_R2);
	value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_6_r2;
	writel(value, port->base + RP_ECTL_6_R2);
}

static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	/*
	 * Sometimes link speed change from Gen2 to Gen1 fails due to
	 * instability in deskew logic on lane-0. Increase the deskew
	 * retry time to resolve this issue.
	 */
	if (soc->program_deskew_time) {
		value = readl(port->base + RP_VEND_CTL0);
		value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK;
		value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH;
		writel(value, port->base + RP_VEND_CTL0);
	}

	if (soc->update_fc_timer) {
		value = readl(port->base + RP_VEND_XP);
		value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
		value |= soc->update_fc_threshold;
		writel(value, port->base + RP_VEND_XP);
	}

	/*
	 * The PCIe link doesn't come up with a few legacy PCIe endpoints if
	 * the root port advertises both Gen-1 and Gen-2 speeds on Tegra.
	 * Hence, the strategy followed here is to initially advertise only
	 * Gen-1 and, after the link is up, retrain the link to Gen-2 speed.
	 */
	value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
	value &= ~PCI_EXP_LNKSTA_CLS;
	value |= PCI_EXP_LNKSTA_CLS_2_5GB;
	writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
}

static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	value |= AFI_PEX_CTRL_OVERRIDE_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);

	if (soc->force_pca_enable) {
		value = readl(port->base + RP_VEND_CTL2);
		value |= RP_VEND_CTL2_PCA_ENABLE;
		writel(value, port->base + RP_VEND_CTL2);
	}

	tegra_pcie_enable_rp_features(port);

	if (soc->ectl.enable)
		tegra_pcie_program_ectl_settings(port);

	tegra_pcie_apply_sw_fixup(port);
}

static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);

	if (soc->has_pex_clkreq_en)
		value &= ~AFI_PEX_CTRL_CLKREQ_EN;

	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);

	/* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */
	value = afi_readl(port->pcie, AFI_PCIE_CONFIG);
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
	afi_writel(port->pcie, value, AFI_PCIE_CONFIG);
}

static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	devm_release_mem_region(dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(dev, port);
}

/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);

/* Tegra20 and Tegra30 PCIE requires relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);

static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
	struct tegra_pcie *pcie = pdev->bus->sysdata;
	int irq;

	tegra_cpuidle_pcie_irqs_in_use();

	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
	if (!irq)
		irq = pcie->irq;

	return irq;
}

static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
	static const char * const err_msg[] = {
		"Unknown",
		"AXI slave error",
		"AXI decode error",
		"Target abort",
		"Master abort",
		"Invalid write",
		"Legacy interrupt",
		"Response decoding error",
		"AXI response decoding error",
		"Transaction timeout",
		"Slot present pin change",
		"Slot clock request change",
		"TMS clock ramp change",
		"TMS ready for power down",
		"Peer2Peer error",
	};
	struct tegra_pcie *pcie = arg;
	struct device *dev = pcie->dev;
	u32 code, signature;

	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
	afi_writel(pcie, 0, AFI_INTR_CODE);

	if (code == AFI_INTR_LEGACY)
		return IRQ_NONE;

	if (code >= ARRAY_SIZE(err_msg))
		code = 0;

	/*
	 * do not pollute kernel log with master abort reports since they
	 * happen a lot during enumeration
	 */
	if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE)
		dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
	else
		dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);

	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
	    code == AFI_INTR_FPCI_DECODE_ERROR) {
		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);

		if (code == AFI_INTR_MASTER_ABORT)
			dev_dbg(dev, "  FPCI address: %10llx\n", address);
		else
			dev_err(dev, "  FPCI address: %10llx\n", address);
	}

	return IRQ_HANDLED;
}

/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 size;
	struct resource_entry *entry;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);

	/* Bar 0: type 1 extended configuration space */
	size = resource_size(&pcie->cs);
	afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);

	resource_list_for_each_entry(entry, &bridge->windows) {
		u32 fpci_bar, axi_address;
		struct resource *res = entry->res;

		size = resource_size(res);

		switch (resource_type(res)) {
		case IORESOURCE_IO:
			/* Bar 1: downstream IO bar */
			fpci_bar = 0xfdfc0000;
			axi_address = pci_pio_to_address(res->start);
			afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
			afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
			afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
			break;
		case IORESOURCE_MEM:
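			/*
			 * The expression below encodes address bits [39:12]
			 * in FPCI BAR bits [31:4], with bit 0 presumably
			 * enabling the mapping; e.g. an MMIO window at
			 * 0x80000000 yields an fpci_bar value of 0x00800001
			 * (illustrative).
			 */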
			fpci_bar = (((res->start >> 12) & 0x0fffffff) << 4) | 0x1;
			axi_address = res->start;

			if (res->flags & IORESOURCE_PREFETCH) {
				/* Bar 2: prefetchable memory BAR */
				afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
				afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

			} else {
				/* Bar 3: non prefetchable memory BAR */
				afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
				afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
			}
			break;
		}
	}

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	if (pcie->soc->has_cache_bars) {
		/* map all upstream transactions as uncached */
		afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
		afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
		afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
		afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
	}

	/* MSI translations are set up only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}

static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	timeout = jiffies + msecs_to_jiffies(timeout);

	while (time_before(jiffies, timeout)) {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		if (value & PADS_PLL_CTL_LOCKDET)
			return 0;
	}

	return -ETIMEDOUT;
}

static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;
	int err;

	/* initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs: select PLLE output as refclock and
	 * set TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	/* take PLL out of reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* wait for the PLL to lock */
	err = tegra_pcie_pll_wait(pcie, 500);
	if (err < 0) {
		dev_err(dev, "PLL failed to lock: %d\n", err);
		return err;
	}

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	return 0;
}

static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	/* disable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
	pads_writel(pcie, value, PADS_CTL);

	/* override IDDQ */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	return 0;
}

static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_on(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_off(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power off PHY#%u: %d\n", i,
				err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_on(pcie->phy);
		else
			err = tegra_pcie_phy_enable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power on PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_on(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power on PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_off(pcie->phy);
		else
			err = tegra_pcie_phy_disable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power off PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_off(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power off PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}

static void tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	unsigned long value;

	/* enable PLL power down */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL;

	list_for_each_entry(port, &pcie->ports, list) {
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
		value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
	}

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	/* Disable AFI dynamic clock gating and enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	value |= AFI_CONFIGURATION_CLKEN_OVERRIDE;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
}

static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->afi_rst);

	clk_disable_unprepare(pcie->pll_e);
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
	clk_disable_unprepare(pcie->afi_clk);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(dev, "failed to disable regulators: %d\n", err);
}

static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_err(dev, "failed to enable regulators: %d\n", err);

	if (!dev->pm_domain) {
		err = tegra_powergate_power_on(TEGRA_POWERGATE_PCIE);
		if (err) {
			dev_err(dev, "failed to power ungate: %d\n", err);
			goto regulator_disable;
		}
		err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE);
		if (err) {
			dev_err(dev, "failed to remove clamp: %d\n", err);
			goto powergate;
		}
	}

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(dev, "failed to enable AFI clock: %d\n", err);
		goto powergate;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(dev, "failed to enable CML clock: %d\n", err);
			goto disable_afi_clk;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
		goto disable_cml_clk;
	}

	reset_control_deassert(pcie->afi_rst);

	return 0;

disable_cml_clk:
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
disable_afi_clk:
	clk_disable_unprepare(pcie->afi_clk);
powergate:
	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
regulator_disable:
	regulator_bulk_disable(pcie->num_supplies, pcie->supplies);

	return err;
}

static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;

	/* Configure the reference clock driver */
	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);

	if (soc->num_ports > 2)
		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
}

static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;

	pcie->pex_clk = devm_clk_get(dev, "pex");
	if (IS_ERR(pcie->pex_clk))
		return PTR_ERR(pcie->pex_clk);

	pcie->afi_clk = devm_clk_get(dev, "afi");
	if (IS_ERR(pcie->afi_clk))
		return PTR_ERR(pcie->afi_clk);

	pcie->pll_e = devm_clk_get(dev, "pll_e");
	if (IS_ERR(pcie->pll_e))
		return PTR_ERR(pcie->pll_e);

	if (soc->has_cml_clk) {
		pcie->cml_clk = devm_clk_get(dev, "cml");
		if (IS_ERR(pcie->cml_clk))
			return PTR_ERR(pcie->cml_clk);
	}

	return 0;
}

static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;

	pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
	if (IS_ERR(pcie->pex_rst))
		return PTR_ERR(pcie->pex_rst);

	pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
	if (IS_ERR(pcie->afi_rst))
		return PTR_ERR(pcie->afi_rst);

	pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
	if (IS_ERR(pcie->pcie_xrst))
		return PTR_ERR(pcie->pcie_xrst);

	return 0;
}

static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	pcie->phy = devm_phy_optional_get(dev, "pcie");
	if (IS_ERR(pcie->phy)) {
		err = PTR_ERR(pcie->phy);
		dev_err(dev, "failed to get PHY: %d\n", err);
		return err;
	}

	err = phy_init(pcie->phy);
	if (err < 0) {
		dev_err(dev, "failed to initialize PHY: %d\n", err);
		return err;
	}

	pcie->legacy_phy = true;

	return 0;
}

static struct phy *devm_of_phy_optional_get_index(struct device *dev,
						  struct device_node *np,
						  const char *consumer,
						  unsigned int index)
{
	struct phy *phy;
	char *name;

	name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
	if (!name)
		return ERR_PTR(-ENOMEM);

	phy = devm_of_phy_optional_get(dev, np, name);
	kfree(name);

	return phy;
}

static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	struct phy *phy;
	unsigned int i;
	int err;

	port->phys = devm_kcalloc(dev, port->lanes, sizeof(phy), GFP_KERNEL);
	if (!port->phys)
		return -ENOMEM;

	for (i = 0; i < port->lanes; i++) {
		phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
		if (IS_ERR(phy)) {
			dev_err(dev, "failed to get PHY#%u: %ld\n", i,
				PTR_ERR(phy));
			return PTR_ERR(phy);
		}

		err = phy_init(phy);
		if (err < 0) {
			dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
				err);
			return err;
		}

		port->phys[i] = phy;
	}

	return 0;
}

static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct device_node *np = pcie->dev->of_node;
	struct tegra_pcie_port *port;
	int err;

	if (!soc->has_gen2 || of_property_present(np, "phys"))
		return tegra_pcie_phys_get_legacy(pcie);

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_get_phys(port);
		if (err < 0)
			return err;
	}

	return 0;
}

static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
{
	struct tegra_pcie_port *port;
	struct device *dev = pcie->dev;
	int err, i;

	if (pcie->legacy_phy) {
		err = phy_exit(pcie->phy);
		if (err < 0)
			dev_err(dev, "failed to teardown PHY: %d\n", err);
		return;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		for (i = 0; i < port->lanes; i++) {
			err = phy_exit(port->phys[i]);
			if (err < 0)
				dev_err(dev, "failed to teardown PHY#%u: %d\n",
					i, err);
		}
	}
}

static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(dev, "failed to get resets: %d\n", err);
		return err;
	}

	if (soc->program_uphy) {
		err = tegra_pcie_phys_get(pcie);
		if (err < 0) {
			dev_err(dev, "failed to get PHYs: %d\n", err);
			return err;
		}
	}

	pcie->pads = devm_platform_ioremap_resource_byname(pdev, "pads");
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto phys_put;
	}

	pcie->afi = devm_platform_ioremap_resource_byname(pdev, "afi");
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto phys_put;
	}

	/* request configuration space, but remap later, on demand */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto phys_put;
	}

	pcie->cs = *res;

	/* constrain configuration space to 4 KiB */
	resource_set_size(&pcie->cs, SZ_4K);

	pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
	if (IS_ERR(pcie->cfg)) {
		err = PTR_ERR(pcie->cfg);
		goto phys_put;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0)
		goto phys_put;

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(dev, "failed to register IRQ: %d\n", err);
		goto phys_put;
	}

	return 0;

phys_put:
	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);

	return err;
}

static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;

	if (pcie->irq > 0)
		free_irq(pcie->irq, pcie);

	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);

	return 0;
}

static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;
	u32 val;
	u8 ack_bit;

	val = afi_readl(pcie, AFI_PCIE_PME);
	val |= (0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);

	ack_bit = soc->ports[port->index].pme.ack_bit;
	err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val,
				 val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT);
	if (err)
		dev_err(pcie->dev, "PME Ack is not received on port: %d\n",
			port->index);

	usleep_range(10000, 11000);

	val = afi_readl(pcie, AFI_PCIE_PME);
	val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);
}

static void tegra_pcie_msi_irq(struct irq_desc *desc)
{
	struct tegra_pcie *pcie = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct tegra_msi *msi = &pcie->msi;
	struct device *dev = pcie->dev;
	unsigned int i;

	chained_irq_enter(chip, desc);

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC(i));

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			unsigned int index = i * 32 + offset;
			int ret;

			ret = generic_handle_domain_irq(msi->domain, index);
			if (ret) {
				/*
				 * That's weird: who triggered this?
				 * Just clear it.
				 */
				dev_info(dev, "unexpected MSI\n");
				afi_writel(pcie, BIT(index % 32), AFI_MSI_VEC(index));
			}

			/* see if there's any more pending in this vector */
			reg = afi_readl(pcie, AFI_MSI_VEC(i));
		}
	}

	chained_irq_exit(chip, desc);
}

static void tegra_msi_irq_ack(struct irq_data *d)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	unsigned int index = d->hwirq / 32;

	/* clear the interrupt */
	afi_writel(pcie, BIT(d->hwirq % 32), AFI_MSI_VEC(index));
}

static void tegra_msi_irq_mask(struct irq_data *d)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	unsigned int index = d->hwirq / 32;
	u32 value;

	scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
		value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
		value &= ~BIT(d->hwirq % 32);
		afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
	}
}

static void tegra_msi_irq_unmask(struct irq_data *d)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	unsigned int index = d->hwirq / 32;
	u32 value;

	scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
		value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
		value |= BIT(d->hwirq % 32);
		afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
	}
}

static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(data);

	msg->address_lo = lower_32_bits(msi->phys);
	msg->address_hi = upper_32_bits(msi->phys);
	msg->data = data->hwirq;
}
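
/*
 * A sketch of the mechanism implied by the handlers above (not from the
 * TRM): msi->phys is the 4 KiB DMA page that tegra_pcie_enable_msi()
 * programs into AFI_MSI_AXI_BAR_ST, and the message data is simply the
 * hwirq, which the AFI latches as bit (hwirq % 32) of
 * AFI_MSI_VEC(hwirq / 32) when an endpoint writes to that page.
 */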

static struct irq_chip tegra_msi_bottom_chip = {
	.name			= "Tegra MSI",
	.irq_ack		= tegra_msi_irq_ack,
	.irq_mask		= tegra_msi_irq_mask,
	.irq_unmask		= tegra_msi_irq_unmask,
	.irq_compose_msi_msg	= tegra_compose_msi_msg,
};

static int tegra_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *args)
{
	struct tegra_msi *msi = domain->host_data;
	unsigned int i;
	int hwirq;

	mutex_lock(&msi->map_lock);

	hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));

	mutex_unlock(&msi->map_lock);

	if (hwirq < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &tegra_msi_bottom_chip, domain->host_data,
				    handle_edge_irq, NULL, NULL);

	tegra_cpuidle_pcie_irqs_in_use();

	return 0;
}

static void tegra_msi_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct tegra_msi *msi = domain->host_data;

	mutex_lock(&msi->map_lock);

	bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));

	mutex_unlock(&msi->map_lock);
}

static const struct irq_domain_ops tegra_msi_domain_ops = {
	.alloc = tegra_msi_domain_alloc,
	.free = tegra_msi_domain_free,
};

static const struct msi_parent_ops tegra_msi_parent_ops = {
	.supported_flags	= (MSI_GENERIC_FLAGS_MASK	|
				   MSI_FLAG_PCI_MSIX),
	.required_flags		= (MSI_FLAG_USE_DEF_DOM_OPS	|
				   MSI_FLAG_USE_DEF_CHIP_OPS	|
				   MSI_FLAG_PCI_MSI_MASK_PARENT	|
				   MSI_FLAG_NO_AFFINITY),
	.chip_flags		= MSI_CHIP_FLAG_SET_ACK,
	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

static int tegra_allocate_domains(struct tegra_msi *msi)
{
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
	struct irq_domain_info info = {
		.fwnode		= fwnode,
		.ops		= &tegra_msi_domain_ops,
		.size		= INT_PCI_MSI_NR,
		.host_data	= msi,
	};

	msi->domain = msi_create_parent_irq_domain(&info, &tegra_msi_parent_ops);
	if (!msi->domain) {
		dev_err(pcie->dev, "failed to create MSI domain\n");
		return -ENOMEM;
	}
	return 0;
}

static void tegra_free_domains(struct tegra_msi *msi)
{
	irq_domain_remove(msi->domain);
}

static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct tegra_msi *msi = &pcie->msi;
	struct device *dev = pcie->dev;
	int err;

	mutex_init(&msi->map_lock);
	raw_spin_lock_init(&msi->mask_lock);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = tegra_allocate_domains(msi);
		if (err)
			return err;
	}

	err = platform_get_irq_byname(pdev, "msi");
	if (err < 0)
		goto free_irq_domain;

	msi->irq = err;

	irq_set_chained_handler_and_data(msi->irq, tegra_pcie_msi_irq, pcie);

	/*
	 * Though the PCIe controller can address a >32-bit address space, to
	 * accommodate endpoints that support only a 32-bit MSI target
	 * address, the mask is set to 32 bits to make sure that the MSI
	 * target address is always a 32-bit address.
	 */
1734 	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
1735 	if (err < 0) {
1736 		dev_err(dev, "failed to set DMA coherent mask: %d\n", err);
1737 		goto free_irq;
1738 	}
1739 
1740 	msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
1741 				    DMA_ATTR_NO_KERNEL_MAPPING);
1742 	if (!msi->virt) {
1743 		dev_err(dev, "failed to allocate DMA memory for MSI\n");
1744 		err = -ENOMEM;
1745 		goto free_irq;
1746 	}
1747 
1748 	return 0;
1749 
1750 free_irq:
1751 	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
1752 free_irq_domain:
1753 	if (IS_ENABLED(CONFIG_PCI_MSI))
1754 		tegra_free_domains(msi);
1755 
1756 	return err;
1757 }
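
/*
 * A minimal sketch (assuming the usual irq_chip compose_msi_msg() layout
 * for this driver's bottom chip) of the message an endpoint ends up with;
 * the 32-bit coherent mask above guarantees that address_hi is zero, so
 * even 32-bit-only endpoints can reach the doorbell:
 *
 *	msg->address_lo = lower_32_bits(msi->phys);
 *	msg->address_hi = upper_32_bits(msi->phys);
 *	msg->data = data->hwirq;
 */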
1758 
1759 static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1760 {
1761 	const struct tegra_pcie_soc *soc = pcie->soc;
1762 	struct tegra_msi *msi = &pcie->msi;
1763 	u32 reg, msi_state[INT_PCI_MSI_NR / 32];
1764 	int i;
1765 
1766 	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1767 	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
1768 	/* this register is in 4K increments */
1769 	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1770 
1771 	/* Restore the MSI allocation state */
1772 	bitmap_to_arr32(msi_state, msi->used, INT_PCI_MSI_NR);
1773 	for (i = 0; i < ARRAY_SIZE(msi_state); i++)
1774 		afi_writel(pcie, msi_state[i], AFI_MSI_EN_VEC(i));
1775 
1776 	/* and unmask the MSI interrupt */
1777 	reg = afi_readl(pcie, AFI_INTR_MASK);
1778 	reg |= AFI_INTR_MASK_MSI_MASK;
1779 	afi_writel(pcie, reg, AFI_INTR_MASK);
1780 }
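
/*
 * Rough picture of the data path programmed above (illustrative): an
 * endpoint raising vector N posts a memory write of N to the doorbell at
 * msi->phys inside the 4 KiB AFI_MSI_BAR window; the AFI then latches bit
 * (N % 32) of AFI_MSI_VEC(N / 32), which tegra_pcie_msi_irq() decodes back
 * into hwirq N.
 */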
1781 
1782 static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
1783 {
1784 	struct tegra_msi *msi = &pcie->msi;
1785 	unsigned int i, irq;
1786 
1787 	dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
1788 		       DMA_ATTR_NO_KERNEL_MAPPING);
1789 
1790 	for (i = 0; i < INT_PCI_MSI_NR; i++) {
1791 		irq = irq_find_mapping(msi->domain, i);
1792 		if (irq > 0)
1793 			irq_domain_free_irqs(irq, 1);
1794 	}
1795 
1796 	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
1797 
1798 	if (IS_ENABLED(CONFIG_PCI_MSI))
1799 		tegra_free_domains(msi);
1800 }
1801 
1802 static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1803 {
1804 	u32 value;
1805 
1806 	/* mask the MSI interrupt */
1807 	value = afi_readl(pcie, AFI_INTR_MASK);
1808 	value &= ~AFI_INTR_MASK_MSI_MASK;
1809 	afi_writel(pcie, value, AFI_INTR_MASK);
1810 
1811 	return 0;
1812 }
1813 
1814 static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie)
1815 {
1816 	u32 value;
1817 
1818 	value = afi_readl(pcie, AFI_INTR_MASK);
1819 	value &= ~AFI_INTR_MASK_INT_MASK;
1820 	afi_writel(pcie, value, AFI_INTR_MASK);
1821 }
1822 
1823 static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1824 				      u32 *xbar)
1825 {
1826 	struct device *dev = pcie->dev;
1827 	struct device_node *np = dev->of_node;
1828 
1829 	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
1830 		switch (lanes) {
1831 		case 0x010004:
1832 			dev_info(dev, "4x1, 1x1 configuration\n");
1833 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
1834 			return 0;
1835 
1836 		case 0x010102:
1837 			dev_info(dev, "2x1, 1x1, 1x1 configuration\n");
1838 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1839 			return 0;
1840 
1841 		case 0x010101:
1842 			dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
1843 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
1844 			return 0;
1845 
1846 		default:
1847 			dev_info(dev, "invalid lane configuration in DT, "
1848 				 "falling back to default 2x1, 1x1, 1x1 "
1849 				 "configuration\n");
1850 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1851 			return 0;
1852 		}
1853 	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
1854 		   of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
1855 		switch (lanes) {
1856 		case 0x0000104:
1857 			dev_info(dev, "4x1, 1x1 configuration\n");
1858 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1859 			return 0;
1860 
1861 		case 0x0000102:
1862 			dev_info(dev, "2x1, 1x1 configuration\n");
1863 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1864 			return 0;
1865 		}
1866 	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1867 		switch (lanes) {
1868 		case 0x00000204:
1869 			dev_info(dev, "4x1, 2x1 configuration\n");
1870 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1871 			return 0;
1872 
1873 		case 0x00020202:
1874 			dev_info(dev, "2x3 configuration\n");
1875 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1876 			return 0;
1877 
1878 		case 0x00010104:
1879 			dev_info(dev, "4x1, 1x2 configuration\n");
1880 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1881 			return 0;
1882 		}
1883 	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1884 		switch (lanes) {
1885 		case 0x00000004:
1886 			dev_info(dev, "single-mode configuration\n");
1887 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1888 			return 0;
1889 
1890 		case 0x00000202:
1891 			dev_info(dev, "dual-mode configuration\n");
1892 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1893 			return 0;
1894 		}
1895 	}
1896 
1897 	return -EINVAL;
1898 }
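
/*
 * Illustrative example: tegra_pcie_parse_dt() below assembles the lanes
 * word as one byte per root port, value << (index << 3). On Tegra124, a
 * device tree with a 4-lane port 0 and a 1-lane port 1 therefore yields
 * lanes = 0x0000104, selecting the X4_X1 crossbar configuration above.
 */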
1899 
1900 /*
1901  * Check whether a given set of supplies is available in a device tree node.
1902  * This is used to decide whether the new or the legacy device tree
1903  * binding should be used.
1904  */
1905 static bool of_regulator_bulk_available(struct device_node *np,
1906 					struct regulator_bulk_data *supplies,
1907 					unsigned int num_supplies)
1908 {
1909 	char property[32];
1910 	unsigned int i;
1911 
1912 	for (i = 0; i < num_supplies; i++) {
1913 		snprintf(property, 32, "%s-supply", supplies[i].supply);
1914 
1915 		if (!of_property_present(np, property))
1916 			return false;
1917 	}
1918 
1919 	return true;
1920 }
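
/*
 * Example (illustrative): for a supply named "vddio-pex-ctl" this looks
 * for a "vddio-pex-ctl-supply" property in the node; its presence is what
 * distinguishes the new binding from the legacy one handled below.
 */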
1921 
1922 /*
1923  * Old versions of the device tree binding for this device used a set of power
1924  * supplies that didn't match the hardware inputs. This happened to work for a
1925  * number of cases but is not future-proof. However, to preserve backwards
1926  * compatibility with old device trees, this function will try to use the old
1927  * set of supplies.
1928  */
1929 static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1930 {
1931 	struct device *dev = pcie->dev;
1932 	struct device_node *np = dev->of_node;
1933 
1934 	if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1935 		pcie->num_supplies = 3;
1936 	else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1937 		pcie->num_supplies = 2;
1938 
1939 	if (pcie->num_supplies == 0) {
1940 		dev_err(dev, "device %pOF not supported in legacy mode\n", np);
1941 		return -ENODEV;
1942 	}
1943 
1944 	pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1945 				      sizeof(*pcie->supplies),
1946 				      GFP_KERNEL);
1947 	if (!pcie->supplies)
1948 		return -ENOMEM;
1949 
1950 	pcie->supplies[0].supply = "pex-clk";
1951 	pcie->supplies[1].supply = "vdd";
1952 
1953 	if (pcie->num_supplies > 2)
1954 		pcie->supplies[2].supply = "avdd";
1955 
1956 	return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
1957 }
1958 
1959 /*
1960  * Obtains the list of regulators required for a particular generation of the
1961  * IP block.
1962  *
1963  * This would've been nice to do simply by providing static tables for use
1964  * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
1965  * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
1966  * and either seems to be optional depending on which ports are being used.
1967  */
1968 static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
1969 {
1970 	struct device *dev = pcie->dev;
1971 	struct device_node *np = dev->of_node;
1972 	unsigned int i = 0;
1973 
1974 	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
1975 		pcie->num_supplies = 4;
1976 
1977 		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1978 					      sizeof(*pcie->supplies),
1979 					      GFP_KERNEL);
1980 		if (!pcie->supplies)
1981 			return -ENOMEM;
1982 
1983 		pcie->supplies[i++].supply = "dvdd-pex";
1984 		pcie->supplies[i++].supply = "hvdd-pex-pll";
1985 		pcie->supplies[i++].supply = "hvdd-pex";
1986 		pcie->supplies[i++].supply = "vddio-pexctl-aud";
1987 	} else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
1988 		pcie->num_supplies = 3;
1989 
1990 		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1991 					      sizeof(*pcie->supplies),
1992 					      GFP_KERNEL);
1993 		if (!pcie->supplies)
1994 			return -ENOMEM;
1995 
1996 		pcie->supplies[i++].supply = "hvddio-pex";
1997 		pcie->supplies[i++].supply = "dvddio-pex";
1998 		pcie->supplies[i++].supply = "vddio-pex-ctl";
1999 	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
2000 		pcie->num_supplies = 4;
2001 
2002 		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2003 					      sizeof(*pcie->supplies),
2004 					      GFP_KERNEL);
2005 		if (!pcie->supplies)
2006 			return -ENOMEM;
2007 
2008 		pcie->supplies[i++].supply = "avddio-pex";
2009 		pcie->supplies[i++].supply = "dvddio-pex";
2010 		pcie->supplies[i++].supply = "hvdd-pex";
2011 		pcie->supplies[i++].supply = "vddio-pex-ctl";
2012 	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
2013 		bool need_pexa = false, need_pexb = false;
2014 
2015 		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
2016 		if (lane_mask & 0x0f)
2017 			need_pexa = true;
2018 
2019 		/* VDD_PEXB and AVDD_PEXB supply lanes 4 and 5 */
2020 		if (lane_mask & 0x30)
2021 			need_pexb = true;
2022 
2023 		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
2024 					 (need_pexb ? 2 : 0);
2025 
2026 		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2027 					      sizeof(*pcie->supplies),
2028 					      GFP_KERNEL);
2029 		if (!pcie->supplies)
2030 			return -ENOMEM;
2031 
2032 		pcie->supplies[i++].supply = "avdd-pex-pll";
2033 		pcie->supplies[i++].supply = "hvdd-pex";
2034 		pcie->supplies[i++].supply = "vddio-pex-ctl";
2035 		pcie->supplies[i++].supply = "avdd-plle";
2036 
2037 		if (need_pexa) {
2038 			pcie->supplies[i++].supply = "avdd-pexa";
2039 			pcie->supplies[i++].supply = "vdd-pexa";
2040 		}
2041 
2042 		if (need_pexb) {
2043 			pcie->supplies[i++].supply = "avdd-pexb";
2044 			pcie->supplies[i++].supply = "vdd-pexb";
2045 		}
2046 	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
2047 		pcie->num_supplies = 5;
2048 
2049 		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2050 					      sizeof(*pcie->supplies),
2051 					      GFP_KERNEL);
2052 		if (!pcie->supplies)
2053 			return -ENOMEM;
2054 
2055 		pcie->supplies[0].supply = "avdd-pex";
2056 		pcie->supplies[1].supply = "vdd-pex";
2057 		pcie->supplies[2].supply = "avdd-pex-pll";
2058 		pcie->supplies[3].supply = "avdd-plle";
2059 		pcie->supplies[4].supply = "vddio-pex-clk";
2060 	}
2061 
2062 	if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
2063 					pcie->num_supplies))
2064 		return devm_regulator_bulk_get(dev, pcie->num_supplies,
2065 					       pcie->supplies);
2066 
2067 	/*
2068 	 * If not all regulators are available for this new scheme, assume
2069 	 * that the device tree complies with an older version of the device
2070 	 * tree binding.
2071 	 */
2072 	dev_info(dev, "using legacy DT binding for power supplies\n");
2073 
2074 	devm_kfree(dev, pcie->supplies);
2075 	pcie->num_supplies = 0;
2076 
2077 	return tegra_pcie_get_legacy_regulators(pcie);
2078 }
2079 
2080 static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
2081 {
2082 	struct device *dev = pcie->dev;
2083 	struct device_node *np = dev->of_node;
2084 	const struct tegra_pcie_soc *soc = pcie->soc;
2085 	u32 lanes = 0, mask = 0;
2086 	unsigned int lane = 0;
2087 	int err;
2088 
2089 	/* parse root ports */
2090 	for_each_child_of_node_scoped(np, port) {
2091 		struct tegra_pcie_port *rp;
2092 		unsigned int index;
2093 		u32 value;
2094 		char *label;
2095 
2096 		err = of_pci_get_devfn(port);
2097 		if (err < 0)
2098 			return dev_err_probe(dev, err, "failed to parse address\n");
2099 
2100 		index = PCI_SLOT(err);
2101 
2102 		if (index < 1 || index > soc->num_ports)
2103 			return dev_err_probe(dev, -EINVAL,
2104 					     "invalid port number: %u\n", index);
2105 
2106 		index--;
2107 
2108 		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
2109 		if (err < 0)
2110 			return dev_err_probe(dev, err,
2111 					     "failed to parse # of lanes\n");
2112 
2113 		if (value > 16)
2114 			return dev_err_probe(dev, -EINVAL,
2115 					     "invalid # of lanes: %u\n", value);
2116 
2117 		lanes |= value << (index << 3);
2118 
2119 		if (!of_device_is_available(port)) {
2120 			lane += value;
2121 			continue;
2122 		}
2123 
2124 		mask |= ((1 << value) - 1) << lane;
2125 		lane += value;
2126 
2127 		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
2128 		if (!rp)
2129 			return -ENOMEM;
2130 
2131 		err = of_address_to_resource(port, 0, &rp->regs);
2132 		if (err < 0)
2133 			return dev_err_probe(dev, err, "failed to parse address\n");
2134 
2135 		INIT_LIST_HEAD(&rp->list);
2136 		rp->index = index;
2137 		rp->lanes = value;
2138 		rp->pcie = pcie;
2139 		rp->np = port;
2140 
2141 		rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
2142 		if (IS_ERR(rp->base))
2143 			return PTR_ERR(rp->base);
2144 
2145 		label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
2146 		if (!label)
2147 			return -ENOMEM;
2148 
2149 		/*
2150 		 * devm_fwnode_gpiod_get() returns -ENOENT if the reset-gpios
2151 		 * property is not populated; in that case, fall back to the
2152 		 * AFI per-port register to toggle the PERST# SFIO line.
2153 		 */
2154 		rp->reset_gpio = devm_fwnode_gpiod_get(dev,
2155 						       of_fwnode_handle(port),
2156 						       "reset",
2157 						       GPIOD_OUT_LOW,
2158 						       label);
2159 		if (IS_ERR(rp->reset_gpio)) {
2160 			if (PTR_ERR(rp->reset_gpio) == -ENOENT)
2161 				rp->reset_gpio = NULL;
2162 			else
2163 				return dev_err_probe(dev, PTR_ERR(rp->reset_gpio),
2164 						     "failed to get reset GPIO\n");
2165 		}
2166 
2167 		list_add_tail(&rp->list, &pcie->ports);
2168 	}
2169 
2170 	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
2171 	if (err < 0)
2172 		return dev_err_probe(dev, err,
2173 				     "invalid lane configuration\n");
2174 
2175 	err = tegra_pcie_get_regulators(pcie, mask);
2176 	if (err < 0)
2177 		return err;
2178 
2179 	return 0;
2180 }
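
/*
 * Illustrative device tree fragment matching the parsing above (property
 * values are examples only); the unit address encodes the root port's
 * devfn, from which the port index is derived:
 *
 *	pci@1,0 {
 *		reg = <0x000800 0 0 0 0>;
 *		nvidia,num-lanes = <2>;
 *		reset-gpios = <&gpio TEGRA_GPIO(A, 3) GPIO_ACTIVE_LOW>;
 *	};
 */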
2181 
2182 /*
2183  * FIXME: If there are no PCIe cards attached, then calling this function
2184  * can significantly increase boot time because of its long timeout
2185  * loops.
2186  */
2187 #define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
2188 static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
2189 {
2190 	struct device *dev = port->pcie->dev;
2191 	unsigned int retries = 3;
2192 	unsigned long value;
2193 
2194 	/* override presence detection */
2195 	value = readl(port->base + RP_PRIV_MISC);
2196 	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
2197 	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
2198 	writel(value, port->base + RP_PRIV_MISC);
2199 
2200 	do {
2201 		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2202 
2203 		do {
2204 			value = readl(port->base + RP_VEND_XP);
2205 
2206 			if (value & RP_VEND_XP_DL_UP)
2207 				break;
2208 
2209 			usleep_range(1000, 2000);
2210 		} while (--timeout);
2211 
2212 		if (!timeout) {
2213 			dev_dbg(dev, "link %u down, retrying\n", port->index);
2214 			goto retry;
2215 		}
2216 
2217 		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2218 
2219 		do {
2220 			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2221 
2222 			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2223 				return true;
2224 
2225 			usleep_range(1000, 2000);
2226 		} while (--timeout);
2227 
2228 retry:
2229 		tegra_pcie_port_reset(port);
2230 	} while (--retries);
2231 
2232 	return false;
2233 }
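
/*
 * A minimal sketch (same 1-2 ms cadence; <linux/iopoll.h> is already
 * included) of how one of the open-coded polls above could be expressed:
 *
 *	err = readl_poll_timeout(port->base + RP_VEND_XP, value,
 *				 value & RP_VEND_XP_DL_UP, 1000,
 *				 TEGRA_PCIE_LINKUP_TIMEOUT * 1000);
 *
 * The explicit loops are kept here so that the retry path can reset the
 * port between attempts.
 */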
2234 
2235 static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie)
2236 {
2237 	struct device *dev = pcie->dev;
2238 	struct tegra_pcie_port *port;
2239 	ktime_t deadline;
2240 	u32 value;
2241 
2242 	list_for_each_entry(port, &pcie->ports, list) {
2243 		/*
2244 		 * "Supported Link Speeds Vector" in "Link Capabilities 2"
2245 		 * is not supported by Tegra. tegra_pcie_change_link_speed()
2246 		 * is called only for Tegra chips which support Gen2.
2247 		 * is called only for Tegra chips that support Gen2.
2248 		 * So there is no harm in not verifying the supported link speed.
2249 		value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
2250 		value &= ~PCI_EXP_LNKSTA_CLS;
2251 		value |= PCI_EXP_LNKSTA_CLS_5_0GB;
2252 		writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
2253 
2254 		/*
2255 		 * Poll until link comes back from recovery to avoid race
2256 		 * condition.
2257 		 */
2258 		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2259 
2260 		while (ktime_before(ktime_get(), deadline)) {
2261 			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2262 			if ((value & PCI_EXP_LNKSTA_LT) == 0)
2263 				break;
2264 
2265 			usleep_range(2000, 3000);
2266 		}
2267 
2268 		if (value & PCI_EXP_LNKSTA_LT)
2269 			dev_warn(dev, "PCIe port %u link is in recovery\n",
2270 				 port->index);
2271 
2272 		/* Retrain the link */
2273 		value = readl(port->base + RP_LINK_CONTROL_STATUS);
2274 		value |= PCI_EXP_LNKCTL_RL;
2275 		writel(value, port->base + RP_LINK_CONTROL_STATUS);
2276 
2277 		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2278 
2279 		while (ktime_before(ktime_get(), deadline)) {
2280 			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2281 			if ((value & PCI_EXP_LNKSTA_LT) == 0)
2282 				break;
2283 
2284 			usleep_range(2000, 3000);
2285 		}
2286 
2287 		if (value & PCI_EXP_LNKSTA_LT)
2288 			dev_err(dev, "failed to retrain link of port %u\n",
2289 				port->index);
2290 	}
2291 }
2292 
2293 static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
2294 {
2295 	struct device *dev = pcie->dev;
2296 	struct tegra_pcie_port *port, *tmp;
2297 
2298 	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2299 		dev_info(dev, "probing port %u, using %u lanes\n",
2300 			 port->index, port->lanes);
2301 
2302 		tegra_pcie_port_enable(port);
2303 	}
2304 
2305 	/* Start LTSSM from Tegra side */
2306 	reset_control_deassert(pcie->pcie_xrst);
2307 
2308 	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2309 		if (tegra_pcie_port_check_link(port))
2310 			continue;
2311 
2312 		dev_info(dev, "link %u down, ignoring\n", port->index);
2313 
2314 		tegra_pcie_port_disable(port);
2315 		tegra_pcie_port_free(port);
2316 	}
2317 
2318 	if (pcie->soc->has_gen2)
2319 		tegra_pcie_change_link_speed(pcie);
2320 }
2321 
2322 static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
2323 {
2324 	struct tegra_pcie_port *port, *tmp;
2325 
2326 	reset_control_assert(pcie->pcie_xrst);
2327 
2328 	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2329 		tegra_pcie_port_disable(port);
2330 }
2331 
2332 static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = {
2333 	{ .pme.turnoff_bit = 0, .pme.ack_bit =  5 },
2334 	{ .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
2335 };
2336 
2337 static const struct tegra_pcie_soc tegra20_pcie = {
2338 	.num_ports = 2,
2339 	.ports = tegra20_pcie_ports,
2340 	.msi_base_shift = 0,
2341 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
2342 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
2343 	.pads_refclk_cfg0 = 0xfa5cfa5c,
2344 	.has_pex_clkreq_en = false,
2345 	.has_pex_bias_ctrl = false,
2346 	.has_intr_prsnt_sense = false,
2347 	.has_cml_clk = false,
2348 	.has_gen2 = false,
2349 	.force_pca_enable = false,
2350 	.program_uphy = true,
2351 	.update_clamp_threshold = false,
2352 	.program_deskew_time = false,
2353 	.update_fc_timer = false,
2354 	.has_cache_bars = true,
2355 	.ectl.enable = false,
2356 };
2357 
2358 static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
2359 	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
2360 	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
2361 	{ .pme.turnoff_bit = 16, .pme.ack_bit = 18 },
2362 };
2363 
2364 static const struct tegra_pcie_soc tegra30_pcie = {
2365 	.num_ports = 3,
2366 	.ports = tegra30_pcie_ports,
2367 	.msi_base_shift = 8,
2368 	.afi_pex2_ctrl = 0x128,
2369 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2370 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2371 	.pads_refclk_cfg0 = 0xfa5cfa5c,
2372 	.pads_refclk_cfg1 = 0xfa5cfa5c,
2373 	.has_pex_clkreq_en = true,
2374 	.has_pex_bias_ctrl = true,
2375 	.has_intr_prsnt_sense = true,
2376 	.has_cml_clk = true,
2377 	.has_gen2 = false,
2378 	.force_pca_enable = false,
2379 	.program_uphy = true,
2380 	.update_clamp_threshold = false,
2381 	.program_deskew_time = false,
2382 	.update_fc_timer = false,
2383 	.has_cache_bars = false,
2384 	.ectl.enable = false,
2385 };
2386 
2387 static const struct tegra_pcie_soc tegra124_pcie = {
2388 	.num_ports = 2,
2389 	.ports = tegra20_pcie_ports,
2390 	.msi_base_shift = 8,
2391 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2392 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2393 	.pads_refclk_cfg0 = 0x44ac44ac,
2394 	.has_pex_clkreq_en = true,
2395 	.has_pex_bias_ctrl = true,
2396 	.has_intr_prsnt_sense = true,
2397 	.has_cml_clk = true,
2398 	.has_gen2 = true,
2399 	.force_pca_enable = false,
2400 	.program_uphy = true,
2401 	.update_clamp_threshold = true,
2402 	.program_deskew_time = false,
2403 	.update_fc_timer = false,
2404 	.has_cache_bars = false,
2405 	.ectl.enable = false,
2406 };
2407 
2408 static const struct tegra_pcie_soc tegra210_pcie = {
2409 	.num_ports = 2,
2410 	.ports = tegra20_pcie_ports,
2411 	.msi_base_shift = 8,
2412 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2413 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2414 	.pads_refclk_cfg0 = 0x90b890b8,
2415 	/* FC threshold is bit[25:18] */
2416 	.update_fc_threshold = 0x01800000,
2417 	.has_pex_clkreq_en = true,
2418 	.has_pex_bias_ctrl = true,
2419 	.has_intr_prsnt_sense = true,
2420 	.has_cml_clk = true,
2421 	.has_gen2 = true,
2422 	.force_pca_enable = true,
2423 	.program_uphy = true,
2424 	.update_clamp_threshold = true,
2425 	.program_deskew_time = true,
2426 	.update_fc_timer = true,
2427 	.has_cache_bars = false,
2428 	.ectl = {
2429 		.regs = {
2430 			.rp_ectl_2_r1 = 0x0000000f,
2431 			.rp_ectl_4_r1 = 0x00000067,
2432 			.rp_ectl_5_r1 = 0x55010000,
2433 			.rp_ectl_6_r1 = 0x00000001,
2434 			.rp_ectl_2_r2 = 0x0000008f,
2435 			.rp_ectl_4_r2 = 0x000000c7,
2436 			.rp_ectl_5_r2 = 0x55010000,
2437 			.rp_ectl_6_r2 = 0x00000001,
2438 		},
2439 		.enable = true,
2440 	},
2441 };
2442 
2443 static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
2444 	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
2445 	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
2446 	{ .pme.turnoff_bit = 12, .pme.ack_bit = 14 },
2447 };
2448 
2449 static const struct tegra_pcie_soc tegra186_pcie = {
2450 	.num_ports = 3,
2451 	.ports = tegra186_pcie_ports,
2452 	.msi_base_shift = 8,
2453 	.afi_pex2_ctrl = 0x19c,
2454 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2455 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2456 	.pads_refclk_cfg0 = 0x80b880b8,
2457 	.pads_refclk_cfg1 = 0x000480b8,
2458 	.has_pex_clkreq_en = true,
2459 	.has_pex_bias_ctrl = true,
2460 	.has_intr_prsnt_sense = true,
2461 	.has_cml_clk = false,
2462 	.has_gen2 = true,
2463 	.force_pca_enable = false,
2464 	.program_uphy = false,
2465 	.update_clamp_threshold = false,
2466 	.program_deskew_time = false,
2467 	.update_fc_timer = false,
2468 	.has_cache_bars = false,
2469 	.ectl.enable = false,
2470 };
2471 
2472 static const struct of_device_id tegra_pcie_of_match[] = {
2473 	{ .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
2474 	{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
2475 	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
2476 	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
2477 	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
2478 	{ },
2479 };
2480 MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
2481 
2482 static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
2483 {
2484 	struct tegra_pcie *pcie = s->private;
2485 
2486 	if (list_empty(&pcie->ports))
2487 		return NULL;
2488 
2489 	seq_puts(s, "Index  Status\n");
2490 
2491 	return seq_list_start(&pcie->ports, *pos);
2492 }
2493 
2494 static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
2495 {
2496 	struct tegra_pcie *pcie = s->private;
2497 
2498 	return seq_list_next(v, &pcie->ports, pos);
2499 }
2500 
2501 static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
2502 {
2503 }
2504 
2505 static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
2506 {
2507 	bool up = false, active = false;
2508 	struct tegra_pcie_port *port;
2509 	unsigned int value;
2510 
2511 	port = list_entry(v, struct tegra_pcie_port, list);
2512 
2513 	value = readl(port->base + RP_VEND_XP);
2514 
2515 	if (value & RP_VEND_XP_DL_UP)
2516 		up = true;
2517 
2518 	value = readl(port->base + RP_LINK_CONTROL_STATUS);
2519 
2520 	if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2521 		active = true;
2522 
2523 	seq_printf(s, "%2u     ", port->index);
2524 
2525 	if (up)
2526 		seq_puts(s, "up");
2527 
2528 	if (active) {
2529 		if (up)
2530 			seq_puts(s, ", ");
2531 
2532 		seq_puts(s, "active");
2533 	}
2534 
2535 	seq_puts(s, "\n");
2536 	return 0;
2537 }
2538 
2539 static const struct seq_operations tegra_pcie_ports_sops = {
2540 	.start = tegra_pcie_ports_seq_start,
2541 	.next = tegra_pcie_ports_seq_next,
2542 	.stop = tegra_pcie_ports_seq_stop,
2543 	.show = tegra_pcie_ports_seq_show,
2544 };
2545 
2546 DEFINE_SEQ_ATTRIBUTE(tegra_pcie_ports);
2547 
2548 static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
2549 {
2550 	debugfs_remove_recursive(pcie->debugfs);
2551 	pcie->debugfs = NULL;
2552 }
2553 
2554 static void tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2555 {
2556 	pcie->debugfs = debugfs_create_dir("pcie", NULL);
2557 
2558 	debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs, pcie,
2559 			    &tegra_pcie_ports_fops);
2560 }
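
/*
 * Example output (illustrative) of the resulting file at
 * /sys/kernel/debug/pcie/ports:
 *
 *	Index  Status
 *	 0     up, active
 *	 1     up
 */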
2561 
2562 static int tegra_pcie_probe(struct platform_device *pdev)
2563 {
2564 	struct device *dev = &pdev->dev;
2565 	struct pci_host_bridge *host;
2566 	struct tegra_pcie *pcie;
2567 	int err;
2568 
2569 	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
2570 	if (!host)
2571 		return -ENOMEM;
2572 
2573 	pcie = pci_host_bridge_priv(host);
2574 	host->sysdata = pcie;
2575 	platform_set_drvdata(pdev, pcie);
2576 
2577 	pcie->soc = of_device_get_match_data(dev);
2578 	INIT_LIST_HEAD(&pcie->ports);
2579 	pcie->dev = dev;
2580 
2581 	err = tegra_pcie_parse_dt(pcie);
2582 	if (err < 0)
2583 		return err;
2584 
2585 	err = tegra_pcie_get_resources(pcie);
2586 	if (err < 0) {
2587 		dev_err(dev, "failed to request resources: %d\n", err);
2588 		return err;
2589 	}
2590 
2591 	err = tegra_pcie_msi_setup(pcie);
2592 	if (err < 0) {
2593 		dev_err(dev, "failed to enable MSI support: %d\n", err);
2594 		goto put_resources;
2595 	}
2596 
2597 	pm_runtime_enable(pcie->dev);
2598 	err = pm_runtime_get_sync(pcie->dev);
2599 	if (err < 0) {
2600 		dev_err(dev, "failed to enable PCIe controller: %d\n", err);
2601 		goto pm_runtime_put;
2602 	}
2603 
2604 	host->ops = &tegra_pcie_ops;
2605 	host->map_irq = tegra_pcie_map_irq;
2606 
2607 	err = pci_host_probe(host);
2608 	if (err < 0) {
2609 		dev_err(dev, "failed to register host: %d\n", err);
2610 		goto pm_runtime_put;
2611 	}
2612 
2613 	if (IS_ENABLED(CONFIG_DEBUG_FS))
2614 		tegra_pcie_debugfs_init(pcie);
2615 
2616 	return 0;
2617 
2618 pm_runtime_put:
2619 	pm_runtime_put_sync(pcie->dev);
2620 	pm_runtime_disable(pcie->dev);
2621 	tegra_pcie_msi_teardown(pcie);
2622 put_resources:
2623 	tegra_pcie_put_resources(pcie);
2624 	return err;
2625 }
2626 
2627 static void tegra_pcie_remove(struct platform_device *pdev)
2628 {
2629 	struct tegra_pcie *pcie = platform_get_drvdata(pdev);
2630 	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
2631 	struct tegra_pcie_port *port, *tmp;
2632 
2633 	if (IS_ENABLED(CONFIG_DEBUG_FS))
2634 		tegra_pcie_debugfs_exit(pcie);
2635 
2636 	pci_stop_root_bus(host->bus);
2637 	pci_remove_root_bus(host->bus);
2638 	pm_runtime_put_sync(pcie->dev);
2639 	pm_runtime_disable(pcie->dev);
2640 
2641 	if (IS_ENABLED(CONFIG_PCI_MSI))
2642 		tegra_pcie_msi_teardown(pcie);
2643 
2644 	tegra_pcie_put_resources(pcie);
2645 
2646 	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2647 		tegra_pcie_port_free(port);
2648 }
2649 
2650 static int tegra_pcie_pm_suspend(struct device *dev)
2651 {
2652 	struct tegra_pcie *pcie = dev_get_drvdata(dev);
2653 	struct tegra_pcie_port *port;
2654 	int err;
2655 
2656 	list_for_each_entry(port, &pcie->ports, list)
2657 		tegra_pcie_pme_turnoff(port);
2658 
2659 	tegra_pcie_disable_ports(pcie);
2660 
2661 	/*
2662 	 * AFI_INTR is unmasked in tegra_pcie_enable_controller(), mask it to
2663 	 * avoid unwanted interrupts raised by AFI after pex_rst is asserted.
2664 	 */
2665 	tegra_pcie_disable_interrupts(pcie);
2666 
2667 	if (pcie->soc->program_uphy) {
2668 		err = tegra_pcie_phy_power_off(pcie);
2669 		if (err < 0)
2670 			dev_err(dev, "failed to power off PHY(s): %d\n", err);
2671 	}
2672 
2673 	reset_control_assert(pcie->pex_rst);
2674 	clk_disable_unprepare(pcie->pex_clk);
2675 
2676 	if (IS_ENABLED(CONFIG_PCI_MSI))
2677 		tegra_pcie_disable_msi(pcie);
2678 
2679 	pinctrl_pm_select_idle_state(dev);
2680 	tegra_pcie_power_off(pcie);
2681 
2682 	return 0;
2683 }
2684 
2685 static int tegra_pcie_pm_resume(struct device *dev)
2686 {
2687 	struct tegra_pcie *pcie = dev_get_drvdata(dev);
2688 	int err;
2689 
2690 	err = tegra_pcie_power_on(pcie);
2691 	if (err) {
2692 		dev_err(dev, "failed to power on PCIe controller: %d\n", err);
2693 		return err;
2694 	}
2695 
2696 	err = pinctrl_pm_select_default_state(dev);
2697 	if (err < 0) {
2698 		dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err);
2699 		goto poweroff;
2700 	}
2701 
2702 	tegra_pcie_enable_controller(pcie);
2703 	tegra_pcie_setup_translations(pcie);
2704 
2705 	if (IS_ENABLED(CONFIG_PCI_MSI))
2706 		tegra_pcie_enable_msi(pcie);
2707 
2708 	err = clk_prepare_enable(pcie->pex_clk);
2709 	if (err) {
2710 		dev_err(dev, "failed to enable PEX clock: %d\n", err);
2711 		goto pex_dpd_enable;
2712 	}
2713 
2714 	reset_control_deassert(pcie->pex_rst);
2715 
2716 	if (pcie->soc->program_uphy) {
2717 		err = tegra_pcie_phy_power_on(pcie);
2718 		if (err < 0) {
2719 			dev_err(dev, "failed to power on PHY(s): %d\n", err);
2720 			goto disable_pex_clk;
2721 		}
2722 	}
2723 
2724 	tegra_pcie_apply_pad_settings(pcie);
2725 	tegra_pcie_enable_ports(pcie);
2726 
2727 	return 0;
2728 
2729 disable_pex_clk:
2730 	reset_control_assert(pcie->pex_rst);
2731 	clk_disable_unprepare(pcie->pex_clk);
2732 pex_dpd_enable:
2733 	pinctrl_pm_select_idle_state(dev);
2734 poweroff:
2735 	tegra_pcie_power_off(pcie);
2736 
2737 	return err;
2738 }
2739 
2740 static const struct dev_pm_ops tegra_pcie_pm_ops = {
2741 	RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
2742 	NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume)
2743 };
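
/*
 * Note (illustrative): the same suspend/resume pair serves both runtime PM
 * and noirq system sleep, so a runtime-suspended controller is powered off
 * in exactly the same way as it is across system suspend.
 */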
2744 
2745 static struct platform_driver tegra_pcie_driver = {
2746 	.driver = {
2747 		.name = "tegra-pcie",
2748 		.of_match_table = tegra_pcie_of_match,
2749 		.suppress_bind_attrs = true,
2750 		.pm = &tegra_pcie_pm_ops,
2751 	},
2752 	.probe = tegra_pcie_probe,
2753 	.remove = tegra_pcie_remove,
2754 };
2755 module_platform_driver(tegra_pcie_driver);
2756