// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Tegra SoCs
 *
 * Copyright (c) 2010, CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Based on NVIDIA PCIe driver
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * Bits taken from arch/arm/mach-dove/pcie.c
 *
 * Author: Thierry Reding <treding@nvidia.com>
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>

#include <soc/tegra/cpuidle.h>
#include <soc/tegra/pmc.h>

#include "../pci.h"

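/* 256 MSI vectors in total: eight 32-bit AFI_MSI_VEC registers, see below */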
#define INT_PCI_MSI_NR (8 * 32)

/* register definitions */

#define AFI_AXI_BAR0_SZ	0x00
#define AFI_AXI_BAR1_SZ	0x04
#define AFI_AXI_BAR2_SZ	0x08
#define AFI_AXI_BAR3_SZ	0x0c
#define AFI_AXI_BAR4_SZ	0x10
#define AFI_AXI_BAR5_SZ	0x14

#define AFI_AXI_BAR0_START	0x18
#define AFI_AXI_BAR1_START	0x1c
#define AFI_AXI_BAR2_START	0x20
#define AFI_AXI_BAR3_START	0x24
#define AFI_AXI_BAR4_START	0x28
#define AFI_AXI_BAR5_START	0x2c

#define AFI_FPCI_BAR0	0x30
#define AFI_FPCI_BAR1	0x34
#define AFI_FPCI_BAR2	0x38
#define AFI_FPCI_BAR3	0x3c
#define AFI_FPCI_BAR4	0x40
#define AFI_FPCI_BAR5	0x44

#define AFI_CACHE_BAR0_SZ	0x48
#define AFI_CACHE_BAR0_ST	0x4c
#define AFI_CACHE_BAR1_SZ	0x50
#define AFI_CACHE_BAR1_ST	0x54

#define AFI_MSI_BAR_SZ		0x60
#define AFI_MSI_FPCI_BAR_ST	0x64
#define AFI_MSI_AXI_BAR_ST	0x68

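/*
 * (x) selects one of the eight 32-bit MSI vector registers: bit (n % 32) of
 * AFI_MSI_VEC(n / 32) is the status bit of MSI vector n, and the matching
 * bit of AFI_MSI_EN_VEC(n / 32) is its enable, as used by the MSI irqchip
 * callbacks below.
 */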
#define AFI_MSI_VEC(x)		(0x6c + ((x) * 4))
#define AFI_MSI_EN_VEC(x)	(0x8c + ((x) * 4))

#define AFI_CONFIGURATION		0xac
#define  AFI_CONFIGURATION_EN_FPCI		(1 << 0)
#define  AFI_CONFIGURATION_CLKEN_OVERRIDE	(1 << 31)

#define AFI_FPCI_ERROR_MASKS	0xb0

#define AFI_INTR_MASK		0xb4
#define  AFI_INTR_MASK_INT_MASK	(1 << 0)
#define  AFI_INTR_MASK_MSI_MASK	(1 << 8)

#define AFI_INTR_CODE			0xb8
#define  AFI_INTR_CODE_MASK		0xf
#define  AFI_INTR_INI_SLAVE_ERROR	1
#define  AFI_INTR_INI_DECODE_ERROR	2
#define  AFI_INTR_TARGET_ABORT		3
#define  AFI_INTR_MASTER_ABORT		4
#define  AFI_INTR_INVALID_WRITE		5
#define  AFI_INTR_LEGACY		6
#define  AFI_INTR_FPCI_DECODE_ERROR	7
#define  AFI_INTR_AXI_DECODE_ERROR	8
#define  AFI_INTR_FPCI_TIMEOUT		9
#define  AFI_INTR_PE_PRSNT_SENSE	10
#define  AFI_INTR_PE_CLKREQ_SENSE	11
#define  AFI_INTR_CLKCLAMP_SENSE	12
#define  AFI_INTR_RDY4PD_SENSE		13
#define  AFI_INTR_P2P_ERROR		14

#define AFI_INTR_SIGNATURE	0xbc
#define AFI_UPPER_FPCI_ADDRESS	0xc0
#define AFI_SM_INTR_ENABLE	0xc4
#define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
#define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
#define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
#define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
#define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
#define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
#define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
#define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)

#define AFI_AFI_INTR_ENABLE		0xc8
#define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
#define  AFI_INTR_EN_INI_DECERR		(1 << 1)
#define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
#define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
#define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
#define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
#define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
#define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
#define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)

#define AFI_PCIE_PME		0xf0

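/*
 * Root ports 0..2 map to the per-port disable bits 1..3 (hence 0xe to
 * disable all three) and to the CLKREQ-as-GPIO bits 29..31.
 */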
#define AFI_PCIE_CONFIG					0x0f8
#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111	(0x2 << 20)
#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x)		(1 << ((x) + 29))
#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL		(0x7 << 29)

#define AFI_FUSE			0x104
#define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)

#define AFI_PEX0_CTRL			0x110
#define AFI_PEX1_CTRL			0x118
#define  AFI_PEX_CTRL_RST		(1 << 0)
#define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
#define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
#define  AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)

#define AFI_PLLE_CONTROL		0x160
#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)

#define AFI_PEXBIAS_CTRL_0		0x168

#define RP_ECTL_2_R1	0x00000e84
#define  RP_ECTL_2_R1_RX_CTLE_1C_MASK		0xffff

#define RP_ECTL_4_R1	0x00000e8c
#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT	16

#define RP_ECTL_5_R1	0x00000e90
#define  RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK	0xffffffff

#define RP_ECTL_6_R1	0x00000e94
#define  RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK	0xffffffff

#define RP_ECTL_2_R2	0x00000ea4
#define  RP_ECTL_2_R2_RX_CTLE_1C_MASK	0xffff

#define RP_ECTL_4_R2	0x00000eac
#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT	16

#define RP_ECTL_5_R2	0x00000eb0
#define  RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK	0xffffffff

#define RP_ECTL_6_R2	0x00000eb4
#define  RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK	0xffffffff

#define RP_VEND_XP	0x00000f00
#define  RP_VEND_XP_DL_UP			(1 << 30)
#define  RP_VEND_XP_OPPORTUNISTIC_ACK		(1 << 27)
#define  RP_VEND_XP_OPPORTUNISTIC_UPDATEFC	(1 << 28)
#define  RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK	(0xff << 18)

#define RP_VEND_CTL0	0x00000f44
#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK	(0xf << 12)
#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH	(0x9 << 12)

#define RP_VEND_CTL1	0x00000f48
#define  RP_VEND_CTL1_ERPT	(1 << 13)

#define RP_VEND_XP_BIST	0x00000f4c
#define  RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE	(1 << 28)

#define RP_VEND_CTL2 0x00000fa8
#define  RP_VEND_CTL2_PCA_ENABLE (1 << 7)

#define RP_PRIV_MISC	0x00000fe0
#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT		(0xe << 0)
#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT		(0xf << 0)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 16)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD		(0xf << 16)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE		(1 << 23)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 24)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD		(0xf << 24)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE		(1 << 31)

#define RP_LINK_CONTROL_STATUS			0x00000090
#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000

#define RP_LINK_CONTROL_STATUS_2		0x000000b0

#define PADS_CTL_SEL		0x0000009c

#define PADS_CTL		0x000000a0
#define  PADS_CTL_IDDQ_1L	(1 << 0)
#define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
#define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)

#define PADS_PLL_CTL_TEGRA20			0x000000b8
#define PADS_PLL_CTL_TEGRA30			0x000000b4
#define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
#define  PADS_PLL_CTL_LOCKDET			(1 << 8)
#define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
#define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
#define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)

#define PADS_REFCLK_CFG0			0x000000c8
#define PADS_REFCLK_CFG1			0x000000cc
#define PADS_REFCLK_BIAS			0x000000d0

/*
 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 * entries, one entry per PCIe port. These field definitions and desired
 * values aren't in the TRM, but do come from NVIDIA.
 */
#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */
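
/*
 * A per-port entry is composed by shifting each field into place; with
 * hypothetical values DRVI = 0x4, PREDI = 0x4, E_TERM = 1 and TERM = 0xb,
 * the entry would be (0x4 << 12) | (0x4 << 8) | (1 << 7) | (0xb << 2),
 * i.e. 0x44ac.
 */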

#define PME_ACK_TIMEOUT 10000 /* in usec */
#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */

struct tegra_msi {
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
	struct mutex map_lock;
	spinlock_t mask_lock;
	void *virt;
	dma_addr_t phys;
	int irq;
};

/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_port_soc {
	struct {
		u8 turnoff_bit;
		u8 ack_bit;
	} pme;
};

struct tegra_pcie_soc {
	unsigned int num_ports;
	const struct tegra_pcie_port_soc *ports;
	unsigned int msi_base_shift;
	unsigned long afi_pex2_ctrl;
	u32 pads_pll_ctl;
	u32 tx_ref_sel;
	u32 pads_refclk_cfg0;
	u32 pads_refclk_cfg1;
	u32 update_fc_threshold;
	bool has_pex_clkreq_en;
	bool has_pex_bias_ctrl;
	bool has_intr_prsnt_sense;
	bool has_cml_clk;
	bool has_gen2;
	bool force_pca_enable;
	bool program_uphy;
	bool update_clamp_threshold;
	bool program_deskew_time;
	bool update_fc_timer;
	bool has_cache_bars;
	struct {
		struct {
			u32 rp_ectl_2_r1;
			u32 rp_ectl_4_r1;
			u32 rp_ectl_5_r1;
			u32 rp_ectl_6_r1;
			u32 rp_ectl_2_r2;
			u32 rp_ectl_4_r2;
			u32 rp_ectl_5_r2;
			u32 rp_ectl_6_r2;
		} regs;
		bool enable;
	} ectl;
};

struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;
	void __iomem *afi;
	void __iomem *cfg;
	int irq;

	struct resource cs;

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	bool legacy_phy;
	struct phy *phy;

	struct tegra_msi msi;

	struct list_head ports;
	u32 xbar_config;

	struct regulator_bulk_data *supplies;
	unsigned int num_supplies;

	const struct tegra_pcie_soc *soc;
	struct dentry *debugfs;
};

static inline struct tegra_pcie *msi_to_pcie(struct tegra_msi *msi)
{
	return container_of(msi, struct tegra_pcie, msi);
}

struct tegra_pcie_port {
	struct tegra_pcie *pcie;
	struct device_node *np;
	struct list_head list;
	struct resource regs;
	void __iomem *base;
	unsigned int index;
	unsigned int lanes;

	struct phy **phys;

	struct gpio_desc *reset_gpio;
};

static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}

static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}

static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}

static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}

/*
 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 * defined by PCIe. However, it deviates a bit in how the 4 bits for extended
 * register accesses are mapped:
 *
 *    [27:24] extended register number
 *    [23:16] bus number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * Mapping the whole extended configuration space would require 256 MiB of
 * virtual address space, only a small part of which will actually be used.
 *
 * To work around this, a 4 KiB region is used to generate the required
 * configuration transaction with the relevant B:D:F and register offset
 * values. This is achieved by dynamically programming the base address and
 * size of the AFI_AXI_BAR used for endpoint config space mapping, making
 * sure that the address whose access generates the correct config
 * transaction falls within this 4 KiB region.
 */
static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
					   unsigned int where)
{
	return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
	       (PCI_FUNC(devfn) << 8) | (where & 0xff);
}
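
/*
 * Worked example (hypothetical access): bus 0x01, device 0x02, function 3,
 * register 0x104 yields ((0x104 & 0xf00) << 16) | (0x01 << 16) |
 * (0x02 << 11) | (3 << 8) | (0x104 & 0xff) = 0x01011304.
 */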

static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
					unsigned int devfn,
					int where)
{
	struct tegra_pcie *pcie = bus->sysdata;
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		unsigned int offset;
		u32 base;

		offset = tegra_pcie_conf_offset(bus->number, devfn, where);

		/* move 4 KiB window to offset within the FPCI region */
		base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
		afi_writel(pcie, base, AFI_FPCI_BAR0);

		/* move to correct offset within the 4 KiB page */
		addr = pcie->cfg + (offset & (SZ_4K - 1));
	}

	return addr;
}
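
/*
 * Continuing the example above: offset 0x01011304 programs AFI_FPCI_BAR0 to
 * 0xfe100000 + (0x01011000 >> 8) = 0xfe110110 and returns pcie->cfg + 0x304.
 */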

static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *value)
{
	if (bus->number == 0)
		return pci_generic_config_read32(bus, devfn, where, size,
						 value);

	return pci_generic_config_read(bus, devfn, where, size, value);
}

static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 value)
{
	if (bus->number == 0)
		return pci_generic_config_write32(bus, devfn, where, size,
						  value);

	return pci_generic_config_write(bus, devfn, where, size, value);
}

static struct pci_ops tegra_pcie_ops = {
	.map_bus = tegra_pcie_map_bus,
	.read = tegra_pcie_config_read,
	.write = tegra_pcie_config_write,
};

static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long ret = 0;

	switch (port->index) {
	case 0:
		ret = AFI_PEX0_CTRL;
		break;

	case 1:
		ret = AFI_PEX1_CTRL;
		break;

	case 2:
		ret = soc->afi_pex2_ctrl;
		break;
	}

	return ret;
}

static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	if (port->reset_gpio) {
		gpiod_set_value(port->reset_gpio, 1);
	} else {
		value = afi_readl(port->pcie, ctrl);
		value &= ~AFI_PEX_CTRL_RST;
		afi_writel(port->pcie, value, ctrl);
	}

	usleep_range(1000, 2000);

	if (port->reset_gpio) {
		gpiod_set_value(port->reset_gpio, 0);
	} else {
		value = afi_readl(port->pcie, ctrl);
		value |= AFI_PEX_CTRL_RST;
		afi_writel(port->pcie, value, ctrl);
	}
}

static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	/* Enable AER capability */
	value = readl(port->base + RP_VEND_CTL1);
	value |= RP_VEND_CTL1_ERPT;
	writel(value, port->base + RP_VEND_CTL1);

	/* Optimal settings to enhance bandwidth */
	value = readl(port->base + RP_VEND_XP);
	value |= RP_VEND_XP_OPPORTUNISTIC_ACK;
	value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC;
	writel(value, port->base + RP_VEND_XP);

	/*
	 * LTSSM will wait for DLLP to finish before entering L1 or L2,
	 * to avoid truncation of PM messages which results in receiver errors
	 */
	value = readl(port->base + RP_VEND_XP_BIST);
	value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE;
	writel(value, port->base + RP_VEND_XP_BIST);

	value = readl(port->base + RP_PRIV_MISC);
	value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE;
	value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE;

	if (soc->update_clamp_threshold) {
		value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK |
				RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK);
		value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD |
			RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD;
	}

	writel(value, port->base + RP_PRIV_MISC);
}

static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	value = readl(port->base + RP_ECTL_2_R1);
	value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_2_r1;
	writel(value, port->base + RP_ECTL_2_R1);

	value = readl(port->base + RP_ECTL_4_R1);
	value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_4_r1 <<
				RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT;
	writel(value, port->base + RP_ECTL_4_R1);

	value = readl(port->base + RP_ECTL_5_R1);
	value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_5_r1;
	writel(value, port->base + RP_ECTL_5_R1);

	value = readl(port->base + RP_ECTL_6_R1);
	value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_6_r1;
	writel(value, port->base + RP_ECTL_6_R1);

	value = readl(port->base + RP_ECTL_2_R2);
	value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_2_r2;
	writel(value, port->base + RP_ECTL_2_R2);

	value = readl(port->base + RP_ECTL_4_R2);
	value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_4_r2 <<
				RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT;
	writel(value, port->base + RP_ECTL_4_R2);

	value = readl(port->base + RP_ECTL_5_R2);
	value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_5_r2;
	writel(value, port->base + RP_ECTL_5_R2);

	value = readl(port->base + RP_ECTL_6_R2);
	value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_6_r2;
	writel(value, port->base + RP_ECTL_6_R2);
}

static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	/*
	 * Sometimes link speed change from Gen2 to Gen1 fails due to
	 * instability in deskew logic on lane-0. Increase the deskew
	 * retry time to resolve this issue.
	 */
	if (soc->program_deskew_time) {
		value = readl(port->base + RP_VEND_CTL0);
		value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK;
		value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH;
		writel(value, port->base + RP_VEND_CTL0);
	}

	if (soc->update_fc_timer) {
		value = readl(port->base + RP_VEND_XP);
		value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
		value |= soc->update_fc_threshold;
		writel(value, port->base + RP_VEND_XP);
	}

	/*
	 * The PCIe link doesn't come up with a few legacy PCIe endpoints if
	 * the root port advertises both Gen-1 and Gen-2 speeds in Tegra.
	 * Hence, the strategy followed here is to initially advertise only
	 * Gen-1 and, after the link is up, retrain it to Gen-2 speed.
	 */
	value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
	value &= ~PCI_EXP_LNKSTA_CLS;
	value |= PCI_EXP_LNKSTA_CLS_2_5GB;
	writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
}

static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	value |= AFI_PEX_CTRL_OVERRIDE_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);

	if (soc->force_pca_enable) {
		value = readl(port->base + RP_VEND_CTL2);
		value |= RP_VEND_CTL2_PCA_ENABLE;
		writel(value, port->base + RP_VEND_CTL2);
	}

	tegra_pcie_enable_rp_features(port);

	if (soc->ectl.enable)
		tegra_pcie_program_ectl_settings(port);

	tegra_pcie_apply_sw_fixup(port);
}

static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);

	if (soc->has_pex_clkreq_en)
		value &= ~AFI_PEX_CTRL_CLKREQ_EN;

	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);

	/* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */
	value = afi_readl(port->pcie, AFI_PCIE_CONFIG);
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
	afi_writel(port->pcie, value, AFI_PCIE_CONFIG);
}

static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	devm_release_mem_region(dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(dev, port);
}

/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);

/* Tegra20 and Tegra30 PCIE require relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);

static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
	struct tegra_pcie *pcie = pdev->bus->sysdata;
	int irq;

	tegra_cpuidle_pcie_irqs_in_use();

	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
	if (!irq)
		irq = pcie->irq;

	return irq;
}

static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
	static const char * const err_msg[] = {
		"Unknown",
		"AXI slave error",
		"AXI decode error",
		"Target abort",
		"Master abort",
		"Invalid write",
		"Legacy interrupt",
		"Response decoding error",
		"AXI response decoding error",
		"Transaction timeout",
		"Slot present pin change",
		"Slot clock request change",
		"TMS clock ramp change",
		"TMS ready for power down",
		"Peer2Peer error",
	};
	struct tegra_pcie *pcie = arg;
	struct device *dev = pcie->dev;
	u32 code, signature;

	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
	afi_writel(pcie, 0, AFI_INTR_CODE);

	if (code == AFI_INTR_LEGACY)
		return IRQ_NONE;

	if (code >= ARRAY_SIZE(err_msg))
		code = 0;

	/*
	 * do not pollute kernel log with master abort reports since they
	 * happen a lot during enumeration
	 */
	if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE)
		dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
	else
		dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);

	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
	    code == AFI_INTR_FPCI_DECODE_ERROR) {
		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);

		if (code == AFI_INTR_MASTER_ABORT)
			dev_dbg(dev, "  FPCI address: %10llx\n", address);
		else
			dev_err(dev, "  FPCI address: %10llx\n", address);
	}

	return IRQ_HANDLED;
}

/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */
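/*
 * The AFI_FPCI_BAR* registers appear to hold FPCI address bits [39:8]: the
 * 0xfe100000 written to AFI_FPCI_BAR0 by tegra_pcie_map_bus() selects the
 * type 1 extended configuration space at 0xfe10000000 above.
 */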
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 size;
	struct resource_entry *entry;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);

	/* Bar 0: type 1 extended configuration space */
	size = resource_size(&pcie->cs);
	afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
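	/*
	 * Note that the FPCI target of this BAR (AFI_FPCI_BAR0) is
	 * reprogrammed on every configuration access by tegra_pcie_map_bus().
	 */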

	resource_list_for_each_entry(entry, &bridge->windows) {
		u32 fpci_bar, axi_address;
		struct resource *res = entry->res;

		size = resource_size(res);

		switch (resource_type(res)) {
		case IORESOURCE_IO:
			/* Bar 1: downstream IO bar */
			fpci_bar = 0xfdfc0000;
			axi_address = pci_pio_to_address(res->start);
			afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
			afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
			afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
			break;
		case IORESOURCE_MEM:
			fpci_bar = (((res->start >> 12) & 0x0fffffff) << 4) | 0x1;
			axi_address = res->start;

			if (res->flags & IORESOURCE_PREFETCH) {
				/* Bar 2: prefetchable memory BAR */
				afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
				afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

			} else {
				/* Bar 3: non prefetchable memory BAR */
				afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
				afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
			}
			break;
		}
	}

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	if (pcie->soc->has_cache_bars) {
		/* map all upstream transactions as uncached */
		afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
		afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
		afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
		afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
	}

	/* MSI translations are set up only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}

static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	timeout = jiffies + msecs_to_jiffies(timeout);

	while (time_before(jiffies, timeout)) {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		if (value & PADS_PLL_CTL_LOCKDET)
			return 0;
	}

	return -ETIMEDOUT;
}

static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;
	int err;

	/* initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up the PHY PLL inputs: select the PLLE output as refclock and
	 * set the TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	/* take PLL out of reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* wait for the PLL to lock */
	err = tegra_pcie_pll_wait(pcie, 500);
	if (err < 0) {
		dev_err(dev, "PLL failed to lock: %d\n", err);
		return err;
	}

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	return 0;
}

static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	/* disable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
	pads_writel(pcie, value, PADS_CTL);

	/* override IDDQ */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	return 0;
}

static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_on(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_off(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power off PHY#%u: %d\n", i,
				err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_on(pcie->phy);
		else
			err = tegra_pcie_phy_enable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power on PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_on(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power on PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_off(pcie->phy);
		else
			err = tegra_pcie_phy_disable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power off PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_off(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power off PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}

static void tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	unsigned long value;

	/* enable PLL power down */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL;

	list_for_each_entry(port, &pcie->ports, list) {
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
		value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
	}

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	/* Disable AFI dynamic clock gating and enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	value |= AFI_CONFIGURATION_CLKEN_OVERRIDE;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
}

static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->afi_rst);

	clk_disable_unprepare(pcie->pll_e);
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
	clk_disable_unprepare(pcie->afi_clk);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(dev, "failed to disable regulators: %d\n", err);
}

static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_err(dev, "failed to enable regulators: %d\n", err);

	if (!dev->pm_domain) {
		err = tegra_powergate_power_on(TEGRA_POWERGATE_PCIE);
		if (err) {
			dev_err(dev, "failed to power ungate: %d\n", err);
			goto regulator_disable;
		}
		err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE);
		if (err) {
			dev_err(dev, "failed to remove clamp: %d\n", err);
			goto powergate;
		}
	}

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(dev, "failed to enable AFI clock: %d\n", err);
		goto powergate;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(dev, "failed to enable CML clock: %d\n", err);
			goto disable_afi_clk;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
		goto disable_cml_clk;
	}

	reset_control_deassert(pcie->afi_rst);

	return 0;

disable_cml_clk:
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
disable_afi_clk:
	clk_disable_unprepare(pcie->afi_clk);
powergate:
	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
regulator_disable:
	regulator_bulk_disable(pcie->num_supplies, pcie->supplies);

	return err;
}

static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;

	/* Configure the reference clock driver */
	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);

	if (soc->num_ports > 2)
		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
}

static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;

	pcie->pex_clk = devm_clk_get(dev, "pex");
	if (IS_ERR(pcie->pex_clk))
		return PTR_ERR(pcie->pex_clk);

	pcie->afi_clk = devm_clk_get(dev, "afi");
	if (IS_ERR(pcie->afi_clk))
		return PTR_ERR(pcie->afi_clk);

	pcie->pll_e = devm_clk_get(dev, "pll_e");
	if (IS_ERR(pcie->pll_e))
		return PTR_ERR(pcie->pll_e);

	if (soc->has_cml_clk) {
		pcie->cml_clk = devm_clk_get(dev, "cml");
		if (IS_ERR(pcie->cml_clk))
			return PTR_ERR(pcie->cml_clk);
	}

	return 0;
}

static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;

	pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
	if (IS_ERR(pcie->pex_rst))
		return PTR_ERR(pcie->pex_rst);

	pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
	if (IS_ERR(pcie->afi_rst))
		return PTR_ERR(pcie->afi_rst);

	pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
	if (IS_ERR(pcie->pcie_xrst))
		return PTR_ERR(pcie->pcie_xrst);

	return 0;
}

static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	pcie->phy = devm_phy_optional_get(dev, "pcie");
	if (IS_ERR(pcie->phy)) {
		err = PTR_ERR(pcie->phy);
		dev_err(dev, "failed to get PHY: %d\n", err);
		return err;
	}

	err = phy_init(pcie->phy);
	if (err < 0) {
		dev_err(dev, "failed to initialize PHY: %d\n", err);
		return err;
	}

	pcie->legacy_phy = true;

	return 0;
}

static struct phy *devm_of_phy_optional_get_index(struct device *dev,
						  struct device_node *np,
						  const char *consumer,
						  unsigned int index)
{
	struct phy *phy;
	char *name;

	name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
	if (!name)
		return ERR_PTR(-ENOMEM);

	phy = devm_of_phy_optional_get(dev, np, name);
	kfree(name);

	return phy;
}

static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	struct phy *phy;
	unsigned int i;
	int err;

	port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
	if (!port->phys)
		return -ENOMEM;

	for (i = 0; i < port->lanes; i++) {
		phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
		if (IS_ERR(phy)) {
			dev_err(dev, "failed to get PHY#%u: %ld\n", i,
				PTR_ERR(phy));
			return PTR_ERR(phy);
		}

		err = phy_init(phy);
		if (err < 0) {
			dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
				err);
			return err;
		}

		port->phys[i] = phy;
	}

	return 0;
}

static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct device_node *np = pcie->dev->of_node;
	struct tegra_pcie_port *port;
	int err;

	if (!soc->has_gen2 || of_property_present(np, "phys"))
		return tegra_pcie_phys_get_legacy(pcie);

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_get_phys(port);
		if (err < 0)
			return err;
	}

	return 0;
}

static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
{
	struct tegra_pcie_port *port;
	struct device *dev = pcie->dev;
	int err, i;

	if (pcie->legacy_phy) {
		err = phy_exit(pcie->phy);
		if (err < 0)
			dev_err(dev, "failed to teardown PHY: %d\n", err);
		return;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		for (i = 0; i < port->lanes; i++) {
			err = phy_exit(port->phys[i]);
			if (err < 0)
				dev_err(dev, "failed to teardown PHY#%u: %d\n",
					i, err);
		}
	}
}

static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(dev, "failed to get resets: %d\n", err);
		return err;
	}

	if (soc->program_uphy) {
		err = tegra_pcie_phys_get(pcie);
		if (err < 0) {
			dev_err(dev, "failed to get PHYs: %d\n", err);
			return err;
		}
	}

	pcie->pads = devm_platform_ioremap_resource_byname(pdev, "pads");
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto phys_put;
	}

	pcie->afi = devm_platform_ioremap_resource_byname(pdev, "afi");
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto phys_put;
	}

	/* request configuration space, but remap later, on demand */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto phys_put;
	}

	pcie->cs = *res;

	/* constrain configuration space to 4 KiB */
	resource_set_size(&pcie->cs, SZ_4K);

	pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
	if (IS_ERR(pcie->cfg)) {
		err = PTR_ERR(pcie->cfg);
		goto phys_put;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0)
		goto phys_put;

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(dev, "failed to register IRQ: %d\n", err);
		goto phys_put;
	}

	return 0;

phys_put:
	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);

	return err;
}

static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;

	if (pcie->irq > 0)
		free_irq(pcie->irq, pcie);

	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);

	return 0;
}

static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;
	u32 val;
	u8 ack_bit;

	val = afi_readl(pcie, AFI_PCIE_PME);
	val |= (0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);

	ack_bit = soc->ports[port->index].pme.ack_bit;
	err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val,
				 val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT);
	if (err)
		dev_err(pcie->dev, "PME Ack is not received on port: %d\n",
			port->index);

	usleep_range(10000, 11000);

	val = afi_readl(pcie, AFI_PCIE_PME);
	val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);
}

static void tegra_pcie_msi_irq(struct irq_desc *desc)
{
	struct tegra_pcie *pcie = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct tegra_msi *msi = &pcie->msi;
	struct device *dev = pcie->dev;
	unsigned int i;

	chained_irq_enter(chip, desc);

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC(i));

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			unsigned int index = i * 32 + offset;
			int ret;

			ret = generic_handle_domain_irq(msi->domain, index);
			if (ret) {
				/*
				 * That's weird: who triggered this?
				 * Just clear it.
				 */
				dev_info(dev, "unexpected MSI\n");
				afi_writel(pcie, BIT(index % 32), AFI_MSI_VEC(index));
			}

			/* see if there's any more pending in this vector */
			reg = afi_readl(pcie, AFI_MSI_VEC(i));
		}
	}

	chained_irq_exit(chip, desc);
}

static void tegra_msi_irq_ack(struct irq_data *d)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	unsigned int index = d->hwirq / 32;

	/* clear the interrupt */
	afi_writel(pcie, BIT(d->hwirq % 32), AFI_MSI_VEC(index));
}

static void tegra_msi_irq_mask(struct irq_data *d)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	unsigned int index = d->hwirq / 32;
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&msi->mask_lock, flags);
	value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
	value &= ~BIT(d->hwirq % 32);
	afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
	spin_unlock_irqrestore(&msi->mask_lock, flags);
}

static void tegra_msi_irq_unmask(struct irq_data *d)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	unsigned int index = d->hwirq / 32;
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&msi->mask_lock, flags);
	value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
	value |= BIT(d->hwirq % 32);
	afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
	spin_unlock_irqrestore(&msi->mask_lock, flags);
}

static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(data);

	msg->address_lo = lower_32_bits(msi->phys);
	msg->address_hi = upper_32_bits(msi->phys);
	msg->data = data->hwirq;
}

static struct irq_chip tegra_msi_bottom_chip = {
	.name			= "Tegra MSI",
	.irq_ack		= tegra_msi_irq_ack,
	.irq_mask		= tegra_msi_irq_mask,
	.irq_unmask		= tegra_msi_irq_unmask,
	.irq_compose_msi_msg	= tegra_compose_msi_msg,
};

static int tegra_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *args)
{
	struct tegra_msi *msi = domain->host_data;
	unsigned int i;
	int hwirq;

	mutex_lock(&msi->map_lock);

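	/*
	 * Multi-MSI requires a naturally aligned, power-of-two block of
	 * vectors, hence the order-based region allocation.
	 */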
	hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));

	mutex_unlock(&msi->map_lock);

	if (hwirq < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &tegra_msi_bottom_chip, domain->host_data,
				    handle_edge_irq, NULL, NULL);

	tegra_cpuidle_pcie_irqs_in_use();

	return 0;
}

static void tegra_msi_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct tegra_msi *msi = domain->host_data;

	mutex_lock(&msi->map_lock);

	bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));

	mutex_unlock(&msi->map_lock);
}

static const struct irq_domain_ops tegra_msi_domain_ops = {
	.alloc = tegra_msi_domain_alloc,
	.free = tegra_msi_domain_free,
};

static const struct msi_parent_ops tegra_msi_parent_ops = {
	.supported_flags	= (MSI_GENERIC_FLAGS_MASK	|
				   MSI_FLAG_PCI_MSIX),
	.required_flags		= (MSI_FLAG_USE_DEF_DOM_OPS	|
				   MSI_FLAG_USE_DEF_CHIP_OPS	|
				   MSI_FLAG_PCI_MSI_MASK_PARENT	|
				   MSI_FLAG_NO_AFFINITY),
	.chip_flags		= MSI_CHIP_FLAG_SET_ACK,
	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

static int tegra_allocate_domains(struct tegra_msi *msi)
{
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
	struct irq_domain_info info = {
		.fwnode		= fwnode,
		.ops		= &tegra_msi_domain_ops,
		.size		= INT_PCI_MSI_NR,
		.host_data	= msi,
	};

	msi->domain = msi_create_parent_irq_domain(&info, &tegra_msi_parent_ops);
	if (!msi->domain) {
		dev_err(pcie->dev, "failed to create MSI domain\n");
		return -ENOMEM;
	}
	return 0;
}

static void tegra_free_domains(struct tegra_msi *msi)
{
	irq_domain_remove(msi->domain);
}

static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct tegra_msi *msi = &pcie->msi;
	struct device *dev = pcie->dev;
	int err;

	mutex_init(&msi->map_lock);
	spin_lock_init(&msi->mask_lock);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = tegra_allocate_domains(msi);
		if (err)
			return err;
	}

	err = platform_get_irq_byname(pdev, "msi");
	if (err < 0)
		goto free_irq_domain;

	msi->irq = err;

	irq_set_chained_handler_and_data(msi->irq, tegra_pcie_msi_irq, pcie);

	/*
	 * Though the PCIe controller can address a >32-bit address space, to
	 * accommodate endpoints that support only a 32-bit MSI target
	 * address, the mask is set to 32 bits to make sure that the MSI
	 * target address is always a 32-bit address.
	 */
1735 	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
1736 	if (err < 0) {
1737 		dev_err(dev, "failed to set DMA coherent mask: %d\n", err);
1738 		goto free_irq;
1739 	}
1740 
1741 	msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
1742 				    DMA_ATTR_NO_KERNEL_MAPPING);
1743 	if (!msi->virt) {
1744 		dev_err(dev, "failed to allocate DMA memory for MSI\n");
1745 		err = -ENOMEM;
1746 		goto free_irq;
1747 	}
1748 
1749 	return 0;
1750 
1751 free_irq:
1752 	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
1753 free_irq_domain:
1754 	if (IS_ENABLED(CONFIG_PCI_MSI))
1755 		tegra_free_domains(msi);
1756 
1757 	return err;
1758 }
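
/*
 * A note on the allocation above: the page is mapped with
 * DMA_ATTR_NO_KERNEL_MAPPING because the CPU never accesses it; it
 * mainly reserves a bus address for endpoints to post MSI writes to.
 * The AFI, programmed with msi->phys via AFI_MSI_AXI_BAR_ST in
 * tegra_pcie_enable_msi(), is expected to latch those writes as
 * interrupt vectors rather than let them reach memory.
 */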
1759 
1760 static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1761 {
1762 	const struct tegra_pcie_soc *soc = pcie->soc;
1763 	struct tegra_msi *msi = &pcie->msi;
1764 	u32 reg, msi_state[INT_PCI_MSI_NR / 32];
1765 	int i;
1766 
1767 	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1768 	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
1769 	/* this register is in 4K increments */
1770 	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1771 
1772 	/* Restore the MSI allocation state */
1773 	bitmap_to_arr32(msi_state, msi->used, INT_PCI_MSI_NR);
1774 	for (i = 0; i < ARRAY_SIZE(msi_state); i++)
1775 		afi_writel(pcie, msi_state[i], AFI_MSI_EN_VEC(i));
1776 
1777 	/* and unmask the MSI interrupt */
1778 	reg = afi_readl(pcie, AFI_INTR_MASK);
1779 	reg |= AFI_INTR_MASK_MSI_MASK;
1780 	afi_writel(pcie, reg, AFI_INTR_MASK);
1781 }
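
/*
 * Layout used by the restore loop above: the 256-bit "used" bitmap is
 * split into eight 32-bit words, one per AFI_MSI_EN_VEC(i) register,
 * so e.g. hwirq 37 corresponds to bit 5 of AFI_MSI_EN_VEC(1).
 */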
1782 
1783 static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
1784 {
1785 	struct tegra_msi *msi = &pcie->msi;
1786 	unsigned int i, irq;
1787 
1788 	dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
1789 		       DMA_ATTR_NO_KERNEL_MAPPING);
1790 
1791 	for (i = 0; i < INT_PCI_MSI_NR; i++) {
1792 		irq = irq_find_mapping(msi->domain, i);
1793 		if (irq > 0)
1794 			irq_domain_free_irqs(irq, 1);
1795 	}
1796 
1797 	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
1798 
1799 	if (IS_ENABLED(CONFIG_PCI_MSI))
1800 		tegra_free_domains(msi);
1801 }
1802 
1803 static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1804 {
1805 	u32 value;
1806 
1807 	/* mask the MSI interrupt */
1808 	value = afi_readl(pcie, AFI_INTR_MASK);
1809 	value &= ~AFI_INTR_MASK_MSI_MASK;
1810 	afi_writel(pcie, value, AFI_INTR_MASK);
1811 
1812 	return 0;
1813 }
1814 
1815 static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie)
1816 {
1817 	u32 value;
1818 
1819 	value = afi_readl(pcie, AFI_INTR_MASK);
1820 	value &= ~AFI_INTR_MASK_INT_MASK;
1821 	afi_writel(pcie, value, AFI_INTR_MASK);
1822 }
1823 
1824 static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1825 				      u32 *xbar)
1826 {
1827 	struct device *dev = pcie->dev;
1828 	struct device_node *np = dev->of_node;
1829 
1830 	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
1831 		switch (lanes) {
1832 		case 0x010004:
1833 			dev_info(dev, "4x1, 1x1 configuration\n");
1834 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
1835 			return 0;
1836 
1837 		case 0x010102:
1838 			dev_info(dev, "2x1, 1x1, 1x1 configuration\n");
1839 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1840 			return 0;
1841 
1842 		case 0x010101:
1843 			dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
1844 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
1845 			return 0;
1846 
1847 		default:
1848 			dev_info(dev, "invalid configuration in DT, "
1849 				 "switching to default 2x1, 1x1, 1x1 "
1850 				 "configuration\n");
1851 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1852 			return 0;
1853 		}
1854 	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
1855 		   of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
1856 		switch (lanes) {
1857 		case 0x0000104:
1858 			dev_info(dev, "4x1, 1x1 configuration\n");
1859 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1860 			return 0;
1861 
1862 		case 0x0000102:
1863 			dev_info(dev, "2x1, 1x1 configuration\n");
1864 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1865 			return 0;
1866 		}
1867 	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1868 		switch (lanes) {
1869 		case 0x00000204:
1870 			dev_info(dev, "4x1, 2x1 configuration\n");
1871 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1872 			return 0;
1873 
1874 		case 0x00020202:
1875 			dev_info(dev, "2x3 configuration\n");
1876 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1877 			return 0;
1878 
1879 		case 0x00010104:
1880 			dev_info(dev, "4x1, 1x2 configuration\n");
1881 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1882 			return 0;
1883 		}
1884 	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1885 		switch (lanes) {
1886 		case 0x00000004:
1887 			dev_info(dev, "single-mode configuration\n");
1888 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1889 			return 0;
1890 
1891 		case 0x00000202:
1892 			dev_info(dev, "dual-mode configuration\n");
1893 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1894 			return 0;
1895 		}
1896 	}
1897 
1898 	return -EINVAL;
1899 }
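
/*
 * The "lanes" word decoded above is assembled by tegra_pcie_parse_dt(),
 * which packs each root port's nvidia,num-lanes value into byte "index"
 * (value << (index * 8)). For example, a Tegra20 board with two ports
 * of two lanes each yields (2 << 0) | (2 << 8) == 0x00000202, matching
 * the dual-mode case above.
 */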
1900 
1901 /*
1902  * Check whether a given set of supplies is available in a device tree node.
1903  * This is used to check whether the new or the legacy device tree bindings
1904  * should be used.
1905  */
1906 static bool of_regulator_bulk_available(struct device_node *np,
1907 					struct regulator_bulk_data *supplies,
1908 					unsigned int num_supplies)
1909 {
1910 	char property[32];
1911 	unsigned int i;
1912 
1913 	for (i = 0; i < num_supplies; i++) {
1914 		snprintf(property, 32, "%s-supply", supplies[i].supply);
1915 
1916 		if (!of_property_present(np, property))
1917 			return false;
1918 	}
1919 
1920 	return true;
1921 }
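
/*
 * For each requested supply this checks the corresponding device tree
 * property: an entry such as "vddio-pex-ctl" is considered available
 * only if the node carries a "vddio-pex-ctl-supply" property.
 */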
1922 
1923 /*
1924  * Old versions of the device tree binding for this device used a set of power
1925  * supplies that didn't match the hardware inputs. This happened to work for a
1926  * number of cases but is not future-proof. However, to preserve backwards
1927  * compatibility with old device trees, this function will try to use the old
1928  * set of supplies.
1929  */
1930 static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1931 {
1932 	struct device *dev = pcie->dev;
1933 	struct device_node *np = dev->of_node;
1934 
1935 	if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1936 		pcie->num_supplies = 3;
1937 	else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1938 		pcie->num_supplies = 2;
1939 
1940 	if (pcie->num_supplies == 0) {
1941 		dev_err(dev, "device %pOF not supported in legacy mode\n", np);
1942 		return -ENODEV;
1943 	}
1944 
1945 	pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1946 				      sizeof(*pcie->supplies),
1947 				      GFP_KERNEL);
1948 	if (!pcie->supplies)
1949 		return -ENOMEM;
1950 
1951 	pcie->supplies[0].supply = "pex-clk";
1952 	pcie->supplies[1].supply = "vdd";
1953 
1954 	if (pcie->num_supplies > 2)
1955 		pcie->supplies[2].supply = "avdd";
1956 
1957 	return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
1958 }
1959 
1960 /*
1961  * Obtains the list of regulators required for a particular generation of the
1962  * IP block.
1963  *
1964  * This would've been nice to do simply by providing static tables for use
1965  * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
1966  * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
1967  * and either seems to be optional depending on which ports are being used.
1968  */
1969 static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
1970 {
1971 	struct device *dev = pcie->dev;
1972 	struct device_node *np = dev->of_node;
1973 	unsigned int i = 0;
1974 
1975 	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
1976 		pcie->num_supplies = 4;
1977 
1978 		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1979 					      sizeof(*pcie->supplies),
1980 					      GFP_KERNEL);
1981 		if (!pcie->supplies)
1982 			return -ENOMEM;
1983 
1984 		pcie->supplies[i++].supply = "dvdd-pex";
1985 		pcie->supplies[i++].supply = "hvdd-pex-pll";
1986 		pcie->supplies[i++].supply = "hvdd-pex";
1987 		pcie->supplies[i++].supply = "vddio-pexctl-aud";
1988 	} else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
1989 		pcie->num_supplies = 3;
1990 
1991 		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1992 					      sizeof(*pcie->supplies),
1993 					      GFP_KERNEL);
1994 		if (!pcie->supplies)
1995 			return -ENOMEM;
1996 
1997 		pcie->supplies[i++].supply = "hvddio-pex";
1998 		pcie->supplies[i++].supply = "dvddio-pex";
1999 		pcie->supplies[i++].supply = "vddio-pex-ctl";
2000 	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
2001 		pcie->num_supplies = 4;
2002 
2003 		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2004 					      sizeof(*pcie->supplies),
2005 					      GFP_KERNEL);
2006 		if (!pcie->supplies)
2007 			return -ENOMEM;
2008 
2009 		pcie->supplies[i++].supply = "avddio-pex";
2010 		pcie->supplies[i++].supply = "dvddio-pex";
2011 		pcie->supplies[i++].supply = "hvdd-pex";
2012 		pcie->supplies[i++].supply = "vddio-pex-ctl";
2013 	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
2014 		bool need_pexa = false, need_pexb = false;
2015 
2016 		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
2017 		if (lane_mask & 0x0f)
2018 			need_pexa = true;
2019 
2020 		/* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
2021 		if (lane_mask & 0x30)
2022 			need_pexb = true;
2023 
2024 		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
2025 					 (need_pexb ? 2 : 0);
2026 
2027 		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2028 					      sizeof(*pcie->supplies),
2029 					      GFP_KERNEL);
2030 		if (!pcie->supplies)
2031 			return -ENOMEM;
2032 
2033 		pcie->supplies[i++].supply = "avdd-pex-pll";
2034 		pcie->supplies[i++].supply = "hvdd-pex";
2035 		pcie->supplies[i++].supply = "vddio-pex-ctl";
2036 		pcie->supplies[i++].supply = "avdd-plle";
2037 
2038 		if (need_pexa) {
2039 			pcie->supplies[i++].supply = "avdd-pexa";
2040 			pcie->supplies[i++].supply = "vdd-pexa";
2041 		}
2042 
2043 		if (need_pexb) {
2044 			pcie->supplies[i++].supply = "avdd-pexb";
2045 			pcie->supplies[i++].supply = "vdd-pexb";
2046 		}
2047 	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
2048 		pcie->num_supplies = 5;
2049 
2050 		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2051 					      sizeof(*pcie->supplies),
2052 					      GFP_KERNEL);
2053 		if (!pcie->supplies)
2054 			return -ENOMEM;
2055 
2056 		pcie->supplies[0].supply = "avdd-pex";
2057 		pcie->supplies[1].supply = "vdd-pex";
2058 		pcie->supplies[2].supply = "avdd-pex-pll";
2059 		pcie->supplies[3].supply = "avdd-plle";
2060 		pcie->supplies[4].supply = "vddio-pex-clk";
2061 	}
2062 
2063 	if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
2064 					pcie->num_supplies))
2065 		return devm_regulator_bulk_get(dev, pcie->num_supplies,
2066 					       pcie->supplies);
2067 
2068 	/*
2069 	 * If not all regulators are available for this new scheme, assume
2070 	 * that the device tree complies with an older version of the device
2071 	 * tree binding.
2072 	 */
2073 	dev_info(dev, "using legacy DT binding for power supplies\n");
2074 
2075 	devm_kfree(dev, pcie->supplies);
2076 	pcie->num_supplies = 0;
2077 
2078 	return tegra_pcie_get_legacy_regulators(pcie);
2079 }
2080 
2081 static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
2082 {
2083 	struct device *dev = pcie->dev;
2084 	struct device_node *np = dev->of_node;
2085 	const struct tegra_pcie_soc *soc = pcie->soc;
2086 	u32 lanes = 0, mask = 0;
2087 	unsigned int lane = 0;
2088 	int err;
2089 
2090 	/* parse root ports */
2091 	for_each_child_of_node_scoped(np, port) {
2092 		struct tegra_pcie_port *rp;
2093 		unsigned int index;
2094 		u32 value;
2095 		char *label;
2096 
2097 		err = of_pci_get_devfn(port);
2098 		if (err < 0)
2099 			return dev_err_probe(dev, err, "failed to parse address\n");
2100 
2101 		index = PCI_SLOT(err);
2102 
2103 		if (index < 1 || index > soc->num_ports)
2104 			return dev_err_probe(dev, -EINVAL,
2105 					     "invalid port number: %d\n", index);
2106 
2107 		index--;
2108 
2109 		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
2110 		if (err < 0)
2111 			return dev_err_probe(dev, err,
2112 					     "failed to parse # of lanes\n");
2113 
2114 		if (value > 16)
2115 			return dev_err_probe(dev, -EINVAL,
2116 					     "invalid # of lanes: %u\n", value);
2117 
2118 		lanes |= value << (index << 3);
2119 
2120 		if (!of_device_is_available(port)) {
2121 			lane += value;
2122 			continue;
2123 		}
2124 
2125 		mask |= ((1 << value) - 1) << lane;
2126 		lane += value;
2127 
2128 		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
2129 		if (!rp)
2130 			return -ENOMEM;
2131 
2132 		err = of_address_to_resource(port, 0, &rp->regs);
2133 		if (err < 0)
2134 			return dev_err_probe(dev, err, "failed to parse address\n");
2135 
2136 		INIT_LIST_HEAD(&rp->list);
2137 		rp->index = index;
2138 		rp->lanes = value;
2139 		rp->pcie = pcie;
2140 		rp->np = port;
2141 
2142 		rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
2143 		if (IS_ERR(rp->base))
2144 			return PTR_ERR(rp->base);
2145 
2146 		label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
2147 		if (!label)
2148 			return -ENOMEM;
2149 
2150 		/*
2151 		 * devm_fwnode_gpiod_get() returns -ENOENT if the reset-gpios
2152 		 * property is not populated; in that case, fall back to using
2153 		 * the AFI per-port register to toggle the PERST# SFIO line.
2154 		 */
2155 		rp->reset_gpio = devm_fwnode_gpiod_get(dev,
2156 						       of_fwnode_handle(port),
2157 						       "reset",
2158 						       GPIOD_OUT_LOW,
2159 						       label);
2160 		if (IS_ERR(rp->reset_gpio)) {
2161 			if (PTR_ERR(rp->reset_gpio) == -ENOENT)
2162 				rp->reset_gpio = NULL;
2163 			else
2164 				return dev_err_probe(dev, PTR_ERR(rp->reset_gpio),
2165 						     "failed to get reset GPIO\n");
2166 		}
2167 
2168 		list_add_tail(&rp->list, &pcie->ports);
2169 	}
2170 
2171 	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
2172 	if (err < 0)
2173 		return dev_err_probe(dev, err,
2174 				     "invalid lane configuration\n");
2175 
2176 	err = tegra_pcie_get_regulators(pcie, mask);
2177 	if (err < 0)
2178 		return err;
2179 
2180 	return 0;
2181 }
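
/*
 * An illustrative root port node (not taken from this file) that the
 * parser above would accept. The first cell of "reg" encodes the
 * device/function number, so 0x000800 yields devfn 0x08 and
 * PCI_SLOT() == 1, i.e. port index 0 after the index-- above:
 *
 *	pci@1,0 {
 *		reg = <0x000800 0 0 0 0>;
 *		status = "okay";
 *		nvidia,num-lanes = <4>;
 *	};
 */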
2182 
2183 /*
2184  * FIXME: If there are no PCIe cards attached, then calling this function
2185  * can result in the increase of the bootup time as there are big timeout
2186  * loops.
2187  */
2188 #define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
2189 static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
2190 {
2191 	struct device *dev = port->pcie->dev;
2192 	unsigned int retries = 3;
2193 	unsigned long value;
2194 
2195 	/* override presence detection */
2196 	value = readl(port->base + RP_PRIV_MISC);
2197 	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
2198 	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
2199 	writel(value, port->base + RP_PRIV_MISC);
2200 
2201 	do {
2202 		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2203 
2204 		do {
2205 			value = readl(port->base + RP_VEND_XP);
2206 
2207 			if (value & RP_VEND_XP_DL_UP)
2208 				break;
2209 
2210 			usleep_range(1000, 2000);
2211 		} while (--timeout);
2212 
2213 		if (!timeout) {
2214 			dev_dbg(dev, "link %u down, retrying\n", port->index);
2215 			goto retry;
2216 		}
2217 
2218 		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2219 
2220 		do {
2221 			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2222 
2223 			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2224 				return true;
2225 
2226 			usleep_range(1000, 2000);
2227 		} while (--timeout);
2228 
2229 retry:
2230 		tegra_pcie_port_reset(port);
2231 	} while (--retries);
2232 
2233 	return false;
2234 }
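
/*
 * Rough worst-case timing for the check above, which is what the FIXME
 * before TEGRA_PCIE_LINKUP_TIMEOUT warns about: each poll loop runs up
 * to 200 iterations of usleep_range(1000, 2000), i.e. 0.2-0.4 seconds,
 * and with two loops per attempt and three retries a port without an
 * endpoint can delay boot by a couple of seconds.
 */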
2235 
2236 static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie)
2237 {
2238 	struct device *dev = pcie->dev;
2239 	struct tegra_pcie_port *port;
2240 	ktime_t deadline;
2241 	u32 value;
2242 
2243 	list_for_each_entry(port, &pcie->ports, list) {
2244 		/*
2245 		 * "Supported Link Speeds Vector" in "Link Capabilities 2"
2246 		 * is not supported by Tegra. tegra_pcie_change_link_speed()
2247 		 * is called only for Tegra chips which support Gen2.
2248 		 * So there is no harm if the supported link speed is not verified.
2249 		 */
2250 		value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
2251 		value &= ~PCI_EXP_LNKSTA_CLS;
2252 		value |= PCI_EXP_LNKSTA_CLS_5_0GB;
2253 		writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
2254 
2255 		/*
2256 		 * Poll until link comes back from recovery to avoid race
2257 		 * condition.
2258 		 */
2259 		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2260 
2261 		while (ktime_before(ktime_get(), deadline)) {
2262 			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2263 			if ((value & PCI_EXP_LNKSTA_LT) == 0)
2264 				break;
2265 
2266 			usleep_range(2000, 3000);
2267 		}
2268 
2269 		if (value & PCI_EXP_LNKSTA_LT)
2270 			dev_warn(dev, "PCIe port %u link is in recovery\n",
2271 				 port->index);
2272 
2273 		/* Retrain the link */
2274 		value = readl(port->base + RP_LINK_CONTROL_STATUS);
2275 		value |= PCI_EXP_LNKCTL_RL;
2276 		writel(value, port->base + RP_LINK_CONTROL_STATUS);
2277 
2278 		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2279 
2280 		while (ktime_before(ktime_get(), deadline)) {
2281 			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2282 			if ((value & PCI_EXP_LNKSTA_LT) == 0)
2283 				break;
2284 
2285 			usleep_range(2000, 3000);
2286 		}
2287 
2288 		if (value & PCI_EXP_LNKSTA_LT)
2289 			dev_err(dev, "failed to retrain link of port %u\n",
2290 				port->index);
2291 	}
2292 }
2293 
2294 static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
2295 {
2296 	struct device *dev = pcie->dev;
2297 	struct tegra_pcie_port *port, *tmp;
2298 
2299 	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2300 		dev_info(dev, "probing port %u, using %u lanes\n",
2301 			 port->index, port->lanes);
2302 
2303 		tegra_pcie_port_enable(port);
2304 	}
2305 
2306 	/* Start LTSSM from Tegra side */
2307 	reset_control_deassert(pcie->pcie_xrst);
2308 
2309 	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2310 		if (tegra_pcie_port_check_link(port))
2311 			continue;
2312 
2313 		dev_info(dev, "link %u down, ignoring\n", port->index);
2314 
2315 		tegra_pcie_port_disable(port);
2316 		tegra_pcie_port_free(port);
2317 	}
2318 
2319 	if (pcie->soc->has_gen2)
2320 		tegra_pcie_change_link_speed(pcie);
2321 }
2322 
2323 static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
2324 {
2325 	struct tegra_pcie_port *port, *tmp;
2326 
2327 	reset_control_assert(pcie->pcie_xrst);
2328 
2329 	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2330 		tegra_pcie_port_disable(port);
2331 }
2332 
2333 static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = {
2334 	{ .pme.turnoff_bit = 0, .pme.ack_bit =  5 },
2335 	{ .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
2336 };
2337 
2338 static const struct tegra_pcie_soc tegra20_pcie = {
2339 	.num_ports = 2,
2340 	.ports = tegra20_pcie_ports,
2341 	.msi_base_shift = 0,
2342 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
2343 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
2344 	.pads_refclk_cfg0 = 0xfa5cfa5c,
2345 	.has_pex_clkreq_en = false,
2346 	.has_pex_bias_ctrl = false,
2347 	.has_intr_prsnt_sense = false,
2348 	.has_cml_clk = false,
2349 	.has_gen2 = false,
2350 	.force_pca_enable = false,
2351 	.program_uphy = true,
2352 	.update_clamp_threshold = false,
2353 	.program_deskew_time = false,
2354 	.update_fc_timer = false,
2355 	.has_cache_bars = true,
2356 	.ectl.enable = false,
2357 };
2358 
2359 static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
2360 	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
2361 	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
2362 	{ .pme.turnoff_bit = 16, .pme.ack_bit = 18 },
2363 };
2364 
2365 static const struct tegra_pcie_soc tegra30_pcie = {
2366 	.num_ports = 3,
2367 	.ports = tegra30_pcie_ports,
2368 	.msi_base_shift = 8,
2369 	.afi_pex2_ctrl = 0x128,
2370 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2371 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2372 	.pads_refclk_cfg0 = 0xfa5cfa5c,
2373 	.pads_refclk_cfg1 = 0xfa5cfa5c,
2374 	.has_pex_clkreq_en = true,
2375 	.has_pex_bias_ctrl = true,
2376 	.has_intr_prsnt_sense = true,
2377 	.has_cml_clk = true,
2378 	.has_gen2 = false,
2379 	.force_pca_enable = false,
2380 	.program_uphy = true,
2381 	.update_clamp_threshold = false,
2382 	.program_deskew_time = false,
2383 	.update_fc_timer = false,
2384 	.has_cache_bars = false,
2385 	.ectl.enable = false,
2386 };
2387 
2388 static const struct tegra_pcie_soc tegra124_pcie = {
2389 	.num_ports = 2,
2390 	.ports = tegra20_pcie_ports,
2391 	.msi_base_shift = 8,
2392 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2393 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2394 	.pads_refclk_cfg0 = 0x44ac44ac,
2395 	.has_pex_clkreq_en = true,
2396 	.has_pex_bias_ctrl = true,
2397 	.has_intr_prsnt_sense = true,
2398 	.has_cml_clk = true,
2399 	.has_gen2 = true,
2400 	.force_pca_enable = false,
2401 	.program_uphy = true,
2402 	.update_clamp_threshold = true,
2403 	.program_deskew_time = false,
2404 	.update_fc_timer = false,
2405 	.has_cache_bars = false,
2406 	.ectl.enable = false,
2407 };
2408 
2409 static const struct tegra_pcie_soc tegra210_pcie = {
2410 	.num_ports = 2,
2411 	.ports = tegra20_pcie_ports,
2412 	.msi_base_shift = 8,
2413 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2414 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2415 	.pads_refclk_cfg0 = 0x90b890b8,
2416 	/* FC threshold is bit[25:18] */
2417 	.update_fc_threshold = 0x01800000,
2418 	.has_pex_clkreq_en = true,
2419 	.has_pex_bias_ctrl = true,
2420 	.has_intr_prsnt_sense = true,
2421 	.has_cml_clk = true,
2422 	.has_gen2 = true,
2423 	.force_pca_enable = true,
2424 	.program_uphy = true,
2425 	.update_clamp_threshold = true,
2426 	.program_deskew_time = true,
2427 	.update_fc_timer = true,
2428 	.has_cache_bars = false,
2429 	.ectl = {
2430 		.regs = {
2431 			.rp_ectl_2_r1 = 0x0000000f,
2432 			.rp_ectl_4_r1 = 0x00000067,
2433 			.rp_ectl_5_r1 = 0x55010000,
2434 			.rp_ectl_6_r1 = 0x00000001,
2435 			.rp_ectl_2_r2 = 0x0000008f,
2436 			.rp_ectl_4_r2 = 0x000000c7,
2437 			.rp_ectl_5_r2 = 0x55010000,
2438 			.rp_ectl_6_r2 = 0x00000001,
2439 		},
2440 		.enable = true,
2441 	},
2442 };
2443 
2444 static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
2445 	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
2446 	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
2447 	{ .pme.turnoff_bit = 12, .pme.ack_bit = 14 },
2448 };
2449 
2450 static const struct tegra_pcie_soc tegra186_pcie = {
2451 	.num_ports = 3,
2452 	.ports = tegra186_pcie_ports,
2453 	.msi_base_shift = 8,
2454 	.afi_pex2_ctrl = 0x19c,
2455 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2456 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2457 	.pads_refclk_cfg0 = 0x80b880b8,
2458 	.pads_refclk_cfg1 = 0x000480b8,
2459 	.has_pex_clkreq_en = true,
2460 	.has_pex_bias_ctrl = true,
2461 	.has_intr_prsnt_sense = true,
2462 	.has_cml_clk = false,
2463 	.has_gen2 = true,
2464 	.force_pca_enable = false,
2465 	.program_uphy = false,
2466 	.update_clamp_threshold = false,
2467 	.program_deskew_time = false,
2468 	.update_fc_timer = false,
2469 	.has_cache_bars = false,
2470 	.ectl.enable = false,
2471 };
2472 
2473 static const struct of_device_id tegra_pcie_of_match[] = {
2474 	{ .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
2475 	{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
2476 	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
2477 	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
2478 	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
2479 	{ },
2480 };
2481 MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
2482 
2483 static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
2484 {
2485 	struct tegra_pcie *pcie = s->private;
2486 
2487 	if (list_empty(&pcie->ports))
2488 		return NULL;
2489 
2490 	seq_puts(s, "Index  Status\n");
2491 
2492 	return seq_list_start(&pcie->ports, *pos);
2493 }
2494 
2495 static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
2496 {
2497 	struct tegra_pcie *pcie = s->private;
2498 
2499 	return seq_list_next(v, &pcie->ports, pos);
2500 }
2501 
2502 static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
2503 {
2504 }
2505 
2506 static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
2507 {
2508 	bool up = false, active = false;
2509 	struct tegra_pcie_port *port;
2510 	unsigned int value;
2511 
2512 	port = list_entry(v, struct tegra_pcie_port, list);
2513 
2514 	value = readl(port->base + RP_VEND_XP);
2515 
2516 	if (value & RP_VEND_XP_DL_UP)
2517 		up = true;
2518 
2519 	value = readl(port->base + RP_LINK_CONTROL_STATUS);
2520 
2521 	if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2522 		active = true;
2523 
2524 	seq_printf(s, "%2u     ", port->index);
2525 
2526 	if (up)
2527 		seq_puts(s, "up");
2528 
2529 	if (active) {
2530 		if (up)
2531 			seq_puts(s, ", ");
2532 
2533 		seq_puts(s, "active");
2534 	}
2535 
2536 	seq_puts(s, "\n");
2537 	return 0;
2538 }
2539 
2540 static const struct seq_operations tegra_pcie_ports_sops = {
2541 	.start = tegra_pcie_ports_seq_start,
2542 	.next = tegra_pcie_ports_seq_next,
2543 	.stop = tegra_pcie_ports_seq_stop,
2544 	.show = tegra_pcie_ports_seq_show,
2545 };
2546 
2547 DEFINE_SEQ_ATTRIBUTE(tegra_pcie_ports);
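
/*
 * DEFINE_SEQ_ATTRIBUTE() generates tegra_pcie_ports_fops from the
 * seq_operations above. Illustrative output of the resulting "ports"
 * debugfs file for a board with one trained port (ports whose link
 * never came up are freed earlier and do not appear):
 *
 *	Index  Status
 *	 0     up, active
 */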
2548 
2549 static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
2550 {
2551 	debugfs_remove_recursive(pcie->debugfs);
2552 	pcie->debugfs = NULL;
2553 }
2554 
2555 static void tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2556 {
2557 	pcie->debugfs = debugfs_create_dir("pcie", NULL);
2558 
2559 	debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs, pcie,
2560 			    &tegra_pcie_ports_fops);
2561 }
2562 
2563 static int tegra_pcie_probe(struct platform_device *pdev)
2564 {
2565 	struct device *dev = &pdev->dev;
2566 	struct pci_host_bridge *host;
2567 	struct tegra_pcie *pcie;
2568 	int err;
2569 
2570 	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
2571 	if (!host)
2572 		return -ENOMEM;
2573 
2574 	pcie = pci_host_bridge_priv(host);
2575 	host->sysdata = pcie;
2576 	platform_set_drvdata(pdev, pcie);
2577 
2578 	pcie->soc = of_device_get_match_data(dev);
2579 	INIT_LIST_HEAD(&pcie->ports);
2580 	pcie->dev = dev;
2581 
2582 	err = tegra_pcie_parse_dt(pcie);
2583 	if (err < 0)
2584 		return err;
2585 
2586 	err = tegra_pcie_get_resources(pcie);
2587 	if (err < 0) {
2588 		dev_err(dev, "failed to request resources: %d\n", err);
2589 		return err;
2590 	}
2591 
2592 	err = tegra_pcie_msi_setup(pcie);
2593 	if (err < 0) {
2594 		dev_err(dev, "failed to enable MSI support: %d\n", err);
2595 		goto put_resources;
2596 	}
2597 
2598 	pm_runtime_enable(pcie->dev);
2599 	err = pm_runtime_get_sync(pcie->dev);
2600 	if (err < 0) {
2601 		dev_err(dev, "failed to enable PCIe controller: %d\n", err);
2602 		goto pm_runtime_put;
2603 	}
2604 
2605 	host->ops = &tegra_pcie_ops;
2606 	host->map_irq = tegra_pcie_map_irq;
2607 
2608 	err = pci_host_probe(host);
2609 	if (err < 0) {
2610 		dev_err(dev, "failed to register host: %d\n", err);
2611 		goto pm_runtime_put;
2612 	}
2613 
2614 	if (IS_ENABLED(CONFIG_DEBUG_FS))
2615 		tegra_pcie_debugfs_init(pcie);
2616 
2617 	return 0;
2618 
2619 pm_runtime_put:
2620 	pm_runtime_put_sync(pcie->dev);
2621 	pm_runtime_disable(pcie->dev);
2622 	tegra_pcie_msi_teardown(pcie);
2623 put_resources:
2624 	tegra_pcie_put_resources(pcie);
2625 	return err;
2626 }
2627 
2628 static void tegra_pcie_remove(struct platform_device *pdev)
2629 {
2630 	struct tegra_pcie *pcie = platform_get_drvdata(pdev);
2631 	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
2632 	struct tegra_pcie_port *port, *tmp;
2633 
2634 	if (IS_ENABLED(CONFIG_DEBUG_FS))
2635 		tegra_pcie_debugfs_exit(pcie);
2636 
2637 	pci_stop_root_bus(host->bus);
2638 	pci_remove_root_bus(host->bus);
2639 	pm_runtime_put_sync(pcie->dev);
2640 	pm_runtime_disable(pcie->dev);
2641 
2642 	if (IS_ENABLED(CONFIG_PCI_MSI))
2643 		tegra_pcie_msi_teardown(pcie);
2644 
2645 	tegra_pcie_put_resources(pcie);
2646 
2647 	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2648 		tegra_pcie_port_free(port);
2649 }
2650 
2651 static int tegra_pcie_pm_suspend(struct device *dev)
2652 {
2653 	struct tegra_pcie *pcie = dev_get_drvdata(dev);
2654 	struct tegra_pcie_port *port;
2655 	int err;
2656 
2657 	list_for_each_entry(port, &pcie->ports, list)
2658 		tegra_pcie_pme_turnoff(port);
2659 
2660 	tegra_pcie_disable_ports(pcie);
2661 
2662 	/*
2663 	 * AFI_INTR is unmasked in tegra_pcie_enable_controller(), mask it to
2664 	 * avoid unwanted interrupts raised by AFI after pex_rst is asserted.
2665 	 */
2666 	tegra_pcie_disable_interrupts(pcie);
2667 
2668 	if (pcie->soc->program_uphy) {
2669 		err = tegra_pcie_phy_power_off(pcie);
2670 		if (err < 0)
2671 			dev_err(dev, "failed to power off PHY(s): %d\n", err);
2672 	}
2673 
2674 	reset_control_assert(pcie->pex_rst);
2675 	clk_disable_unprepare(pcie->pex_clk);
2676 
2677 	if (IS_ENABLED(CONFIG_PCI_MSI))
2678 		tegra_pcie_disable_msi(pcie);
2679 
2680 	pinctrl_pm_select_idle_state(dev);
2681 	tegra_pcie_power_off(pcie);
2682 
2683 	return 0;
2684 }
2685 
2686 static int tegra_pcie_pm_resume(struct device *dev)
2687 {
2688 	struct tegra_pcie *pcie = dev_get_drvdata(dev);
2689 	int err;
2690 
2691 	err = tegra_pcie_power_on(pcie);
2692 	if (err) {
2693 		dev_err(dev, "failed to power on Tegra PCIe: %d\n", err);
2694 		return err;
2695 	}
2696 
2697 	err = pinctrl_pm_select_default_state(dev);
2698 	if (err < 0) {
2699 		dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err);
2700 		goto poweroff;
2701 	}
2702 
2703 	tegra_pcie_enable_controller(pcie);
2704 	tegra_pcie_setup_translations(pcie);
2705 
2706 	if (IS_ENABLED(CONFIG_PCI_MSI))
2707 		tegra_pcie_enable_msi(pcie);
2708 
2709 	err = clk_prepare_enable(pcie->pex_clk);
2710 	if (err) {
2711 		dev_err(dev, "failed to enable PEX clock: %d\n", err);
2712 		goto pex_dpd_enable;
2713 	}
2714 
2715 	reset_control_deassert(pcie->pex_rst);
2716 
2717 	if (pcie->soc->program_uphy) {
2718 		err = tegra_pcie_phy_power_on(pcie);
2719 		if (err < 0) {
2720 			dev_err(dev, "failed to power on PHY(s): %d\n", err);
2721 			goto disable_pex_clk;
2722 		}
2723 	}
2724 
2725 	tegra_pcie_apply_pad_settings(pcie);
2726 	tegra_pcie_enable_ports(pcie);
2727 
2728 	return 0;
2729 
2730 disable_pex_clk:
2731 	reset_control_assert(pcie->pex_rst);
2732 	clk_disable_unprepare(pcie->pex_clk);
2733 pex_dpd_enable:
2734 	pinctrl_pm_select_idle_state(dev);
2735 poweroff:
2736 	tegra_pcie_power_off(pcie);
2737 
2738 	return err;
2739 }
2740 
2741 static const struct dev_pm_ops tegra_pcie_pm_ops = {
2742 	RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
2743 	NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume)
2744 };
2745 
2746 static struct platform_driver tegra_pcie_driver = {
2747 	.driver = {
2748 		.name = "tegra-pcie",
2749 		.of_match_table = tegra_pcie_of_match,
2750 		.suppress_bind_attrs = true,
2751 		.pm = &tegra_pcie_pm_ops,
2752 	},
2753 	.probe = tegra_pcie_probe,
2754 	.remove = tegra_pcie_remove,
2755 };
2756 module_platform_driver(tegra_pcie_driver);
2757