// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for the following SoCs
 * Tegra194
 * Tegra234
 *
 * Copyright (C) 2019-2022 NVIDIA Corporation.
 *
 * Author: Vidya Sagar <vidyas@nvidia.com>
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/random.h>
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/types.h>
#include "pcie-designware.h"
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#include "../../pci.h"

#define TEGRA194_DWC_IP_VER			0x490A
#define TEGRA234_DWC_IP_VER			0x562A

#define APPL_PINMUX				0x0
#define APPL_PINMUX_PEX_RST			BIT(0)
#define APPL_PINMUX_CLKREQ_OVERRIDE_EN		BIT(2)
#define APPL_PINMUX_CLKREQ_OVERRIDE		BIT(3)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN	BIT(4)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE	BIT(5)

#define APPL_CTRL				0x4
#define APPL_CTRL_SYS_PRE_DET_STATE		BIT(6)
#define APPL_CTRL_LTSSM_EN			BIT(7)
#define APPL_CTRL_HW_HOT_RST_EN			BIT(20)
#define APPL_CTRL_HW_HOT_RST_MODE_MASK		GENMASK(1, 0)
#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT		22
#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST	0x1
#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN	0x2

#define APPL_INTR_EN_L0_0			0x8
#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN	BIT(0)
#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN	BIT(4)
#define APPL_INTR_EN_L0_0_INT_INT_EN		BIT(8)
#define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN	BIT(15)
#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN	BIT(19)
#define APPL_INTR_EN_L0_0_SYS_INTR_EN		BIT(30)
#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN	BIT(31)

#define APPL_INTR_STATUS_L0			0xC
#define APPL_INTR_STATUS_L0_LINK_STATE_INT	BIT(0)
#define APPL_INTR_STATUS_L0_INT_INT		BIT(8)
#define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT	BIT(15)
#define APPL_INTR_STATUS_L0_PEX_RST_INT		BIT(16)
#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT	BIT(18)

#define APPL_INTR_EN_L1_0_0				0x1C
#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN	BIT(1)
#define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN		BIT(3)
#define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN	BIT(30)

#define APPL_INTR_STATUS_L1_0_0				0x20
#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED	BIT(1)
#define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED	BIT(3)
#define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE		BIT(30)

#define APPL_INTR_STATUS_L1_1			0x2C
#define APPL_INTR_STATUS_L1_2			0x30
#define APPL_INTR_STATUS_L1_3			0x34
#define APPL_INTR_STATUS_L1_6			0x3C
#define APPL_INTR_STATUS_L1_7			0x40
#define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED	BIT(1)

#define APPL_INTR_EN_L1_8_0			0x44
#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN		BIT(2)
#define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN	BIT(3)
#define APPL_INTR_EN_L1_8_INTX_EN		BIT(11)
#define APPL_INTR_EN_L1_8_AER_INT_EN		BIT(15)

#define APPL_INTR_STATUS_L1_8_0			0x4C
#define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK	GENMASK(11, 6)
#define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS	BIT(2)
#define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS	BIT(3)

#define APPL_INTR_STATUS_L1_9			0x54
#define APPL_INTR_STATUS_L1_10			0x58
#define APPL_INTR_STATUS_L1_11			0x64
#define APPL_INTR_STATUS_L1_13			0x74
#define APPL_INTR_STATUS_L1_14			0x78
#define APPL_INTR_STATUS_L1_15			0x7C
#define APPL_INTR_STATUS_L1_17			0x88

#define APPL_INTR_EN_L1_18				0x90
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT		BIT(2)
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR		BIT(1)
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)

#define APPL_INTR_STATUS_L1_18				0x94
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT	BIT(2)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR	BIT(1)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)

#define APPL_MSI_CTRL_1				0xAC

#define APPL_MSI_CTRL_2				0xB0

#define APPL_LEGACY_INTX			0xB8

#define APPL_LTR_MSG_1				0xC4
#define LTR_MSG_REQ				BIT(15)
#define LTR_MST_NO_SNOOP_SHIFT			16

#define APPL_LTR_MSG_2				0xC8
#define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE	BIT(3)

#define APPL_LINK_STATUS			0xCC
#define APPL_LINK_STATUS_RDLH_LINK_UP		BIT(0)

#define APPL_DEBUG				0xD0
#define APPL_DEBUG_PM_LINKST_IN_L2_LAT		BIT(21)
#define APPL_DEBUG_PM_LINKST_IN_L0		0x11
#define APPL_DEBUG_LTSSM_STATE_MASK		GENMASK(8, 3)
#define APPL_DEBUG_LTSSM_STATE_SHIFT		3
#define LTSSM_STATE_PRE_DETECT			5

#define APPL_RADM_STATUS			0xE4
#define APPL_PM_XMT_TURNOFF_STATE		BIT(0)

#define APPL_DM_TYPE				0x100
#define APPL_DM_TYPE_MASK			GENMASK(3, 0)
#define APPL_DM_TYPE_RP				0x4
#define APPL_DM_TYPE_EP				0x0

#define APPL_CFG_BASE_ADDR			0x104
#define APPL_CFG_BASE_ADDR_MASK			GENMASK(31, 12)

#define APPL_CFG_IATU_DMA_BASE_ADDR		0x108
#define APPL_CFG_IATU_DMA_BASE_ADDR_MASK	GENMASK(31, 18)

#define APPL_CFG_MISC				0x110
#define APPL_CFG_MISC_SLV_EP_MODE		BIT(14)
#define APPL_CFG_MISC_ARCACHE_MASK		GENMASK(13, 10)
#define APPL_CFG_MISC_ARCACHE_SHIFT		10
#define APPL_CFG_MISC_ARCACHE_VAL		3

#define APPL_CFG_SLCG_OVERRIDE			0x114
#define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER	BIT(0)

#define APPL_CAR_RESET_OVRD				0x12C
#define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N	BIT(0)

#define IO_BASE_IO_DECODE				BIT(0)
#define IO_BASE_IO_DECODE_BIT8				BIT(8)

#define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE		BIT(0)
#define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE	BIT(16)

#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF	0x718
#define CFG_TIMER_CTRL_ACK_NAK_SHIFT	(19)

#define N_FTS_VAL					52
#define FTS_VAL						52

#define GEN3_EQ_CONTROL_OFF			0x8a8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT	8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK	GENMASK(23, 8)
#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK	GENMASK(3, 0)

#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT	0x8D0
#define AMBA_ERROR_RESPONSE_CRS_SHIFT		3
#define AMBA_ERROR_RESPONSE_CRS_MASK		GENMASK(1, 0)
#define AMBA_ERROR_RESPONSE_CRS_OKAY		0
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF	1
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001	2

#define MSIX_ADDR_MATCH_LOW_OFF			0x940
#define MSIX_ADDR_MATCH_LOW_OFF_EN		BIT(0)
#define MSIX_ADDR_MATCH_LOW_OFF_MASK		GENMASK(31, 2)

#define MSIX_ADDR_MATCH_HIGH_OFF		0x944
#define MSIX_ADDR_MATCH_HIGH_OFF_MASK		GENMASK(31, 0)

#define PORT_LOGIC_MSIX_DOORBELL			0x948

#define CAP_SPCIE_CAP_OFF			0x154
#define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK	GENMASK(3, 0)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK	GENMASK(11, 8)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT	8

#define PME_ACK_TIMEOUT 10000

#define LTSSM_TIMEOUT 50000	/* 50ms */

#define GEN3_GEN4_EQ_PRESET_INIT	5

#define GEN1_CORE_CLK_FREQ	62500000
#define GEN2_CORE_CLK_FREQ	125000000
#define GEN3_CORE_CLK_FREQ	250000000
#define GEN4_CORE_CLK_FREQ	500000000

#define LTR_MSG_TIMEOUT		(100 * 1000)

#define PERST_DEBOUNCE_TIME	(5 * 1000)

#define EP_STATE_DISABLED	0
#define EP_STATE_ENABLED	1

static const unsigned int pcie_gen_freq[] = {
	GEN1_CORE_CLK_FREQ,
	GEN2_CORE_CLK_FREQ,
	GEN3_CORE_CLK_FREQ,
	GEN4_CORE_CLK_FREQ
};
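
/*
 * Illustrative note (not part of the original driver): the current link
 * speed field (PCI_EXP_LNKSTA_CLS) is 1-based (1 = Gen1 ... 4 = Gen4), so
 * callers index this table with "speed - 1". A hypothetical bounds-checked
 * lookup helper, falling back conservatively to the Gen1 rate, could be
 * sketched as:
 *
 *	static unsigned long tegra_pcie_core_clk_rate(u16 lnksta)
 *	{
 *		u32 speed = lnksta & PCI_EXP_LNKSTA_CLS;
 *
 *		if (speed < 1 || speed > ARRAY_SIZE(pcie_gen_freq))
 *			return GEN1_CORE_CLK_FREQ;
 *
 *		return pcie_gen_freq[speed - 1];
 *	}
 */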

struct tegra_pcie_dw_of_data {
	u32 version;
	enum dw_pcie_device_mode mode;
	bool has_msix_doorbell_access_fix;
	bool has_sbr_reset_fix;
	bool has_l1ss_exit_fix;
	bool has_ltr_req_fix;
	u32 cdm_chk_int_en_bit;
	u32 gen4_preset_vec;
	u8 n_fts[2];
};

struct tegra_pcie_dw {
	struct device *dev;
	struct resource *appl_res;
	struct resource *dbi_res;
	struct resource *atu_dma_res;
	void __iomem *appl_base;
	struct clk *core_clk;
	struct reset_control *core_apb_rst;
	struct reset_control *core_rst;
	struct dw_pcie pci;
	struct tegra_bpmp *bpmp;

	struct tegra_pcie_dw_of_data *of_data;

	bool supports_clkreq;
	bool enable_cdm_check;
	bool enable_srns;
	bool link_state;
	bool update_fc_fixup;
	bool enable_ext_refclk;
	u8 init_link_width;
	u32 msi_ctrl_int;
	u32 num_lanes;
	u32 cid;
	u32 cfg_link_cap_l1sub;
	u32 ras_des_cap;
	u32 pcie_cap_base;
	u32 aspm_cmrt;
	u32 aspm_pwr_on_t;
	u32 aspm_l0s_enter_lat;

	struct regulator *pex_ctl_supply;
	struct regulator *slot_ctl_3v3;
	struct regulator *slot_ctl_12v;

	unsigned int phy_count;
	struct phy **phys;

	struct dentry *debugfs;

	/* Endpoint mode specific */
	struct gpio_desc *pex_rst_gpiod;
	struct gpio_desc *pex_refclk_sel_gpiod;
	unsigned int pex_rst_irq;
	int ep_state;
	long link_status;
};

static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
{
	return container_of(pci, struct tegra_pcie_dw, pci);
}

static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
			       const u32 reg)
{
	writel_relaxed(value, pcie->appl_base + reg);
}

static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
{
	return readl_relaxed(pcie->appl_base + reg);
}
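
/*
 * Usage sketch (illustrative only): the driver drives most APPL registers
 * with a read-modify-write sequence built on the accessors above, e.g. to
 * set a single control bit:
 *
 *	u32 val;
 *
 *	val = appl_readl(pcie, APPL_CTRL);
 *	val |= APPL_CTRL_LTSSM_EN;
 *	appl_writel(pcie, val, APPL_CTRL);
 *
 * The relaxed MMIO accessors avoid memory barriers; a caller that needed
 * to order these accesses against DMA would have to add barriers itself.
 */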

struct tegra_pcie_soc {
	enum dw_pcie_device_mode mode;
};

static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 current_link_width;
	u16 val;

	/*
	 * NOTE: Since this scenario is uncommon and the link as such is not
	 * stable anyway, don't wait to confirm whether the link is really
	 * transitioning to Gen-2 speed.
	 */
	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
	if (val & PCI_EXP_LNKSTA_LBMS) {
		current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
				     PCI_EXP_LNKSTA_NLW_SHIFT;
		if (pcie->init_link_width > current_link_width) {
			dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL2);
			val &= ~PCI_EXP_LNKCTL2_TLS;
			val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL2, val);

			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL);
			val |= PCI_EXP_LNKCTL_RL;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL, val);
		}
	}
}
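
/*
 * Summary of the workaround above (added for clarity): when the bandwidth
 * management status shows the link retrained to a narrower width than was
 * captured at init time, the target link speed in PCI_EXP_LNKCTL2 is forced
 * down to 2.5 GT/s and PCI_EXP_LNKCTL_RL retrains the link at that rate.
 */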

static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	u32 val, status_l0, status_l1;
	u16 val_w;

	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
		if (!pcie->of_data->has_sbr_reset_fix &&
		    status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
			/* SBR & Surprise Link Down WAR */
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
			udelay(1);
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);

			val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
			val |= PORT_LOGIC_SPEED_CHANGE;
			dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
		}
	}

	if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
		if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);
			apply_bad_link_workaround(pp);
		}
		if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						  PCI_EXP_LNKSTA);
			val_w |= PCI_EXP_LNKSTA_LBMS;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKSTA, val_w);

			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);

			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						  PCI_EXP_LNKSTA);
			dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w &
				PCI_EXP_LNKSTA_CLS);
		}
	}

	if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
			dev_info(pci->dev, "CDM check complete\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
		}
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
			dev_err(pci->dev, "CDM comparison mismatch\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
		}
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
			dev_err(pci->dev, "CDM Logic error\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
		}
		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
		dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val);
	}

	return IRQ_HANDLED;
}

static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
{
	u32 val;

	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
	appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);

	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);
}

static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie_ep *ep = &pcie->pci.ep;
	struct dw_pcie *pci = &pcie->pci;
	u32 val, speed;

	if (test_and_clear_bit(0, &pcie->link_status))
		dw_pcie_ep_linkup(ep);

	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
		PCI_EXP_LNKSTA_CLS;
	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);

	if (pcie->of_data->has_ltr_req_fix)
		return IRQ_HANDLED;

	/* If EP doesn't advertise L1SS, just return */
	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
	if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
		return IRQ_HANDLED;

	/* Check if BME is set to '1' */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	if (val & PCI_COMMAND_MASTER) {
		ktime_t timeout;

		/* 110us for both snoop and no-snoop */
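		/*
		 * Encoding note (added for clarity): the latency field is
		 * 110 with scale 2, which selects 1024 ns units in the LTR
		 * message format, i.e. roughly 110 us. LTR_MSG_REQ marks
		 * the value as valid, and the shift below replicates the
		 * same encoding into the no-snoop half of the register.
		 */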
		val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ;
		val |= (val << LTR_MST_NO_SNOOP_SHIFT);
		appl_writel(pcie, val, APPL_LTR_MSG_1);

		/* Send LTR upstream */
		val = appl_readl(pcie, APPL_LTR_MSG_2);
		val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
		appl_writel(pcie, val, APPL_LTR_MSG_2);

		timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
		for (;;) {
			val = appl_readl(pcie, APPL_LTR_MSG_2);
			if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
				break;
			if (ktime_after(ktime_get(), timeout))
				break;
			usleep_range(1000, 1100);
		}
		if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
			dev_err(pcie->dev, "Failed to send LTR message\n");
	}

	return IRQ_HANDLED;
}

static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	int spurious = 1;
	u32 status_l0, status_l1, link_status;

	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);

		if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
			pex_ep_event_hot_rst_done(pcie);

		if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
			link_status = appl_readl(pcie, APPL_LINK_STATUS);
			if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
				dev_dbg(pcie->dev, "Link is up with Host\n");
				set_bit(0, &pcie->link_status);
				return IRQ_WAKE_THREAD;
			}
		}

		spurious = 0;
	}

	if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);

		if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
			return IRQ_WAKE_THREAD;

		spurious = 0;
	}

	if (spurious) {
		dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
			 status_l0);
		appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
	}

	return IRQ_HANDLED;
}

static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
				     int size, u32 *val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	/*
	 * This is an endpoint-mode-specific register that happens to be
	 * present even when the controller is operating in root port mode.
	 * The system hangs if it is accessed while the link is in the
	 * ASPM L1 state, so skip accessing it altogether.
	 */
	if (!pcie->of_data->has_msix_doorbell_access_fix &&
	    !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
		*val = 0x00000000;
		return PCIBIOS_SUCCESSFUL;
	}

	return pci_generic_config_read(bus, devfn, where, size, val);
}

static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
				     int size, u32 val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	/*
	 * This is an endpoint-mode-specific register that happens to be
	 * present even when the controller is operating in root port mode.
	 * The system hangs if it is accessed while the link is in the
	 * ASPM L1 state, so skip accessing it altogether.
	 */
	if (!pcie->of_data->has_msix_doorbell_access_fix &&
	    !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
		return PCIBIOS_SUCCESSFUL;

	return pci_generic_config_write(bus, devfn, where, size, val);
}

static struct pci_ops tegra_pci_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = tegra_pcie_dw_rd_own_conf,
	.write = tegra_pcie_dw_wr_own_conf,
};

#if defined(CONFIG_PCIEASPM)
static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
	val &= ~PCI_L1SS_CAP_ASPM_L1_1;
	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}

static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
	val &= ~PCI_L1SS_CAP_ASPM_L1_2;
	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}

static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
				PCIE_RAS_DES_EVENT_COUNTER_CONTROL);
	val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
	val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
	val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
				PCIE_RAS_DES_EVENT_COUNTER_DATA);

	return val;
}

static int aspm_state_cnt(struct seq_file *s, void *data)
{
	struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
				     dev_get_drvdata(s->private);
	u32 val;

	seq_printf(s, "Tx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));

	seq_printf(s, "Rx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));

	seq_printf(s, "Link L1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));

	seq_printf(s, "Link L1.1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));

	seq_printf(s, "Link L1.2 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));

	/* Clear all counters */
	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL,
			   EVENT_COUNTER_ALL_CLEAR);

	/* Re-enable counting */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);

	return 0;
}

static void init_host_aspm(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val;

	val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
	pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;

	pcie->ras_des_cap = dw_pcie_find_ext_capability(&pcie->pci,
							PCI_EXT_CAP_ID_VNDR);

	/* Enable ASPM counters */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);

	/* Program T_cmrt and T_pwr_on values */
	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
	val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
	val |= (pcie->aspm_cmrt << 8);
	val |= (pcie->aspm_pwr_on_t << 19);
	dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);

	/* Program L0s and L1 entrance latencies */
	val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
	val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK;
	val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT);
	val |= PORT_AFR_ENTER_ASPM;
	dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
}

static void init_debugfs(struct tegra_pcie_dw *pcie)
{
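	/*
	 * Usage note (illustrative): the resulting file can be read at
	 * runtime, e.g.:
	 *
	 *	# cat /sys/kernel/debug/<node>/aspm_state_cnt
	 *
	 * where <node> is the directory created in tegra_pcie_config_rp().
	 * Each read prints and then clears the RAS DES event counters.
	 */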
	debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
				    aspm_state_cnt);
}
#else
static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
#endif

static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;
	u16 val_w;

	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	if (!pcie->of_data->has_sbr_reset_fix) {
		val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
		val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
		appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
	}

	if (pcie->enable_cdm_check) {
		val = appl_readl(pcie, APPL_INTR_EN_L0_0);
		val |= pcie->of_data->cdm_chk_int_en_bit;
		appl_writel(pcie, val, APPL_INTR_EN_L0_0);

		val = appl_readl(pcie, APPL_INTR_EN_L1_18);
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
		appl_writel(pcie, val, APPL_INTR_EN_L1_18);
	}

	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKSTA);
	pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >>
				PCI_EXP_LNKSTA_NLW_SHIFT;

	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKCTL);
	val_w |= PCI_EXP_LNKCTL_LBMIE;
	dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL,
			   val_w);
}

static void tegra_pcie_enable_legacy_interrupts(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;

	/* Enable legacy interrupt generation */
	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
	val |= APPL_INTR_EN_L0_0_INT_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
	val |= APPL_INTR_EN_L1_8_INTX_EN;
	val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
	val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
	if (IS_ENABLED(CONFIG_PCIEAER))
		val |= APPL_INTR_EN_L1_8_AER_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
}

static void tegra_pcie_enable_msi_interrupts(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;

	/* Enable MSI interrupt generation */
	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
	val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
}

static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	/* Clear interrupt statuses before enabling interrupts */
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);

	tegra_pcie_enable_system_interrupts(pp);
	tegra_pcie_enable_legacy_interrupts(pp);
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_enable_msi_interrupts(pp);
}

static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val, offset, i;

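	/*
	 * Layout note (added for clarity): the SPCIE capability carries one
	 * 16-bit lane equalization control entry per lane, hence the
	 * (i * 2) byte stride below, while the Physical Layer 16.0 GT/s
	 * capability's lane equalization control registers are one byte
	 * per lane.
	 */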
	/* Program init preset */
	for (i = 0; i < pcie->num_lanes; i++) {
		val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2));
		val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
		val |= GEN3_GEN4_EQ_PRESET_INIT;
		val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
			   CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
		dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val);

		offset = dw_pcie_find_ext_capability(pci,
						     PCI_EXT_CAP_ID_PL_16GT) +
				PCI_PL_16GT_LE_CTRL;
		val = dw_pcie_readb_dbi(pci, offset + i);
		val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
		val |= GEN3_GEN4_EQ_PRESET_INIT;
		val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
			PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
		dw_pcie_writeb_dbi(pci, offset + i, val);
	}

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
	val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT);
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
	val |= (pcie->of_data->gen4_preset_vec <<
		GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
}

static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;
	u16 val_16;

	pp->bridge->ops = &tegra_pci_ops;

	if (!pcie->pcie_cap_base)
		pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
							      PCI_CAP_ID_EXP);

	val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL);
	val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
	val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B;
	dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16);

	val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
	val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
	dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);

	val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE);
	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE;
	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE;
	dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val);

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Enable as 0xFFFF0001 response for CRS */
	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
	val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
	val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
		AMBA_ERROR_RESPONSE_CRS_SHIFT);
	dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);

	/* Configure Max lane width from DT */
	val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_MLW;
	val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
	dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);

	/* Clear Slot Clock Configuration bit if SRNS configuration */
	if (pcie->enable_srns) {
		val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKSTA);
		val_16 &= ~PCI_EXP_LNKSTA_SLC;
		dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
				   val_16);
	}

	config_gen3_gen4_eq_presets(pcie);

	init_host_aspm(pcie);

	/* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
	if (!pcie->supports_clkreq) {
		disable_aspm_l11(pcie);
		disable_aspm_l12(pcie);
	}

	if (!pcie->of_data->has_l1ss_exit_fix) {
		val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
		val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
		dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
	}

	if (pcie->update_fc_fixup) {
		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
	}

	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);

	return 0;
}

static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
{
	u32 val, offset, speed, tmp;
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	struct dw_pcie_rp *pp = &pci->pp;
	bool retry = true;

	if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
		enable_irq(pcie->pex_rst_irq);
		return 0;
	}

retry_link:
	/* Assert RST */
	val = appl_readl(pcie, APPL_PINMUX);
	val &= ~APPL_PINMUX_PEX_RST;
	appl_writel(pcie, val, APPL_PINMUX);

	usleep_range(100, 200);

	/* Enable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	/* De-assert RST */
	val = appl_readl(pcie, APPL_PINMUX);
	val |= APPL_PINMUX_PEX_RST;
	appl_writel(pcie, val, APPL_PINMUX);

	msleep(100);

	if (dw_pcie_wait_for_link(pci)) {
		if (!retry)
			return 0;
		/*
		 * There are some endpoints which can't get the link up if
		 * root port has Data Link Feature (DLF) enabled.
		 * Refer Spec rev 4.0 ver 1.0 sec 3.4.2 & 7.7.4 for more info
		 * on Scaled Flow Control and DLF.
		 * So, need to confirm that is indeed the case here and attempt
		 * link up once again with DLF disabled.
		 */
		val = appl_readl(pcie, APPL_DEBUG);
		val &= APPL_DEBUG_LTSSM_STATE_MASK;
		val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
		tmp = appl_readl(pcie, APPL_LINK_STATUS);
		tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
		if (!(val == 0x11 && !tmp)) {
			/* Link is down for all good reasons */
			return 0;
		}

		dev_info(pci->dev, "Link is down in DLL\n");
		dev_info(pci->dev, "Trying again with DLFE disabled\n");
		/* Disable LTSSM */
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~APPL_CTRL_LTSSM_EN;
		appl_writel(pcie, val, APPL_CTRL);

		reset_control_assert(pcie->core_rst);
		reset_control_deassert(pcie->core_rst);

		offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
		val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
		val &= ~PCI_DLF_EXCHANGE_ENABLE;
		dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);

		tegra_pcie_dw_host_init(pp);
		dw_pcie_setup_rc(pp);

		retry = false;
		goto retry_link;
	}

	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
		PCI_EXP_LNKSTA_CLS;
	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);

	tegra_pcie_enable_interrupts(pp);

	return 0;
}

static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	disable_irq(pcie->pex_rst_irq);
}

static const struct dw_pcie_ops tegra_dw_pcie_ops = {
	.link_up = tegra_pcie_dw_link_up,
	.start_link = tegra_pcie_dw_start_link,
	.stop_link = tegra_pcie_dw_stop_link,
};

static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
	.host_init = tegra_pcie_dw_host_init,
};

static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
{
	unsigned int phy_count = pcie->phy_count;

	while (phy_count--) {
		phy_power_off(pcie->phys[phy_count]);
		phy_exit(pcie->phys[phy_count]);
	}
}

static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
{
	unsigned int i;
	int ret;

	for (i = 0; i < pcie->phy_count; i++) {
		ret = phy_init(pcie->phys[i]);
		if (ret < 0)
			goto phy_power_off;

		ret = phy_power_on(pcie->phys[i]);
		if (ret < 0)
			goto phy_exit;
	}

	return 0;

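	/*
	 * Unwind note (added for clarity): a phy_power_on() failure jumps
	 * to the phy_exit label inside the loop, so the PHY that was
	 * initialized but failed to power on is only exited; the loop then
	 * powers off and exits all previously enabled PHYs. A phy_init()
	 * failure enters at phy_power_off and unwinds the earlier PHYs
	 * completely.
	 */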
phy_power_off:
	while (i--) {
		phy_power_off(pcie->phys[i]);
phy_exit:
		phy_exit(pcie->phys[i]);
	}

	return ret;
}

static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct device_node *np = pcie->dev->of_node;
	int ret;

	pcie->dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	if (!pcie->dbi_res) {
		dev_err(pcie->dev, "Failed to find \"dbi\" region\n");
		return -ENODEV;
	}

	ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
	if (ret < 0) {
		dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
		return ret;
	}

	ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us",
				   &pcie->aspm_pwr_on_t);
	if (ret < 0)
		dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n",
			 ret);

	ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us",
				   &pcie->aspm_l0s_enter_lat);
	if (ret < 0)
		dev_info(pcie->dev,
			 "Failed to read ASPM L0s Entrance latency: %d\n", ret);

	ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret);
		return ret;
	}

	ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
	if (ret) {
		dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
		return ret;
	}

	ret = of_property_count_strings(np, "phy-names");
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to find PHY entries: %d\n",
			ret);
		return ret;
	}
	pcie->phy_count = ret;

	if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
		pcie->update_fc_fixup = true;

	/* RP using an external REFCLK is supported only in Tegra234 */
	if (pcie->of_data->version == TEGRA194_DWC_IP_VER) {
		if (pcie->of_data->mode == DW_PCIE_EP_TYPE)
			pcie->enable_ext_refclk = true;
	} else {
		pcie->enable_ext_refclk =
			of_property_read_bool(pcie->dev->of_node,
					      "nvidia,enable-ext-refclk");
	}

	pcie->supports_clkreq =
		of_property_read_bool(pcie->dev->of_node, "supports-clkreq");

	pcie->enable_cdm_check =
		of_property_read_bool(np, "snps,enable-cdm-check");

	if (pcie->of_data->version == TEGRA234_DWC_IP_VER)
		pcie->enable_srns =
			of_property_read_bool(np, "nvidia,enable-srns");

	if (pcie->of_data->mode == DW_PCIE_RC_TYPE)
		return 0;

	/* Endpoint mode specific DT entries */
	pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN);
	if (IS_ERR(pcie->pex_rst_gpiod)) {
		int err = PTR_ERR(pcie->pex_rst_gpiod);
		const char *level = KERN_ERR;

		if (err == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, pcie->dev,
			   dev_fmt("Failed to get PERST GPIO: %d\n"),
			   err);
		return err;
	}

	pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
						    "nvidia,refclk-select",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
		int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
		const char *level = KERN_ERR;

		if (err == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, pcie->dev,
			   dev_fmt("Failed to get REFCLK select GPIOs: %d\n"),
			   err);
		pcie->pex_refclk_sel_gpiod = NULL;
	}

	return 0;
}

static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
					  bool enable)
{
	struct mrq_uphy_response resp;
	struct tegra_bpmp_message msg;
	struct mrq_uphy_request req;

	/*
	 * Controller-5 doesn't need to have its state set by BPMP-FW in
	 * Tegra194
	 */
	if (pcie->of_data->version == TEGRA194_DWC_IP_VER && pcie->cid == 5)
		return 0;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
	req.controller_state.pcie_controller = pcie->cid;
	req.controller_state.enable = enable;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_UPHY;
	msg.tx.data = &req;
	msg.tx.size = sizeof(req);
	msg.rx.data = &resp;
	msg.rx.size = sizeof(resp);

	return tegra_bpmp_transfer(pcie->bpmp, &msg);
}

static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
					 bool enable)
{
	struct mrq_uphy_response resp;
	struct tegra_bpmp_message msg;
	struct mrq_uphy_request req;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	if (enable) {
		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT;
		req.ep_ctrlr_pll_init.ep_controller = pcie->cid;
	} else {
		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
		req.ep_ctrlr_pll_off.ep_controller = pcie->cid;
	}

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_UPHY;
	msg.tx.data = &req;
	msg.tx.size = sizeof(req);
	msg.rx.data = &resp;
	msg.rx.size = sizeof(resp);

	return tegra_bpmp_transfer(pcie->bpmp, &msg);
}

static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie_rp *pp = &pcie->pci.pp;
	struct pci_bus *child, *root_bus = NULL;
	struct pci_dev *pdev;
	/*
	 * The link doesn't go into the L2 state with some endpoints on Tegra
	 * if they are not in the D0 state. So, make sure that the immediate
	 * downstream devices are in D0 before sending PME_Turn_Off to put
	 * the link into L2.
	 * This is as per PCI Express Base r4.0 v1.0 September 27-2017,
	 * 5.2 Link State Power Management (Page #428).
	 */

	list_for_each_entry(child, &pp->bridge->bus->children, node) {
		/* Bring downstream devices to D0 if they are not already in */
		if (child->parent == pp->bridge->bus) {
			root_bus = child;
			break;
		}
	}

	if (!root_bus) {
		dev_err(pcie->dev, "Failed to find downstream devices\n");
		return;
	}

	list_for_each_entry(pdev, &root_bus->devices, bus_list) {
		if (PCI_SLOT(pdev->devfn) == 0) {
			if (pci_set_power_state(pdev, PCI_D0))
				dev_err(pcie->dev,
					"Failed to transition %s to D0 state\n",
					dev_name(&pdev->dev));
		}
	}
}

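/*
 * Both slot supplies are optional: -ENODEV from
 * devm_regulator_get_optional() means the supply is simply not described
 * in DT and is treated as absent, while any other error (e.g.
 * -EPROBE_DEFER) is propagated to the caller.
 */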
static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
{
	pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
	if (IS_ERR(pcie->slot_ctl_3v3)) {
		if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV)
			return PTR_ERR(pcie->slot_ctl_3v3);

		pcie->slot_ctl_3v3 = NULL;
	}

	pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v");
	if (IS_ERR(pcie->slot_ctl_12v)) {
		if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV)
			return PTR_ERR(pcie->slot_ctl_12v);

		pcie->slot_ctl_12v = NULL;
	}

	return 0;
}

static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
{
	int ret;

	if (pcie->slot_ctl_3v3) {
		ret = regulator_enable(pcie->slot_ctl_3v3);
		if (ret < 0) {
			dev_err(pcie->dev,
				"Failed to enable 3.3V slot supply: %d\n", ret);
			return ret;
		}
	}

	if (pcie->slot_ctl_12v) {
		ret = regulator_enable(pcie->slot_ctl_12v);
		if (ret < 0) {
			dev_err(pcie->dev,
				"Failed to enable 12V slot supply: %d\n", ret);
			goto fail_12v_enable;
		}
	}

	/*
	 * According to PCI Express Card Electromechanical Specification
	 * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive)
	 * should be a minimum of 100ms.
	 */
	if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v)
		msleep(100);

	return 0;

fail_12v_enable:
	if (pcie->slot_ctl_3v3)
		regulator_disable(pcie->slot_ctl_3v3);
	return ret;
}

static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
{
	if (pcie->slot_ctl_12v)
		regulator_disable(pcie->slot_ctl_12v);
	if (pcie->slot_ctl_3v3)
		regulator_disable(pcie->slot_ctl_3v3);
}

static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
					bool en_hw_hot_rst)
{
	int ret;
	u32 val;

	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
	if (ret) {
		dev_err(pcie->dev,
			"Failed to enable controller %u: %d\n", pcie->cid, ret);
		return ret;
	}

	if (pcie->enable_ext_refclk) {
		ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
		if (ret) {
			dev_err(pcie->dev, "Failed to init UPHY: %d\n", ret);
			goto fail_pll_init;
		}
	}

	ret = tegra_pcie_enable_slot_regulators(pcie);
	if (ret < 0)
		goto fail_slot_reg_en;

	ret = regulator_enable(pcie->pex_ctl_supply);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret);
		goto fail_reg_en;
	}

	ret = clk_prepare_enable(pcie->core_clk);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret);
		goto fail_core_clk;
	}

	ret = reset_control_deassert(pcie->core_apb_rst);
	if (ret) {
		dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n",
			ret);
		goto fail_core_apb_rst;
	}

	if (en_hw_hot_rst || pcie->of_data->has_sbr_reset_fix) {
		/* Enable HW_HOT_RST mode */
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= (APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN <<
			APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= APPL_CTRL_HW_HOT_RST_EN;
		appl_writel(pcie, val, APPL_CTRL);
	}

	ret = tegra_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret);
		goto fail_phy;
	}

	/* Update CFG base address */
	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
		    APPL_CFG_BASE_ADDR);

	/* Configure this core for RP mode operation */
	appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE);

	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);

	val = appl_readl(pcie, APPL_CTRL);
	appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL);

	val = appl_readl(pcie, APPL_CFG_MISC);
	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
	appl_writel(pcie, val, APPL_CFG_MISC);

	if (pcie->enable_srns || pcie->enable_ext_refclk) {
		/*
		 * When Tegra PCIe RP is using external clock, it cannot supply
		 * same clock to its downstream hierarchy. Hence, gate PCIe RP
		 * REFCLK out pads when RP & EP are using separate clocks or RP
		 * is using an external REFCLK.
		 */
		val = appl_readl(pcie, APPL_PINMUX);
		val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
		val &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
		appl_writel(pcie, val, APPL_PINMUX);
	}

	if (!pcie->supports_clkreq) {
		val = appl_readl(pcie, APPL_PINMUX);
		val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
		val &= ~APPL_PINMUX_CLKREQ_OVERRIDE;
		appl_writel(pcie, val, APPL_PINMUX);
	}

	/* Update iATU_DMA base address */
	appl_writel(pcie,
		    pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
		    APPL_CFG_IATU_DMA_BASE_ADDR);

	reset_control_deassert(pcie->core_rst);

	return ret;

fail_phy:
	reset_control_assert(pcie->core_apb_rst);
fail_core_apb_rst:
	clk_disable_unprepare(pcie->core_clk);
fail_core_clk:
	regulator_disable(pcie->pex_ctl_supply);
fail_reg_en:
	tegra_pcie_disable_slot_regulators(pcie);
fail_slot_reg_en:
	if (pcie->enable_ext_refclk)
		tegra_pcie_bpmp_set_pll_state(pcie, false);
fail_pll_init:
	tegra_pcie_bpmp_set_ctrl_state(pcie, false);

	return ret;
}

static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
{
	int ret;

	ret = reset_control_assert(pcie->core_rst);
	if (ret)
		dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret);

	tegra_pcie_disable_phy(pcie);

	ret = reset_control_assert(pcie->core_apb_rst);
	if (ret)
		dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);

	clk_disable_unprepare(pcie->core_clk);

	ret = regulator_disable(pcie->pex_ctl_supply);
	if (ret)
		dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);

	tegra_pcie_disable_slot_regulators(pcie);

	if (pcie->enable_ext_refclk) {
		ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
		if (ret)
			dev_err(pcie->dev, "Failed to deinit UPHY: %d\n", ret);
	}

	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
	if (ret)
		dev_err(pcie->dev, "Failed to disable controller %d: %d\n",
			pcie->cid, ret);
}

static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	int ret;

	ret = tegra_pcie_config_controller(pcie, false);
	if (ret < 0)
		return ret;

	pp->ops = &tegra_pcie_dw_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret);
		goto fail_host_init;
	}

	return 0;

fail_host_init:
	tegra_pcie_unconfig_controller(pcie);
	return ret;
}

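/*
 * Trigger the PME_Turn_Off handshake: setting APPL_PM_XMT_TURNOFF_STATE
 * makes the controller transmit PME_Turn_Off downstream, and the poll
 * below waits up to PME_ACK_TIMEOUT microseconds for APPL_DEBUG to report
 * that the link has settled in L2.
 */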
1548 static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
1549 {
1550 	u32 val;
1551 
1552 	if (!tegra_pcie_dw_link_up(&pcie->pci))
1553 		return 0;
1554 
1555 	val = appl_readl(pcie, APPL_RADM_STATUS);
1556 	val |= APPL_PM_XMT_TURNOFF_STATE;
1557 	appl_writel(pcie, val, APPL_RADM_STATUS);
1558 
1559 	return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val,
1560 				 val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
1561 				 1, PME_ACK_TIMEOUT);
1562 }
1563 
1564 static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
1565 {
1566 	u32 data;
1567 	int err;
1568 
1569 	if (!tegra_pcie_dw_link_up(&pcie->pci)) {
1570 		dev_dbg(pcie->dev, "PCIe link is not up...!\n");
1571 		return;
1572 	}
1573 
1574 	/*
1575 	 * PCIe controller exits from L2 only if reset is applied, so
1576 	 * controller doesn't handle interrupts. But in cases where
1577 	 * L2 entry fails, PERST# is asserted which can trigger surprise
1578 	 * link down AER. However this function call happens in
1579 	 * suspend_noirq(), so AER interrupt will not be processed.
1580 	 * Disable all interrupts to avoid such a scenario.
1581 	 */
1582 	appl_writel(pcie, 0x0, APPL_INTR_EN_L0_0);
1583 
1584 	if (tegra_pcie_try_link_l2(pcie)) {
1585 		dev_info(pcie->dev, "Link didn't transition to L2 state\n");
1586 		/*
1587 		 * TX lane clock freq will reset to Gen1 only if link is in L2
1588 		 * or detect state.
1589 		 * So apply pex_rst to end point to force RP to go into detect
1590 		 * state
1591 		 */
1592 		data = appl_readl(pcie, APPL_PINMUX);
1593 		data &= ~APPL_PINMUX_PEX_RST;
1594 		appl_writel(pcie, data, APPL_PINMUX);
1595 
1596 		/*
1597 		 * Some cards do not go to detect state even after de-asserting
1598 		 * PERST#. So, de-assert LTSSM to bring link to detect state.
1599 		 */
1600 		data = readl(pcie->appl_base + APPL_CTRL);
1601 		data &= ~APPL_CTRL_LTSSM_EN;
1602 		writel(data, pcie->appl_base + APPL_CTRL);
1603 
1604 		err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
1605 						data,
1606 						((data &
1607 						APPL_DEBUG_LTSSM_STATE_MASK) >>
1608 						APPL_DEBUG_LTSSM_STATE_SHIFT) ==
1609 						LTSSM_STATE_PRE_DETECT,
1610 						1, LTSSM_TIMEOUT);
1611 		if (err)
1612 			dev_info(pcie->dev, "Link didn't go to detect state\n");
1613 	}
1614 	/*
1615 	 * DBI registers may not be accessible after this as PLL-E would be
1616 	 * down depending on how CLKREQ is pulled by end point
1617 	 */
1618 	data = appl_readl(pcie, APPL_PINMUX);
1619 	data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE);
1620 	/* Cut REFCLK to slot */
1621 	data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
1622 	data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
1623 	appl_writel(pcie, data, APPL_PINMUX);
1624 }
1625 
1626 static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
1627 {
1628 	tegra_pcie_downstream_dev_to_D0(pcie);
1629 	dw_pcie_host_deinit(&pcie->pci.pp);
1630 	tegra_pcie_dw_pme_turnoff(pcie);
1631 	tegra_pcie_unconfig_controller(pcie);
1632 }
1633 
1634 static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
1635 {
1636 	struct device *dev = pcie->dev;
1637 	char *name;
1638 	int ret;
1639 
1640 	pm_runtime_enable(dev);
1641 
1642 	ret = pm_runtime_get_sync(dev);
1643 	if (ret < 0) {
1644 		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
1645 			ret);
1646 		goto fail_pm_get_sync;
1647 	}
1648 
1649 	ret = pinctrl_pm_select_default_state(dev);
1650 	if (ret < 0) {
1651 		dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
1652 		goto fail_pm_get_sync;
1653 	}
1654 
1655 	ret = tegra_pcie_init_controller(pcie);
1656 	if (ret < 0) {
1657 		dev_err(dev, "Failed to initialize controller: %d\n", ret);
1658 		goto fail_pm_get_sync;
1659 	}
1660 
1661 	pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
1662 	if (!pcie->link_state) {
1663 		ret = -ENOMEDIUM;
1664 		goto fail_host_init;
1665 	}
1666 
1667 	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
1668 	if (!name) {
1669 		ret = -ENOMEM;
1670 		goto fail_host_init;
1671 	}
1672 
1673 	pcie->debugfs = debugfs_create_dir(name, NULL);
1674 	init_debugfs(pcie);
1675 
1676 	return ret;
1677 
1678 fail_host_init:
1679 	tegra_pcie_deinit_controller(pcie);
1680 fail_pm_get_sync:
1681 	pm_runtime_put_sync(dev);
1682 	pm_runtime_disable(dev);
1683 	return ret;
1684 }
1685 
1686 static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
1687 {
1688 	u32 val;
1689 	int ret;
1690 
1691 	if (pcie->ep_state == EP_STATE_DISABLED)
1692 		return;
1693 
1694 	/* Disable LTSSM */
1695 	val = appl_readl(pcie, APPL_CTRL);
1696 	val &= ~APPL_CTRL_LTSSM_EN;
1697 	appl_writel(pcie, val, APPL_CTRL);
1698 
1699 	ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
1700 				 ((val & APPL_DEBUG_LTSSM_STATE_MASK) >>
1701 				 APPL_DEBUG_LTSSM_STATE_SHIFT) ==
1702 				 LTSSM_STATE_PRE_DETECT,
1703 				 1, LTSSM_TIMEOUT);
1704 	if (ret)
1705 		dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret);
1706 
1707 	reset_control_assert(pcie->core_rst);
1708 
1709 	tegra_pcie_disable_phy(pcie);
1710 
1711 	reset_control_assert(pcie->core_apb_rst);
1712 
1713 	clk_disable_unprepare(pcie->core_clk);
1714 
1715 	pm_runtime_put_sync(pcie->dev);
1716 
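	/* With an external REFCLK, the UPHY PLL must be powered down too */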
1717 	if (pcie->enable_ext_refclk) {
1718 		ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
1719 		if (ret)
1720 			dev_err(pcie->dev, "Failed to turn off UPHY: %d\n",
1721 				ret);
1722 	}
1723 
1724 	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
1725 	if (ret)
1726 		dev_err(pcie->dev, "Failed to disable controller %u: %d\n",
			pcie->cid, ret);
1727 
1728 	pcie->ep_state = EP_STATE_DISABLED;
1729 	dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
1730 	dev_dbg(pcie->dev, "De-initialization of endpoint is completed\n");
1731 
1732 static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
1733 {
1734 	struct dw_pcie *pci = &pcie->pci;
1735 	struct dw_pcie_ep *ep = &pci->ep;
1736 	struct device *dev = pcie->dev;
1737 	u32 val;
1738 	int ret;
1739 	u16 val_16;
1740 
1741 	if (pcie->ep_state == EP_STATE_ENABLED)
1742 		return;
1743 
1744 	ret = pm_runtime_resume_and_get(dev);
1745 	if (ret < 0) {
1746 		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
1747 			ret);
1748 		return;
1749 	}
1750 
1751 	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
1752 	if (ret) {
1753 		dev_err(dev, "Failed to enable controller %u: %d\n",
1754 			pcie->cid, ret);
1755 		goto fail_set_ctrl_state;
1756 	}
1757 
1758 	if (pcie->enable_ext_refclk) {
1759 		ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
1760 		if (ret) {
1761 			dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n",
1762 				ret);
1763 			goto fail_pll_init;
1764 		}
1765 	}
1766 
1767 	ret = clk_prepare_enable(pcie->core_clk);
1768 	if (ret) {
1769 		dev_err(dev, "Failed to enable core clock: %d\n", ret);
1770 		goto fail_core_clk_enable;
1771 	}
1772 
1773 	ret = reset_control_deassert(pcie->core_apb_rst);
1774 	if (ret) {
1775 		dev_err(dev, "Failed to deassert core APB reset: %d\n", ret);
1776 		goto fail_core_apb_rst;
1777 	}
1778 
1779 	ret = tegra_pcie_enable_phy(pcie);
1780 	if (ret) {
1781 		dev_err(dev, "Failed to enable PHY: %d\n", ret);
1782 		goto fail_phy;
1783 	}
1784 
1785 	/* Clear any stale interrupt statuses */
1786 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
1787 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
1788 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
1789 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
1790 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
1791 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
1792 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
1793 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
1794 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
1795 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
1796 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
1797 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
1798 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
1799 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
1800 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
1801 
1802 	/* configure this core for EP mode operation */
1803 	val = appl_readl(pcie, APPL_DM_TYPE);
1804 	val &= ~APPL_DM_TYPE_MASK;
1805 	val |= APPL_DM_TYPE_EP;
1806 	appl_writel(pcie, val, APPL_DM_TYPE);
1807 
1808 	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);
1809 
1810 	val = appl_readl(pcie, APPL_CTRL);
1811 	val |= APPL_CTRL_SYS_PRE_DET_STATE;
1812 	val |= APPL_CTRL_HW_HOT_RST_EN;
1813 	appl_writel(pcie, val, APPL_CTRL);
1814 
1815 	val = appl_readl(pcie, APPL_CFG_MISC);
1816 	val |= APPL_CFG_MISC_SLV_EP_MODE;
1817 	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
1818 	appl_writel(pcie, val, APPL_CFG_MISC);
1819 
1820 	val = appl_readl(pcie, APPL_PINMUX);
1821 	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
1822 	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
1823 	appl_writel(pcie, val, APPL_PINMUX);
1824 
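	/* Tell the application logic where the DBI and iATU/DMA spaces live */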
1825 	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
1826 		    APPL_CFG_BASE_ADDR);
1827 
1828 	appl_writel(pcie, pcie->atu_dma_res->start &
1829 		    APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
1830 		    APPL_CFG_IATU_DMA_BASE_ADDR);
1831 
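	/*
	 * Route interrupts to the system and unmask link-state and PCI
	 * command-register change events
	 */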
1832 	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
1833 	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
1834 	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
1835 	val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
1836 	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
1837 
1838 	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
1839 	val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN;
1840 	val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
1841 	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
1842 
1843 	reset_control_deassert(pcie->core_rst);
1844 
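	/* Apply the UpdateFC timer fixup (a platform quirk from DT), if needed */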
1845 	if (pcie->update_fc_fixup) {
1846 		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
1847 		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
1848 		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
1849 	}
1850 
1851 	config_gen3_gen4_eq_presets(pcie);
1852 
1853 	init_host_aspm(pcie);
1854 
1855 	/* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
1856 	if (!pcie->supports_clkreq) {
1857 		disable_aspm_l11(pcie);
1858 		disable_aspm_l12(pcie);
1859 	}
1860 
1861 	if (!pcie->of_data->has_l1ss_exit_fix) {
1862 		val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
1863 		val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
1864 		dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
1865 	}
1866 
1867 	pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
1868 						      PCI_CAP_ID_EXP);
1869 
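	/* Program a Max Payload Size of 256 bytes in Device Control */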
1870 	val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL);
1871 	val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
1872 	val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B;
1873 	dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16);
1874 
1875 	/* Clear the Slot Clock Configuration bit in SRNS configurations */
1876 	if (pcie->enable_srns) {
1877 		val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
1878 					   PCI_EXP_LNKSTA);
1879 		val_16 &= ~PCI_EXP_LNKSTA_SLC;
1880 		dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
1881 				   val_16);
1882 	}
1883 
1884 	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
1885 
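	/*
	 * Point the MSI-X address-match window at the MSI memory region, so
	 * that the local write in tegra_pcie_ep_raise_msix_irq() generates an
	 * MSI-X message upstream.
	 */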
1886 	val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
1887 	val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
1888 	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
1889 	val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
1890 	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
1891 
1892 	ret = dw_pcie_ep_init_complete(ep);
1893 	if (ret) {
1894 		dev_err(dev, "Failed to complete initialization: %d\n", ret);
1895 		goto fail_init_complete;
1896 	}
1897 
1898 	dw_pcie_ep_init_notify(ep);
1899 
1900 	/* Program the private control to allow sending LTR upstream */
1901 	if (pcie->of_data->has_ltr_req_fix) {
1902 		val = appl_readl(pcie, APPL_LTR_MSG_2);
1903 		val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
1904 		appl_writel(pcie, val, APPL_LTR_MSG_2);
1905 	}
1906 
1907 	/* Enable LTSSM */
1908 	val = appl_readl(pcie, APPL_CTRL);
1909 	val |= APPL_CTRL_LTSSM_EN;
1910 	appl_writel(pcie, val, APPL_CTRL);
1911 
1912 	pcie->ep_state = EP_STATE_ENABLED;
1913 	dev_dbg(dev, "Initialization of endpoint is completed\n");
1914 
1915 	return;
1916 
1917 fail_init_complete:
1918 	reset_control_assert(pcie->core_rst);
1919 	tegra_pcie_disable_phy(pcie);
1920 fail_phy:
1921 	reset_control_assert(pcie->core_apb_rst);
1922 fail_core_apb_rst:
1923 	clk_disable_unprepare(pcie->core_clk);
1924 fail_core_clk_enable:
1925 	tegra_pcie_bpmp_set_pll_state(pcie, false);
1926 fail_pll_init:
1927 	tegra_pcie_bpmp_set_ctrl_state(pcie, false);
1928 fail_set_ctrl_state:
1929 	pm_runtime_put_sync(dev);
1930 }
1931 
1932 static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
1933 {
1934 	struct tegra_pcie_dw *pcie = arg;
1935 
1936 	if (gpiod_get_value(pcie->pex_rst_gpiod))
1937 		pex_ep_event_pex_rst_assert(pcie);
1938 	else
1939 		pex_ep_event_pex_rst_deassert(pcie);
1940 
1941 	return IRQ_HANDLED;
1942 }
1943 
1944 static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
1945 {
1946 	/* Tegra194 supports only INTA */
1947 	if (irq > 1)
1948 		return -EINVAL;
1949 
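	/* Pulse APPL_LEGACY_INTX: assert INTA for 1-2 ms, then deassert */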
1950 	appl_writel(pcie, 1, APPL_LEGACY_INTX);
1951 	usleep_range(1000, 2000);
1952 	appl_writel(pcie, 0, APPL_LEGACY_INTX);
1953 	return 0;
1954 }
1955 
1956 static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
1957 {
1958 	if (unlikely(irq > 31))
1959 		return -EINVAL;
1960 
1961 	appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1);
1962 
1963 	return 0;
1964 }
1965 
1966 static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
1967 {
1968 	struct dw_pcie_ep *ep = &pcie->pci.ep;
1969 
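	/* Writing the vector number to the address-match region raises MSI-X */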
1970 	writel(irq, ep->msi_mem);
1971 
1972 	return 0;
1973 }
1974 
1975 static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
1976 				   enum pci_epc_irq_type type,
1977 				   u16 interrupt_num)
1978 {
1979 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1980 	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
1981 
1982 	switch (type) {
1983 	case PCI_EPC_IRQ_LEGACY:
1984 		return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num);
1985 
1986 	case PCI_EPC_IRQ_MSI:
1987 		return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);
1988 
1989 	case PCI_EPC_IRQ_MSIX:
1990 		return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);
1991 
1992 	default:
1993 		dev_err(pci->dev, "Unknown IRQ type\n");
1994 		return -EPERM;
1995 	}
1996 
1997 	return 0;
1998 }
1999 
2000 static const struct pci_epc_features tegra_pcie_epc_features = {
2001 	.linkup_notifier = true,
2002 	.core_init_notifier = true,
2003 	.msi_capable = false,
2004 	.msix_capable = false,
2005 	.reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
2006 	.bar_fixed_64bit = 1 << BAR_0,
2007 	.bar_fixed_size[0] = SZ_1M,
2008 };
2009 
2010 static const struct pci_epc_features*
2011 tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
2012 {
2013 	return &tegra_pcie_epc_features;
2014 }
2015 
2016 static const struct dw_pcie_ep_ops pcie_ep_ops = {
2017 	.raise_irq = tegra_pcie_ep_raise_irq,
2018 	.get_features = tegra_pcie_ep_get_features,
2019 };
2020 
2021 static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
2022 				struct platform_device *pdev)
2023 {
2024 	struct dw_pcie *pci = &pcie->pci;
2025 	struct device *dev = pcie->dev;
2026 	struct dw_pcie_ep *ep;
2027 	char *name;
2028 	int ret;
2029 
2030 	ep = &pci->ep;
2031 	ep->ops = &pcie_ep_ops;
2032 
2033 	ep->page_size = SZ_64K;
2034 
2035 	ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
2036 	if (ret < 0) {
2037 		dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n",
2038 			ret);
2039 		return ret;
2040 	}
2041 
2042 	ret = gpiod_to_irq(pcie->pex_rst_gpiod);
2043 	if (ret < 0) {
2044 		dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret);
2045 		return ret;
2046 	}
2047 	pcie->pex_rst_irq = (unsigned int)ret;
2048 
2049 	name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq",
2050 			      pcie->cid);
2051 	if (!name) {
2052 		dev_err(dev, "Failed to create PERST IRQ string\n");
2053 		return -ENOMEM;
2054 	}
2055 
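	/* Keep the PERST# IRQ disabled until the endpoint is started */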
2056 	irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN);
2057 
2058 	pcie->ep_state = EP_STATE_DISABLED;
2059 
2060 	ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL,
2061 					tegra_pcie_ep_pex_rst_irq,
2062 					IRQF_TRIGGER_RISING |
2063 					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
2064 					name, (void *)pcie);
2065 	if (ret < 0) {
2066 		dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret);
2067 		return ret;
2068 	}
2069 
2070 	pm_runtime_enable(dev);
2071 
2072 	ret = dw_pcie_ep_init(ep);
2073 	if (ret) {
2074 		dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
2075 			ret);
2076 		pm_runtime_disable(dev);
2077 		return ret;
2078 	}
2079 
2080 	return 0;
2081 }
2082 
2083 static int tegra_pcie_dw_probe(struct platform_device *pdev)
2084 {
2085 	const struct tegra_pcie_dw_of_data *data;
2086 	struct device *dev = &pdev->dev;
2087 	struct resource *atu_dma_res;
2088 	struct tegra_pcie_dw *pcie;
2089 	struct dw_pcie_rp *pp;
2090 	struct dw_pcie *pci;
2091 	struct phy **phys;
2092 	char *name;
2093 	int ret;
2094 	u32 i;
2095 
2096 	data = of_device_get_match_data(dev);
2097 
2098 	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
2099 	if (!pcie)
2100 		return -ENOMEM;
2101 
2102 	pci = &pcie->pci;
2103 	pci->dev = &pdev->dev;
2104 	pci->ops = &tegra_dw_pcie_ops;
2105 	pcie->dev = &pdev->dev;
2106 	pcie->of_data = (struct tegra_pcie_dw_of_data *)data;
2107 	pci->n_fts[0] = pcie->of_data->n_fts[0];
2108 	pci->n_fts[1] = pcie->of_data->n_fts[1];
2109 	pp = &pci->pp;
2110 	pp->num_vectors = MAX_MSI_IRQS;
2111 
2112 	ret = tegra_pcie_dw_parse_dt(pcie);
2113 	if (ret < 0) {
2114 		const char *level = KERN_ERR;
2115 
2116 		if (ret == -EPROBE_DEFER)
2117 			level = KERN_DEBUG;
2118 
2119 		dev_printk(level, dev,
2120 			   dev_fmt("Failed to parse device tree: %d\n"),
2121 			   ret);
2122 		return ret;
2123 	}
2124 
2125 	ret = tegra_pcie_get_slot_regulators(pcie);
2126 	if (ret < 0) {
2127 		const char *level = KERN_ERR;
2128 
2129 		if (ret == -EPROBE_DEFER)
2130 			level = KERN_DEBUG;
2131 
2132 		dev_printk(level, dev,
2133 			   dev_fmt("Failed to get slot regulators: %d\n"),
2134 			   ret);
2135 		return ret;
2136 	}
2137 
2138 	if (pcie->pex_refclk_sel_gpiod)
2139 		gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1);
2140 
2141 	pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
2142 	if (IS_ERR(pcie->pex_ctl_supply)) {
2143 		ret = PTR_ERR(pcie->pex_ctl_supply);
2144 		if (ret != -EPROBE_DEFER)
2145 			dev_err(dev, "Failed to get regulator: %ld\n",
2146 				PTR_ERR(pcie->pex_ctl_supply));
2147 		return ret;
2148 	}
2149 
2150 	pcie->core_clk = devm_clk_get(dev, "core");
2151 	if (IS_ERR(pcie->core_clk)) {
2152 		dev_err(dev, "Failed to get core clock: %ld\n",
2153 			PTR_ERR(pcie->core_clk));
2154 		return PTR_ERR(pcie->core_clk);
2155 	}
2156 
2157 	pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2158 						      "appl");
2159 	if (!pcie->appl_res) {
2160 		dev_err(dev, "Failed to find \"appl\" region\n");
2161 		return -ENODEV;
2162 	}
2163 
2164 	pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res);
2165 	if (IS_ERR(pcie->appl_base))
2166 		return PTR_ERR(pcie->appl_base);
2167 
2168 	pcie->core_apb_rst = devm_reset_control_get(dev, "apb");
2169 	if (IS_ERR(pcie->core_apb_rst)) {
2170 		dev_err(dev, "Failed to get APB reset: %ld\n",
2171 			PTR_ERR(pcie->core_apb_rst));
2172 		return PTR_ERR(pcie->core_apb_rst);
2173 	}
2174 
2175 	phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL);
2176 	if (!phys)
2177 		return -ENOMEM;
2178 
2179 	for (i = 0; i < pcie->phy_count; i++) {
2180 		name = kasprintf(GFP_KERNEL, "p2u-%u", i);
2181 		if (!name) {
2182 			dev_err(dev, "Failed to create P2U string\n");
2183 			return -ENOMEM;
2184 		}
2185 		phys[i] = devm_phy_get(dev, name);
2186 		kfree(name);
2187 		if (IS_ERR(phys[i])) {
2188 			ret = PTR_ERR(phys[i]);
2189 			if (ret != -EPROBE_DEFER)
2190 				dev_err(dev, "Failed to get PHY: %d\n", ret);
2191 			return ret;
2192 		}
2193 	}
2194 
2195 	pcie->phys = phys;
2196 
2197 	atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2198 						   "atu_dma");
2199 	if (!atu_dma_res) {
2200 		dev_err(dev, "Failed to find \"atu_dma\" region\n");
2201 		return -ENODEV;
2202 	}
2203 	pcie->atu_dma_res = atu_dma_res;
2204 
2205 	pci->atu_size = resource_size(atu_dma_res);
2206 	pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
2207 	if (IS_ERR(pci->atu_base))
2208 		return PTR_ERR(pci->atu_base);
2209 
2210 	pcie->core_rst = devm_reset_control_get(dev, "core");
2211 	if (IS_ERR(pcie->core_rst)) {
2212 		dev_err(dev, "Failed to get core reset: %ld\n",
2213 			PTR_ERR(pcie->core_rst));
2214 		return PTR_ERR(pcie->core_rst);
2215 	}
2216 
2217 	pp->irq = platform_get_irq_byname(pdev, "intr");
2218 	if (pp->irq < 0)
2219 		return pp->irq;
2220 
2221 	pcie->bpmp = tegra_bpmp_get(dev);
2222 	if (IS_ERR(pcie->bpmp))
2223 		return PTR_ERR(pcie->bpmp);
2224 
2225 	platform_set_drvdata(pdev, pcie);
2226 
2227 	switch (pcie->of_data->mode) {
2228 	case DW_PCIE_RC_TYPE:
2229 		ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
2230 				       IRQF_SHARED, "tegra-pcie-intr", pcie);
2231 		if (ret) {
2232 			dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
2233 				ret);
2234 			goto fail;
2235 		}
2236 
2237 		ret = tegra_pcie_config_rp(pcie);
2238 		if (ret && ret != -ENOMEDIUM)
2239 			goto fail;
2240 		else
2241 			return 0;
2242 		break;
2243 
2244 	case DW_PCIE_EP_TYPE:
2245 		ret = devm_request_threaded_irq(dev, pp->irq,
2246 						tegra_pcie_ep_hard_irq,
2247 						tegra_pcie_ep_irq_thread,
2248 						IRQF_SHARED | IRQF_ONESHOT,
2249 						"tegra-pcie-ep-intr", pcie);
2250 		if (ret) {
2251 			dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
2252 				ret);
2253 			goto fail;
2254 		}
2255 
2256 		ret = tegra_pcie_config_ep(pcie, pdev);
2257 		if (ret < 0)
2258 			goto fail;
2259 		break;
2260 
2261 	default:
2262 		dev_err(dev, "Invalid PCIe device type %d\n",
2263 			pcie->of_data->mode);
		ret = -EINVAL;
2264 	}
2265 
2266 fail:
2267 	tegra_bpmp_put(pcie->bpmp);
2268 	return ret;
2269 }
2270 
2271 static int tegra_pcie_dw_remove(struct platform_device *pdev)
2272 {
2273 	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
2274 
2275 	if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
2276 		if (!pcie->link_state)
2277 			return 0;
2278 
2279 		debugfs_remove_recursive(pcie->debugfs);
2280 		tegra_pcie_deinit_controller(pcie);
2281 		pm_runtime_put_sync(pcie->dev);
2282 	} else {
2283 		disable_irq(pcie->pex_rst_irq);
2284 		pex_ep_event_pex_rst_assert(pcie);
2285 	}
2286 
2287 	pm_runtime_disable(pcie->dev);
2288 	tegra_bpmp_put(pcie->bpmp);
2289 	if (pcie->pex_refclk_sel_gpiod)
2290 		gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);
2291 
2292 	return 0;
2293 }
2294 
2295 static int tegra_pcie_dw_suspend_late(struct device *dev)
2296 {
2297 	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
2298 	u32 val;
2299 
2300 	if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
2301 		dev_err(dev, "Failed to suspend as Tegra PCIe is in EP mode\n");
2302 		return -EPERM;
2303 	}
2304 
2305 	if (!pcie->link_state)
2306 		return 0;
2307 
2308 	/* Enable HW_HOT_RST mode */
2309 	if (!pcie->of_data->has_sbr_reset_fix) {
2310 		val = appl_readl(pcie, APPL_CTRL);
2311 		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
2312 			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
2313 		val |= APPL_CTRL_HW_HOT_RST_EN;
2314 		appl_writel(pcie, val, APPL_CTRL);
2315 	}
2316 
2317 	return 0;
2318 }
2319 
2320 static int tegra_pcie_dw_suspend_noirq(struct device *dev)
2321 {
2322 	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
2323 
2324 	if (!pcie->link_state)
2325 		return 0;
2326 
2327 	tegra_pcie_downstream_dev_to_D0(pcie);
2328 	tegra_pcie_dw_pme_turnoff(pcie);
2329 	tegra_pcie_unconfig_controller(pcie);
2330 
2331 	return 0;
2332 }
2333 
2334 static int tegra_pcie_dw_resume_noirq(struct device *dev)
2335 {
2336 	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
2337 	int ret;
2338 
2339 	if (!pcie->link_state)
2340 		return 0;
2341 
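	/*
	 * The controller was fully unconfigured in suspend_noirq(), so
	 * reprogram it and retrain the link from scratch here.
	 */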
2342 	ret = tegra_pcie_config_controller(pcie, true);
2343 	if (ret < 0)
2344 		return ret;
2345 
2346 	ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
2347 	if (ret < 0) {
2348 		dev_err(dev, "Failed to init host: %d\n", ret);
2349 		goto fail_host_init;
2350 	}
2351 
2352 	dw_pcie_setup_rc(&pcie->pci.pp);
2353 
2354 	ret = tegra_pcie_dw_start_link(&pcie->pci);
2355 	if (ret < 0)
2356 		goto fail_host_init;
2357 
2358 	return 0;
2359 
2360 fail_host_init:
2361 	tegra_pcie_unconfig_controller(pcie);
2362 	return ret;
2363 }
2364 
2365 static int tegra_pcie_dw_resume_early(struct device *dev)
2366 {
2367 	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
2368 	u32 val;
2369 
2370 	if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
2371 		dev_err(dev, "Suspend is not supported in EP mode\n");
2372 		return -ENOTSUPP;
2373 	}
2374 
2375 	if (!pcie->link_state)
2376 		return 0;
2377 
2378 	/* Disable HW_HOT_RST mode */
2379 	if (!pcie->of_data->has_sbr_reset_fix) {
2380 		val = appl_readl(pcie, APPL_CTRL);
2381 		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
2382 			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
2383 		val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
2384 		       APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
2385 		val &= ~APPL_CTRL_HW_HOT_RST_EN;
2386 		appl_writel(pcie, val, APPL_CTRL);
2387 	}
2388 
2389 	return 0;
2390 }
2391 
2392 static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
2393 {
2394 	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
2395 
2396 	if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
2397 		if (!pcie->link_state)
2398 			return;
2399 
2400 		debugfs_remove_recursive(pcie->debugfs);
2401 		tegra_pcie_downstream_dev_to_D0(pcie);
2402 
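		/* Quiesce controller and MSI interrupts before link teardown */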
2403 		disable_irq(pcie->pci.pp.irq);
2404 		if (IS_ENABLED(CONFIG_PCI_MSI))
2405 			disable_irq(pcie->pci.pp.msi_irq[0]);
2406 
2407 		tegra_pcie_dw_pme_turnoff(pcie);
2408 		tegra_pcie_unconfig_controller(pcie);
2409 		pm_runtime_put_sync(pcie->dev);
2410 	} else {
2411 		disable_irq(pcie->pex_rst_irq);
2412 		pex_ep_event_pex_rst_assert(pcie);
2413 	}
2414 }
2415 
2416 static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_rc_of_data = {
2417 	.version = TEGRA194_DWC_IP_VER,
2418 	.mode = DW_PCIE_RC_TYPE,
2419 	.cdm_chk_int_en_bit = BIT(19),
2420 	/* Gen4 - 5, 6, 8 and 9 presets enabled */
2421 	.gen4_preset_vec = 0x360,
2422 	.n_fts = { 52, 52 },
2423 };
2424 
2425 static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_ep_of_data = {
2426 	.version = TEGRA194_DWC_IP_VER,
2427 	.mode = DW_PCIE_EP_TYPE,
2428 	.cdm_chk_int_en_bit = BIT(19),
2429 	/* Gen4 - 5, 6, 8 and 9 presets enabled */
2430 	.gen4_preset_vec = 0x360,
2431 	.n_fts = { 52, 52 },
2432 };
2433 
2434 static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_rc_of_data = {
2435 	.version = TEGRA234_DWC_IP_VER,
2436 	.mode = DW_PCIE_RC_TYPE,
2437 	.has_msix_doorbell_access_fix = true,
2438 	.has_sbr_reset_fix = true,
2439 	.has_l1ss_exit_fix = true,
2440 	.cdm_chk_int_en_bit = BIT(18),
2441 	/* Gen4 - 6, 8 and 9 presets enabled */
2442 	.gen4_preset_vec = 0x340,
2443 	.n_fts = { 52, 80 },
2444 };
2445 
2446 static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_ep_of_data = {
2447 	.version = TEGRA234_DWC_IP_VER,
2448 	.mode = DW_PCIE_EP_TYPE,
2449 	.has_l1ss_exit_fix = true,
2450 	.has_ltr_req_fix = true,
2451 	.cdm_chk_int_en_bit = BIT(18),
2452 	/* Gen4 - 6, 8 and 9 presets enabled */
2453 	.gen4_preset_vec = 0x340,
2454 	.n_fts = { 52, 80 },
2455 };
2456 
2457 static const struct of_device_id tegra_pcie_dw_of_match[] = {
2458 	{
2459 		.compatible = "nvidia,tegra194-pcie",
2460 		.data = &tegra194_pcie_dw_rc_of_data,
2461 	},
2462 	{
2463 		.compatible = "nvidia,tegra194-pcie-ep",
2464 		.data = &tegra194_pcie_dw_ep_of_data,
2465 	},
2466 	{
2467 		.compatible = "nvidia,tegra234-pcie",
2468 		.data = &tegra234_pcie_dw_rc_of_data,
2469 	},
2470 	{
2471 		.compatible = "nvidia,tegra234-pcie-ep",
2472 		.data = &tegra234_pcie_dw_ep_of_data,
2473 	},
2474 	{}
2475 };
2476 
2477 static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
2478 	.suspend_late = tegra_pcie_dw_suspend_late,
2479 	.suspend_noirq = tegra_pcie_dw_suspend_noirq,
2480 	.resume_noirq = tegra_pcie_dw_resume_noirq,
2481 	.resume_early = tegra_pcie_dw_resume_early,
2482 };
2483 
2484 static struct platform_driver tegra_pcie_dw_driver = {
2485 	.probe = tegra_pcie_dw_probe,
2486 	.remove = tegra_pcie_dw_remove,
2487 	.shutdown = tegra_pcie_dw_shutdown,
2488 	.driver = {
2489 		.name	= "tegra194-pcie",
2490 		.pm = &tegra_pcie_dw_pm_ops,
2491 		.of_match_table = tegra_pcie_dw_of_match,
2492 	},
2493 };
2494 module_platform_driver(tegra_pcie_dw_driver);
2495 
2496 MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
2497 
2498 MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
2499 MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
2500 MODULE_LICENSE("GPL v2");
2501