1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * PCIe host controller driver for the following SoCs
4  * Tegra194
5  * Tegra234
6  *
7  * Copyright (C) 2019-2022 NVIDIA Corporation.
8  *
9  * Author: Vidya Sagar <vidyas@nvidia.com>
10  */
11 
12 #include <linux/bitfield.h>
13 #include <linux/clk.h>
14 #include <linux/debugfs.h>
15 #include <linux/delay.h>
16 #include <linux/gpio/consumer.h>
17 #include <linux/interconnect.h>
18 #include <linux/interrupt.h>
19 #include <linux/iopoll.h>
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/of.h>
23 #include <linux/of_pci.h>
24 #include <linux/pci.h>
25 #include <linux/phy/phy.h>
26 #include <linux/pinctrl/consumer.h>
27 #include <linux/platform_device.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/random.h>
30 #include <linux/reset.h>
31 #include <linux/resource.h>
32 #include <linux/types.h>
33 #include "pcie-designware.h"
34 #include <soc/tegra/bpmp.h>
35 #include <soc/tegra/bpmp-abi.h>
36 #include "../../pci.h"
37 
38 #define TEGRA194_DWC_IP_VER			0x490A
39 #define TEGRA234_DWC_IP_VER			0x562A
40 
41 #define APPL_PINMUX				0x0
42 #define APPL_PINMUX_PEX_RST			BIT(0)
43 #define APPL_PINMUX_CLKREQ_OVERRIDE_EN		BIT(2)
44 #define APPL_PINMUX_CLKREQ_OVERRIDE		BIT(3)
45 #define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN	BIT(4)
46 #define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE	BIT(5)
47 
48 #define APPL_CTRL				0x4
49 #define APPL_CTRL_SYS_PRE_DET_STATE		BIT(6)
50 #define APPL_CTRL_LTSSM_EN			BIT(7)
51 #define APPL_CTRL_HW_HOT_RST_EN			BIT(20)
52 #define APPL_CTRL_HW_HOT_RST_MODE_MASK		GENMASK(1, 0)
53 #define APPL_CTRL_HW_HOT_RST_MODE_SHIFT		22
54 #define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST	0x1
55 #define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN	0x2
56 
57 #define APPL_INTR_EN_L0_0			0x8
58 #define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN	BIT(0)
59 #define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN	BIT(4)
60 #define APPL_INTR_EN_L0_0_INT_INT_EN		BIT(8)
61 #define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN	BIT(15)
62 #define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN	BIT(19)
63 #define APPL_INTR_EN_L0_0_SYS_INTR_EN		BIT(30)
64 #define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN	BIT(31)
65 
66 #define APPL_INTR_STATUS_L0			0xC
67 #define APPL_INTR_STATUS_L0_LINK_STATE_INT	BIT(0)
68 #define APPL_INTR_STATUS_L0_INT_INT		BIT(8)
69 #define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT	BIT(15)
70 #define APPL_INTR_STATUS_L0_PEX_RST_INT		BIT(16)
71 #define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT	BIT(18)
72 
73 #define APPL_INTR_EN_L1_0_0				0x1C
74 #define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN	BIT(1)
75 #define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN		BIT(3)
76 #define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN	BIT(30)
77 
78 #define APPL_INTR_STATUS_L1_0_0				0x20
79 #define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED	BIT(1)
80 #define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED	BIT(3)
81 #define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE		BIT(30)
82 
83 #define APPL_INTR_STATUS_L1_1			0x2C
84 #define APPL_INTR_STATUS_L1_2			0x30
85 #define APPL_INTR_STATUS_L1_3			0x34
86 #define APPL_INTR_STATUS_L1_6			0x3C
87 #define APPL_INTR_STATUS_L1_7			0x40
88 #define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED	BIT(1)
89 
90 #define APPL_INTR_EN_L1_8_0			0x44
91 #define APPL_INTR_EN_L1_8_BW_MGT_INT_EN		BIT(2)
92 #define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN	BIT(3)
93 #define APPL_INTR_EN_L1_8_INTX_EN		BIT(11)
94 #define APPL_INTR_EN_L1_8_AER_INT_EN		BIT(15)
95 
96 #define APPL_INTR_STATUS_L1_8_0			0x4C
97 #define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK	GENMASK(11, 6)
98 #define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS	BIT(2)
99 #define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS	BIT(3)
100 
101 #define APPL_INTR_STATUS_L1_9			0x54
102 #define APPL_INTR_STATUS_L1_10			0x58
103 #define APPL_INTR_STATUS_L1_11			0x64
104 #define APPL_INTR_STATUS_L1_13			0x74
105 #define APPL_INTR_STATUS_L1_14			0x78
106 #define APPL_INTR_STATUS_L1_15			0x7C
107 #define APPL_INTR_STATUS_L1_17			0x88
108 
109 #define APPL_INTR_EN_L1_18				0x90
110 #define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT		BIT(2)
111 #define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR		BIT(1)
112 #define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)
113 
114 #define APPL_INTR_STATUS_L1_18				0x94
115 #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT	BIT(2)
116 #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR	BIT(1)
117 #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)
118 
119 #define APPL_MSI_CTRL_1				0xAC
120 
121 #define APPL_MSI_CTRL_2				0xB0
122 
123 #define APPL_LEGACY_INTX			0xB8
124 
125 #define APPL_LTR_MSG_1				0xC4
126 #define LTR_MSG_REQ				BIT(15)
127 #define LTR_NOSNOOP_MSG_REQ			BIT(31)
128 
129 #define APPL_LTR_MSG_2				0xC8
130 #define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE	BIT(3)
131 
132 #define APPL_LINK_STATUS			0xCC
133 #define APPL_LINK_STATUS_RDLH_LINK_UP		BIT(0)
134 
135 #define APPL_DEBUG				0xD0
136 #define APPL_DEBUG_PM_LINKST_IN_L2_LAT		BIT(21)
137 #define APPL_DEBUG_PM_LINKST_IN_L0		0x11
138 #define APPL_DEBUG_LTSSM_STATE_MASK		GENMASK(8, 3)
139 #define APPL_DEBUG_LTSSM_STATE_SHIFT		3
140 #define LTSSM_STATE_PRE_DETECT			5
141 
142 #define APPL_RADM_STATUS			0xE4
143 #define APPL_PM_XMT_TURNOFF_STATE		BIT(0)
144 
145 #define APPL_DM_TYPE				0x100
146 #define APPL_DM_TYPE_MASK			GENMASK(3, 0)
147 #define APPL_DM_TYPE_RP				0x4
148 #define APPL_DM_TYPE_EP				0x0
149 
150 #define APPL_CFG_BASE_ADDR			0x104
151 #define APPL_CFG_BASE_ADDR_MASK			GENMASK(31, 12)
152 
153 #define APPL_CFG_IATU_DMA_BASE_ADDR		0x108
154 #define APPL_CFG_IATU_DMA_BASE_ADDR_MASK	GENMASK(31, 18)
155 
156 #define APPL_CFG_MISC				0x110
157 #define APPL_CFG_MISC_SLV_EP_MODE		BIT(14)
158 #define APPL_CFG_MISC_ARCACHE_MASK		GENMASK(13, 10)
159 #define APPL_CFG_MISC_ARCACHE_SHIFT		10
160 #define APPL_CFG_MISC_ARCACHE_VAL		3
161 
162 #define APPL_CFG_SLCG_OVERRIDE			0x114
163 #define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER	BIT(0)
164 
165 #define APPL_CAR_RESET_OVRD				0x12C
166 #define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N	BIT(0)
167 
168 #define IO_BASE_IO_DECODE				BIT(0)
169 #define IO_BASE_IO_DECODE_BIT8				BIT(8)
170 
171 #define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE		BIT(0)
172 #define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE	BIT(16)
173 
174 #define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF	0x718
175 #define CFG_TIMER_CTRL_ACK_NAK_SHIFT	(19)
176 
177 #define N_FTS_VAL					52
178 #define FTS_VAL						52
179 
180 #define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT	0x8D0
181 #define AMBA_ERROR_RESPONSE_RRS_SHIFT		3
182 #define AMBA_ERROR_RESPONSE_RRS_MASK		GENMASK(1, 0)
183 #define AMBA_ERROR_RESPONSE_RRS_OKAY		0
184 #define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFFFFFF	1
185 #define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001	2
186 
187 #define MSIX_ADDR_MATCH_LOW_OFF			0x940
188 #define MSIX_ADDR_MATCH_LOW_OFF_EN		BIT(0)
189 #define MSIX_ADDR_MATCH_LOW_OFF_MASK		GENMASK(31, 2)
190 
191 #define MSIX_ADDR_MATCH_HIGH_OFF		0x944
192 #define MSIX_ADDR_MATCH_HIGH_OFF_MASK		GENMASK(31, 0)
193 
194 #define PORT_LOGIC_MSIX_DOORBELL			0x948
195 
196 #define CAP_SPCIE_CAP_OFF			0x154
197 #define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK	GENMASK(3, 0)
198 #define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK	GENMASK(11, 8)
199 #define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT	8
200 
201 #define PME_ACK_TIMEOUT 10000
202 
203 #define LTSSM_TIMEOUT 50000	/* 50ms */
204 
205 #define GEN3_GEN4_EQ_PRESET_INIT	5
206 
207 #define GEN1_CORE_CLK_FREQ	62500000
208 #define GEN2_CORE_CLK_FREQ	125000000
209 #define GEN3_CORE_CLK_FREQ	250000000
210 #define GEN4_CORE_CLK_FREQ	500000000
211 
212 #define LTR_MSG_TIMEOUT		(100 * 1000)
213 
214 #define PERST_DEBOUNCE_TIME	(5 * 1000)
215 
216 #define EP_STATE_DISABLED	0
217 #define EP_STATE_ENABLED	1
218 
219 static const unsigned int pcie_gen_freq[] = {
220 	GEN1_CORE_CLK_FREQ,	/* PCI_EXP_LNKSTA_CLS == 0; undefined */
221 	GEN1_CORE_CLK_FREQ,
222 	GEN2_CORE_CLK_FREQ,
223 	GEN3_CORE_CLK_FREQ,
224 	GEN4_CORE_CLK_FREQ
225 };
226 
227 struct tegra_pcie_dw_of_data {
228 	u32 version;
229 	enum dw_pcie_device_mode mode;
230 	bool has_msix_doorbell_access_fix;
231 	bool has_sbr_reset_fix;
232 	bool has_l1ss_exit_fix;
233 	bool has_ltr_req_fix;
234 	u32 cdm_chk_int_en_bit;
235 	u32 gen4_preset_vec;
236 	u8 n_fts[2];
237 };
238 
239 struct tegra_pcie_dw {
240 	struct device *dev;
241 	struct resource *appl_res;
242 	struct resource *dbi_res;
243 	struct resource *atu_dma_res;
244 	void __iomem *appl_base;
245 	struct clk *core_clk;
246 	struct reset_control *core_apb_rst;
247 	struct reset_control *core_rst;
248 	struct dw_pcie pci;
249 	struct tegra_bpmp *bpmp;
250 
251 	struct tegra_pcie_dw_of_data *of_data;
252 
253 	bool supports_clkreq;
254 	bool enable_cdm_check;
255 	bool enable_srns;
256 	bool link_state;
257 	bool update_fc_fixup;
258 	bool enable_ext_refclk;
259 	u8 init_link_width;
260 	u32 msi_ctrl_int;
261 	u32 num_lanes;
262 	u32 cid;
263 	u32 ras_des_cap;
264 	u32 pcie_cap_base;
265 	u32 aspm_cmrt;
266 	u32 aspm_pwr_on_t;
267 	u32 aspm_l0s_enter_lat;
268 
269 	struct regulator *pex_ctl_supply;
270 	struct regulator *slot_ctl_3v3;
271 	struct regulator *slot_ctl_12v;
272 
273 	unsigned int phy_count;
274 	struct phy **phys;
275 
276 	struct dentry *debugfs;
277 
278 	/* Endpoint mode specific */
279 	struct gpio_desc *pex_rst_gpiod;
280 	struct gpio_desc *pex_refclk_sel_gpiod;
281 	unsigned int pex_rst_irq;
282 	int ep_state;
283 	long link_status;
284 	struct icc_path *icc_path;
285 };
286 
287 static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
288 {
289 	return container_of(pci, struct tegra_pcie_dw, pci);
290 }
291 
292 static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
293 			       const u32 reg)
294 {
295 	writel_relaxed(value, pcie->appl_base + reg);
296 }
297 
298 static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
299 {
300 	return readl_relaxed(pcie->appl_base + reg);
301 }
302 
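/*
 * Scale the interconnect bandwidth request and the core clock to match
 * the link speed and width currently reported in the Link Status
 * register.
 */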
303 static void tegra_pcie_icc_set(struct tegra_pcie_dw *pcie)
304 {
305 	struct dw_pcie *pci = &pcie->pci;
306 	u32 val, speed, width;
307 
308 	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
309 
310 	speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, val);
311 	width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
312 
313 	val = width * PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]);
314 
315 	if (icc_set_bw(pcie->icc_path, Mbps_to_icc(val), 0))
316 		dev_err(pcie->dev, "can't set bw[%u]\n", val);
317 
318 	if (speed >= ARRAY_SIZE(pcie_gen_freq))
319 		speed = 0;
320 
321 	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]);
322 }
323 
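/*
 * If link bandwidth management reports that the link came up narrower
 * than the initially trained width, treat the link as bad and retrain
 * it at Gen-1 speed.
 */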
324 static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
325 {
326 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
327 	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
328 	u32 current_link_width;
329 	u16 val;
330 
331 	/*
332 	 * NOTE: Since this scenario is uncommon and the link is not stable
333 	 * anyway, don't wait to confirm whether the link is really
334 	 * transitioning to Gen-2 speed.
335 	 */
336 	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
337 	if (val & PCI_EXP_LNKSTA_LBMS) {
338 		current_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
339 		if (pcie->init_link_width > current_link_width) {
340 			dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
341 			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
342 						PCI_EXP_LNKCTL2);
343 			val &= ~PCI_EXP_LNKCTL2_TLS;
344 			val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
345 			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
346 					   PCI_EXP_LNKCTL2, val);
347 
348 			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
349 						PCI_EXP_LNKCTL);
350 			val |= PCI_EXP_LNKCTL_RL;
351 			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
352 					   PCI_EXP_LNKCTL, val);
353 		}
354 	}
355 }
356 
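/*
 * Top-level interrupt handler for Root Port mode: handles link state
 * events (including the SBR/surprise link down workaround), bandwidth
 * management interrupts and CDM register check results.
 */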
357 static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
358 {
359 	struct tegra_pcie_dw *pcie = arg;
360 	struct dw_pcie *pci = &pcie->pci;
361 	struct dw_pcie_rp *pp = &pci->pp;
362 	u32 val, status_l0, status_l1;
363 	u16 val_w;
364 
365 	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
366 	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
367 		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
368 		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
369 		if (!pcie->of_data->has_sbr_reset_fix &&
370 		    status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
371 			/* SBR & Surprise Link Down WAR */
372 			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
373 			val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
374 			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
375 			udelay(1);
376 			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
377 			val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
378 			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
379 
380 			val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
381 			val |= PORT_LOGIC_SPEED_CHANGE;
382 			dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
383 		}
384 	}
385 
386 	if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) {
387 		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
388 		if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
389 			appl_writel(pcie,
390 				    APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
391 				    APPL_INTR_STATUS_L1_8_0);
392 			apply_bad_link_workaround(pp);
393 		}
394 		if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
395 			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
396 						  PCI_EXP_LNKSTA);
397 			val_w |= PCI_EXP_LNKSTA_LBMS;
398 			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
399 					   PCI_EXP_LNKSTA, val_w);
400 
401 			appl_writel(pcie,
402 				    APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
403 				    APPL_INTR_STATUS_L1_8_0);
404 
405 			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
406 						  PCI_EXP_LNKSTA);
407 			dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w &
408 				PCI_EXP_LNKSTA_CLS);
409 		}
410 	}
411 
412 	if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
413 		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
414 		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
415 		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
416 			dev_info(pci->dev, "CDM check complete\n");
417 			val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
418 		}
419 		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
420 			dev_err(pci->dev, "CDM comparison mismatch\n");
421 			val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
422 		}
423 		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
424 			dev_err(pci->dev, "CDM Logic error\n");
425 			val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
426 		}
427 		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
428 		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
429 		dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val);
430 	}
431 
432 	return IRQ_HANDLED;
433 }
434 
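/*
 * A hot reset has completed: clear all latched interrupt statuses and
 * re-enable LTSSM so that link training can start again.
 */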
435 static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
436 {
437 	u32 val;
438 
439 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
440 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
441 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
442 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
443 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
444 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
445 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
446 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
447 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
448 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
449 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
450 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
451 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
452 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
453 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
454 	appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);
455 
456 	val = appl_readl(pcie, APPL_CTRL);
457 	val |= APPL_CTRL_LTSSM_EN;
458 	appl_writel(pcie, val, APPL_CTRL);
459 }
460 
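/*
 * Threaded half of the endpoint IRQ handler: notifies the EP framework
 * of link-up, updates the interconnect bandwidth, and sends an LTR
 * message upstream once the host has enabled bus mastering.
 */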
461 static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
462 {
463 	struct tegra_pcie_dw *pcie = arg;
464 	struct dw_pcie_ep *ep = &pcie->pci.ep;
465 	struct dw_pcie *pci = &pcie->pci;
466 	u32 val;
467 
468 	if (test_and_clear_bit(0, &pcie->link_status))
469 		dw_pcie_ep_linkup(ep);
470 
471 	tegra_pcie_icc_set(pcie);
472 
473 	if (pcie->of_data->has_ltr_req_fix)
474 		return IRQ_HANDLED;
475 
476 	/* If EP doesn't advertise L1SS, just return */
477 	if (!pci->l1ss_support)
478 		return IRQ_HANDLED;
479 
480 	/* Check if BME is set to '1' */
481 	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
482 	if (val & PCI_COMMAND_MASTER) {
483 		ktime_t timeout;
484 
485 		/* 110us for both snoop and no-snoop */
486 		val = FIELD_PREP(PCI_LTR_VALUE_MASK, 110) |
487 		      FIELD_PREP(PCI_LTR_SCALE_MASK, 2) |
488 		      LTR_MSG_REQ |
489 		      FIELD_PREP(PCI_LTR_NOSNOOP_VALUE, 110) |
490 		      FIELD_PREP(PCI_LTR_NOSNOOP_SCALE, 2) |
491 		      LTR_NOSNOOP_MSG_REQ;
492 		appl_writel(pcie, val, APPL_LTR_MSG_1);
493 
494 		/* Send LTR upstream */
495 		val = appl_readl(pcie, APPL_LTR_MSG_2);
496 		val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
497 		appl_writel(pcie, val, APPL_LTR_MSG_2);
498 
499 		timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
500 		for (;;) {
501 			val = appl_readl(pcie, APPL_LTR_MSG_2);
502 			if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
503 				break;
504 			if (ktime_after(ktime_get(), timeout))
505 				break;
506 			usleep_range(1000, 1100);
507 		}
508 		if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
509 			dev_err(pcie->dev, "Failed to send LTR message\n");
510 	}
511 
512 	return IRQ_HANDLED;
513 }
514 
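/*
 * Hard half of the endpoint IRQ handler: acknowledges link state and
 * BME change events and wakes the IRQ thread when further processing is
 * needed.
 */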
515 static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
516 {
517 	struct tegra_pcie_dw *pcie = arg;
518 	int spurious = 1;
519 	u32 status_l0, status_l1, link_status;
520 
521 	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
522 	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
523 		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
524 		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
525 
526 		if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
527 			pex_ep_event_hot_rst_done(pcie);
528 
529 		if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
530 			link_status = appl_readl(pcie, APPL_LINK_STATUS);
531 			if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
532 				dev_dbg(pcie->dev, "Link is up with Host\n");
533 				set_bit(0, &pcie->link_status);
534 				return IRQ_WAKE_THREAD;
535 			}
536 		}
537 
538 		spurious = 0;
539 	}
540 
541 	if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
542 		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
543 		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);
544 
545 		if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
546 			return IRQ_WAKE_THREAD;
547 
548 		spurious = 0;
549 	}
550 
551 	if (spurious) {
552 		dev_warn(pcie->dev, "Spurious interrupt (STATUS = 0x%08X)\n",
553 			 status_l0);
554 		appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
555 	}
556 
557 	return IRQ_HANDLED;
558 }
559 
560 static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
561 				     int size, u32 *val)
562 {
563 	struct dw_pcie_rp *pp = bus->sysdata;
564 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
565 	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
566 
567 	/*
568 	 * This is an endpoint-mode-specific register that happens to appear
569 	 * even when the controller is operating in Root Port mode, and the
570 	 * system hangs when it is accessed while the link is in the ASPM-L1
571 	 * state. So skip accessing it altogether.
572 	 */
573 	if (!pcie->of_data->has_msix_doorbell_access_fix &&
574 	    !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
575 		*val = 0x00000000;
576 		return PCIBIOS_SUCCESSFUL;
577 	}
578 
579 	return pci_generic_config_read(bus, devfn, where, size, val);
580 }
581 
582 static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
583 				     int size, u32 val)
584 {
585 	struct dw_pcie_rp *pp = bus->sysdata;
586 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
587 	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
588 
589 	/*
590 	 * This is an endpoint-mode-specific register that happens to appear
591 	 * even when the controller is operating in Root Port mode, and the
592 	 * system hangs when it is accessed while the link is in the ASPM-L1
593 	 * state. So skip accessing it altogether.
594 	 */
595 	if (!pcie->of_data->has_msix_doorbell_access_fix &&
596 	    !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
597 		return PCIBIOS_SUCCESSFUL;
598 
599 	return pci_generic_config_write(bus, devfn, where, size, val);
600 }
601 
602 static struct pci_ops tegra_pci_ops = {
603 	.map_bus = dw_pcie_own_conf_map_bus,
604 	.read = tegra_pcie_dw_rd_own_conf,
605 	.write = tegra_pcie_dw_wr_own_conf,
606 };
607 
608 #if defined(CONFIG_PCIEASPM)
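/*
 * Select a Group-5 event in the RAS DES event counter, enable counting
 * and return the current counter value.
 */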
609 static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
610 {
611 	u32 val;
612 
613 	val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
614 				PCIE_RAS_DES_EVENT_COUNTER_CONTROL);
615 	val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
616 	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
617 	val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
618 	val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
619 	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
620 			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
621 	val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
622 				PCIE_RAS_DES_EVENT_COUNTER_DATA);
623 
624 	return val;
625 }
626 
627 static int aspm_state_cnt(struct seq_file *s, void *data)
628 {
629 	struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
630 				     dev_get_drvdata(s->private);
631 	u32 val;
632 
633 	seq_printf(s, "Tx L0s entry count : %u\n",
634 		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));
635 
636 	seq_printf(s, "Rx L0s entry count : %u\n",
637 		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));
638 
639 	seq_printf(s, "Link L1 entry count : %u\n",
640 		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));
641 
642 	seq_printf(s, "Link L1.1 entry count : %u\n",
643 		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));
644 
645 	seq_printf(s, "Link L1.2 entry count : %u\n",
646 		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));
647 
648 	/* Clear all counters */
649 	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
650 			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL,
651 			   EVENT_COUNTER_ALL_CLEAR);
652 
653 	/* Re-enable counting */
654 	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
655 	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
656 	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
657 			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
658 
659 	return 0;
660 }
661 
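/*
 * Program the L1 Substates timing parameters (T_cmrt and T_pwr_on),
 * enable the RAS DES ASPM event counters and set the L0s/L1 entrance
 * latencies.
 */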
662 static void init_host_aspm(struct tegra_pcie_dw *pcie)
663 {
664 	struct dw_pcie *pci = &pcie->pci;
665 	u32 l1ss, val;
666 
667 	l1ss = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
668 
669 	pcie->ras_des_cap = dw_pcie_find_ext_capability(&pcie->pci,
670 							PCI_EXT_CAP_ID_VNDR);
671 
672 	/* Enable ASPM counters */
673 	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
674 	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
675 	dw_pcie_writel_dbi(pci, pcie->ras_des_cap +
676 			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
677 
678 	/* Program T_cmrt and T_pwr_on values */
679 	val = dw_pcie_readl_dbi(pci, l1ss + PCI_L1SS_CAP);
680 	val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
681 	val |= (pcie->aspm_cmrt << 8);
682 	val |= (pcie->aspm_pwr_on_t << 19);
683 	dw_pcie_writel_dbi(pci, l1ss + PCI_L1SS_CAP, val);
684 
685 	if (pcie->supports_clkreq)
686 		pci->l1ss_support = true;
687 
688 	/* Program L0s and L1 entrance latencies */
689 	val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
690 	val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK;
691 	val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT);
692 	val |= PORT_AFR_ENTER_ASPM;
693 	dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
694 }
695 
696 static void init_debugfs(struct tegra_pcie_dw *pcie)
697 {
698 	struct device *dev = pcie->dev;
699 	char *name;
700 
701 	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
702 	if (!name)
703 		return;
704 
705 	pcie->debugfs = debugfs_create_dir(name, NULL);
706 
707 	debugfs_create_devm_seqfile(dev, "aspm_state_cnt", pcie->debugfs,
708 				    aspm_state_cnt);
709 }
710 #else
711 static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
712 static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
713 #endif
714 
715 static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
716 {
717 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
718 	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
719 	u32 val;
720 	u16 val_w;
721 
722 	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
723 	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
724 	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
725 
726 	if (!pcie->of_data->has_sbr_reset_fix) {
727 		val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
728 		val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
729 		appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
730 	}
731 
732 	if (pcie->enable_cdm_check) {
733 		val = appl_readl(pcie, APPL_INTR_EN_L0_0);
734 		val |= pcie->of_data->cdm_chk_int_en_bit;
735 		appl_writel(pcie, val, APPL_INTR_EN_L0_0);
736 
737 		val = appl_readl(pcie, APPL_INTR_EN_L1_18);
738 		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
739 		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
740 		appl_writel(pcie, val, APPL_INTR_EN_L1_18);
741 	}
742 
743 	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
744 				  PCI_EXP_LNKSTA);
745 	pcie->init_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val_w);
746 
747 	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
748 				  PCI_EXP_LNKCTL);
749 	val_w |= PCI_EXP_LNKCTL_LBMIE;
750 	dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL,
751 			   val_w);
752 }
753 
754 static void tegra_pcie_enable_intx_interrupts(struct dw_pcie_rp *pp)
755 {
756 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
757 	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
758 	u32 val;
759 
760 	/* Enable INTX interrupt generation */
761 	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
762 	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
763 	val |= APPL_INTR_EN_L0_0_INT_INT_EN;
764 	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
765 
766 	val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
767 	val |= APPL_INTR_EN_L1_8_INTX_EN;
768 	val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
769 	val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
770 	if (IS_ENABLED(CONFIG_PCIEAER))
771 		val |= APPL_INTR_EN_L1_8_AER_INT_EN;
772 	appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
773 }
774 
775 static void tegra_pcie_enable_msi_interrupts(struct dw_pcie_rp *pp)
776 {
777 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
778 	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
779 	u32 val;
780 
781 	/* Enable MSI interrupt generation */
782 	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
783 	val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
784 	val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN;
785 	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
786 }
787 
788 static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp)
789 {
790 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
791 	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
792 
793 	/* Clear interrupt statuses before enabling interrupts */
794 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
795 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
796 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
797 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
798 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
799 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
800 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
801 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
802 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
803 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
804 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
805 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
806 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
807 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
808 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
809 
810 	tegra_pcie_enable_system_interrupts(pp);
811 	tegra_pcie_enable_intx_interrupts(pp);
812 	if (IS_ENABLED(CONFIG_PCI_MSI))
813 		tegra_pcie_enable_msi_interrupts(pp);
814 }
815 
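/*
 * Program the per-lane Gen-3/Gen-4 equalization presets and the preset
 * request vectors used during link equalization.
 */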
816 static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
817 {
818 	struct dw_pcie *pci = &pcie->pci;
819 	u32 val, offset, i;
820 
821 	/* Program init preset */
822 	for (i = 0; i < pcie->num_lanes; i++) {
823 		val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2));
824 		val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
825 		val |= GEN3_GEN4_EQ_PRESET_INIT;
826 		val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
827 		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
828 			   CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
829 		dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val);
830 
831 		offset = dw_pcie_find_ext_capability(pci,
832 						     PCI_EXT_CAP_ID_PL_16GT) +
833 				PCI_PL_16GT_LE_CTRL;
834 		val = dw_pcie_readb_dbi(pci, offset + i);
835 		val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
836 		val |= GEN3_GEN4_EQ_PRESET_INIT;
837 		val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
838 		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
839 			PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
840 		dw_pcie_writeb_dbi(pci, offset + i, val);
841 	}
842 
843 	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
844 	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
845 	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
846 
847 	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
848 	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC;
849 	val |= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC, 0x3ff);
850 	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE;
851 	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
852 
853 	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
854 	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
855 	val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT);
856 	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
857 
858 	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
859 	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC;
860 	val |= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC,
861 			  pcie->of_data->gen4_preset_vec);
862 	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE;
863 	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
864 
865 	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
866 	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
867 	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
868 }
869 
870 static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
871 {
872 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
873 	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
874 	u32 val;
875 	u16 val_16;
876 
877 	pp->bridge->ops = &tegra_pci_ops;
878 
879 	if (!pcie->pcie_cap_base)
880 		pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
881 							      PCI_CAP_ID_EXP);
882 
883 	val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
884 	val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
885 	dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);
886 
887 	val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE);
888 	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE;
889 	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE;
890 	dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val);
891 
892 	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
893 
894 	/* Enable 0xFFFF0001 response for RRS */
895 	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
896 	val &= ~(AMBA_ERROR_RESPONSE_RRS_MASK << AMBA_ERROR_RESPONSE_RRS_SHIFT);
897 	val |= (AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001 <<
898 		AMBA_ERROR_RESPONSE_RRS_SHIFT);
899 	dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);
900 
901 	/* Clear the Slot Clock Configuration bit in SRNS configurations */
902 	if (pcie->enable_srns) {
903 		val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
904 					   PCI_EXP_LNKSTA);
905 		val_16 &= ~PCI_EXP_LNKSTA_SLC;
906 		dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
907 				   val_16);
908 	}
909 
910 	config_gen3_gen4_eq_presets(pcie);
911 
912 	init_host_aspm(pcie);
913 
914 	if (!pcie->of_data->has_l1ss_exit_fix) {
915 		val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
916 		val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
917 		dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
918 	}
919 
920 	if (pcie->update_fc_fixup) {
921 		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
922 		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
923 		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
924 	}
925 
926 	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
927 
928 	return 0;
929 }
930 
931 static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
932 {
933 	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
934 	struct dw_pcie_rp *pp = &pci->pp;
935 	u32 val, offset, tmp;
936 	bool retry = true;
937 
938 	if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
939 		enable_irq(pcie->pex_rst_irq);
940 		return 0;
941 	}
942 
943 retry_link:
944 	/* Assert RST */
945 	val = appl_readl(pcie, APPL_PINMUX);
946 	val &= ~APPL_PINMUX_PEX_RST;
947 	appl_writel(pcie, val, APPL_PINMUX);
948 
949 	usleep_range(100, 200);
950 
951 	/* Enable LTSSM */
952 	val = appl_readl(pcie, APPL_CTRL);
953 	val |= APPL_CTRL_LTSSM_EN;
954 	appl_writel(pcie, val, APPL_CTRL);
955 
956 	/* De-assert RST */
957 	val = appl_readl(pcie, APPL_PINMUX);
958 	val |= APPL_PINMUX_PEX_RST;
959 	appl_writel(pcie, val, APPL_PINMUX);
960 
961 	msleep(100);
962 
963 	if (dw_pcie_wait_for_link(pci)) {
964 		if (!retry)
965 			return 0;
966 		/*
967 		 * Some endpoints cannot bring the link up if the Root Port
968 		 * has the Data Link Feature (DLF) enabled. See PCIe r4.0,
969 		 * v1.0, sec 3.4.2 & 7.7.4 for more information on Scaled
970 		 * Flow Control and DLF. So confirm that this is indeed the
971 		 * case here and attempt to bring the link up once again with
972 		 * DLF disabled.
973 		 */
974 		val = appl_readl(pcie, APPL_DEBUG);
975 		val &= APPL_DEBUG_LTSSM_STATE_MASK;
976 		val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
977 		tmp = appl_readl(pcie, APPL_LINK_STATUS);
978 		tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
979 		if (!(val == APPL_DEBUG_PM_LINKST_IN_L0 && !tmp)) {
980 			/* Link is down for a legitimate reason */
981 			return 0;
982 		}
983 
984 		dev_info(pci->dev, "Link is down in DLL\n");
985 		dev_info(pci->dev, "Trying again with DLFE disabled\n");
986 		/* Disable LTSSM */
987 		val = appl_readl(pcie, APPL_CTRL);
988 		val &= ~APPL_CTRL_LTSSM_EN;
989 		appl_writel(pcie, val, APPL_CTRL);
990 
991 		reset_control_assert(pcie->core_rst);
992 		reset_control_deassert(pcie->core_rst);
993 
994 		offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
995 		val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
996 		val &= ~PCI_DLF_EXCHANGE_ENABLE;
997 		dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);
998 
999 		tegra_pcie_dw_host_init(pp);
1000 		dw_pcie_setup_rc(pp);
1001 
1002 		retry = false;
1003 		goto retry_link;
1004 	}
1005 
1006 	tegra_pcie_icc_set(pcie);
1007 
1008 	tegra_pcie_enable_interrupts(pp);
1009 
1010 	return 0;
1011 }
1012 
1013 static bool tegra_pcie_dw_link_up(struct dw_pcie *pci)
1014 {
1015 	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
1016 	u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
1017 
1018 	return val & PCI_EXP_LNKSTA_DLLLA;
1019 }
1020 
1021 static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
1022 {
1023 	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
1024 
1025 	disable_irq(pcie->pex_rst_irq);
1026 }
1027 
1028 static const struct dw_pcie_ops tegra_dw_pcie_ops = {
1029 	.link_up = tegra_pcie_dw_link_up,
1030 	.start_link = tegra_pcie_dw_start_link,
1031 	.stop_link = tegra_pcie_dw_stop_link,
1032 };
1033 
1034 static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
1035 	.init = tegra_pcie_dw_host_init,
1036 };
1037 
1038 static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
1039 {
1040 	unsigned int phy_count = pcie->phy_count;
1041 
1042 	while (phy_count--) {
1043 		phy_power_off(pcie->phys[phy_count]);
1044 		phy_exit(pcie->phys[phy_count]);
1045 	}
1046 }
1047 
1048 static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
1049 {
1050 	unsigned int i;
1051 	int ret;
1052 
1053 	for (i = 0; i < pcie->phy_count; i++) {
1054 		ret = phy_init(pcie->phys[i]);
1055 		if (ret < 0)
1056 			goto phy_power_off;
1057 
1058 		ret = phy_power_on(pcie->phys[i]);
1059 		if (ret < 0)
1060 			goto phy_exit;
1061 	}
1062 
1063 	return 0;
1064 
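	/*
	 * Unwind in reverse order. The phy_exit label sits inside the loop:
	 * jumping to it runs only phy_exit() for the PHY whose
	 * phy_power_on() failed, after which the loop powers off and exits
	 * all earlier PHYs.
	 */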
1065 phy_power_off:
1066 	while (i--) {
1067 		phy_power_off(pcie->phys[i]);
1068 phy_exit:
1069 		phy_exit(pcie->phys[i]);
1070 	}
1071 
1072 	return ret;
1073 }
1074 
1075 static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
1076 {
1077 	struct platform_device *pdev = to_platform_device(pcie->dev);
1078 	struct device_node *np = pcie->dev->of_node;
1079 	int ret;
1080 
1081 	pcie->dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
1082 	if (!pcie->dbi_res) {
1083 		dev_err(pcie->dev, "Failed to find \"dbi\" region\n");
1084 		return -ENODEV;
1085 	}
1086 
1087 	ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
1088 	if (ret < 0) {
1089 		dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
1090 		return ret;
1091 	}
1092 
1093 	ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us",
1094 				   &pcie->aspm_pwr_on_t);
1095 	if (ret < 0)
1096 		dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n",
1097 			 ret);
1098 
1099 	ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us",
1100 				   &pcie->aspm_l0s_enter_lat);
1101 	if (ret < 0)
1102 		dev_info(pcie->dev,
1103 			 "Failed to read ASPM L0s Entrance latency: %d\n", ret);
1104 
1105 	ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes);
1106 	if (ret < 0) {
1107 		dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret);
1108 		return ret;
1109 	}
1110 
1111 	ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
1112 	if (ret) {
1113 		dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
1114 		return ret;
1115 	}
1116 
1117 	ret = of_property_count_strings(np, "phy-names");
1118 	if (ret < 0) {
1119 		dev_err(pcie->dev, "Failed to find PHY entries: %d\n",
1120 			ret);
1121 		return ret;
1122 	}
1123 	pcie->phy_count = ret;
1124 
1125 	if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
1126 		pcie->update_fc_fixup = true;
1127 
1128 	/* RP using an external REFCLK is supported only in Tegra234 */
1129 	if (pcie->of_data->version == TEGRA194_DWC_IP_VER) {
1130 		if (pcie->of_data->mode == DW_PCIE_EP_TYPE)
1131 			pcie->enable_ext_refclk = true;
1132 	} else {
1133 		pcie->enable_ext_refclk =
1134 			of_property_read_bool(pcie->dev->of_node,
1135 					      "nvidia,enable-ext-refclk");
1136 	}
1137 
1138 	pcie->supports_clkreq =
1139 		of_property_read_bool(pcie->dev->of_node, "supports-clkreq");
1140 
1141 	pcie->enable_cdm_check =
1142 		of_property_read_bool(np, "snps,enable-cdm-check");
1143 
1144 	if (pcie->of_data->version == TEGRA234_DWC_IP_VER)
1145 		pcie->enable_srns =
1146 			of_property_read_bool(np, "nvidia,enable-srns");
1147 
1148 	if (pcie->of_data->mode == DW_PCIE_RC_TYPE)
1149 		return 0;
1150 
1151 	/* Endpoint mode specific DT entries */
1152 	pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN);
1153 	if (IS_ERR(pcie->pex_rst_gpiod)) {
1154 		int err = PTR_ERR(pcie->pex_rst_gpiod);
1155 		const char *level = KERN_ERR;
1156 
1157 		if (err == -EPROBE_DEFER)
1158 			level = KERN_DEBUG;
1159 
1160 		dev_printk(level, pcie->dev,
1161 			   dev_fmt("Failed to get PERST GPIO: %d\n"),
1162 			   err);
1163 		return err;
1164 	}
1165 
1166 	pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
1167 						    "nvidia,refclk-select",
1168 						    GPIOD_OUT_HIGH);
1169 	if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
1170 		int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
1171 		const char *level = KERN_ERR;
1172 
1173 		if (err == -EPROBE_DEFER)
1174 			level = KERN_DEBUG;
1175 
1176 		dev_printk(level, pcie->dev,
1177 			   dev_fmt("Failed to get REFCLK select GPIOs: %d\n"),
1178 			   err);
1179 		pcie->pex_refclk_sel_gpiod = NULL;
1180 	}
1181 
1182 	return 0;
1183 }
1184 
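/* Ask the BPMP firmware to power the PCIe controller up or down */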
1185 static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
1186 					  bool enable)
1187 {
1188 	struct mrq_uphy_response resp;
1189 	struct tegra_bpmp_message msg;
1190 	struct mrq_uphy_request req;
1191 	int err;
1192 
1193 	/*
1194 	 * Controller-5 doesn't need to have its state set by BPMP-FW in
1195 	 * Tegra194
1196 	 */
1197 	if (pcie->of_data->version == TEGRA194_DWC_IP_VER && pcie->cid == 5)
1198 		return 0;
1199 
1200 	memset(&req, 0, sizeof(req));
1201 	memset(&resp, 0, sizeof(resp));
1202 
1203 	req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
1204 	req.controller_state.pcie_controller = pcie->cid;
1205 	req.controller_state.enable = enable;
1206 
1207 	memset(&msg, 0, sizeof(msg));
1208 	msg.mrq = MRQ_UPHY;
1209 	msg.tx.data = &req;
1210 	msg.tx.size = sizeof(req);
1211 	msg.rx.data = &resp;
1212 	msg.rx.size = sizeof(resp);
1213 
1214 	err = tegra_bpmp_transfer(pcie->bpmp, &msg);
1215 	if (err)
1216 		return err;
1217 	if (msg.rx.ret)
1218 		return -EINVAL;
1219 
1220 	return 0;
1221 }
1222 
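/* Ask the BPMP firmware to initialize or turn off the UPHY PLL used in EP mode */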
1223 static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
1224 					 bool enable)
1225 {
1226 	struct mrq_uphy_response resp;
1227 	struct tegra_bpmp_message msg;
1228 	struct mrq_uphy_request req;
1229 	int err;
1230 
1231 	memset(&req, 0, sizeof(req));
1232 	memset(&resp, 0, sizeof(resp));
1233 
1234 	if (enable) {
1235 		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT;
1236 		req.ep_ctrlr_pll_init.ep_controller = pcie->cid;
1237 	} else {
1238 		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
1239 		req.ep_ctrlr_pll_off.ep_controller = pcie->cid;
1240 	}
1241 
1242 	memset(&msg, 0, sizeof(msg));
1243 	msg.mrq = MRQ_UPHY;
1244 	msg.tx.data = &req;
1245 	msg.tx.size = sizeof(req);
1246 	msg.rx.data = &resp;
1247 	msg.rx.size = sizeof(resp);
1248 
1249 	err = tegra_bpmp_transfer(pcie->bpmp, &msg);
1250 	if (err)
1251 		return err;
1252 	if (msg.rx.ret)
1253 		return -EINVAL;
1254 
1255 	return 0;
1256 }
1257 
1258 static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
1259 {
1260 	struct dw_pcie_rp *pp = &pcie->pci.pp;
1261 	struct pci_bus *child, *root_port_bus = NULL;
1262 	struct pci_dev *pdev;
1263 
1264 	/*
1265 	 * With some endpoints, the link doesn't go into the L2 state on
1266 	 * Tegra if they are not in the D0 state. So make sure that the
1267 	 * immediate downstream devices are in D0 before sending PME_Turn_Off
1268 	 * to put the link into L2.
1269 	 * This is as per PCI Express Base r4.0 v1.0 (September 27, 2017),
1270 	 * sec 5.2 Link State Power Management (page 428).
1271 	 */
1272 
1273 	list_for_each_entry(child, &pp->bridge->bus->children, node) {
1274 		if (child->parent == pp->bridge->bus) {
1275 			root_port_bus = child;
1276 			break;
1277 		}
1278 	}
1279 
1280 	if (!root_port_bus) {
1281 		dev_err(pcie->dev, "Failed to find downstream bus of Root Port\n");
1282 		return;
1283 	}
1284 
1285 	/* Bring downstream devices to D0 if they are not already there */
1286 	list_for_each_entry(pdev, &root_port_bus->devices, bus_list) {
1287 		if (PCI_SLOT(pdev->devfn) == 0) {
1288 			if (pci_set_power_state(pdev, PCI_D0))
1289 				dev_err(pcie->dev,
1290 					"Failed to transition %s to D0 state\n",
1291 					dev_name(&pdev->dev));
1292 		}
1293 	}
1294 }
1295 
1296 static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
1297 {
1298 	pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
1299 	if (IS_ERR(pcie->slot_ctl_3v3)) {
1300 		if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV)
1301 			return PTR_ERR(pcie->slot_ctl_3v3);
1302 
1303 		pcie->slot_ctl_3v3 = NULL;
1304 	}
1305 
1306 	pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v");
1307 	if (IS_ERR(pcie->slot_ctl_12v)) {
1308 		if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV)
1309 			return PTR_ERR(pcie->slot_ctl_12v);
1310 
1311 		pcie->slot_ctl_12v = NULL;
1312 	}
1313 
1314 	return 0;
1315 }
1316 
1317 static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
1318 {
1319 	int ret;
1320 
1321 	if (pcie->slot_ctl_3v3) {
1322 		ret = regulator_enable(pcie->slot_ctl_3v3);
1323 		if (ret < 0) {
1324 			dev_err(pcie->dev,
1325 				"Failed to enable 3.3V slot supply: %d\n", ret);
1326 			return ret;
1327 		}
1328 	}
1329 
1330 	if (pcie->slot_ctl_12v) {
1331 		ret = regulator_enable(pcie->slot_ctl_12v);
1332 		if (ret < 0) {
1333 			dev_err(pcie->dev,
1334 				"Failed to enable 12V slot supply: %d\n", ret);
1335 			goto fail_12v_enable;
1336 		}
1337 	}
1338 
1339 	/*
1340 	 * According to PCI Express Card Electromechanical Specification
1341 	 * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive)
1342 	 * should be a minimum of 100ms.
1343 	 */
1344 	if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v)
1345 		msleep(100);
1346 
1347 	return 0;
1348 
1349 fail_12v_enable:
1350 	if (pcie->slot_ctl_3v3)
1351 		regulator_disable(pcie->slot_ctl_3v3);
1352 	return ret;
1353 }
1354 
1355 static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
1356 {
1357 	if (pcie->slot_ctl_12v)
1358 		regulator_disable(pcie->slot_ctl_12v);
1359 	if (pcie->slot_ctl_3v3)
1360 		regulator_disable(pcie->slot_ctl_3v3);
1361 }
1362 
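/*
 * Power up and configure the controller for Root Port operation: enable
 * the controller via BPMP, turn on the supplies, clocks and PHYs, and
 * program the application logic registers.
 */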
1363 static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
1364 					bool en_hw_hot_rst)
1365 {
1366 	int ret;
1367 	u32 val;
1368 
1369 	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
1370 	if (ret) {
1371 		dev_err(pcie->dev,
1372 			"Failed to enable controller %u: %d\n", pcie->cid, ret);
1373 		return ret;
1374 	}
1375 
1376 	if (pcie->enable_ext_refclk) {
1377 		ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
1378 		if (ret) {
1379 			dev_err(pcie->dev, "Failed to init UPHY: %d\n", ret);
1380 			goto fail_pll_init;
1381 		}
1382 	}
1383 
1384 	ret = tegra_pcie_enable_slot_regulators(pcie);
1385 	if (ret < 0)
1386 		goto fail_slot_reg_en;
1387 
1388 	ret = regulator_enable(pcie->pex_ctl_supply);
1389 	if (ret < 0) {
1390 		dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret);
1391 		goto fail_reg_en;
1392 	}
1393 
1394 	ret = clk_prepare_enable(pcie->core_clk);
1395 	if (ret) {
1396 		dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret);
1397 		goto fail_core_clk;
1398 	}
1399 
1400 	ret = reset_control_deassert(pcie->core_apb_rst);
1401 	if (ret) {
1402 		dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n",
1403 			ret);
1404 		goto fail_core_apb_rst;
1405 	}
1406 
1407 	if (en_hw_hot_rst || pcie->of_data->has_sbr_reset_fix) {
1408 		/* Enable HW_HOT_RST mode */
1409 		val = appl_readl(pcie, APPL_CTRL);
1410 		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
1411 			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
1412 		val |= (APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN <<
1413 			APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
1414 		val |= APPL_CTRL_HW_HOT_RST_EN;
1415 		appl_writel(pcie, val, APPL_CTRL);
1416 	}
1417 
1418 	ret = tegra_pcie_enable_phy(pcie);
1419 	if (ret) {
1420 		dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret);
1421 		goto fail_phy;
1422 	}
1423 
1424 	/* Update CFG base address */
1425 	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
1426 		    APPL_CFG_BASE_ADDR);
1427 
1428 	/* Configure this core for RP mode operation */
1429 	appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE);
1430 
1431 	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);
1432 
1433 	val = appl_readl(pcie, APPL_CTRL);
1434 	appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL);
1435 
1436 	val = appl_readl(pcie, APPL_CFG_MISC);
1437 	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
1438 	appl_writel(pcie, val, APPL_CFG_MISC);
1439 
1440 	if (pcie->enable_srns || pcie->enable_ext_refclk) {
1441 		/*
1442 		 * When the Tegra PCIe RP is using an external clock, it cannot
1443 		 * supply the same clock to its downstream hierarchy. Hence,
1444 		 * gate the PCIe RP REFCLK out pads when the RP & EP are using
1445 		 * separate clocks or the RP is using an external REFCLK.
1446 		 */
1447 		val = appl_readl(pcie, APPL_PINMUX);
1448 		val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
1449 		val &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
1450 		appl_writel(pcie, val, APPL_PINMUX);
1451 	}
1452 
1453 	if (!pcie->supports_clkreq) {
1454 		val = appl_readl(pcie, APPL_PINMUX);
1455 		val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
1456 		val &= ~APPL_PINMUX_CLKREQ_OVERRIDE;
1457 		appl_writel(pcie, val, APPL_PINMUX);
1458 	}
1459 
1460 	/* Update iATU_DMA base address */
1461 	appl_writel(pcie,
1462 		    pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
1463 		    APPL_CFG_IATU_DMA_BASE_ADDR);
1464 
1465 	reset_control_deassert(pcie->core_rst);
1466 
1467 	return ret;
1468 
1469 fail_phy:
1470 	reset_control_assert(pcie->core_apb_rst);
1471 fail_core_apb_rst:
1472 	clk_disable_unprepare(pcie->core_clk);
1473 fail_core_clk:
1474 	regulator_disable(pcie->pex_ctl_supply);
1475 fail_reg_en:
1476 	tegra_pcie_disable_slot_regulators(pcie);
1477 fail_slot_reg_en:
1478 	if (pcie->enable_ext_refclk)
1479 		tegra_pcie_bpmp_set_pll_state(pcie, false);
1480 fail_pll_init:
1481 	tegra_pcie_bpmp_set_ctrl_state(pcie, false);
1482 
1483 	return ret;
1484 }
1485 
1486 static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
1487 {
1488 	int ret;
1489 
1490 	ret = reset_control_assert(pcie->core_rst);
1491 	if (ret)
1492 		dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret);
1493 
1494 	tegra_pcie_disable_phy(pcie);
1495 
1496 	ret = reset_control_assert(pcie->core_apb_rst);
1497 	if (ret)
1498 		dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);
1499 
1500 	clk_disable_unprepare(pcie->core_clk);
1501 
1502 	ret = regulator_disable(pcie->pex_ctl_supply);
1503 	if (ret)
1504 		dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);
1505 
1506 	tegra_pcie_disable_slot_regulators(pcie);
1507 
1508 	if (pcie->enable_ext_refclk) {
1509 		ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
1510 		if (ret)
1511 			dev_err(pcie->dev, "Failed to deinit UPHY: %d\n", ret);
1512 	}
1513 
1514 	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
1515 	if (ret)
1516 		dev_err(pcie->dev, "Failed to disable controller %u: %d\n",
1517 			pcie->cid, ret);
1518 }
1519 
1520 static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
1521 {
1522 	struct dw_pcie *pci = &pcie->pci;
1523 	struct dw_pcie_rp *pp = &pci->pp;
1524 	int ret;
1525 
1526 	ret = tegra_pcie_config_controller(pcie, false);
1527 	if (ret < 0)
1528 		return ret;
1529 
1530 	pp->ops = &tegra_pcie_dw_host_ops;
1531 
1532 	ret = dw_pcie_host_init(pp);
1533 	if (ret < 0) {
1534 		dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret);
1535 		goto fail_host_init;
1536 	}
1537 
1538 	return 0;
1539 
1540 fail_host_init:
1541 	tegra_pcie_unconfig_controller(pcie);
1542 	return ret;
1543 }
1544 
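/*
 * Broadcast PME_Turn_Off and poll for the link to settle in the L2
 * state. Returns 0 on success or if the link is already down, or a
 * timeout error otherwise.
 */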
1545 static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
1546 {
1547 	u32 val;
1548 
1549 	if (!tegra_pcie_dw_link_up(&pcie->pci))
1550 		return 0;
1551 
1552 	val = appl_readl(pcie, APPL_RADM_STATUS);
1553 	val |= APPL_PM_XMT_TURNOFF_STATE;
1554 	appl_writel(pcie, val, APPL_RADM_STATUS);
1555 
1556 	return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val,
1557 				 val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
1558 				 1, PME_ACK_TIMEOUT);
1559 }
1560 
1561 static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
1562 {
1563 	u32 data;
1564 	int err;
1565 
1566 	if (!tegra_pcie_dw_link_up(&pcie->pci)) {
1567 		dev_dbg(pcie->dev, "PCIe link is not up\n");
1568 		return;
1569 	}
1570 
1571 	/*
1572 	 * The PCIe controller exits L2 only if a reset is applied, so the
1573 	 * controller doesn't handle interrupts in that state. But in cases
1574 	 * where L2 entry fails, PERST# is asserted, which can trigger a
1575 	 * surprise link down AER. However, this function is called from
1576 	 * suspend_noirq(), so the AER interrupt will not be processed.
1577 	 * Disable all interrupts to avoid such a scenario.
1578 	 */
1579 	appl_writel(pcie, 0x0, APPL_INTR_EN_L0_0);
1580 
1581 	if (tegra_pcie_try_link_l2(pcie)) {
1582 		dev_info(pcie->dev, "Link didn't transition to L2 state\n");
1583 		/*
1584 		 * The TX lane clock frequency resets to Gen1 only if the link
1585 		 * is in the L2 or detect state.
1586 		 * So assert PERST# to the endpoint to force the RP into the
1587 		 * detect state.
1588 		 */
1589 		data = appl_readl(pcie, APPL_PINMUX);
1590 		data &= ~APPL_PINMUX_PEX_RST;
1591 		appl_writel(pcie, data, APPL_PINMUX);
1592 
1593 		/*
1594 		 * Some cards do not go to detect state even after de-asserting
1595 		 * PERST#. So, disable LTSSM to bring the link to detect state.
1596 		 */
1597 		data = readl(pcie->appl_base + APPL_CTRL);
1598 		data &= ~APPL_CTRL_LTSSM_EN;
1599 		writel(data, pcie->appl_base + APPL_CTRL);
1600 
1601 		err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
1602 						data,
1603 						((data &
1604 						APPL_DEBUG_LTSSM_STATE_MASK) >>
1605 						APPL_DEBUG_LTSSM_STATE_SHIFT) ==
1606 						LTSSM_STATE_PRE_DETECT,
1607 						1, LTSSM_TIMEOUT);
1608 		if (err)
1609 			dev_info(pcie->dev, "Link didn't go to detect state\n");
1610 	}
1611 	/*
1612 	 * DBI registers may not be accessible after this, as PLL-E would be
1613 	 * down depending on how CLKREQ is pulled by the endpoint.
1614 	 */
1615 	data = appl_readl(pcie, APPL_PINMUX);
1616 	data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE);
1617 	/* Cut REFCLK to slot */
1618 	data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
1619 	data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
1620 	appl_writel(pcie, data, APPL_PINMUX);
1621 }
1622 
1623 static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
1624 {
1625 	tegra_pcie_downstream_dev_to_D0(pcie);
1626 	dw_pcie_host_deinit(&pcie->pci.pp);
1627 	tegra_pcie_dw_pme_turnoff(pcie);
1628 	tegra_pcie_unconfig_controller(pcie);
1629 }
1630 
1631 static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
1632 {
1633 	struct device *dev = pcie->dev;
1634 	int ret;
1635 
1636 	pm_runtime_enable(dev);
1637 
1638 	ret = pm_runtime_get_sync(dev);
1639 	if (ret < 0) {
1640 		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
1641 			ret);
1642 		goto fail_pm_get_sync;
1643 	}
1644 
1645 	ret = pinctrl_pm_select_default_state(dev);
1646 	if (ret < 0) {
1647 		dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
1648 		goto fail_pm_get_sync;
1649 	}
1650 
1651 	ret = tegra_pcie_init_controller(pcie);
1652 	if (ret < 0) {
1653 		dev_err(dev, "Failed to initialize controller: %d\n", ret);
1654 		goto fail_pm_get_sync;
1655 	}
1656 
1657 	pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
1658 	if (!pcie->link_state) {
1659 		ret = -ENOMEDIUM;
1660 		goto fail_host_init;
1661 	}
1662 
1663 	init_debugfs(pcie);
1664 
1665 	return ret;
1666 
1667 fail_host_init:
1668 	tegra_pcie_deinit_controller(pcie);
1669 fail_pm_get_sync:
1670 	pm_runtime_put_sync(dev);
1671 	pm_runtime_disable(dev);
1672 	return ret;
1673 }
1674 
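/*
 * PERST# was asserted by the host: force the LTSSM into the detect
 * state, then power down the PHY, clocks and controller.
 */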
1675 static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
1676 {
1677 	u32 val;
1678 	int ret;
1679 
1680 	if (pcie->ep_state == EP_STATE_DISABLED)
1681 		return;
1682 
1683 	/* Disable LTSSM */
1684 	val = appl_readl(pcie, APPL_CTRL);
1685 	val &= ~APPL_CTRL_LTSSM_EN;
1686 	appl_writel(pcie, val, APPL_CTRL);
1687 
1688 	ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
1689 				 ((val & APPL_DEBUG_LTSSM_STATE_MASK) >>
1690 				 APPL_DEBUG_LTSSM_STATE_SHIFT) ==
1691 				 LTSSM_STATE_PRE_DETECT,
1692 				 1, LTSSM_TIMEOUT);
1693 	if (ret)
1694 		dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret);
1695 
1696 	reset_control_assert(pcie->core_rst);
1697 
1698 	tegra_pcie_disable_phy(pcie);
1699 
1700 	reset_control_assert(pcie->core_apb_rst);
1701 
1702 	clk_disable_unprepare(pcie->core_clk);
1703 
1704 	pm_runtime_put_sync(pcie->dev);
1705 
1706 	if (pcie->enable_ext_refclk) {
1707 		ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
1708 		if (ret)
1709 			dev_err(pcie->dev, "Failed to turn off UPHY: %d\n",
1710 				ret);
1711 	}
1712 
1713 	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
1714 	if (ret)
1715 		dev_err(pcie->dev, "Failed to disable controller: %d\n", ret);
1716 
1717 	pcie->ep_state = EP_STATE_DISABLED;
1718 	dev_dbg(pcie->dev, "Deinitialization of endpoint completed\n");
1719 }
1720 
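/*
 * PERST# was deasserted by the host: power the controller back up and
 * reprogram it for endpoint-mode operation.
 */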
1721 static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
1722 {
1723 	struct dw_pcie *pci = &pcie->pci;
1724 	struct dw_pcie_ep *ep = &pci->ep;
1725 	struct device *dev = pcie->dev;
1726 	u32 val;
1727 	int ret;
1728 	u16 val_16;
1729 
1730 	if (pcie->ep_state == EP_STATE_ENABLED)
1731 		return;
1732 
1733 	ret = pm_runtime_resume_and_get(dev);
1734 	if (ret < 0) {
1735 		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
1736 			ret);
1737 		return;
1738 	}
1739 
1740 	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
1741 	if (ret) {
1742 		dev_err(pcie->dev, "Failed to enable controller %u: %d\n",
1743 			pcie->cid, ret);
1744 		goto fail_set_ctrl_state;
1745 	}
1746 
1747 	if (pcie->enable_ext_refclk) {
1748 		ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
1749 		if (ret) {
1750 			dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n",
1751 				ret);
1752 			goto fail_pll_init;
1753 		}
1754 	}
1755 
1756 	ret = clk_prepare_enable(pcie->core_clk);
1757 	if (ret) {
1758 		dev_err(dev, "Failed to enable core clock: %d\n", ret);
1759 		goto fail_core_clk_enable;
1760 	}
1761 
1762 	ret = reset_control_deassert(pcie->core_apb_rst);
1763 	if (ret) {
1764 		dev_err(dev, "Failed to deassert core APB reset: %d\n", ret);
1765 		goto fail_core_apb_rst;
1766 	}
1767 
1768 	ret = tegra_pcie_enable_phy(pcie);
1769 	if (ret) {
1770 		dev_err(dev, "Failed to enable PHY: %d\n", ret);
1771 		goto fail_phy;
1772 	}
1773 
1774 	/* Perform cleanup that requires refclk */
1775 	pci_epc_deinit_notify(pcie->pci.ep.epc);
1776 	dw_pcie_ep_cleanup(&pcie->pci.ep);
1777 
1778 	/* Clear any stale interrupt statuses */
1779 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
1780 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
1781 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
1782 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
1783 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
1784 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
1785 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
1786 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
1787 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
1788 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
1789 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
1790 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
1791 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
1792 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
1793 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
1794 
	/* Configure this core for EP mode operation */
1796 	val = appl_readl(pcie, APPL_DM_TYPE);
1797 	val &= ~APPL_DM_TYPE_MASK;
1798 	val |= APPL_DM_TYPE_EP;
1799 	appl_writel(pcie, val, APPL_DM_TYPE);
1800 
1801 	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);
1802 
1803 	val = appl_readl(pcie, APPL_CTRL);
1804 	val |= APPL_CTRL_SYS_PRE_DET_STATE;
1805 	val |= APPL_CTRL_HW_HOT_RST_EN;
1806 	appl_writel(pcie, val, APPL_CTRL);
1807 
1808 	val = appl_readl(pcie, APPL_CFG_MISC);
1809 	val |= APPL_CFG_MISC_SLV_EP_MODE;
1810 	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
1811 	appl_writel(pcie, val, APPL_CFG_MISC);
1812 
1813 	val = appl_readl(pcie, APPL_PINMUX);
1814 	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
1815 	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
1816 	appl_writel(pcie, val, APPL_PINMUX);
1817 
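	/*
	 * Program the application logic with the physical bases of the DBI
	 * and iATU/DMA register spaces.
	 */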
1818 	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
1819 		    APPL_CFG_BASE_ADDR);
1820 
1821 	appl_writel(pcie, pcie->atu_dma_res->start &
1822 		    APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
1823 		    APPL_CFG_IATU_DMA_BASE_ADDR);
1824 
1825 	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
1826 	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
1827 	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
1828 	val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
1829 	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
1830 
1831 	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
1832 	val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN;
1833 	val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
1834 	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
1835 
1836 	reset_control_deassert(pcie->core_rst);
1837 
1838 	if (pcie->update_fc_fixup) {
1839 		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
1840 		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
1841 		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
1842 	}
1843 
1844 	config_gen3_gen4_eq_presets(pcie);
1845 
1846 	init_host_aspm(pcie);
1847 
1848 	if (!pcie->of_data->has_l1ss_exit_fix) {
1849 		val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
1850 		val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
1851 		dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
1852 	}
1853 
1854 	pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
1855 						      PCI_CAP_ID_EXP);
1856 
	/* Clear the Slot Clock Configuration bit when running in SRNS configuration */
1858 	if (pcie->enable_srns) {
1859 		val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
1860 					   PCI_EXP_LNKSTA);
1861 		val_16 &= ~PCI_EXP_LNKSTA_SLC;
1862 		dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
1863 				   val_16);
1864 	}
1865 
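	/* Run the core clock at the Gen4 rate ahead of link training */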
1866 	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
1867 
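	/*
	 * Point the MSI-X address-match window at the dedicated MSI page so
	 * that a local write to ep->msi_mem (see
	 * tegra_pcie_ep_raise_msix_irq()) is turned into an MSI-X message
	 * on the link.
	 */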
1868 	val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
1869 	val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
1870 	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
1871 	val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
1872 	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
1873 
1874 	ret = dw_pcie_ep_init_registers(ep);
1875 	if (ret) {
1876 		dev_err(dev, "Failed to complete initialization: %d\n", ret);
1877 		goto fail_init_complete;
1878 	}
1879 
1880 	pci_epc_init_notify(ep->epc);
1881 
1882 	/* Program the private control to allow sending LTR upstream */
1883 	if (pcie->of_data->has_ltr_req_fix) {
1884 		val = appl_readl(pcie, APPL_LTR_MSG_2);
1885 		val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
1886 		appl_writel(pcie, val, APPL_LTR_MSG_2);
1887 	}
1888 
1889 	/* Enable LTSSM */
1890 	val = appl_readl(pcie, APPL_CTRL);
1891 	val |= APPL_CTRL_LTSSM_EN;
1892 	appl_writel(pcie, val, APPL_CTRL);
1893 
1894 	pcie->ep_state = EP_STATE_ENABLED;
	dev_dbg(dev, "Initialization of endpoint completed\n");
1896 
1897 	return;
1898 
1899 fail_init_complete:
1900 	reset_control_assert(pcie->core_rst);
1901 	tegra_pcie_disable_phy(pcie);
1902 fail_phy:
1903 	reset_control_assert(pcie->core_apb_rst);
1904 fail_core_apb_rst:
1905 	clk_disable_unprepare(pcie->core_clk);
1906 fail_core_clk_enable:
1907 	tegra_pcie_bpmp_set_pll_state(pcie, false);
1908 fail_pll_init:
1909 	tegra_pcie_bpmp_set_ctrl_state(pcie, false);
1910 fail_set_ctrl_state:
1911 	pm_runtime_put_sync(dev);
1912 }
1913 
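/*
 * With the usual active-low PERST# description in the device tree, a
 * logical 1 here means the host is holding reset asserted.
 */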
1914 static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
1915 {
1916 	struct tegra_pcie_dw *pcie = arg;
1917 
1918 	if (gpiod_get_value(pcie->pex_rst_gpiod))
1919 		pex_ep_event_pex_rst_assert(pcie);
1920 	else
1921 		pex_ep_event_pex_rst_deassert(pcie);
1922 
1923 	return IRQ_HANDLED;
1924 }
1925 
1926 static void tegra_pcie_ep_init(struct dw_pcie_ep *ep)
1927 {
1928 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1929 	enum pci_barno bar;
1930 
1931 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
1932 		dw_pcie_ep_reset_bar(pci, bar);
}
1934 
1935 static int tegra_pcie_ep_raise_intx_irq(struct tegra_pcie_dw *pcie, u16 irq)
1936 {
1937 	/* Tegra194 supports only INTA */
1938 	if (irq > 1)
1939 		return -EINVAL;
1940 
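	/* Pulse the INTX emulation bit: assert the virtual INTA, then deassert it */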
1941 	appl_writel(pcie, 1, APPL_LEGACY_INTX);
1942 	usleep_range(1000, 2000);
1943 	appl_writel(pcie, 0, APPL_LEGACY_INTX);
1944 	return 0;
1945 }
1946 
1947 static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
1948 {
1949 	if (unlikely(irq > 32))
1950 		return -EINVAL;
1951 
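	/* MSI vector numbers are 1-based; bit (irq - 1) requests that vector */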
1952 	appl_writel(pcie, BIT(irq - 1), APPL_MSI_CTRL_1);
1953 
1954 	return 0;
1955 }
1956 
1957 static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
1958 {
1959 	struct dw_pcie_ep *ep = &pcie->pci.ep;
1960 
1961 	writel(irq, ep->msi_mem);
1962 
1963 	return 0;
1964 }
1965 
1966 static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
1967 				   unsigned int type, u16 interrupt_num)
1968 {
1969 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1970 	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
1971 
1972 	switch (type) {
1973 	case PCI_IRQ_INTX:
1974 		return tegra_pcie_ep_raise_intx_irq(pcie, interrupt_num);
1975 
1976 	case PCI_IRQ_MSI:
1977 		return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);
1978 
1979 	case PCI_IRQ_MSIX:
1980 		return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);
1981 
1982 	default:
1983 		dev_err(pci->dev, "Unknown IRQ type\n");
1984 		return -EPERM;
1985 	}
1986 
1987 	return 0;
1988 }
1989 
1990 static const struct pci_epc_features tegra_pcie_epc_features = {
1991 	.linkup_notifier = true,
1992 	.msi_capable = true,
1993 	.bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M,
1994 			.only_64bit = true, },
1995 	.bar[BAR_1] = { .type = BAR_RESERVED, },
1996 	.bar[BAR_2] = { .type = BAR_RESERVED, },
1997 	.bar[BAR_3] = { .type = BAR_RESERVED, },
1998 	.bar[BAR_4] = { .type = BAR_RESERVED, },
1999 	.bar[BAR_5] = { .type = BAR_RESERVED, },
2000 	.align = SZ_64K,
2001 };
2002 
static const struct pci_epc_features *
2004 tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
2005 {
2006 	return &tegra_pcie_epc_features;
2007 }
2008 
2009 static const struct dw_pcie_ep_ops pcie_ep_ops = {
2010 	.init = tegra_pcie_ep_init,
2011 	.raise_irq = tegra_pcie_ep_raise_irq,
2012 	.get_features = tegra_pcie_ep_get_features,
2013 };
2014 
2015 static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
2016 				struct platform_device *pdev)
2017 {
2018 	struct dw_pcie *pci = &pcie->pci;
2019 	struct device *dev = pcie->dev;
2020 	struct dw_pcie_ep *ep;
2021 	char *name;
2022 	int ret;
2023 
2024 	ep = &pci->ep;
2025 	ep->ops = &pcie_ep_ops;
2026 
2027 	ep->page_size = SZ_64K;
2028 
2029 	ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
2030 	if (ret < 0) {
2031 		dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n",
2032 			ret);
2033 		return ret;
2034 	}
2035 
2036 	ret = gpiod_to_irq(pcie->pex_rst_gpiod);
2037 	if (ret < 0) {
2038 		dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret);
2039 		return ret;
2040 	}
2041 	pcie->pex_rst_irq = (unsigned int)ret;
2042 
2043 	name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq",
2044 			      pcie->cid);
2045 	if (!name) {
2046 		dev_err(dev, "Failed to create PERST IRQ string\n");
2047 		return -ENOMEM;
2048 	}
2049 
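	/*
	 * Keep the PERST# IRQ disabled for now; it is enabled later, once
	 * the endpoint side is started and ready to handle reset events.
	 */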
2050 	irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN);
2051 
2052 	pcie->ep_state = EP_STATE_DISABLED;
2053 
2054 	ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL,
2055 					tegra_pcie_ep_pex_rst_irq,
2056 					IRQF_TRIGGER_RISING |
2057 					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
2058 					name, (void *)pcie);
2059 	if (ret < 0) {
2060 		dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret);
2061 		return ret;
2062 	}
2063 
2064 	pm_runtime_enable(dev);
2065 
2066 	ret = dw_pcie_ep_init(ep);
2067 	if (ret) {
2068 		dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
2069 			ret);
2070 		pm_runtime_disable(dev);
2071 		return ret;
2072 	}
2073 
2074 	return 0;
2075 }
2076 
2077 static int tegra_pcie_dw_probe(struct platform_device *pdev)
2078 {
2079 	const struct tegra_pcie_dw_of_data *data;
2080 	struct device *dev = &pdev->dev;
2081 	struct resource *atu_dma_res;
2082 	struct tegra_pcie_dw *pcie;
2083 	struct dw_pcie_rp *pp;
2084 	struct dw_pcie *pci;
2085 	struct phy **phys;
2086 	char *name;
2087 	int ret;
2088 	u32 i;
2089 
2090 	data = of_device_get_match_data(dev);
2091 
2092 	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
2093 	if (!pcie)
2094 		return -ENOMEM;
2095 
2096 	pci = &pcie->pci;
2097 	pci->dev = &pdev->dev;
2098 	pci->ops = &tegra_dw_pcie_ops;
2099 	pcie->dev = &pdev->dev;
2100 	pcie->of_data = (struct tegra_pcie_dw_of_data *)data;
2101 	pci->n_fts[0] = pcie->of_data->n_fts[0];
2102 	pci->n_fts[1] = pcie->of_data->n_fts[1];
2103 	pp = &pci->pp;
2104 	pp->num_vectors = MAX_MSI_IRQS;
2105 
2106 	ret = tegra_pcie_dw_parse_dt(pcie);
2107 	if (ret < 0) {
2108 		const char *level = KERN_ERR;
2109 
2110 		if (ret == -EPROBE_DEFER)
2111 			level = KERN_DEBUG;
2112 
2113 		dev_printk(level, dev,
2114 			   dev_fmt("Failed to parse device tree: %d\n"),
2115 			   ret);
2116 		return ret;
2117 	}
2118 
2119 	ret = tegra_pcie_get_slot_regulators(pcie);
2120 	if (ret < 0) {
2121 		const char *level = KERN_ERR;
2122 
2123 		if (ret == -EPROBE_DEFER)
2124 			level = KERN_DEBUG;
2125 
2126 		dev_printk(level, dev,
2127 			   dev_fmt("Failed to get slot regulators: %d\n"),
2128 			   ret);
2129 		return ret;
2130 	}
2131 
2132 	if (pcie->pex_refclk_sel_gpiod)
2133 		gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1);
2134 
2135 	pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
2136 	if (IS_ERR(pcie->pex_ctl_supply)) {
2137 		ret = PTR_ERR(pcie->pex_ctl_supply);
2138 		if (ret != -EPROBE_DEFER)
2139 			dev_err(dev, "Failed to get regulator: %ld\n",
2140 				PTR_ERR(pcie->pex_ctl_supply));
2141 		return ret;
2142 	}
2143 
2144 	pcie->core_clk = devm_clk_get(dev, "core");
2145 	if (IS_ERR(pcie->core_clk)) {
2146 		dev_err(dev, "Failed to get core clock: %ld\n",
2147 			PTR_ERR(pcie->core_clk));
2148 		return PTR_ERR(pcie->core_clk);
2149 	}
2150 
2151 	pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2152 						      "appl");
2153 	if (!pcie->appl_res) {
2154 		dev_err(dev, "Failed to find \"appl\" region\n");
2155 		return -ENODEV;
2156 	}
2157 
2158 	pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res);
2159 	if (IS_ERR(pcie->appl_base))
2160 		return PTR_ERR(pcie->appl_base);
2161 
2162 	pcie->core_apb_rst = devm_reset_control_get(dev, "apb");
2163 	if (IS_ERR(pcie->core_apb_rst)) {
2164 		dev_err(dev, "Failed to get APB reset: %ld\n",
2165 			PTR_ERR(pcie->core_apb_rst));
2166 		return PTR_ERR(pcie->core_apb_rst);
2167 	}
2168 
2169 	phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL);
2170 	if (!phys)
2171 		return -ENOMEM;
2172 
2173 	for (i = 0; i < pcie->phy_count; i++) {
2174 		name = kasprintf(GFP_KERNEL, "p2u-%u", i);
2175 		if (!name) {
2176 			dev_err(dev, "Failed to create P2U string\n");
2177 			return -ENOMEM;
2178 		}
2179 		phys[i] = devm_phy_get(dev, name);
2180 		kfree(name);
2181 		if (IS_ERR(phys[i])) {
2182 			ret = PTR_ERR(phys[i]);
2183 			if (ret != -EPROBE_DEFER)
2184 				dev_err(dev, "Failed to get PHY: %d\n", ret);
2185 			return ret;
2186 		}
2187 	}
2188 
2189 	pcie->phys = phys;
2190 
2191 	atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2192 						   "atu_dma");
2193 	if (!atu_dma_res) {
2194 		dev_err(dev, "Failed to find \"atu_dma\" region\n");
2195 		return -ENODEV;
2196 	}
2197 	pcie->atu_dma_res = atu_dma_res;
2198 
2199 	pci->atu_size = resource_size(atu_dma_res);
2200 	pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
2201 	if (IS_ERR(pci->atu_base))
2202 		return PTR_ERR(pci->atu_base);
2203 
2204 	pcie->core_rst = devm_reset_control_get(dev, "core");
2205 	if (IS_ERR(pcie->core_rst)) {
2206 		dev_err(dev, "Failed to get core reset: %ld\n",
2207 			PTR_ERR(pcie->core_rst));
2208 		return PTR_ERR(pcie->core_rst);
2209 	}
2210 
2211 	pp->irq = platform_get_irq_byname(pdev, "intr");
2212 	if (pp->irq < 0)
2213 		return pp->irq;
2214 
2215 	pcie->bpmp = tegra_bpmp_get(dev);
2216 	if (IS_ERR(pcie->bpmp))
2217 		return PTR_ERR(pcie->bpmp);
2218 
2219 	platform_set_drvdata(pdev, pcie);
2220 
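	/*
	 * devm_of_icc_get() returns NULL when the DT describes no
	 * interconnect path, so only a real lookup failure aborts probe.
	 */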
2221 	pcie->icc_path = devm_of_icc_get(&pdev->dev, "write");
2222 	ret = PTR_ERR_OR_ZERO(pcie->icc_path);
2223 	if (ret) {
2224 		tegra_bpmp_put(pcie->bpmp);
2225 		dev_err_probe(&pdev->dev, ret, "failed to get write interconnect\n");
2226 		return ret;
2227 	}
2228 
2229 	switch (pcie->of_data->mode) {
2230 	case DW_PCIE_RC_TYPE:
2231 		ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
2232 				       IRQF_SHARED, "tegra-pcie-intr", pcie);
2233 		if (ret) {
2234 			dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
2235 				ret);
2236 			goto fail;
2237 		}
2238 
		ret = tegra_pcie_config_rp(pcie);
		if (ret && ret != -ENOMEDIUM)
			goto fail;

		return 0;
2245 
2246 	case DW_PCIE_EP_TYPE:
2247 		ret = devm_request_threaded_irq(dev, pp->irq,
2248 						tegra_pcie_ep_hard_irq,
2249 						tegra_pcie_ep_irq_thread,
2250 						IRQF_SHARED | IRQF_ONESHOT,
2251 						"tegra-pcie-ep-intr", pcie);
2252 		if (ret) {
2253 			dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
2254 				ret);
2255 			goto fail;
2256 		}
2257 
		ret = tegra_pcie_config_ep(pcie, pdev);
		if (ret < 0)
			goto fail;

		return 0;
2264 
2265 	default:
2266 		dev_err(dev, "Invalid PCIe device type %d\n",
2267 			pcie->of_data->mode);
2268 		ret = -EINVAL;
2269 	}
2270 
2271 fail:
2272 	tegra_bpmp_put(pcie->bpmp);
2273 	return ret;
2274 }
2275 
2276 static void tegra_pcie_dw_remove(struct platform_device *pdev)
2277 {
2278 	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
2279 
2280 	if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
2281 		if (!pcie->link_state)
2282 			return;
2283 
2284 		debugfs_remove_recursive(pcie->debugfs);
2285 		tegra_pcie_deinit_controller(pcie);
2286 		pm_runtime_put_sync(pcie->dev);
2287 	} else {
2288 		disable_irq(pcie->pex_rst_irq);
2289 		pex_ep_event_pex_rst_assert(pcie);
2290 	}
2291 
2292 	pm_runtime_disable(pcie->dev);
2293 	tegra_bpmp_put(pcie->bpmp);
2294 	if (pcie->pex_refclk_sel_gpiod)
2295 		gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);
2296 }
2297 
2298 static int tegra_pcie_dw_suspend_late(struct device *dev)
2299 {
2300 	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
2301 	u32 val;
2302 
2303 	if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
		dev_err(dev, "Failed to suspend as Tegra PCIe is in EP mode\n");
2305 		return -EPERM;
2306 	}
2307 
2308 	if (!pcie->link_state)
2309 		return 0;
2310 
2311 	/* Enable HW_HOT_RST mode */
2312 	if (!pcie->of_data->has_sbr_reset_fix) {
2313 		val = appl_readl(pcie, APPL_CTRL);
2314 		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
2315 			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
2316 		val |= APPL_CTRL_HW_HOT_RST_EN;
2317 		appl_writel(pcie, val, APPL_CTRL);
2318 	}
2319 
2320 	return 0;
2321 }
2322 
2323 static int tegra_pcie_dw_suspend_noirq(struct device *dev)
2324 {
2325 	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
2326 
2327 	if (!pcie->link_state)
2328 		return 0;
2329 
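	/*
	 * Nothing is preserved across suspend: wake downstream devices to
	 * D0, complete the PME_Turn_Off handshake and power the controller
	 * down; resume rebuilds the state from scratch.
	 */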
2330 	tegra_pcie_downstream_dev_to_D0(pcie);
2331 	tegra_pcie_dw_pme_turnoff(pcie);
2332 	tegra_pcie_unconfig_controller(pcie);
2333 
2334 	return 0;
2335 }
2336 
2337 static int tegra_pcie_dw_resume_noirq(struct device *dev)
2338 {
2339 	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
2340 	int ret;
2341 
2342 	if (!pcie->link_state)
2343 		return 0;
2344 
2345 	ret = tegra_pcie_config_controller(pcie, true);
2346 	if (ret < 0)
2347 		return ret;
2348 
2349 	ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
2350 	if (ret < 0) {
2351 		dev_err(dev, "Failed to init host: %d\n", ret);
2352 		goto fail_host_init;
2353 	}
2354 
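	/* Reprogram the RC, since register state was lost while powered down */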
2355 	dw_pcie_setup_rc(&pcie->pci.pp);
2356 
2357 	ret = tegra_pcie_dw_start_link(&pcie->pci);
2358 	if (ret < 0)
2359 		goto fail_host_init;
2360 
2361 	return 0;
2362 
2363 fail_host_init:
2364 	tegra_pcie_unconfig_controller(pcie);
2365 	return ret;
2366 }
2367 
2368 static int tegra_pcie_dw_resume_early(struct device *dev)
2369 {
2370 	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
2371 	u32 val;
2372 
2373 	if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
		dev_err(dev, "Suspend is not supported in EP mode\n");
2375 		return -ENOTSUPP;
2376 	}
2377 
2378 	if (!pcie->link_state)
2379 		return 0;
2380 
2381 	/* Disable HW_HOT_RST mode */
2382 	if (!pcie->of_data->has_sbr_reset_fix) {
2383 		val = appl_readl(pcie, APPL_CTRL);
2384 		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
2385 			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
2386 		val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
2387 		       APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
2388 		val &= ~APPL_CTRL_HW_HOT_RST_EN;
2389 		appl_writel(pcie, val, APPL_CTRL);
2390 	}
2391 
2392 	return 0;
2393 }
2394 
2395 static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
2396 {
2397 	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
2398 
2399 	if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
2400 		if (!pcie->link_state)
2401 			return;
2402 
2403 		debugfs_remove_recursive(pcie->debugfs);
2404 		tegra_pcie_downstream_dev_to_D0(pcie);
2405 
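		/* Quiesce interrupts before the controller is unconfigured */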
2406 		disable_irq(pcie->pci.pp.irq);
2407 		if (IS_ENABLED(CONFIG_PCI_MSI))
2408 			disable_irq(pcie->pci.pp.msi_irq[0]);
2409 
2410 		tegra_pcie_dw_pme_turnoff(pcie);
2411 		tegra_pcie_unconfig_controller(pcie);
2412 		pm_runtime_put_sync(pcie->dev);
2413 	} else {
2414 		disable_irq(pcie->pex_rst_irq);
2415 		pex_ep_event_pex_rst_assert(pcie);
2416 	}
2417 }
2418 
2419 static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_rc_of_data = {
2420 	.version = TEGRA194_DWC_IP_VER,
2421 	.mode = DW_PCIE_RC_TYPE,
2422 	.cdm_chk_int_en_bit = BIT(19),
2423 	/* Gen4 - 5, 6, 8 and 9 presets enabled */
2424 	.gen4_preset_vec = 0x360,
2425 	.n_fts = { 52, 52 },
2426 };
2427 
2428 static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_ep_of_data = {
2429 	.version = TEGRA194_DWC_IP_VER,
2430 	.mode = DW_PCIE_EP_TYPE,
2431 	.cdm_chk_int_en_bit = BIT(19),
2432 	/* Gen4 - 5, 6, 8 and 9 presets enabled */
2433 	.gen4_preset_vec = 0x360,
2434 	.n_fts = { 52, 52 },
2435 };
2436 
2437 static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_rc_of_data = {
2438 	.version = TEGRA234_DWC_IP_VER,
2439 	.mode = DW_PCIE_RC_TYPE,
2440 	.has_msix_doorbell_access_fix = true,
2441 	.has_sbr_reset_fix = true,
2442 	.has_l1ss_exit_fix = true,
2443 	.cdm_chk_int_en_bit = BIT(18),
2444 	/* Gen4 - 6, 8 and 9 presets enabled */
2445 	.gen4_preset_vec = 0x340,
2446 	.n_fts = { 52, 80 },
2447 };
2448 
2449 static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_ep_of_data = {
2450 	.version = TEGRA234_DWC_IP_VER,
2451 	.mode = DW_PCIE_EP_TYPE,
2452 	.has_l1ss_exit_fix = true,
2453 	.has_ltr_req_fix = true,
2454 	.cdm_chk_int_en_bit = BIT(18),
2455 	/* Gen4 - 6, 8 and 9 presets enabled */
2456 	.gen4_preset_vec = 0x340,
2457 	.n_fts = { 52, 80 },
2458 };
2459 
2460 static const struct of_device_id tegra_pcie_dw_of_match[] = {
2461 	{
2462 		.compatible = "nvidia,tegra194-pcie",
2463 		.data = &tegra194_pcie_dw_rc_of_data,
2464 	},
2465 	{
2466 		.compatible = "nvidia,tegra194-pcie-ep",
2467 		.data = &tegra194_pcie_dw_ep_of_data,
2468 	},
2469 	{
2470 		.compatible = "nvidia,tegra234-pcie",
2471 		.data = &tegra234_pcie_dw_rc_of_data,
2472 	},
2473 	{
2474 		.compatible = "nvidia,tegra234-pcie-ep",
2475 		.data = &tegra234_pcie_dw_ep_of_data,
2476 	},
2477 	{}
2478 };
2479 
2480 static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
2481 	.suspend_late = tegra_pcie_dw_suspend_late,
2482 	.suspend_noirq = tegra_pcie_dw_suspend_noirq,
2483 	.resume_noirq = tegra_pcie_dw_resume_noirq,
2484 	.resume_early = tegra_pcie_dw_resume_early,
2485 };
2486 
2487 static struct platform_driver tegra_pcie_dw_driver = {
2488 	.probe = tegra_pcie_dw_probe,
2489 	.remove = tegra_pcie_dw_remove,
2490 	.shutdown = tegra_pcie_dw_shutdown,
2491 	.driver = {
2492 		.name	= "tegra194-pcie",
2493 		.pm = &tegra_pcie_dw_pm_ops,
2494 		.of_match_table = tegra_pcie_dw_of_match,
2495 	},
2496 };
2497 module_platform_driver(tegra_pcie_dw_driver);
2498 
2499 MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
2500 
2501 MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
2502 MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
2503 MODULE_LICENSE("GPL v2");
2504