xref: /linux/drivers/pci/controller/dwc/pcie-qcom.c (revision 235f0da3274690f540aa53fccf77d433e344e4b8)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Qualcomm PCIe root complex driver
4  *
5  * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
6  * Copyright 2015 Linaro Limited.
7  *
8  * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
9  */
10 
11 #include <linux/clk.h>
12 #include <linux/crc8.h>
13 #include <linux/debugfs.h>
14 #include <linux/delay.h>
15 #include <linux/gpio/consumer.h>
16 #include <linux/interconnect.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/iopoll.h>
20 #include <linux/kernel.h>
21 #include <linux/limits.h>
22 #include <linux/init.h>
23 #include <linux/of.h>
24 #include <linux/pci.h>
25 #include <linux/pm_opp.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/platform_device.h>
28 #include <linux/phy/pcie.h>
29 #include <linux/phy/phy.h>
30 #include <linux/regulator/consumer.h>
31 #include <linux/reset.h>
32 #include <linux/slab.h>
33 #include <linux/types.h>
34 #include <linux/units.h>
35 
36 #include "../../pci.h"
37 #include "pcie-designware.h"
38 
39 /* PARF registers */
40 #define PARF_SYS_CTRL				0x00
41 #define PARF_PM_CTRL				0x20
42 #define PARF_PCS_DEEMPH				0x34
43 #define PARF_PCS_SWING				0x38
44 #define PARF_PHY_CTRL				0x40
45 #define PARF_PHY_REFCLK				0x4c
46 #define PARF_CONFIG_BITS			0x50
47 #define PARF_DBI_BASE_ADDR			0x168
48 #define PARF_MHI_CLOCK_RESET_CTRL		0x174
49 #define PARF_AXI_MSTR_WR_ADDR_HALT		0x178
50 #define PARF_AXI_MSTR_WR_ADDR_HALT_V2		0x1a8
51 #define PARF_Q2A_FLUSH				0x1ac
52 #define PARF_LTSSM				0x1b0
53 #define PARF_SID_OFFSET				0x234
54 #define PARF_BDF_TRANSLATE_CFG			0x24c
55 #define PARF_SLV_ADDR_SPACE_SIZE		0x358
56 #define PARF_NO_SNOOP_OVERIDE			0x3d4
57 #define PARF_DEVICE_TYPE			0x1000
58 #define PARF_BDF_TO_SID_TABLE_N			0x2000
59 #define PARF_BDF_TO_SID_CFG			0x2c00
60 
61 /* ELBI registers */
62 #define ELBI_SYS_CTRL				0x04
63 
64 /* DBI registers */
65 #define AXI_MSTR_RESP_COMP_CTRL0		0x818
66 #define AXI_MSTR_RESP_COMP_CTRL1		0x81c
67 
68 /* MHI registers */
69 #define PARF_DEBUG_CNT_PM_LINKST_IN_L2		0xc04
70 #define PARF_DEBUG_CNT_PM_LINKST_IN_L1		0xc0c
71 #define PARF_DEBUG_CNT_PM_LINKST_IN_L0S		0xc10
72 #define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1	0xc84
73 #define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2	0xc88
74 
75 /* PARF_SYS_CTRL register fields */
76 #define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN	BIT(29)
77 #define MST_WAKEUP_EN				BIT(13)
78 #define SLV_WAKEUP_EN				BIT(12)
79 #define MSTR_ACLK_CGC_DIS			BIT(10)
80 #define SLV_ACLK_CGC_DIS			BIT(9)
81 #define CORE_CLK_CGC_DIS			BIT(6)
82 #define AUX_PWR_DET				BIT(4)
83 #define L23_CLK_RMV_DIS				BIT(2)
84 #define L1_CLK_RMV_DIS				BIT(1)
85 
86 /* PARF_PM_CTRL register fields */
87 #define REQ_NOT_ENTR_L1				BIT(5)
88 
89 /* PARF_PCS_DEEMPH register fields */
90 #define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		FIELD_PREP(GENMASK(21, 16), x)
91 #define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	FIELD_PREP(GENMASK(13, 8), x)
92 #define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	FIELD_PREP(GENMASK(5, 0), x)
93 
94 /* PARF_PCS_SWING register fields */
95 #define PCS_SWING_TX_SWING_FULL(x)		FIELD_PREP(GENMASK(14, 8), x)
96 #define PCS_SWING_TX_SWING_LOW(x)		FIELD_PREP(GENMASK(6, 0), x)
97 
98 /* PARF_PHY_CTRL register fields */
99 #define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
100 #define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		FIELD_PREP(PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK, x)
101 #define PHY_TEST_PWR_DOWN			BIT(0)
102 
103 /* PARF_PHY_REFCLK register fields */
104 #define PHY_REFCLK_SSP_EN			BIT(16)
105 #define PHY_REFCLK_USE_PAD			BIT(12)
106 
107 /* PARF_CONFIG_BITS register fields */
108 #define PHY_RX0_EQ(x)				FIELD_PREP(GENMASK(26, 24), x)
109 
110 /* PARF_SLV_ADDR_SPACE_SIZE register value */
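/* 0x10000000 corresponds to 256 MiB, assuming the size is programmed as a byte count */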
111 #define SLV_ADDR_SPACE_SZ			0x10000000
112 
113 /* PARF_MHI_CLOCK_RESET_CTRL register fields */
114 #define AHB_CLK_EN				BIT(0)
115 #define MSTR_AXI_CLK_EN				BIT(1)
116 #define BYPASS					BIT(4)
117 
118 /* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
119 #define EN					BIT(31)
120 
121 /* PARF_LTSSM register fields */
122 #define LTSSM_EN				BIT(8)
123 
124 /* PARF_NO_SNOOP_OVERIDE register fields */
125 #define WR_NO_SNOOP_OVERIDE_EN			BIT(1)
126 #define RD_NO_SNOOP_OVERIDE_EN			BIT(3)
127 
128 /* PARF_DEVICE_TYPE register fields */
129 #define DEVICE_TYPE_RC				0x4
130 
131 /* PARF_BDF_TO_SID_CFG fields */
132 #define BDF_TO_SID_BYPASS			BIT(0)
133 
134 /* ELBI_SYS_CTRL register fields */
135 #define ELBI_SYS_CTRL_LT_ENABLE			BIT(0)
136 
137 /* AXI_MSTR_RESP_COMP_CTRL0 register fields */
138 #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
139 #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
140 
141 /* AXI_MSTR_RESP_COMP_CTRL1 register fields */
142 #define CFG_BRIDGE_SB_INIT			BIT(0)
143 
144 /* PCI_EXP_SLTCAP register fields */
145 #define PCIE_CAP_SLOT_POWER_LIMIT_VAL		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250)
146 #define PCIE_CAP_SLOT_POWER_LIMIT_SCALE		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1)
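/* SPLS 1 selects a 0.1x scale, so SPLV 250 advertises a 25.0 W slot power limit */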
147 #define PCIE_CAP_SLOT_VAL			(PCI_EXP_SLTCAP_ABP | \
148 						PCI_EXP_SLTCAP_PCP | \
149 						PCI_EXP_SLTCAP_MRLSP | \
150 						PCI_EXP_SLTCAP_AIP | \
151 						PCI_EXP_SLTCAP_PIP | \
152 						PCI_EXP_SLTCAP_HPS | \
153 						PCI_EXP_SLTCAP_EIP | \
154 						PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
155 						PCIE_CAP_SLOT_POWER_LIMIT_SCALE)
156 
157 #define PERST_DELAY_US				1000
158 
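/* 0x07, i.e. the CRC-8 polynomial x^8 + x^2 + x + 1, used to hash BDFs into the BDF-to-SID table */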
159 #define QCOM_PCIE_CRC8_POLYNOMIAL		(BIT(2) | BIT(1) | BIT(0))
160 
161 #define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
162 		Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))
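/*
 * Illustrative example: a link trained to 2.5 GT/s reports 1 in the
 * PCI_EXP_LNKSTA current-link-speed field; pcie_link_speed[1] is
 * PCIE_SPEED_2_5GT and PCIE_SPEED2MBS_ENC() evaluates to 2000 Mbps
 * (8b/10b encoding overhead deducted), so the macro returns
 * Mbps_to_icc(2000) of peak bandwidth per lane.
 */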
163 
164 struct qcom_pcie_resources_1_0_0 {
165 	struct clk_bulk_data *clks;
166 	int num_clks;
167 	struct reset_control *core;
168 	struct regulator *vdda;
169 };
170 
171 #define QCOM_PCIE_2_1_0_MAX_RESETS		6
172 #define QCOM_PCIE_2_1_0_MAX_SUPPLY		3
173 struct qcom_pcie_resources_2_1_0 {
174 	struct clk_bulk_data *clks;
175 	int num_clks;
176 	struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
177 	int num_resets;
178 	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
179 };
180 
181 #define QCOM_PCIE_2_3_2_MAX_SUPPLY		2
182 struct qcom_pcie_resources_2_3_2 {
183 	struct clk_bulk_data *clks;
184 	int num_clks;
185 	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
186 };
187 
188 #define QCOM_PCIE_2_3_3_MAX_RESETS		7
189 struct qcom_pcie_resources_2_3_3 {
190 	struct clk_bulk_data *clks;
191 	int num_clks;
192 	struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];
193 };
194 
195 #define QCOM_PCIE_2_4_0_MAX_RESETS		12
196 struct qcom_pcie_resources_2_4_0 {
197 	struct clk_bulk_data *clks;
198 	int num_clks;
199 	struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
200 	int num_resets;
201 };
202 
203 #define QCOM_PCIE_2_7_0_MAX_SUPPLIES		2
204 struct qcom_pcie_resources_2_7_0 {
205 	struct clk_bulk_data *clks;
206 	int num_clks;
207 	struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];
208 	struct reset_control *rst;
209 };
210 
211 struct qcom_pcie_resources_2_9_0 {
212 	struct clk_bulk_data *clks;
213 	int num_clks;
214 	struct reset_control *rst;
215 };
216 
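/* Only the variant matching the SoC's qcom_pcie_ops is used at runtime */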
217 union qcom_pcie_resources {
218 	struct qcom_pcie_resources_1_0_0 v1_0_0;
219 	struct qcom_pcie_resources_2_1_0 v2_1_0;
220 	struct qcom_pcie_resources_2_3_2 v2_3_2;
221 	struct qcom_pcie_resources_2_3_3 v2_3_3;
222 	struct qcom_pcie_resources_2_4_0 v2_4_0;
223 	struct qcom_pcie_resources_2_7_0 v2_7_0;
224 	struct qcom_pcie_resources_2_9_0 v2_9_0;
225 };
226 
227 struct qcom_pcie;
228 
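/*
 * Callback flow, as wired up in qcom_pcie_host_init() below:
 * get_resources() runs once at probe; init() is followed by PHY mode
 * selection and power-on, then post_init(), PERST# deassert and
 * config_sid(). host_post_init() runs after bus enumeration, and
 * deinit() unwinds init() on teardown or error.
 */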
229 struct qcom_pcie_ops {
230 	int (*get_resources)(struct qcom_pcie *pcie);
231 	int (*init)(struct qcom_pcie *pcie);
232 	int (*post_init)(struct qcom_pcie *pcie);
233 	void (*host_post_init)(struct qcom_pcie *pcie);
234 	void (*deinit)(struct qcom_pcie *pcie);
235 	void (*ltssm_enable)(struct qcom_pcie *pcie);
236 	int (*config_sid)(struct qcom_pcie *pcie);
237 };
238 
239 /**
240  * struct qcom_pcie_cfg - Per-SoC config struct
241  * @ops: qcom PCIe ops structure
242  * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache snooping
243  * @no_l0s: Clear ASPM L0s support from the advertised Link Capabilities
244  */
245 struct qcom_pcie_cfg {
246 	const struct qcom_pcie_ops *ops;
247 	bool override_no_snoop;
248 	bool no_l0s;
249 };
250 
251 struct qcom_pcie {
252 	struct dw_pcie *pci;
253 	void __iomem *parf;			/* DT parf */
254 	void __iomem *elbi;			/* DT elbi */
255 	void __iomem *mhi;			/* DT mhi */
256 	union qcom_pcie_resources res;
257 	struct phy *phy;
258 	struct gpio_desc *reset;
259 	struct icc_path *icc_mem;
260 	struct icc_path *icc_cpu;
261 	const struct qcom_pcie_cfg *cfg;
262 	struct dentry *debugfs;
263 	bool suspended;
264 	bool use_pm_opp;
265 };
266 
267 #define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)
268 
269 static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
270 {
271 	gpiod_set_value_cansleep(pcie->reset, 1);
272 	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
273 }
274 
275 static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
276 {
277 	/* Ensure that PERST has been asserted for at least 100 ms */
278 	msleep(100);
279 	gpiod_set_value_cansleep(pcie->reset, 0);
280 	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
281 }
282 
283 static int qcom_pcie_start_link(struct dw_pcie *pci)
284 {
285 	struct qcom_pcie *pcie = to_qcom_pcie(pci);
286 
287 	/* Enable Link Training state machine */
288 	if (pcie->cfg->ops->ltssm_enable)
289 		pcie->cfg->ops->ltssm_enable(pcie);
290 
291 	return 0;
292 }
293 
294 static void qcom_pcie_clear_aspm_l0s(struct dw_pcie *pci)
295 {
296 	struct qcom_pcie *pcie = to_qcom_pcie(pci);
297 	u16 offset;
298 	u32 val;
299 
300 	if (!pcie->cfg->no_l0s)
301 		return;
302 
303 	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
304 
305 	dw_pcie_dbi_ro_wr_en(pci);
306 
307 	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
308 	val &= ~PCI_EXP_LNKCAP_ASPM_L0S;
309 	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
310 
311 	dw_pcie_dbi_ro_wr_dis(pci);
312 }
313 
314 static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
315 {
316 	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
317 	u32 val;
318 
319 	dw_pcie_dbi_ro_wr_en(pci);
320 
321 	val = readl(pci->dbi_base + offset + PCI_EXP_SLTCAP);
322 	val &= ~PCI_EXP_SLTCAP_HPC;
323 	writel(val, pci->dbi_base + offset + PCI_EXP_SLTCAP);
324 
325 	dw_pcie_dbi_ro_wr_dis(pci);
326 }
327 
328 static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
329 {
330 	u32 val;
331 
332 	/* enable link training */
333 	val = readl(pcie->elbi + ELBI_SYS_CTRL);
334 	val |= ELBI_SYS_CTRL_LT_ENABLE;
335 	writel(val, pcie->elbi + ELBI_SYS_CTRL);
336 }
337 
338 static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
339 {
340 	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
341 	struct dw_pcie *pci = pcie->pci;
342 	struct device *dev = pci->dev;
343 	bool is_apq = of_device_is_compatible(dev->of_node, "qcom,pcie-apq8064");
344 	int ret;
345 
346 	res->supplies[0].supply = "vdda";
347 	res->supplies[1].supply = "vdda_phy";
348 	res->supplies[2].supply = "vdda_refclk";
349 	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
350 				      res->supplies);
351 	if (ret)
352 		return ret;
353 
354 	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
355 	if (res->num_clks < 0) {
356 		dev_err(dev, "Failed to get clocks\n");
357 		return res->num_clks;
358 	}
359 
360 	res->resets[0].id = "pci";
361 	res->resets[1].id = "axi";
362 	res->resets[2].id = "ahb";
363 	res->resets[3].id = "por";
364 	res->resets[4].id = "phy";
365 	res->resets[5].id = "ext";
366 
367 	/* the ext reset is not present on APQ8064 */
368 	res->num_resets = is_apq ? 5 : 6;
369 	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
370 	if (ret < 0)
371 		return ret;
372 
373 	return 0;
374 }
375 
376 static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
377 {
378 	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
379 
380 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
381 	reset_control_bulk_assert(res->num_resets, res->resets);
382 
383 	writel(1, pcie->parf + PARF_PHY_CTRL);
384 
385 	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
386 }
387 
388 static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
389 {
390 	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
391 	struct dw_pcie *pci = pcie->pci;
392 	struct device *dev = pci->dev;
393 	int ret;
394 
395 	/* Reset the PCIe interface, as U-Boot can leave it in an undefined state */
396 	ret = reset_control_bulk_assert(res->num_resets, res->resets);
397 	if (ret < 0) {
398 		dev_err(dev, "cannot assert resets\n");
399 		return ret;
400 	}
401 
402 	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
403 	if (ret < 0) {
404 		dev_err(dev, "cannot enable regulators\n");
405 		return ret;
406 	}
407 
408 	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
409 	if (ret < 0) {
410 		dev_err(dev, "cannot deassert resets\n");
411 		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
412 		return ret;
413 	}
414 
415 	return 0;
416 }
417 
418 static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
419 {
420 	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
421 	struct dw_pcie *pci = pcie->pci;
422 	struct device *dev = pci->dev;
423 	struct device_node *node = dev->of_node;
424 	u32 val;
425 	int ret;
426 
427 	/* Take the PHY out of test power-down and enable the PCIe clocks */
428 	val = readl(pcie->parf + PARF_PHY_CTRL);
429 	val &= ~PHY_TEST_PWR_DOWN;
430 	writel(val, pcie->parf + PARF_PHY_CTRL);
431 
432 	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
433 	if (ret)
434 		return ret;
435 
436 	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
437 	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
438 		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
439 			       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
440 			       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
441 		       pcie->parf + PARF_PCS_DEEMPH);
442 		writel(PCS_SWING_TX_SWING_FULL(120) |
443 			       PCS_SWING_TX_SWING_LOW(120),
444 		       pcie->parf + PARF_PCS_SWING);
445 		writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS);
446 	}
447 
448 	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
449 		/* set TX termination offset */
450 		val = readl(pcie->parf + PARF_PHY_CTRL);
451 		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
452 		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
453 		writel(val, pcie->parf + PARF_PHY_CTRL);
454 	}
455 
456 	/* enable external reference clock */
457 	val = readl(pcie->parf + PARF_PHY_REFCLK);
458 	/* USE_PAD is required only for ipq806x */
459 	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
460 		val &= ~PHY_REFCLK_USE_PAD;
461 	val |= PHY_REFCLK_SSP_EN;
462 	writel(val, pcie->parf + PARF_PHY_REFCLK);
463 
464 	/* wait for clock acquisition */
465 	usleep_range(1000, 1500);
466 
467 	/* Set the Max TLP size to 2K, instead of the default of 4K */
468 	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
469 	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0);
470 	writel(CFG_BRIDGE_SB_INIT,
471 	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1);
472 
473 	qcom_pcie_clear_hpc(pcie->pci);
474 
475 	return 0;
476 }
477 
478 static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
479 {
480 	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
481 	struct dw_pcie *pci = pcie->pci;
482 	struct device *dev = pci->dev;
483 
484 	res->vdda = devm_regulator_get(dev, "vdda");
485 	if (IS_ERR(res->vdda))
486 		return PTR_ERR(res->vdda);
487 
488 	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
489 	if (res->num_clks < 0) {
490 		dev_err(dev, "Failed to get clocks\n");
491 		return res->num_clks;
492 	}
493 
494 	res->core = devm_reset_control_get_exclusive(dev, "core");
495 	return PTR_ERR_OR_ZERO(res->core);
496 }
497 
498 static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
499 {
500 	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
501 
502 	reset_control_assert(res->core);
503 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
504 	regulator_disable(res->vdda);
505 }
506 
507 static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
508 {
509 	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
510 	struct dw_pcie *pci = pcie->pci;
511 	struct device *dev = pci->dev;
512 	int ret;
513 
514 	ret = reset_control_deassert(res->core);
515 	if (ret) {
516 		dev_err(dev, "cannot deassert core reset\n");
517 		return ret;
518 	}
519 
520 	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
521 	if (ret) {
522 		dev_err(dev, "cannot prepare/enable clocks\n");
523 		goto err_assert_reset;
524 	}
525 
526 	ret = regulator_enable(res->vdda);
527 	if (ret) {
528 		dev_err(dev, "cannot enable vdda regulator\n");
529 		goto err_disable_clks;
530 	}
531 
532 	return 0;
533 
534 err_disable_clks:
535 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
536 err_assert_reset:
537 	reset_control_assert(res->core);
538 
539 	return ret;
540 }
541 
542 static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
543 {
544 	/* change DBI base address */
545 	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
546 
547 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
548 		u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
549 
550 		val |= EN;
551 		writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
552 	}
553 
554 	qcom_pcie_clear_hpc(pcie->pci);
555 
556 	return 0;
557 }
558 
559 static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
560 {
561 	u32 val;
562 
563 	/* enable link training */
564 	val = readl(pcie->parf + PARF_LTSSM);
565 	val |= LTSSM_EN;
566 	writel(val, pcie->parf + PARF_LTSSM);
567 }
568 
569 static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
570 {
571 	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
572 	struct dw_pcie *pci = pcie->pci;
573 	struct device *dev = pci->dev;
574 	int ret;
575 
576 	res->supplies[0].supply = "vdda";
577 	res->supplies[1].supply = "vddpe-3v3";
578 	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
579 				      res->supplies);
580 	if (ret)
581 		return ret;
582 
583 	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
584 	if (res->num_clks < 0) {
585 		dev_err(dev, "Failed to get clocks\n");
586 		return res->num_clks;
587 	}
588 
589 	return 0;
590 }
591 
592 static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
593 {
594 	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
595 
596 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
597 	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
598 }
599 
600 static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
601 {
602 	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
603 	struct dw_pcie *pci = pcie->pci;
604 	struct device *dev = pci->dev;
605 	int ret;
606 
607 	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
608 	if (ret < 0) {
609 		dev_err(dev, "cannot enable regulators\n");
610 		return ret;
611 	}
612 
613 	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
614 	if (ret) {
615 		dev_err(dev, "cannot prepare/enable clocks\n");
616 		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
617 		return ret;
618 	}
619 
620 	return 0;
621 }
622 
623 static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
624 {
625 	u32 val;
626 
627 	/* Take the PHY out of test power-down */
628 	val = readl(pcie->parf + PARF_PHY_CTRL);
629 	val &= ~PHY_TEST_PWR_DOWN;
630 	writel(val, pcie->parf + PARF_PHY_CTRL);
631 
632 	/* change DBI base address */
633 	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
634 
635 	/* Disable the MAC PHY_POWERDOWN mux */
636 	val = readl(pcie->parf + PARF_SYS_CTRL);
637 	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
638 	writel(val, pcie->parf + PARF_SYS_CTRL);
639 
640 	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
641 	val |= BYPASS;
642 	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
643 
644 	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
645 	val |= EN;
646 	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
647 
648 	qcom_pcie_clear_hpc(pcie->pci);
649 
650 	return 0;
651 }
652 
653 static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
654 {
655 	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
656 	struct dw_pcie *pci = pcie->pci;
657 	struct device *dev = pci->dev;
658 	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
659 	int ret;
660 
661 	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
662 	if (res->num_clks < 0) {
663 		dev_err(dev, "Failed to get clocks\n");
664 		return res->num_clks;
665 	}
666 
667 	res->resets[0].id = "axi_m";
668 	res->resets[1].id = "axi_s";
669 	res->resets[2].id = "axi_m_sticky";
670 	res->resets[3].id = "pipe_sticky";
671 	res->resets[4].id = "pwr";
672 	res->resets[5].id = "ahb";
673 	res->resets[6].id = "pipe";
674 	res->resets[7].id = "axi_m_vmid";
675 	res->resets[8].id = "axi_s_xpu";
676 	res->resets[9].id = "parf";
677 	res->resets[10].id = "phy";
678 	res->resets[11].id = "phy_ahb";
679 
680 	res->num_resets = is_ipq ? 12 : 6;
681 
682 	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
683 	if (ret < 0)
684 		return ret;
685 
686 	return 0;
687 }
688 
689 static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
690 {
691 	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
692 
693 	reset_control_bulk_assert(res->num_resets, res->resets);
694 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
695 }
696 
697 static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
698 {
699 	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
700 	struct dw_pcie *pci = pcie->pci;
701 	struct device *dev = pci->dev;
702 	int ret;
703 
704 	ret = reset_control_bulk_assert(res->num_resets, res->resets);
705 	if (ret < 0) {
706 		dev_err(dev, "cannot assert resets\n");
707 		return ret;
708 	}
709 
710 	usleep_range(10000, 12000);
711 
712 	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
713 	if (ret < 0) {
714 		dev_err(dev, "cannot deassert resets\n");
715 		return ret;
716 	}
717 
718 	usleep_range(10000, 12000);
719 
720 	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
721 	if (ret) {
722 		reset_control_bulk_assert(res->num_resets, res->resets);
723 		return ret;
724 	}
725 
726 	return 0;
727 }
728 
729 static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
730 {
731 	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
732 	struct dw_pcie *pci = pcie->pci;
733 	struct device *dev = pci->dev;
734 	int ret;
735 
736 	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
737 	if (res->num_clks < 0) {
738 		dev_err(dev, "Failed to get clocks\n");
739 		return res->num_clks;
740 	}
741 
742 	res->rst[0].id = "axi_m";
743 	res->rst[1].id = "axi_s";
744 	res->rst[2].id = "pipe";
745 	res->rst[3].id = "axi_m_sticky";
746 	res->rst[4].id = "sticky";
747 	res->rst[5].id = "ahb";
748 	res->rst[6].id = "sleep";
749 
750 	ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst), res->rst);
751 	if (ret < 0)
752 		return ret;
753 
754 	return 0;
755 }
756 
757 static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
758 {
759 	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
760 
761 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
762 }
763 
764 static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
765 {
766 	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
767 	struct dw_pcie *pci = pcie->pci;
768 	struct device *dev = pci->dev;
769 	int ret;
770 
771 	ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
772 	if (ret < 0) {
773 		dev_err(dev, "cannot assert resets\n");
774 		return ret;
775 	}
776 
777 	usleep_range(2000, 2500);
778 
779 	ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst);
780 	if (ret < 0) {
781 		dev_err(dev, "cannot deassert resets\n");
782 		return ret;
783 	}
784 
785 	/*
786 	 * There is no way to check whether the reset has completed;
787 	 * wait for some time.
788 	 */
789 	usleep_range(2000, 2500);
790 
791 	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
792 	if (ret) {
793 		dev_err(dev, "cannot prepare/enable clocks\n");
794 		goto err_assert_resets;
795 	}
796 
797 	return 0;
798 
799 err_assert_resets:
800 	/*
801 	 * Not checking for failure; the original failure in
802 	 * 'ret' is returned anyway.
803 	 */
804 	reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
805 
806 	return ret;
807 }
808 
809 static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
810 {
811 	struct dw_pcie *pci = pcie->pci;
812 	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
813 	u32 val;
814 
815 	writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
816 
817 	val = readl(pcie->parf + PARF_PHY_CTRL);
818 	val &= ~PHY_TEST_PWR_DOWN;
819 	writel(val, pcie->parf + PARF_PHY_CTRL);
820 
821 	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
822 
823 	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
824 		SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
825 		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
826 		pcie->parf + PARF_SYS_CTRL);
827 	writel(0, pcie->parf + PARF_Q2A_FLUSH);
828 
829 	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
830 
831 	dw_pcie_dbi_ro_wr_en(pci);
832 
833 	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
834 
835 	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
836 	val &= ~PCI_EXP_LNKCAP_ASPMS;
837 	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
838 
839 	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
840 		PCI_EXP_DEVCTL2);
841 
842 	dw_pcie_dbi_ro_wr_dis(pci);
843 
844 	return 0;
845 }
846 
847 static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
848 {
849 	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
850 	struct dw_pcie *pci = pcie->pci;
851 	struct device *dev = pci->dev;
852 	int ret;
853 
854 	res->rst = devm_reset_control_array_get_exclusive(dev);
855 	if (IS_ERR(res->rst))
856 		return PTR_ERR(res->rst);
857 
858 	res->supplies[0].supply = "vdda";
859 	res->supplies[1].supply = "vddpe-3v3";
860 	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
861 				      res->supplies);
862 	if (ret)
863 		return ret;
864 
865 	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
866 	if (res->num_clks < 0) {
867 		dev_err(dev, "Failed to get clocks\n");
868 		return res->num_clks;
869 	}
870 
871 	return 0;
872 }
873 
874 static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
875 {
876 	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
877 	struct dw_pcie *pci = pcie->pci;
878 	struct device *dev = pci->dev;
879 	u32 val;
880 	int ret;
881 
882 	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
883 	if (ret < 0) {
884 		dev_err(dev, "cannot enable regulators\n");
885 		return ret;
886 	}
887 
888 	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
889 	if (ret < 0)
890 		goto err_disable_regulators;
891 
892 	ret = reset_control_assert(res->rst);
893 	if (ret) {
894 		dev_err(dev, "reset assert failed (%d)\n", ret);
895 		goto err_disable_clocks;
896 	}
897 
898 	usleep_range(1000, 1500);
899 
900 	ret = reset_control_deassert(res->rst);
901 	if (ret) {
902 		dev_err(dev, "reset deassert failed (%d)\n", ret);
903 		goto err_disable_clocks;
904 	}
905 
906 	/* Wait for reset to complete, required on SM8450 */
907 	usleep_range(1000, 1500);
908 
909 	/* configure PCIe to RC mode */
910 	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
911 
912 	/* Take the PHY out of test power-down */
913 	val = readl(pcie->parf + PARF_PHY_CTRL);
914 	val &= ~PHY_TEST_PWR_DOWN;
915 	writel(val, pcie->parf + PARF_PHY_CTRL);
916 
917 	/* change DBI base address */
918 	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
919 
920 	/* Disable the MAC PHY_POWERDOWN mux */
921 	val = readl(pcie->parf + PARF_SYS_CTRL);
922 	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
923 	writel(val, pcie->parf + PARF_SYS_CTRL);
924 
925 	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
926 	val |= BYPASS;
927 	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
928 
929 	/* Enable L1 and L1SS */
930 	val = readl(pcie->parf + PARF_PM_CTRL);
931 	val &= ~REQ_NOT_ENTR_L1;
932 	writel(val, pcie->parf + PARF_PM_CTRL);
933 
934 	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
935 	val |= EN;
936 	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
937 
938 	return 0;
939 err_disable_clocks:
940 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
941 err_disable_regulators:
942 	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
943 
944 	return ret;
945 }
946 
947 static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
948 {
949 	const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg;
950 
951 	if (pcie_cfg->override_no_snoop)
952 		writel(WR_NO_SNOOP_OVERIDE_EN | RD_NO_SNOOP_OVERIDE_EN,
953 				pcie->parf + PARF_NO_SNOOP_OVERIDE);
954 
955 	qcom_pcie_clear_aspm_l0s(pcie->pci);
956 	qcom_pcie_clear_hpc(pcie->pci);
957 
958 	return 0;
959 }
960 
961 static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata)
962 {
963 	/*
964 	 * Downstream devices need to be in D0 state before enabling PCI PM
965 	 * substates.
966 	 */
967 	pci_set_power_state_locked(pdev, PCI_D0);
968 	pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
969 
970 	return 0;
971 }
972 
973 static void qcom_pcie_host_post_init_2_7_0(struct qcom_pcie *pcie)
974 {
975 	struct dw_pcie_rp *pp = &pcie->pci->pp;
976 
977 	pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL);
978 }
979 
980 static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
981 {
982 	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
983 
984 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
985 
986 	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
987 }
988 
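/*
 * Each PARF_BDF_TO_SID_TABLE_N slot holds BDF [31:16] | SID [15:8] |
 * NEXT [7:0], indexed by the CRC-8 of the big-endian BDF. Illustrative
 * example (hash value assumed): if BDF 0x0100 hashes to 0x30 and its
 * iommu-map SID is smmu_sid_base + 2, slot 0x30 becomes 0x01000200;
 * a later colliding BDF is chained through the NEXT byte.
 */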
989 static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie)
990 {
991 	/* iommu map structure */
992 	struct {
993 		u32 bdf;
994 		u32 phandle;
995 		u32 smmu_sid;
996 		u32 smmu_sid_len;
997 	} *map;
998 	void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N;
999 	struct device *dev = pcie->pci->dev;
1000 	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
1001 	int i, nr_map, size = 0;
1002 	u32 smmu_sid_base;
1003 	u32 val;
1004 
1005 	of_get_property(dev->of_node, "iommu-map", &size);
1006 	if (!size)
1007 		return 0;
1008 
1009 	/* Enable BDF to SID translation by disabling the (default) bypass mode */
1010 	val = readl(pcie->parf + PARF_BDF_TO_SID_CFG);
1011 	val &= ~BDF_TO_SID_BYPASS;
1012 	writel(val, pcie->parf + PARF_BDF_TO_SID_CFG);
1013 
1014 	map = kzalloc(size, GFP_KERNEL);
1015 	if (!map)
1016 		return -ENOMEM;
1017 
1018 	of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map,
1019 				   size / sizeof(u32));
1020 
1021 	nr_map = size / (sizeof(*map));
1022 
1023 	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);
1024 
1025 	/* Registers need to be zeroed out first */
1026 	memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));
1027 
1028 	/* Extract the SMMU SID base from the first entry of iommu-map */
1029 	smmu_sid_base = map[0].smmu_sid;
1030 
1031 	/* Look for an available entry to hold the mapping */
1032 	for (i = 0; i < nr_map; i++) {
1033 		__be16 bdf_be = cpu_to_be16(map[i].bdf);
1034 		u32 val;
1035 		u8 hash;
1036 
1037 		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 0);
1038 
1039 		val = readl(bdf_to_sid_base + hash * sizeof(u32));
1040 
1041 		/* If the register is already populated, look for the next available entry */
1042 		while (val) {
1043 			u8 current_hash = hash++;
1044 			u8 next_mask = 0xff;
1045 
1046 			/* If the NEXT field is NULL, update it with the next hash */
1047 			if (!(val & next_mask)) {
1048 				val |= (u32)hash;
1049 				writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
1050 			}
1051 
1052 			val = readl(bdf_to_sid_base + hash * sizeof(u32));
1053 		}
1054 
1055 		/* BDF [31:16] | SID [15:8] | NEXT [7:0] */
1056 		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
1057 		writel(val, bdf_to_sid_base + hash * sizeof(u32));
1058 	}
1059 
1060 	kfree(map);
1061 
1062 	return 0;
1063 }
1064 
1065 static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
1066 {
1067 	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
1068 	struct dw_pcie *pci = pcie->pci;
1069 	struct device *dev = pci->dev;
1070 
1071 	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
1072 	if (res->num_clks < 0) {
1073 		dev_err(dev, "Failed to get clocks\n");
1074 		return res->num_clks;
1075 	}
1076 
1077 	res->rst = devm_reset_control_array_get_exclusive(dev);
1078 	if (IS_ERR(res->rst))
1079 		return PTR_ERR(res->rst);
1080 
1081 	return 0;
1082 }
1083 
1084 static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
1085 {
1086 	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
1087 
1088 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
1089 }
1090 
1091 static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
1092 {
1093 	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
1094 	struct device *dev = pcie->pci->dev;
1095 	int ret;
1096 
1097 	ret = reset_control_assert(res->rst);
1098 	if (ret) {
1099 		dev_err(dev, "reset assert failed (%d)\n", ret);
1100 		return ret;
1101 	}
1102 
1103 	/*
1104 	 * Delay periods before and after reset deassert are working values
1105 	 * from the downstream CodeAurora kernel.
1106 	 */
1107 	usleep_range(2000, 2500);
1108 
1109 	ret = reset_control_deassert(res->rst);
1110 	if (ret) {
1111 		dev_err(dev, "reset deassert failed (%d)\n", ret);
1112 		return ret;
1113 	}
1114 
1115 	usleep_range(2000, 2500);
1116 
1117 	return clk_bulk_prepare_enable(res->num_clks, res->clks);
1118 }
1119 
1120 static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
1121 {
1122 	struct dw_pcie *pci = pcie->pci;
1123 	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
1124 	u32 val;
1125 	int i;
1126 
1127 	writel(SLV_ADDR_SPACE_SZ,
1128 		pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
1129 
1130 	val = readl(pcie->parf + PARF_PHY_CTRL);
1131 	val &= ~PHY_TEST_PWR_DOWN;
1132 	writel(val, pcie->parf + PARF_PHY_CTRL);
1133 
1134 	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
1135 
1136 	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
1137 	writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
1138 		pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
1139 	writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
1140 		GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
1141 		pci->dbi_base + GEN3_RELATED_OFF);
1142 
1143 	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
1144 		SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
1145 		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
1146 		pcie->parf + PARF_SYS_CTRL);
1147 
1148 	writel(0, pcie->parf + PARF_Q2A_FLUSH);
1149 
1150 	dw_pcie_dbi_ro_wr_en(pci);
1151 
1152 	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
1153 
1154 	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
1155 	val &= ~PCI_EXP_LNKCAP_ASPMS;
1156 	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
1157 
1158 	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
1159 			PCI_EXP_DEVCTL2);
1160 
1161 	dw_pcie_dbi_ro_wr_dis(pci);
1162 
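	/* Zero out all 256 CRC-8-indexed BDF-to-SID translation table slots */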
1163 	for (i = 0; i < 256; i++)
1164 		writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i));
1165 
1166 	return 0;
1167 }
1168 
1169 static int qcom_pcie_link_up(struct dw_pcie *pci)
1170 {
1171 	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
1172 	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
1173 
1174 	return !!(val & PCI_EXP_LNKSTA_DLLLA);
1175 }
1176 
1177 static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
1178 {
1179 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
1180 	struct qcom_pcie *pcie = to_qcom_pcie(pci);
1181 	int ret;
1182 
1183 	qcom_ep_reset_assert(pcie);
1184 
1185 	ret = pcie->cfg->ops->init(pcie);
1186 	if (ret)
1187 		return ret;
1188 
1189 	ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
1190 	if (ret)
1191 		goto err_deinit;
1192 
1193 	ret = phy_power_on(pcie->phy);
1194 	if (ret)
1195 		goto err_deinit;
1196 
1197 	if (pcie->cfg->ops->post_init) {
1198 		ret = pcie->cfg->ops->post_init(pcie);
1199 		if (ret)
1200 			goto err_disable_phy;
1201 	}
1202 
1203 	qcom_ep_reset_deassert(pcie);
1204 
1205 	if (pcie->cfg->ops->config_sid) {
1206 		ret = pcie->cfg->ops->config_sid(pcie);
1207 		if (ret)
1208 			goto err_assert_reset;
1209 	}
1210 
1211 	return 0;
1212 
1213 err_assert_reset:
1214 	qcom_ep_reset_assert(pcie);
1215 err_disable_phy:
1216 	phy_power_off(pcie->phy);
1217 err_deinit:
1218 	pcie->cfg->ops->deinit(pcie);
1219 
1220 	return ret;
1221 }
1222 
1223 static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
1224 {
1225 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
1226 	struct qcom_pcie *pcie = to_qcom_pcie(pci);
1227 
1228 	qcom_ep_reset_assert(pcie);
1229 	phy_power_off(pcie->phy);
1230 	pcie->cfg->ops->deinit(pcie);
1231 }
1232 
1233 static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp)
1234 {
1235 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
1236 	struct qcom_pcie *pcie = to_qcom_pcie(pci);
1237 
1238 	if (pcie->cfg->ops->host_post_init)
1239 		pcie->cfg->ops->host_post_init(pcie);
1240 }
1241 
1242 static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
1243 	.init		= qcom_pcie_host_init,
1244 	.deinit		= qcom_pcie_host_deinit,
1245 	.post_init	= qcom_pcie_host_post_init,
1246 };
1247 
1248 /* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
1249 static const struct qcom_pcie_ops ops_2_1_0 = {
1250 	.get_resources = qcom_pcie_get_resources_2_1_0,
1251 	.init = qcom_pcie_init_2_1_0,
1252 	.post_init = qcom_pcie_post_init_2_1_0,
1253 	.deinit = qcom_pcie_deinit_2_1_0,
1254 	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
1255 };
1256 
1257 /* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
1258 static const struct qcom_pcie_ops ops_1_0_0 = {
1259 	.get_resources = qcom_pcie_get_resources_1_0_0,
1260 	.init = qcom_pcie_init_1_0_0,
1261 	.post_init = qcom_pcie_post_init_1_0_0,
1262 	.deinit = qcom_pcie_deinit_1_0_0,
1263 	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
1264 };
1265 
1266 /* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
1267 static const struct qcom_pcie_ops ops_2_3_2 = {
1268 	.get_resources = qcom_pcie_get_resources_2_3_2,
1269 	.init = qcom_pcie_init_2_3_2,
1270 	.post_init = qcom_pcie_post_init_2_3_2,
1271 	.deinit = qcom_pcie_deinit_2_3_2,
1272 	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
1273 };
1274 
1275 /* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
1276 static const struct qcom_pcie_ops ops_2_4_0 = {
1277 	.get_resources = qcom_pcie_get_resources_2_4_0,
1278 	.init = qcom_pcie_init_2_4_0,
1279 	.post_init = qcom_pcie_post_init_2_3_2,
1280 	.deinit = qcom_pcie_deinit_2_4_0,
1281 	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
1282 };
1283 
1284 /* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
1285 static const struct qcom_pcie_ops ops_2_3_3 = {
1286 	.get_resources = qcom_pcie_get_resources_2_3_3,
1287 	.init = qcom_pcie_init_2_3_3,
1288 	.post_init = qcom_pcie_post_init_2_3_3,
1289 	.deinit = qcom_pcie_deinit_2_3_3,
1290 	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
1291 };
1292 
1293 /* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
1294 static const struct qcom_pcie_ops ops_2_7_0 = {
1295 	.get_resources = qcom_pcie_get_resources_2_7_0,
1296 	.init = qcom_pcie_init_2_7_0,
1297 	.post_init = qcom_pcie_post_init_2_7_0,
1298 	.deinit = qcom_pcie_deinit_2_7_0,
1299 	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
1300 };
1301 
1302 /* Qcom IP rev.: 1.9.0 */
1303 static const struct qcom_pcie_ops ops_1_9_0 = {
1304 	.get_resources = qcom_pcie_get_resources_2_7_0,
1305 	.init = qcom_pcie_init_2_7_0,
1306 	.post_init = qcom_pcie_post_init_2_7_0,
1307 	.host_post_init = qcom_pcie_host_post_init_2_7_0,
1308 	.deinit = qcom_pcie_deinit_2_7_0,
1309 	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
1310 	.config_sid = qcom_pcie_config_sid_1_9_0,
1311 };
1312 
1313 /* Qcom IP rev.: 2.9.0  Synopsys IP rev.: 5.00a */
1314 static const struct qcom_pcie_ops ops_2_9_0 = {
1315 	.get_resources = qcom_pcie_get_resources_2_9_0,
1316 	.init = qcom_pcie_init_2_9_0,
1317 	.post_init = qcom_pcie_post_init_2_9_0,
1318 	.deinit = qcom_pcie_deinit_2_9_0,
1319 	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
1320 };
1321 
1322 static const struct qcom_pcie_cfg cfg_1_0_0 = {
1323 	.ops = &ops_1_0_0,
1324 };
1325 
1326 static const struct qcom_pcie_cfg cfg_1_9_0 = {
1327 	.ops = &ops_1_9_0,
1328 };
1329 
1330 static const struct qcom_pcie_cfg cfg_1_34_0 = {
1331 	.ops = &ops_1_9_0,
1332 	.override_no_snoop = true,
1333 };
1334 
1335 static const struct qcom_pcie_cfg cfg_2_1_0 = {
1336 	.ops = &ops_2_1_0,
1337 };
1338 
1339 static const struct qcom_pcie_cfg cfg_2_3_2 = {
1340 	.ops = &ops_2_3_2,
1341 };
1342 
1343 static const struct qcom_pcie_cfg cfg_2_3_3 = {
1344 	.ops = &ops_2_3_3,
1345 };
1346 
1347 static const struct qcom_pcie_cfg cfg_2_4_0 = {
1348 	.ops = &ops_2_4_0,
1349 };
1350 
1351 static const struct qcom_pcie_cfg cfg_2_7_0 = {
1352 	.ops = &ops_2_7_0,
1353 };
1354 
1355 static const struct qcom_pcie_cfg cfg_2_9_0 = {
1356 	.ops = &ops_2_9_0,
1357 };
1358 
1359 static const struct qcom_pcie_cfg cfg_sc8280xp = {
1360 	.ops = &ops_1_9_0,
1361 	.no_l0s = true,
1362 };
1363 
1364 static const struct dw_pcie_ops dw_pcie_ops = {
1365 	.link_up = qcom_pcie_link_up,
1366 	.start_link = qcom_pcie_start_link,
1367 };
1368 
1369 static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
1370 {
1371 	struct dw_pcie *pci = pcie->pci;
1372 	int ret;
1373 
1374 	pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem");
1375 	if (IS_ERR(pcie->icc_mem))
1376 		return PTR_ERR(pcie->icc_mem);
1377 
1378 	pcie->icc_cpu = devm_of_icc_get(pci->dev, "cpu-pcie");
1379 	if (IS_ERR(pcie->icc_cpu))
1380 		return PTR_ERR(pcie->icc_cpu);
1381 	/*
1382 	 * Some Qualcomm platforms require interconnect bandwidth constraints
1383 	 * to be set before enabling interconnect clocks.
1384 	 *
1385 	 * Set an initial peak bandwidth corresponding to single-lane Gen 1
1386 	 * for the pcie-mem path.
1387 	 */
1388 	ret = icc_set_bw(pcie->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));
1389 	if (ret) {
1390 		dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
1391 			ret);
1392 		return ret;
1393 	}
1394 
1395 	/*
1396 	 * Since the CPU-PCIe path is only used for activities like register
1397 	 * access of the host controller and endpoint Config/BAR space access,
1398 	 * the HW team has recommended using a minimal bandwidth of 1 KBps just to
1399 	 * keep the path active.
1400 	 */
1401 	ret = icc_set_bw(pcie->icc_cpu, 0, kBps_to_icc(1));
1402 	if (ret) {
1403 		dev_err(pci->dev, "Failed to set bandwidth for CPU-PCIe interconnect path: %d\n",
1404 			ret);
1405 		icc_set_bw(pcie->icc_mem, 0, 0);
1406 		return ret;
1407 	}
1408 
1409 	return 0;
1410 }
1411 
1412 static void qcom_pcie_icc_opp_update(struct qcom_pcie *pcie)
1413 {
1414 	u32 offset, status, width, speed;
1415 	struct dw_pcie *pci = pcie->pci;
1416 	unsigned long freq_kbps;
1417 	struct dev_pm_opp *opp;
1418 	int ret, freq_mbps;
1419 
1420 	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
1421 	status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
1422 
1423 	/* Only update constraints if the link is up. */
1424 	if (!(status & PCI_EXP_LNKSTA_DLLLA))
1425 		return;
1426 
1427 	speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
1428 	width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);
1429 
1430 	if (pcie->icc_mem) {
1431 		ret = icc_set_bw(pcie->icc_mem, 0,
1432 				 width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
1433 		if (ret) {
1434 			dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
1435 				ret);
1436 		}
1437 	} else if (pcie->use_pm_opp) {
1438 		freq_mbps = pcie_dev_speed_mbps(pcie_link_speed[speed]);
1439 		if (freq_mbps < 0)
1440 			return;
1441 
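		/* The OPP table encodes link bandwidth (kbps x width) in place of frequency */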
1442 		freq_kbps = freq_mbps * KILO;
1443 		opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width,
1444 						 true);
1445 		if (!IS_ERR(opp)) {
1446 			ret = dev_pm_opp_set_opp(pci->dev, opp);
1447 			if (ret)
1448 				dev_err(pci->dev, "Failed to set OPP for freq (%lu): %d\n",
1449 					freq_kbps * width, ret);
1450 			dev_pm_opp_put(opp);
1451 		}
1452 	}
1453 }
1454 
1455 static int qcom_pcie_link_transition_count(struct seq_file *s, void *data)
1456 {
1457 	struct qcom_pcie *pcie = (struct qcom_pcie *)dev_get_drvdata(s->private);
1458 
1459 	seq_printf(s, "L0s transition count: %u\n",
1460 		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L0S));
1461 
1462 	seq_printf(s, "L1 transition count: %u\n",
1463 		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L1));
1464 
1465 	seq_printf(s, "L1.1 transition count: %u\n",
1466 		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));
1467 
1468 	seq_printf(s, "L1.2 transition count: %u\n",
1469 		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));
1470 
1471 	seq_printf(s, "L2 transition count: %u\n",
1472 		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L2));
1473 
1474 	return 0;
1475 }
1476 
1477 static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
1478 {
1479 	struct dw_pcie *pci = pcie->pci;
1480 	struct device *dev = pci->dev;
1481 	char *name;
1482 
1483 	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
1484 	if (!name)
1485 		return;
1486 
1487 	pcie->debugfs = debugfs_create_dir(name, NULL);
1488 	debugfs_create_devm_seqfile(dev, "link_transition_count", pcie->debugfs,
1489 				    qcom_pcie_link_transition_count);
1490 }
1491 
1492 static int qcom_pcie_probe(struct platform_device *pdev)
1493 {
1494 	const struct qcom_pcie_cfg *pcie_cfg;
1495 	unsigned long max_freq = ULONG_MAX;
1496 	struct device *dev = &pdev->dev;
1497 	struct dev_pm_opp *opp;
1498 	struct qcom_pcie *pcie;
1499 	struct dw_pcie_rp *pp;
1500 	struct resource *res;
1501 	struct dw_pcie *pci;
1502 	int ret;
1503 
1504 	pcie_cfg = of_device_get_match_data(dev);
1505 	if (!pcie_cfg || !pcie_cfg->ops) {
1506 		dev_err(dev, "Invalid platform data\n");
1507 		return -EINVAL;
1508 	}
1509 
1510 	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
1511 	if (!pcie)
1512 		return -ENOMEM;
1513 
1514 	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
1515 	if (!pci)
1516 		return -ENOMEM;
1517 
1518 	pm_runtime_enable(dev);
1519 	ret = pm_runtime_get_sync(dev);
1520 	if (ret < 0)
1521 		goto err_pm_runtime_put;
1522 
1523 	pci->dev = dev;
1524 	pci->ops = &dw_pcie_ops;
1525 	pp = &pci->pp;
1526 
1527 	pcie->pci = pci;
1528 
1529 	pcie->cfg = pcie_cfg;
1530 
1531 	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
1532 	if (IS_ERR(pcie->reset)) {
1533 		ret = PTR_ERR(pcie->reset);
1534 		goto err_pm_runtime_put;
1535 	}
1536 
1537 	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
1538 	if (IS_ERR(pcie->parf)) {
1539 		ret = PTR_ERR(pcie->parf);
1540 		goto err_pm_runtime_put;
1541 	}
1542 
1543 	pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
1544 	if (IS_ERR(pcie->elbi)) {
1545 		ret = PTR_ERR(pcie->elbi);
1546 		goto err_pm_runtime_put;
1547 	}
1548 
1549 	/* MHI region is optional */
1550 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi");
1551 	if (res) {
1552 		pcie->mhi = devm_ioremap_resource(dev, res);
1553 		if (IS_ERR(pcie->mhi)) {
1554 			ret = PTR_ERR(pcie->mhi);
1555 			goto err_pm_runtime_put;
1556 		}
1557 	}
1558 
1559 	pcie->phy = devm_phy_optional_get(dev, "pciephy");
1560 	if (IS_ERR(pcie->phy)) {
1561 		ret = PTR_ERR(pcie->phy);
1562 		goto err_pm_runtime_put;
1563 	}
1564 
1565 	/* OPP table is optional */
1566 	ret = devm_pm_opp_of_add_table(dev);
1567 	if (ret && ret != -ENODEV) {
1568 		dev_err_probe(dev, ret, "Failed to add OPP table\n");
1569 		goto err_pm_runtime_put;
1570 	}
1571 
1572 	/*
1573 	 * Before the PCIe link is initialized, vote for the highest OPP in the
1574 	 * OPP table, so that the maximum voltage corner is requested and the
1575 	 * link can come up at the maximum supported speed. At the end of
1576 	 * probe(), the OPP will be updated using qcom_pcie_icc_opp_update().
1577 	 */
1578 	if (!ret) {
1579 		opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
1580 		if (IS_ERR(opp)) {
1581 			ret = PTR_ERR(opp);
1582 			dev_err_probe(pci->dev, ret,
1583 				      "Unable to find max freq OPP\n");
1584 			goto err_pm_runtime_put;
1585 		} else {
1586 			ret = dev_pm_opp_set_opp(dev, opp);
1587 		}
1588 
1589 		dev_pm_opp_put(opp);
1590 		if (ret) {
1591 			dev_err_probe(pci->dev, ret,
1592 				      "Failed to set OPP for freq %lu\n",
1593 				      max_freq);
1594 			goto err_pm_runtime_put;
1595 		}
1596 
1597 		pcie->use_pm_opp = true;
1598 	} else {
1599 		/* Skip ICC init if OPP is supported, as bandwidth voting is then handled via OPP */
1600 		ret = qcom_pcie_icc_init(pcie);
1601 		if (ret)
1602 			goto err_pm_runtime_put;
1603 	}
1604 
1605 	ret = pcie->cfg->ops->get_resources(pcie);
1606 	if (ret)
1607 		goto err_pm_runtime_put;
1608 
1609 	pp->ops = &qcom_pcie_dw_ops;
1610 
1611 	ret = phy_init(pcie->phy);
1612 	if (ret)
1613 		goto err_pm_runtime_put;
1614 
1615 	platform_set_drvdata(pdev, pcie);
1616 
1617 	ret = dw_pcie_host_init(pp);
1618 	if (ret) {
1619 		dev_err(dev, "cannot initialize host\n");
1620 		goto err_phy_exit;
1621 	}
1622 
1623 	qcom_pcie_icc_opp_update(pcie);
1624 
1625 	if (pcie->mhi)
1626 		qcom_pcie_init_debugfs(pcie);
1627 
1628 	return 0;
1629 
1630 err_phy_exit:
1631 	phy_exit(pcie->phy);
1632 err_pm_runtime_put:
1633 	pm_runtime_put(dev);
1634 	pm_runtime_disable(dev);
1635 
1636 	return ret;
1637 }
1638 
1639 static int qcom_pcie_suspend_noirq(struct device *dev)
1640 {
1641 	struct qcom_pcie *pcie = dev_get_drvdata(dev);
1642 	int ret = 0;
1643 
1644 	/*
1645 	 * Set minimum bandwidth required to keep data path functional during
1646 	 * suspend.
1647 	 */
1648 	if (pcie->icc_mem) {
1649 		ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
1650 		if (ret) {
1651 			dev_err(dev,
1652 				"Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
1653 				ret);
1654 			return ret;
1655 		}
1656 	}
1657 
1658 	/*
1659 	 * Turn OFF the resources only for controllers without active PCIe
1660 	 * devices. For controllers with active devices, the resources are kept
1661 	 * ON and the link is expected to be in L0/L1 (sub)states.
1662 	 *
1663 	 * Turning OFF the resources for controllers with active PCIe devices
1664 	 * will trigger an access violation towards the end of the suspend cycle,
1665 	 * as the kernel tries to access the PCIe devices' config space to mask
1666 	 * MSIs.
1667 	 *
1668 	 * Also, it is not desirable to put the link into L2/L3 state as that
1669 	 * implies VDD supply will be removed and the devices may go into
1670 	 * powerdown state. This will affect the lifetime of the storage devices
1671 	 * like NVMe.
1672 	 */
1673 	if (!dw_pcie_link_up(pcie->pci)) {
1674 		qcom_pcie_host_deinit(&pcie->pci->pp);
1675 		pcie->suspended = true;
1676 	}
1677 
1678 	/*
1679 	 * Only disable the CPU-PCIe interconnect path if the suspend is
1680 	 * non-S2RAM: on some platforms, DBI access can happen very late during
1681 	 * S2RAM, and an inactive CPU-PCIe interconnect path may lead to a NoC
1682 	 * error.
1683 	 */
1684 	if (pm_suspend_target_state != PM_SUSPEND_MEM) {
1685 		ret = icc_disable(pcie->icc_cpu);
1686 		if (ret)
1687 			dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret);
1688 
1689 		if (pcie->use_pm_opp)
1690 			dev_pm_opp_set_opp(pcie->pci->dev, NULL);
1691 	}
1692 	return ret;
1693 }
1694 
1695 static int qcom_pcie_resume_noirq(struct device *dev)
1696 {
1697 	struct qcom_pcie *pcie = dev_get_drvdata(dev);
1698 	int ret;
1699 
1700 	if (pm_suspend_target_state != PM_SUSPEND_MEM) {
1701 		ret = icc_enable(pcie->icc_cpu);
1702 		if (ret) {
1703 			dev_err(dev, "Failed to enable CPU-PCIe interconnect path: %d\n", ret);
1704 			return ret;
1705 		}
1706 	}
1707 
1708 	if (pcie->suspended) {
1709 		ret = qcom_pcie_host_init(&pcie->pci->pp);
1710 		if (ret)
1711 			return ret;
1712 
1713 		pcie->suspended = false;
1714 	}
1715 
1716 	qcom_pcie_icc_opp_update(pcie);
1717 
1718 	return 0;
1719 }
1720 
1721 static const struct of_device_id qcom_pcie_match[] = {
1722 	{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
1723 	{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
1724 	{ .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
1725 	{ .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
1726 	{ .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
1727 	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
1728 	{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
1729 	{ .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
1730 	{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
1731 	{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
1732 	{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
1733 	{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0 },
1734 	{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
1735 	{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
1736 	{ .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },
1737 	{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
1738 	{ .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },
1739 	{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
1740 	{ .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
1741 	{ .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 },
1742 	{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
1743 	{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
1744 	{ .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
1745 	{ .compatible = "qcom,pcie-x1e80100", .data = &cfg_1_9_0 },
1746 	{ }
1747 };
1748 
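/*
 * These root ports advertise an incorrect device class in hardware; fix
 * it up so the PCI core enumerates them as normal PCI-to-PCI bridges.
 */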
1749 static void qcom_fixup_class(struct pci_dev *dev)
1750 {
1751 	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
1752 }
1753 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
1754 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
1755 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
1756 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
1757 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
1758 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
1759 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
1760 
1761 static const struct dev_pm_ops qcom_pcie_pm_ops = {
1762 	NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
1763 };
1764 
1765 static struct platform_driver qcom_pcie_driver = {
1766 	.probe = qcom_pcie_probe,
1767 	.driver = {
1768 		.name = "qcom-pcie",
1769 		.suppress_bind_attrs = true,
1770 		.of_match_table = qcom_pcie_match,
1771 		.pm = &qcom_pcie_pm_ops,
1772 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
1773 	},
1774 };
1775 builtin_platform_driver(qcom_pcie_driver);
1776