xref: /linux/drivers/pci/controller/dwc/pcie-qcom.c (revision eb01fe7abbe2d0b38824d2a93fdb4cc3eaf2ccc1)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Qualcomm PCIe root complex driver
4  *
5  * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
6  * Copyright 2015 Linaro Limited.
7  *
8  * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
9  */
10 
11 #include <linux/clk.h>
12 #include <linux/crc8.h>
13 #include <linux/debugfs.h>
14 #include <linux/delay.h>
15 #include <linux/gpio/consumer.h>
16 #include <linux/interconnect.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/iopoll.h>
20 #include <linux/kernel.h>
21 #include <linux/init.h>
22 #include <linux/of.h>
23 #include <linux/of_gpio.h>
24 #include <linux/pci.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/platform_device.h>
27 #include <linux/phy/pcie.h>
28 #include <linux/phy/phy.h>
29 #include <linux/regulator/consumer.h>
30 #include <linux/reset.h>
31 #include <linux/slab.h>
32 #include <linux/types.h>
33 
34 #include "../../pci.h"
35 #include "pcie-designware.h"
36 
37 /* PARF registers */
38 #define PARF_SYS_CTRL				0x00
39 #define PARF_PM_CTRL				0x20
40 #define PARF_PCS_DEEMPH				0x34
41 #define PARF_PCS_SWING				0x38
42 #define PARF_PHY_CTRL				0x40
43 #define PARF_PHY_REFCLK				0x4c
44 #define PARF_CONFIG_BITS			0x50
45 #define PARF_DBI_BASE_ADDR			0x168
46 #define PARF_MHI_CLOCK_RESET_CTRL		0x174
47 #define PARF_AXI_MSTR_WR_ADDR_HALT		0x178
48 #define PARF_AXI_MSTR_WR_ADDR_HALT_V2		0x1a8
49 #define PARF_Q2A_FLUSH				0x1ac
50 #define PARF_LTSSM				0x1b0
51 #define PARF_SID_OFFSET				0x234
52 #define PARF_BDF_TRANSLATE_CFG			0x24c
53 #define PARF_SLV_ADDR_SPACE_SIZE		0x358
54 #define PARF_DEVICE_TYPE			0x1000
55 #define PARF_BDF_TO_SID_TABLE_N			0x2000
56 #define PARF_BDF_TO_SID_CFG			0x2c00
57 
58 /* ELBI registers */
59 #define ELBI_SYS_CTRL				0x04
60 
61 /* DBI registers */
62 #define AXI_MSTR_RESP_COMP_CTRL0		0x818
63 #define AXI_MSTR_RESP_COMP_CTRL1		0x81c
64 
65 /* MHI registers */
66 #define PARF_DEBUG_CNT_PM_LINKST_IN_L2		0xc04
67 #define PARF_DEBUG_CNT_PM_LINKST_IN_L1		0xc0c
68 #define PARF_DEBUG_CNT_PM_LINKST_IN_L0S		0xc10
69 #define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1	0xc84
70 #define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2	0xc88
71 
72 /* PARF_SYS_CTRL register fields */
73 #define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN	BIT(29)
74 #define MST_WAKEUP_EN				BIT(13)
75 #define SLV_WAKEUP_EN				BIT(12)
76 #define MSTR_ACLK_CGC_DIS			BIT(10)
77 #define SLV_ACLK_CGC_DIS			BIT(9)
78 #define CORE_CLK_CGC_DIS			BIT(6)
79 #define AUX_PWR_DET				BIT(4)
80 #define L23_CLK_RMV_DIS				BIT(2)
81 #define L1_CLK_RMV_DIS				BIT(1)
82 
83 /* PARF_PM_CTRL register fields */
84 #define REQ_NOT_ENTR_L1				BIT(5)
85 
86 /* PARF_PCS_DEEMPH register fields */
87 #define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		FIELD_PREP(GENMASK(21, 16), x)
88 #define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	FIELD_PREP(GENMASK(13, 8), x)
89 #define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	FIELD_PREP(GENMASK(5, 0), x)
90 
91 /* PARF_PCS_SWING register fields */
92 #define PCS_SWING_TX_SWING_FULL(x)		FIELD_PREP(GENMASK(14, 8), x)
93 #define PCS_SWING_TX_SWING_LOW(x)		FIELD_PREP(GENMASK(6, 0), x)
94 
95 /* PARF_PHY_CTRL register fields */
96 #define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
97 #define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		FIELD_PREP(PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK, x)
98 #define PHY_TEST_PWR_DOWN			BIT(0)
99 
100 /* PARF_PHY_REFCLK register fields */
101 #define PHY_REFCLK_SSP_EN			BIT(16)
102 #define PHY_REFCLK_USE_PAD			BIT(12)
103 
104 /* PARF_CONFIG_BITS register fields */
105 #define PHY_RX0_EQ(x)				FIELD_PREP(GENMASK(26, 24), x)
106 
107 /* PARF_SLV_ADDR_SPACE_SIZE register value */
108 #define SLV_ADDR_SPACE_SZ			0x10000000
109 
110 /* PARF_MHI_CLOCK_RESET_CTRL register fields */
111 #define AHB_CLK_EN				BIT(0)
112 #define MSTR_AXI_CLK_EN				BIT(1)
113 #define BYPASS					BIT(4)
114 
115 /* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
116 #define EN					BIT(31)
117 
118 /* PARF_LTSSM register fields */
119 #define LTSSM_EN				BIT(8)
120 
121 /* PARF_DEVICE_TYPE register fields */
122 #define DEVICE_TYPE_RC				0x4
123 
124 /* PARF_BDF_TO_SID_CFG fields */
125 #define BDF_TO_SID_BYPASS			BIT(0)
126 
127 /* ELBI_SYS_CTRL register fields */
128 #define ELBI_SYS_CTRL_LT_ENABLE			BIT(0)
129 
130 /* AXI_MSTR_RESP_COMP_CTRL0 register fields */
131 #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
132 #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
133 
134 /* AXI_MSTR_RESP_COMP_CTRL1 register fields */
135 #define CFG_BRIDGE_SB_INIT			BIT(0)
136 
137 /* PCI_EXP_SLTCAP register fields */
138 #define PCIE_CAP_SLOT_POWER_LIMIT_VAL		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250)
139 #define PCIE_CAP_SLOT_POWER_LIMIT_SCALE		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1)
140 #define PCIE_CAP_SLOT_VAL			(PCI_EXP_SLTCAP_ABP | \
141 						PCI_EXP_SLTCAP_PCP | \
142 						PCI_EXP_SLTCAP_MRLSP | \
143 						PCI_EXP_SLTCAP_AIP | \
144 						PCI_EXP_SLTCAP_PIP | \
145 						PCI_EXP_SLTCAP_HPS | \
146 						PCI_EXP_SLTCAP_EIP | \
147 						PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
148 						PCIE_CAP_SLOT_POWER_LIMIT_SCALE)
149 
150 #define PERST_DELAY_US				1000
151 
152 #define QCOM_PCIE_CRC8_POLYNOMIAL		(BIT(2) | BIT(1) | BIT(0))
153 
154 #define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
155 		Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))
156 
/*
 * Per-IP-version resource bundles. Exactly one member of the union below is
 * populated at runtime, selected by the qcom_pcie_ops for the SoC generation.
 */
157 #define QCOM_PCIE_1_0_0_MAX_CLOCKS		4
158 struct qcom_pcie_resources_1_0_0 {
159 	struct clk_bulk_data clks[QCOM_PCIE_1_0_0_MAX_CLOCKS];
160 	struct reset_control *core;
161 	struct regulator *vdda;
162 };
163 
164 #define QCOM_PCIE_2_1_0_MAX_CLOCKS		5
165 #define QCOM_PCIE_2_1_0_MAX_RESETS		6
166 #define QCOM_PCIE_2_1_0_MAX_SUPPLY		3
167 struct qcom_pcie_resources_2_1_0 {
168 	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
169 	struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
	/* number of entries of resets[] actually requested (5 or 6, per SoC) */
170 	int num_resets;
171 	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
172 };
173 
174 #define QCOM_PCIE_2_3_2_MAX_CLOCKS		4
175 #define QCOM_PCIE_2_3_2_MAX_SUPPLY		2
176 struct qcom_pcie_resources_2_3_2 {
177 	struct clk_bulk_data clks[QCOM_PCIE_2_3_2_MAX_CLOCKS];
178 	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
179 };
180 
181 #define QCOM_PCIE_2_3_3_MAX_CLOCKS		5
182 #define QCOM_PCIE_2_3_3_MAX_RESETS		7
183 struct qcom_pcie_resources_2_3_3 {
184 	struct clk_bulk_data clks[QCOM_PCIE_2_3_3_MAX_CLOCKS];
185 	struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];
186 };
187 
188 #define QCOM_PCIE_2_4_0_MAX_CLOCKS		4
189 #define QCOM_PCIE_2_4_0_MAX_RESETS		12
190 struct qcom_pcie_resources_2_4_0 {
191 	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
	/* clocks/resets actually requested vary by compatible (ipq4019 or not) */
192 	int num_clks;
193 	struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
194 	int num_resets;
195 };
196 
197 #define QCOM_PCIE_2_7_0_MAX_CLOCKS		15
198 #define QCOM_PCIE_2_7_0_MAX_SUPPLIES		2
199 struct qcom_pcie_resources_2_7_0 {
200 	struct clk_bulk_data clks[QCOM_PCIE_2_7_0_MAX_CLOCKS];
	/* required clocks plus trailing optional ones; see get_resources_2_7_0 */
201 	int num_clks;
202 	struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];
203 	struct reset_control *rst;
204 };
205 
206 #define QCOM_PCIE_2_9_0_MAX_CLOCKS		5
207 struct qcom_pcie_resources_2_9_0 {
208 	struct clk_bulk_data clks[QCOM_PCIE_2_9_0_MAX_CLOCKS];
209 	struct reset_control *rst;
210 };
211 
/* Storage shared by all IP versions; only the member for this SoC is used. */
212 union qcom_pcie_resources {
213 	struct qcom_pcie_resources_1_0_0 v1_0_0;
214 	struct qcom_pcie_resources_2_1_0 v2_1_0;
215 	struct qcom_pcie_resources_2_3_2 v2_3_2;
216 	struct qcom_pcie_resources_2_3_3 v2_3_3;
217 	struct qcom_pcie_resources_2_4_0 v2_4_0;
218 	struct qcom_pcie_resources_2_7_0 v2_7_0;
219 	struct qcom_pcie_resources_2_9_0 v2_9_0;
220 };
221 
222 struct qcom_pcie;
223 
/*
 * Per-IP-version callbacks invoked by the common host-init path:
 * get_resources/init/post_init run during probe and resume, deinit on
 * teardown; ltssm_enable kicks link training; config_sid programs the
 * BDF-to-SMMU-SID mapping where the hardware supports it.
 */
224 struct qcom_pcie_ops {
225 	int (*get_resources)(struct qcom_pcie *pcie);
226 	int (*init)(struct qcom_pcie *pcie);
227 	int (*post_init)(struct qcom_pcie *pcie);
228 	void (*host_post_init)(struct qcom_pcie *pcie);
229 	void (*deinit)(struct qcom_pcie *pcie);
230 	void (*ltssm_enable)(struct qcom_pcie *pcie);
231 	int (*config_sid)(struct qcom_pcie *pcie);
232 };
233 
/* Match data: ops table plus quirk flags (no_l0s disables ASPM L0s advert). */
234 struct qcom_pcie_cfg {
235 	const struct qcom_pcie_ops *ops;
236 	bool no_l0s;
237 };
238 
/* Driver state for one root complex instance. */
239 struct qcom_pcie {
240 	struct dw_pcie *pci;
241 	void __iomem *parf;			/* DT parf */
242 	void __iomem *elbi;			/* DT elbi */
243 	void __iomem *mhi;
244 	union qcom_pcie_resources res;
245 	struct phy *phy;
246 	struct gpio_desc *reset;		/* PERST# GPIO, active = EP held in reset */
247 	struct icc_path *icc_mem;
248 	const struct qcom_pcie_cfg *cfg;
249 	struct dentry *debugfs;
250 	bool suspended;
251 };
252 
253 #define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)
254 
/* Assert PERST# (hold the endpoint in reset) and let the line settle. */
255 static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
256 {
257 	gpiod_set_value_cansleep(pcie->reset, 1);
258 	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
259 }
260 
/* Release PERST# after guaranteeing the 100 ms minimum assertion time. */
261 static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
262 {
263 	/* Ensure that PERST has been asserted for at least 100 ms */
264 	msleep(100);
265 	gpiod_set_value_cansleep(pcie->reset, 0);
266 	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
267 }
268 
/*
 * dw_pcie .start_link callback: start link training through the
 * IP-version-specific LTSSM hook, when one is provided.
 */
269 static int qcom_pcie_start_link(struct dw_pcie *pci)
270 {
271 	struct qcom_pcie *pcie = to_qcom_pcie(pci);
272 
273 	/* Enable Link Training state machine */
274 	if (pcie->cfg->ops->ltssm_enable)
275 		pcie->cfg->ops->ltssm_enable(pcie);
276 
277 	return 0;
278 }
279 
/*
 * Remove ASPM L0s from the advertised Link Capabilities when the platform
 * config marks L0s as unsupported (cfg->no_l0s). No-op otherwise.
 */
280 static void qcom_pcie_clear_aspm_l0s(struct dw_pcie *pci)
281 {
282 	struct qcom_pcie *pcie = to_qcom_pcie(pci);
283 	u16 offset;
284 	u32 val;
285 
286 	if (!pcie->cfg->no_l0s)
287 		return;
288 
289 	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
290 
	/* LNKCAP is read-only from config space; unlock DBI writes first */
291 	dw_pcie_dbi_ro_wr_en(pci);
292 
293 	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
294 	val &= ~PCI_EXP_LNKCAP_ASPM_L0S;
295 	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
296 
297 	dw_pcie_dbi_ro_wr_dis(pci);
298 }
299 
/* Clear PCI_EXP_SLTCAP_HPC so the slot does not advertise hot-plug capable. */
300 static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
301 {
302 	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
303 	u32 val;
304 
305 	dw_pcie_dbi_ro_wr_en(pci);
306 
307 	val = readl(pci->dbi_base + offset + PCI_EXP_SLTCAP);
308 	val &= ~PCI_EXP_SLTCAP_HPC;
309 	writel(val, pci->dbi_base + offset + PCI_EXP_SLTCAP);
310 
311 	dw_pcie_dbi_ro_wr_dis(pci);
312 }
313 
/* v2.1.0 LTSSM kick: link training is enabled through the ELBI block. */
314 static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
315 {
316 	u32 val;
317 
318 	/* enable link training */
319 	val = readl(pcie->elbi + ELBI_SYS_CTRL);
320 	val |= ELBI_SYS_CTRL_LT_ENABLE;
321 	writel(val, pcie->elbi + ELBI_SYS_CTRL);
322 }
323 
/*
 * Acquire regulators, clocks and resets for the v2.1.0 IP (apq8064/ipq806x).
 * Only grabs handles (all devm-managed); nothing is enabled here.
 */
324 static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
325 {
326 	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
327 	struct dw_pcie *pci = pcie->pci;
328 	struct device *dev = pci->dev;
329 	bool is_apq = of_device_is_compatible(dev->of_node, "qcom,pcie-apq8064");
330 	int ret;
331 
332 	res->supplies[0].supply = "vdda";
333 	res->supplies[1].supply = "vdda_phy";
334 	res->supplies[2].supply = "vdda_refclk";
335 	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
336 				      res->supplies);
337 	if (ret)
338 		return ret;
339 
340 	res->clks[0].id = "iface";
341 	res->clks[1].id = "core";
342 	res->clks[2].id = "phy";
343 	res->clks[3].id = "aux";
344 	res->clks[4].id = "ref";
345 
346 	/* iface, core, phy are required */
347 	ret = devm_clk_bulk_get(dev, 3, res->clks);
348 	if (ret < 0)
349 		return ret;
350 
351 	/* aux, ref are optional */
352 	ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
353 	if (ret < 0)
354 		return ret;
355 
356 	res->resets[0].id = "pci";
357 	res->resets[1].id = "axi";
358 	res->resets[2].id = "ahb";
359 	res->resets[3].id = "por";
360 	res->resets[4].id = "phy";
361 	res->resets[5].id = "ext";
362 
363 	/* ext is optional on APQ8016 */
	/* NOTE(review): comment mentions APQ8016 but is_apq matches apq8064 — confirm */
364 	res->num_resets = is_apq ? 5 : 6;
365 	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
366 	if (ret < 0)
367 		return ret;
368 
369 	return 0;
370 }
371 
/* v2.1.0 teardown: stop clocks, assert resets, power PHY down, drop supplies. */
372 static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
373 {
374 	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
375 
376 	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
377 	reset_control_bulk_assert(res->num_resets, res->resets);
378 
	/* writing 1 sets PHY_TEST_PWR_DOWN (bit 0): power the PHY down */
379 	writel(1, pcie->parf + PARF_PHY_CTRL);
380 
381 	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
382 }
383 
/*
 * v2.1.0 power-up: assert resets (to clear any state the bootloader left),
 * enable supplies, then deassert resets. Clock/PHY setup follows in
 * qcom_pcie_post_init_2_1_0().
 */
384 static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
385 {
386 	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
387 	struct dw_pcie *pci = pcie->pci;
388 	struct device *dev = pci->dev;
389 	int ret;
390 
391 	/* reset the PCIe interface as uboot can leave it undefined state */
392 	ret = reset_control_bulk_assert(res->num_resets, res->resets);
393 	if (ret < 0) {
394 		dev_err(dev, "cannot assert resets\n");
395 		return ret;
396 	}
397 
398 	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
399 	if (ret < 0) {
400 		dev_err(dev, "cannot enable regulators\n");
401 		return ret;
402 	}
403 
404 	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
405 	if (ret < 0) {
406 		dev_err(dev, "cannot deassert resets\n");
407 		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
408 		return ret;
409 	}
410 
411 	return 0;
412 }
413 
/*
 * v2.1.0 post-init: wake the PHY, enable bus clocks, apply board-specific
 * PHY tuning (ipq8064 variants), enable the external reference clock and
 * configure bridge read-request sizing in DBI space.
 */
414 static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
415 {
416 	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
417 	struct dw_pcie *pci = pcie->pci;
418 	struct device *dev = pci->dev;
419 	struct device_node *node = dev->of_node;
420 	u32 val;
421 	int ret;
422 
423 	/* enable PCIe clocks and resets */
424 	val = readl(pcie->parf + PARF_PHY_CTRL);
425 	val &= ~PHY_TEST_PWR_DOWN;
426 	writel(val, pcie->parf + PARF_PHY_CTRL);
427 
428 	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
429 	if (ret)
430 		return ret;
431 
	/* ipq8064-specific TX de-emphasis / swing / RX equalization tuning */
432 	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
433 	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
434 		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
435 			       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
436 			       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
437 		       pcie->parf + PARF_PCS_DEEMPH);
438 		writel(PCS_SWING_TX_SWING_FULL(120) |
439 			       PCS_SWING_TX_SWING_LOW(120),
440 		       pcie->parf + PARF_PCS_SWING);
441 		writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS);
442 	}
443 
444 	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
445 		/* set TX termination offset */
446 		val = readl(pcie->parf + PARF_PHY_CTRL);
447 		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
448 		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
449 		writel(val, pcie->parf + PARF_PHY_CTRL);
450 	}
451 
452 	/* enable external reference clock */
453 	val = readl(pcie->parf + PARF_PHY_REFCLK);
454 	/* USE_PAD is required only for ipq806x */
455 	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
456 		val &= ~PHY_REFCLK_USE_PAD;
457 	val |= PHY_REFCLK_SSP_EN;
458 	writel(val, pcie->parf + PARF_PHY_REFCLK);
459 
460 	/* wait for clock acquisition */
461 	usleep_range(1000, 1500);
462 
463 	/* Set the Max TLP size to 2K, instead of using default of 4K */
464 	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
465 	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0);
466 	writel(CFG_BRIDGE_SB_INIT,
467 	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1);
468 
469 	qcom_pcie_clear_hpc(pcie->pci);
470 
471 	return 0;
472 }
473 
/* Acquire vdda regulator, four bus clocks and the core reset for v1.0.0 IP. */
474 static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
475 {
476 	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
477 	struct dw_pcie *pci = pcie->pci;
478 	struct device *dev = pci->dev;
479 	int ret;
480 
481 	res->vdda = devm_regulator_get(dev, "vdda");
482 	if (IS_ERR(res->vdda))
483 		return PTR_ERR(res->vdda);
484 
485 	res->clks[0].id = "iface";
486 	res->clks[1].id = "aux";
487 	res->clks[2].id = "master_bus";
488 	res->clks[3].id = "slave_bus";
489 
490 	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
491 	if (ret < 0)
492 		return ret;
493 
494 	res->core = devm_reset_control_get_exclusive(dev, "core");
495 	return PTR_ERR_OR_ZERO(res->core);
496 }
497 
/* v1.0.0 teardown, reverse order of qcom_pcie_init_1_0_0(). */
498 static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
499 {
500 	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
501 
502 	reset_control_assert(res->core);
503 	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
504 	regulator_disable(res->vdda);
505 }
506 
/*
 * v1.0.0 power-up: deassert core reset, enable clocks, then the vdda
 * supply; unwinds via goto cleanup on failure.
 */
507 static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
508 {
509 	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
510 	struct dw_pcie *pci = pcie->pci;
511 	struct device *dev = pci->dev;
512 	int ret;
513 
514 	ret = reset_control_deassert(res->core);
515 	if (ret) {
516 		dev_err(dev, "cannot deassert core reset\n");
517 		return ret;
518 	}
519 
520 	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
521 	if (ret) {
522 		dev_err(dev, "cannot prepare/enable clocks\n");
523 		goto err_assert_reset;
524 	}
525 
526 	ret = regulator_enable(res->vdda);
527 	if (ret) {
528 		dev_err(dev, "cannot enable vdda regulator\n");
529 		goto err_disable_clks;
530 	}
531 
532 	return 0;
533 
534 err_disable_clks:
535 	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
536 err_assert_reset:
537 	reset_control_assert(res->core);
538 
539 	return ret;
540 }
541 
/*
 * v1.0.0 post-init: zero the DBI base in PARF, unhalt AXI master writes
 * when MSI is enabled, and drop the hot-plug-capable advert.
 */
542 static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
543 {
544 	/* change DBI base address */
545 	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
546 
547 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
548 		u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
549 
550 		val |= EN;
551 		writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
552 	}
553 
554 	qcom_pcie_clear_hpc(pcie->pci);
555 
556 	return 0;
557 }
558 
/* v2.3.2+ LTSSM kick: link training is enabled through the PARF block. */
559 static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
560 {
561 	u32 val;
562 
563 	/* enable link training */
564 	val = readl(pcie->parf + PARF_LTSSM);
565 	val |= LTSSM_EN;
566 	writel(val, pcie->parf + PARF_LTSSM);
567 }
568 
/* Acquire two supplies and four bus clocks for the v2.3.2 IP. */
569 static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
570 {
571 	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
572 	struct dw_pcie *pci = pcie->pci;
573 	struct device *dev = pci->dev;
574 	int ret;
575 
576 	res->supplies[0].supply = "vdda";
577 	res->supplies[1].supply = "vddpe-3v3";
578 	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
579 				      res->supplies);
580 	if (ret)
581 		return ret;
582 
583 	res->clks[0].id = "aux";
584 	res->clks[1].id = "cfg";
585 	res->clks[2].id = "bus_master";
586 	res->clks[3].id = "bus_slave";
587 
588 	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
589 	if (ret < 0)
590 		return ret;
591 
592 	return 0;
593 }
594 
/* v2.3.2 teardown: disable clocks then regulators (reverse of init). */
595 static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
596 {
597 	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
598 
599 	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
600 	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
601 }
602 
/* v2.3.2 power-up: enable supplies, then clocks; roll back supplies on failure. */
603 static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
604 {
605 	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
606 	struct dw_pcie *pci = pcie->pci;
607 	struct device *dev = pci->dev;
608 	int ret;
609 
610 	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
611 	if (ret < 0) {
612 		dev_err(dev, "cannot enable regulators\n");
613 		return ret;
614 	}
615 
616 	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
617 	if (ret) {
618 		dev_err(dev, "cannot prepare/enable clocks\n");
619 		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
620 		return ret;
621 	}
622 
623 	return 0;
624 }
625 
/*
 * v2.3.2 post-init PARF programming: wake PHY, zero DBI base, disable the
 * MAC/PHY powerdown mux, bypass MHI clock/reset control, and unhalt AXI
 * master writes; finally drop the hot-plug-capable advert.
 */
626 static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
627 {
628 	u32 val;
629 
630 	/* enable PCIe clocks and resets */
631 	val = readl(pcie->parf + PARF_PHY_CTRL);
632 	val &= ~PHY_TEST_PWR_DOWN;
633 	writel(val, pcie->parf + PARF_PHY_CTRL);
634 
635 	/* change DBI base address */
636 	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
637 
638 	/* MAC PHY_POWERDOWN MUX DISABLE  */
639 	val = readl(pcie->parf + PARF_SYS_CTRL);
640 	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
641 	writel(val, pcie->parf + PARF_SYS_CTRL);
642 
643 	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
644 	val |= BYPASS;
645 	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
646 
647 	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
648 	val |= EN;
649 	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
650 
651 	qcom_pcie_clear_hpc(pcie->pci);
652 
653 	return 0;
654 }
655 
/*
 * Acquire clocks and resets for the v2.4.0 IP; ipq4019 uses 3 clocks and
 * all 12 resets, other compatibles use 4 clocks and the first 6 resets.
 */
656 static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
657 {
658 	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
659 	struct dw_pcie *pci = pcie->pci;
660 	struct device *dev = pci->dev;
661 	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
662 	int ret;
663 
664 	res->clks[0].id = "aux";
665 	res->clks[1].id = "master_bus";
666 	res->clks[2].id = "slave_bus";
667 	res->clks[3].id = "iface";
668 
669 	/* qcom,pcie-ipq4019 is defined without "iface" */
670 	res->num_clks = is_ipq ? 3 : 4;
671 
672 	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
673 	if (ret < 0)
674 		return ret;
675 
676 	res->resets[0].id = "axi_m";
677 	res->resets[1].id = "axi_s";
678 	res->resets[2].id = "axi_m_sticky";
679 	res->resets[3].id = "pipe_sticky";
680 	res->resets[4].id = "pwr";
681 	res->resets[5].id = "ahb";
682 	res->resets[6].id = "pipe";
683 	res->resets[7].id = "axi_m_vmid";
684 	res->resets[8].id = "axi_s_xpu";
685 	res->resets[9].id = "parf";
686 	res->resets[10].id = "phy";
687 	res->resets[11].id = "phy_ahb";
688 
689 	res->num_resets = is_ipq ? 12 : 6;
690 
691 	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
692 	if (ret < 0)
693 		return ret;
694 
695 	return 0;
696 }
697 
/* v2.4.0 teardown: assert resets, then stop clocks. */
698 static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
699 {
700 	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
701 
702 	reset_control_bulk_assert(res->num_resets, res->resets);
703 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
704 }
705 
/*
 * v2.4.0 power-up: pulse all resets (assert, wait, deassert, wait) then
 * enable clocks; resets are re-asserted if clock enable fails.
 */
706 static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
707 {
708 	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
709 	struct dw_pcie *pci = pcie->pci;
710 	struct device *dev = pci->dev;
711 	int ret;
712 
713 	ret = reset_control_bulk_assert(res->num_resets, res->resets);
714 	if (ret < 0) {
715 		dev_err(dev, "cannot assert resets\n");
716 		return ret;
717 	}
718 
719 	usleep_range(10000, 12000);
720 
721 	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
722 	if (ret < 0) {
723 		dev_err(dev, "cannot deassert resets\n");
724 		return ret;
725 	}
726 
727 	usleep_range(10000, 12000);
728 
729 	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
730 	if (ret) {
731 		reset_control_bulk_assert(res->num_resets, res->resets);
732 		return ret;
733 	}
734 
735 	return 0;
736 }
737 
/* Acquire five clocks and seven resets for the v2.3.3 IP. */
738 static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
739 {
740 	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
741 	struct dw_pcie *pci = pcie->pci;
742 	struct device *dev = pci->dev;
743 	int ret;
744 
745 	res->clks[0].id = "iface";
746 	res->clks[1].id = "axi_m";
747 	res->clks[2].id = "axi_s";
748 	res->clks[3].id = "ahb";
749 	res->clks[4].id = "aux";
750 
751 	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
752 	if (ret < 0)
753 		return ret;
754 
755 	res->rst[0].id = "axi_m";
756 	res->rst[1].id = "axi_s";
757 	res->rst[2].id = "pipe";
758 	res->rst[3].id = "axi_m_sticky";
759 	res->rst[4].id = "sticky";
760 	res->rst[5].id = "ahb";
761 	res->rst[6].id = "sleep";
762 
763 	ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst), res->rst);
764 	if (ret < 0)
765 		return ret;
766 
767 	return 0;
768 }
769 
/*
 * v2.3.3 teardown: clocks only.
 * NOTE(review): resets are not re-asserted here, unlike other deinits —
 * presumably intentional; confirm against the IP's power-down sequence.
 */
770 static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
771 {
772 	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
773 
774 	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
775 }
776 
/*
 * v2.3.3 power-up: pulse all resets with fixed settle delays, then enable
 * clocks; resets are re-asserted on clock failure (best effort).
 */
777 static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
778 {
779 	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
780 	struct dw_pcie *pci = pcie->pci;
781 	struct device *dev = pci->dev;
782 	int ret;
783 
784 	ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
785 	if (ret < 0) {
786 		dev_err(dev, "cannot assert resets\n");
787 		return ret;
788 	}
789 
790 	usleep_range(2000, 2500);
791 
792 	ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst);
793 	if (ret < 0) {
794 		dev_err(dev, "cannot deassert resets\n");
795 		return ret;
796 	}
797 
798 	/*
799 	 * Don't have a way to see if the reset has completed.
800 	 * Wait for some time.
801 	 */
802 	usleep_range(2000, 2500);
803 
804 	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
805 	if (ret) {
806 		dev_err(dev, "cannot prepare/enable clocks\n");
807 		goto err_assert_resets;
808 	}
809 
810 	return 0;
811 
812 err_assert_resets:
813 	/*
814 	 * Not checking for failure, will anyway return
815 	 * the original failure in 'ret'.
816 	 */
817 	reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
818 
819 	return ret;
820 }
821 
/*
 * v2.3.3 post-init: program PARF (address space size, PHY wake, DBI base,
 * clock-gating/wakeup controls), then fix up DBI config space: slot
 * capabilities, no ASPM, and disable the completion timeout.
 */
822 static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
823 {
824 	struct dw_pcie *pci = pcie->pci;
825 	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
826 	u32 val;
827 
828 	writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
829 
830 	val = readl(pcie->parf + PARF_PHY_CTRL);
831 	val &= ~PHY_TEST_PWR_DOWN;
832 	writel(val, pcie->parf + PARF_PHY_CTRL);
833 
834 	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
835 
836 	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
837 		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
838 		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
839 		pcie->parf + PARF_SYS_CTRL);
840 	writel(0, pcie->parf + PARF_Q2A_FLUSH);
841 
842 	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
843 
	/* the following config registers are read-only; unlock DBI writes */
844 	dw_pcie_dbi_ro_wr_en(pci);
845 
846 	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
847 
848 	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
849 	val &= ~PCI_EXP_LNKCAP_ASPMS;
850 	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
851 
852 	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
853 		PCI_EXP_DEVCTL2);
854 
855 	dw_pcie_dbi_ro_wr_dis(pci);
856 
857 	return 0;
858 }
859 
/*
 * Acquire the reset array, two supplies, five mandatory clocks and up to
 * ten optional clocks for the v2.7.0 family. res->num_clks counts the full
 * clks[] prefix (required + optional slots); absent optional clocks are
 * NULL handles, which the clk bulk API tolerates.
 */
860 static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
861 {
862 	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
863 	struct dw_pcie *pci = pcie->pci;
864 	struct device *dev = pci->dev;
865 	unsigned int num_clks, num_opt_clks;
866 	unsigned int idx;
867 	int ret;
868 
869 	res->rst = devm_reset_control_array_get_exclusive(dev);
870 	if (IS_ERR(res->rst))
871 		return PTR_ERR(res->rst);
872 
873 	res->supplies[0].supply = "vdda";
874 	res->supplies[1].supply = "vddpe-3v3";
875 	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
876 				      res->supplies);
877 	if (ret)
878 		return ret;
879 
880 	idx = 0;
881 	res->clks[idx++].id = "aux";
882 	res->clks[idx++].id = "cfg";
883 	res->clks[idx++].id = "bus_master";
884 	res->clks[idx++].id = "bus_slave";
885 	res->clks[idx++].id = "slave_q2a";
886 
887 	num_clks = idx;
888 
889 	ret = devm_clk_bulk_get(dev, num_clks, res->clks);
890 	if (ret < 0)
891 		return ret;
892 
893 	res->clks[idx++].id = "tbu";
894 	res->clks[idx++].id = "ddrss_sf_tbu";
895 	res->clks[idx++].id = "aggre0";
896 	res->clks[idx++].id = "aggre1";
897 	res->clks[idx++].id = "noc_aggr";
898 	res->clks[idx++].id = "noc_aggr_4";
899 	res->clks[idx++].id = "noc_aggr_south_sf";
900 	res->clks[idx++].id = "cnoc_qx";
901 	res->clks[idx++].id = "sleep";
902 	res->clks[idx++].id = "cnoc_sf_axi";
903 
904 	num_opt_clks = idx - num_clks;
905 	res->num_clks = idx;
906 
907 	ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks);
908 	if (ret < 0)
909 		return ret;
910 
911 	return 0;
912 }
913 
/*
 * v2.7.0 power-up: supplies, clocks, reset pulse, then PARF programming —
 * select RC mode, wake the PHY, zero DBI base, disable the MAC/PHY
 * powerdown mux, bypass MHI clock/reset control, allow L1/L1SS entry and
 * unhalt AXI master writes. Errors unwind via goto cleanup.
 */
914 static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
915 {
916 	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
917 	struct dw_pcie *pci = pcie->pci;
918 	struct device *dev = pci->dev;
919 	u32 val;
920 	int ret;
921 
922 	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
923 	if (ret < 0) {
924 		dev_err(dev, "cannot enable regulators\n");
925 		return ret;
926 	}
927 
928 	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
929 	if (ret < 0)
930 		goto err_disable_regulators;
931 
932 	ret = reset_control_assert(res->rst);
933 	if (ret) {
934 		dev_err(dev, "reset assert failed (%d)\n", ret);
935 		goto err_disable_clocks;
936 	}
937 
938 	usleep_range(1000, 1500);
939 
940 	ret = reset_control_deassert(res->rst);
941 	if (ret) {
942 		dev_err(dev, "reset deassert failed (%d)\n", ret);
943 		goto err_disable_clocks;
944 	}
945 
946 	/* Wait for reset to complete, required on SM8450 */
947 	usleep_range(1000, 1500);
948 
949 	/* configure PCIe to RC mode */
950 	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
951 
952 	/* enable PCIe clocks and resets */
953 	val = readl(pcie->parf + PARF_PHY_CTRL);
954 	val &= ~PHY_TEST_PWR_DOWN;
955 	writel(val, pcie->parf + PARF_PHY_CTRL);
956 
957 	/* change DBI base address */
958 	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
959 
960 	/* MAC PHY_POWERDOWN MUX DISABLE  */
961 	val = readl(pcie->parf + PARF_SYS_CTRL);
962 	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
963 	writel(val, pcie->parf + PARF_SYS_CTRL);
964 
965 	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
966 	val |= BYPASS;
967 	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
968 
969 	/* Enable L1 and L1SS */
970 	val = readl(pcie->parf + PARF_PM_CTRL);
971 	val &= ~REQ_NOT_ENTR_L1;
972 	writel(val, pcie->parf + PARF_PM_CTRL);
973 
974 	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
975 	val |= EN;
976 	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
977 
978 	return 0;
979 err_disable_clocks:
980 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
981 err_disable_regulators:
982 	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
983 
984 	return ret;
985 }
986 
/* v2.7.0 post-init: drop L0s (where flagged) and hot-plug adverts in DBI. */
987 static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
988 {
989 	qcom_pcie_clear_aspm_l0s(pcie->pci);
990 	qcom_pcie_clear_hpc(pcie->pci);
991 
992 	return 0;
993 }
994 
/*
 * pci_walk_bus() callback: force the device to D0 then enable all link
 * power-management states. Always returns 0 so the walk visits every device.
 */
995 static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata)
996 {
997 	/*
998 	 * Downstream devices need to be in D0 state before enabling PCI PM
999 	 * substates.
1000 	 */
1001 	pci_set_power_state_locked(pdev, PCI_D0);
1002 	pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
1003 
1004 	return 0;
1005 }
1006 
/* After enumeration, enable ASPM on every device below the root bridge. */
1007 static void qcom_pcie_host_post_init_2_7_0(struct qcom_pcie *pcie)
1008 {
1009 	struct dw_pcie_rp *pp = &pcie->pci->pp;
1010 
1011 	pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL);
1012 }
1013 
/* v2.7.0 teardown: disable clocks then regulators (reverse of init). */
1014 static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
1015 {
1016 	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
1017 
1018 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
1019 
1020 	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
1021 }
1022 
/*
 * Program the PARF BDF-to-SMMU-SID hash table from the DT "iommu-map"
 * property. Each 32-bit table entry packs BDF[31:16] | SID[15:8] | NEXT[7:0],
 * where NEXT chains to the next entry index on hash collisions. Returns 0
 * (also when no iommu-map exists), -ENOMEM on allocation failure.
 */
1023 static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie)
1024 {
1025 	/* iommu map structure */
1026 	struct {
1027 		u32 bdf;
1028 		u32 phandle;
1029 		u32 smmu_sid;
1030 		u32 smmu_sid_len;
1031 	} *map;
1032 	void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N;
1033 	struct device *dev = pcie->pci->dev;
1034 	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
1035 	int i, nr_map, size = 0;
1036 	u32 smmu_sid_base;
1037 	u32 val;
1038 
1039 	of_get_property(dev->of_node, "iommu-map", &size);
1040 	if (!size)
1041 		return 0;
1042 
1043 	/* Enable BDF to SID translation by disabling bypass mode (default) */
1044 	val = readl(pcie->parf + PARF_BDF_TO_SID_CFG);
1045 	val &= ~BDF_TO_SID_BYPASS;
1046 	writel(val, pcie->parf + PARF_BDF_TO_SID_CFG);
1047 
1048 	map = kzalloc(size, GFP_KERNEL);
1049 	if (!map)
1050 		return -ENOMEM;
1051 
1052 	of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map,
1053 				   size / sizeof(u32));
1054 
1055 	nr_map = size / (sizeof(*map));
1056 
1057 	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);
1058 
1059 	/* Registers need to be zero out first */
	/* NOTE(review): table entry count reuses CRC8_TABLE_SIZE (256) — same
	 * value, but semantically a table-depth constant; confirm. */
1060 	memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));
1061 
1062 	/* Extract the SMMU SID base from the first entry of iommu-map */
1063 	smmu_sid_base = map[0].smmu_sid;
1064 
1065 	/* Look for an available entry to hold the mapping */
1066 	for (i = 0; i < nr_map; i++) {
1067 		__be16 bdf_be = cpu_to_be16(map[i].bdf);
1068 		u32 val;
1069 		u8 hash;
1070 
1071 		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 0);
1072 
1073 		val = readl(bdf_to_sid_base + hash * sizeof(u32));
1074 
1075 		/* If the register is already populated, look for next available entry */
		/* NOTE(review): no bound on this probe loop — if the table were
		 * full it would spin; presumably nr_map << 256 in practice. */
1076 		while (val) {
1077 			u8 current_hash = hash++;
1078 			u8 next_mask = 0xff;
1079 
1080 			/* If NEXT field is NULL then update it with next hash */
1081 			if (!(val & next_mask)) {
1082 				val |= (u32)hash;
1083 				writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
1084 			}
1085 
1086 			val = readl(bdf_to_sid_base + hash * sizeof(u32));
1087 		}
1088 
1089 		/* BDF [31:16] | SID [15:8] | NEXT [7:0] */
1090 		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
1091 		writel(val, bdf_to_sid_base + hash * sizeof(u32));
1092 	}
1093 
1094 	kfree(map);
1095 
1096 	return 0;
1097 }
1098 
1099 static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
1100 {
1101 	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
1102 	struct dw_pcie *pci = pcie->pci;
1103 	struct device *dev = pci->dev;
1104 	int ret;
1105 
1106 	res->clks[0].id = "iface";
1107 	res->clks[1].id = "axi_m";
1108 	res->clks[2].id = "axi_s";
1109 	res->clks[3].id = "axi_bridge";
1110 	res->clks[4].id = "rchng";
1111 
1112 	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
1113 	if (ret < 0)
1114 		return ret;
1115 
1116 	res->rst = devm_reset_control_array_get_exclusive(dev);
1117 	if (IS_ERR(res->rst))
1118 		return PTR_ERR(res->rst);
1119 
1120 	return 0;
1121 }
1122 
/* Release IP rev. 2.9.0 resources: gate all bulk clocks. */
static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
}
1129 
/*
 * Power-on sequence for IP rev. 2.9.0: pulse the reset line with settle
 * delays on either side, then enable the bulk clocks.
 */
static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct device *dev = pcie->pci->dev;
	int ret;

	ret = reset_control_assert(res->rst);
	if (ret) {
		dev_err(dev, "reset assert failed (%d)\n", ret);
		return ret;
	}

	/*
	 * Delay periods before and after reset deassert are working values
	 * from downstream Codeaurora kernel
	 */
	usleep_range(2000, 2500);

	ret = reset_control_deassert(res->rst);
	if (ret) {
		dev_err(dev, "reset deassert failed (%d)\n", ret);
		return ret;
	}

	usleep_range(2000, 2500);

	return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
}
1158 
/*
 * Post-init register programming for IP rev. 2.9.0: configure PARF for RC
 * operation, apply DBI fixups, and clear the BDF-to-SID table.
 * Always returns 0.
 */
static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int i;

	/* Set the slave address space size */
	writel(SLV_ADDR_SPACE_SZ,
		pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);

	/* Clear the PHY test power-down bit to bring the PHY interface up */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);

	/* Operate as a Root Complex, not an endpoint */
	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
	writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
		pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
		GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
		pci->dbi_base + GEN3_RELATED_OFF);

	/* Wakeup enables and clock-gating disables for the core clocks */
	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
		SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PARF_SYS_CTRL);

	writel(0, pcie->parf + PARF_Q2A_FLUSH);

	/* The following DBI registers are read-only unless unlocked */
	dw_pcie_dbi_ro_wr_en(pci);

	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	/* Remove the ASPM support advertisement from Link Capabilities */
	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	/* Disable the completion timeout */
	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
			PCI_EXP_DEVCTL2);

	dw_pcie_dbi_ro_wr_dis(pci);

	/* Zero out all 256 BDF-to-SID translation table entries */
	for (i = 0; i < 256; i++)
		writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i));

	return 0;
}
1207 
1208 static int qcom_pcie_link_up(struct dw_pcie *pci)
1209 {
1210 	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
1211 	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
1212 
1213 	return !!(val & PCI_EXP_LNKSTA_DLLLA);
1214 }
1215 
/*
 * dw_pcie_host_ops::init callback: bring up the platform-specific
 * resources, the PHY, and (where needed) the SID mapping, unwinding in
 * reverse order on any failure.
 */
static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	/* Hold the endpoint in reset (PERST#) while the controller comes up */
	qcom_ep_reset_assert(pcie);

	ret = pcie->cfg->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
	if (ret)
		goto err_deinit;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	/* Per-IP-revision fixups; optional for some revisions */
	if (pcie->cfg->ops->post_init) {
		ret = pcie->cfg->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	qcom_ep_reset_deassert(pcie);

	/* Program BDF-to-SID translation where the revision requires it */
	if (pcie->cfg->ops->config_sid) {
		ret = pcie->cfg->ops->config_sid(pcie);
		if (ret)
			goto err_assert_reset;
	}

	return 0;

err_assert_reset:
	qcom_ep_reset_assert(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->cfg->ops->deinit(pcie);

	return ret;
}
1261 
/*
 * dw_pcie_host_ops::deinit callback: reverse of qcom_pcie_host_init() —
 * assert PERST#, power off the PHY, then release platform resources.
 */
static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	qcom_ep_reset_assert(pcie);
	phy_power_off(pcie->phy);
	pcie->cfg->ops->deinit(pcie);
}
1271 
1272 static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp)
1273 {
1274 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
1275 	struct qcom_pcie *pcie = to_qcom_pcie(pci);
1276 
1277 	if (pcie->cfg->ops->host_post_init)
1278 		pcie->cfg->ops->host_post_init(pcie);
1279 }
1280 
/* Host callbacks registered with the DesignWare PCIe core */
static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.init		= qcom_pcie_host_init,
	.deinit		= qcom_pcie_host_deinit,
	.post_init	= qcom_pcie_host_post_init,
};
1286 
/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.post_init = qcom_pcie_post_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.post_init = qcom_pcie_post_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, /* shared with 2.1.0 */
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.post_init = qcom_pcie_post_init_2_3_2, /* reused from 2.3.2 */
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.post_init = qcom_pcie_post_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 1.9.0 — reuses the 2.7.0 resource handling, plus SID config */
static const struct qcom_pcie_ops ops_1_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.host_post_init = qcom_pcie_host_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.config_sid = qcom_pcie_config_sid_1_9_0,
};

/* Qcom IP rev.: 2.9.0  Synopsys IP rev.: 5.00a */
static const struct qcom_pcie_ops ops_2_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_9_0,
	.init = qcom_pcie_init_2_9_0,
	.post_init = qcom_pcie_post_init_2_9_0,
	.deinit = qcom_pcie_deinit_2_9_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
1360 
/* Per-SoC configuration wrappers selected via the of_device_id table below */
static const struct qcom_pcie_cfg cfg_1_0_0 = {
	.ops = &ops_1_0_0,
};

static const struct qcom_pcie_cfg cfg_1_9_0 = {
	.ops = &ops_1_9_0,
};

static const struct qcom_pcie_cfg cfg_2_1_0 = {
	.ops = &ops_2_1_0,
};

static const struct qcom_pcie_cfg cfg_2_3_2 = {
	.ops = &ops_2_3_2,
};

static const struct qcom_pcie_cfg cfg_2_3_3 = {
	.ops = &ops_2_3_3,
};

static const struct qcom_pcie_cfg cfg_2_4_0 = {
	.ops = &ops_2_4_0,
};

static const struct qcom_pcie_cfg cfg_2_7_0 = {
	.ops = &ops_2_7_0,
};

static const struct qcom_pcie_cfg cfg_2_9_0 = {
	.ops = &ops_2_9_0,
};

static const struct qcom_pcie_cfg cfg_sc8280xp = {
	.ops = &ops_1_9_0,
	/* presumably disables ASPM L0s on these SoCs — flag checked elsewhere */
	.no_l0s = true,
};
1397 
/* Low-level callbacks for the DesignWare PCIe core */
static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
	.start_link = qcom_pcie_start_link,
};
1402 
/*
 * Get the "pcie-mem" interconnect path and place an initial bandwidth
 * vote on it. Returns 0 on success or a negative errno.
 *
 * NOTE(review): when DT provides no interconnects, devm_of_icc_get() is
 * expected to return NULL, and icc_set_bw(NULL, ...) to be a no-op, so
 * this also succeeds on such platforms — confirm against the icc API.
 */
static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	int ret;

	pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem");
	if (IS_ERR(pcie->icc_mem))
		return PTR_ERR(pcie->icc_mem);

	/*
	 * Some Qualcomm platforms require interconnect bandwidth constraints
	 * to be set before enabling interconnect clocks.
	 *
	 * Set an initial peak bandwidth corresponding to single-lane Gen 1
	 * for the pcie-mem path.
	 */
	ret = icc_set_bw(pcie->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));
	if (ret) {
		dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
			ret);
		return ret;
	}

	return 0;
}
1428 
/*
 * Re-vote the "pcie-mem" interconnect bandwidth to match the currently
 * trained link speed and width. No-op if there is no icc path or the
 * link is down; failure to vote is logged but not propagated.
 */
static void qcom_pcie_icc_update(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u32 offset, status;
	int speed, width;
	int ret;

	if (!pcie->icc_mem)
		return;

	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	/* Only update constraints if link is up. */
	if (!(status & PCI_EXP_LNKSTA_DLLLA))
		return;

	/* Current Link Speed and Negotiated Link Width fields of LNKSTA */
	speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
	width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);

	/* Peak bandwidth scales linearly with lane count */
	ret = icc_set_bw(pcie->icc_mem, 0, width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
	if (ret) {
		dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
			ret);
	}
}
1455 
/*
 * seq_file show callback for the "link_transition_count" debugfs file:
 * dump the hardware counters of link power-state transitions from the
 * MHI register region. s->private is the struct device passed to
 * debugfs_create_devm_seqfile() in qcom_pcie_init_debugfs().
 */
static int qcom_pcie_link_transition_count(struct seq_file *s, void *data)
{
	struct qcom_pcie *pcie = (struct qcom_pcie *)dev_get_drvdata(s->private);

	seq_printf(s, "L0s transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L0S));

	seq_printf(s, "L1 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L1));

	seq_printf(s, "L1.1 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));

	seq_printf(s, "L1.2 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));

	seq_printf(s, "L2 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L2));

	return 0;
}
1477 
/*
 * Create the per-controller debugfs directory (named after the OF node
 * path) with a "link_transition_count" file. Only called when the
 * optional MHI region was mapped. Failures are silently ignored —
 * debugfs is best-effort.
 */
static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	char *name;

	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
	if (!name)
		return;

	/* debugfs_create_* APIs tolerate error-pointer parents, no check needed */
	pcie->debugfs = debugfs_create_dir(name, NULL);
	debugfs_create_devm_seqfile(dev, "link_transition_count", pcie->debugfs,
				    qcom_pcie_link_transition_count);
}
1492 
/*
 * Platform probe: map register regions, acquire PHY/GPIO/interconnect
 * resources per the matched SoC configuration, then register the host
 * with the DesignWare core.
 */
static int qcom_pcie_probe(struct platform_device *pdev)
{
	const struct qcom_pcie_cfg *pcie_cfg;
	struct device *dev = &pdev->dev;
	struct qcom_pcie *pcie;
	struct dw_pcie_rp *pp;
	struct resource *res;
	struct dw_pcie *pci;
	int ret;

	pcie_cfg = of_device_get_match_data(dev);
	if (!pcie_cfg || !pcie_cfg->ops) {
		dev_err(dev, "Invalid platform data\n");
		return -EINVAL;
	}

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	/*
	 * pm_runtime_get_sync() bumps the usage count even on failure, so
	 * the shared error path below always does a pm_runtime_put().
	 */
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm_runtime_put;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->cfg = pcie_cfg;

	/* PERST# GPIO; driven high here to keep the endpoint in reset */
	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->reset)) {
		ret = PTR_ERR(pcie->reset);
		goto err_pm_runtime_put;
	}

	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
	if (IS_ERR(pcie->elbi)) {
		ret = PTR_ERR(pcie->elbi);
		goto err_pm_runtime_put;
	}

	/* MHI region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi");
	if (res) {
		pcie->mhi = devm_ioremap_resource(dev, res);
		if (IS_ERR(pcie->mhi)) {
			ret = PTR_ERR(pcie->mhi);
			goto err_pm_runtime_put;
		}
	}

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		goto err_pm_runtime_put;
	}

	ret = qcom_pcie_icc_init(pcie);
	if (ret)
		goto err_pm_runtime_put;

	/* Acquire clocks/resets/regulators for the matched IP revision */
	ret = pcie->cfg->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	ret = phy_init(pcie->phy);
	if (ret)
		goto err_pm_runtime_put;

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		goto err_phy_exit;
	}

	/* Re-vote interconnect bandwidth now that the link may be trained */
	qcom_pcie_icc_update(pcie);

	/* Debugfs counters live in the MHI region, so only expose them if mapped */
	if (pcie->mhi)
		qcom_pcie_init_debugfs(pcie);

	return 0;

err_phy_exit:
	phy_exit(pcie->phy);
err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}
1601 
/*
 * System suspend (noirq phase): drop the interconnect vote to a minimum
 * and release controller resources only when no endpoint holds the link
 * up (see the comment below for why).
 */
static int qcom_pcie_suspend_noirq(struct device *dev)
{
	struct qcom_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	/*
	 * Set minimum bandwidth required to keep data path functional during
	 * suspend.
	 */
	ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
	if (ret) {
		dev_err(dev, "Failed to set interconnect bandwidth: %d\n", ret);
		return ret;
	}

	/*
	 * Turn OFF the resources only for controllers without active PCIe
	 * devices. For controllers with active devices, the resources are kept
	 * ON and the link is expected to be in L0/L1 (sub)states.
	 *
	 * Turning OFF the resources for controllers with active PCIe devices
	 * will trigger access violation during the end of the suspend cycle,
	 * as kernel tries to access the PCIe devices config space for masking
	 * MSIs.
	 *
	 * Also, it is not desirable to put the link into L2/L3 state as that
	 * implies VDD supply will be removed and the devices may go into
	 * powerdown state. This will affect the lifetime of the storage devices
	 * like NVMe.
	 */
	if (!dw_pcie_link_up(pcie->pci)) {
		qcom_pcie_host_deinit(&pcie->pci->pp);
		pcie->suspended = true;
	}

	return 0;
}
1639 
1640 static int qcom_pcie_resume_noirq(struct device *dev)
1641 {
1642 	struct qcom_pcie *pcie = dev_get_drvdata(dev);
1643 	int ret;
1644 
1645 	if (pcie->suspended) {
1646 		ret = qcom_pcie_host_init(&pcie->pci->pp);
1647 		if (ret)
1648 			return ret;
1649 
1650 		pcie->suspended = false;
1651 	}
1652 
1653 	qcom_pcie_icc_update(pcie);
1654 
1655 	return 0;
1656 }
1657 
/* Supported SoCs; keep sorted alphabetically by compatible string */
static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
	{ .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
	{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
	{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_9_0},
	{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },
	{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
	{ .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-x1e80100", .data = &cfg_1_9_0 },
	{ }
};
1685 
/*
 * Early fixup: force the class code of Qualcomm root ports (which report
 * a non-bridge class) to PCI-to-PCI bridge so the PCI core enumerates
 * them correctly.
 */
static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
1697 
/* Suspend/resume run in the noirq phase, after child devices are quiesced */
static const struct dev_pm_ops qcom_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
};
1701 
/* No .remove: the driver is built-in only and unbinding is suppressed */
static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
		.pm = &qcom_pcie_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
builtin_platform_driver(qcom_pcie_driver);
1713