// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/crc8.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/units.h>

#include "../../pci.h"
#include "../pci-host-common.h"
#include "pcie-designware.h"
#include "pcie-qcom-common.h"

/* PARF registers */
#define PARF_SYS_CTRL				0x00
#define PARF_PM_CTRL				0x20
#define PARF_PCS_DEEMPH				0x34
#define PARF_PCS_SWING				0x38
#define PARF_PHY_CTRL				0x40
#define PARF_PHY_REFCLK				0x4c
#define PARF_CONFIG_BITS			0x50
#define PARF_DBI_BASE_ADDR			0x168
#define PARF_SLV_ADDR_SPACE_SIZE		0x16c
#define PARF_MHI_CLOCK_RESET_CTRL		0x174
#define PARF_AXI_MSTR_WR_ADDR_HALT		0x178
#define PARF_AXI_MSTR_WR_ADDR_HALT_V2		0x1a8
#define PARF_Q2A_FLUSH				0x1ac
#define PARF_LTSSM				0x1b0
#define PARF_INT_ALL_STATUS			0x224
#define PARF_INT_ALL_CLEAR			0x228
#define PARF_INT_ALL_MASK			0x22c
#define PARF_SID_OFFSET				0x234
#define PARF_BDF_TRANSLATE_CFG			0x24c
#define PARF_DBI_BASE_ADDR_V2			0x350
#define PARF_DBI_BASE_ADDR_V2_HI		0x354
#define PARF_SLV_ADDR_SPACE_SIZE_V2		0x358
#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI		0x35c
#define PARF_NO_SNOOP_OVERRIDE			0x3d4
#define PARF_ATU_BASE_ADDR			0x634
#define PARF_ATU_BASE_ADDR_HI			0x638
#define PARF_DEVICE_TYPE			0x1000
#define PARF_BDF_TO_SID_TABLE_N			0x2000
#define PARF_BDF_TO_SID_CFG			0x2c00

/* ELBI registers */
#define ELBI_SYS_CTRL				0x04

/* DBI registers */
#define AXI_MSTR_RESP_COMP_CTRL0		0x818
#define AXI_MSTR_RESP_COMP_CTRL1		0x81c

/* MHI registers */
#define PARF_DEBUG_CNT_PM_LINKST_IN_L2		0xc04
#define PARF_DEBUG_CNT_PM_LINKST_IN_L1		0xc0c
#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S		0xc10
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1	0xc84
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2	0xc88

/* PARF_SYS_CTRL register fields */
#define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN	BIT(29)
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

/* PARF_PM_CTRL register fields */
#define REQ_NOT_ENTR_L1				BIT(5)

/* PARF_PCS_DEEMPH register fields */
#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		FIELD_PREP(GENMASK(21, 16), x)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	FIELD_PREP(GENMASK(13, 8), x)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	FIELD_PREP(GENMASK(5, 0), x)

/* PARF_PCS_SWING register fields */
#define PCS_SWING_TX_SWING_FULL(x)		FIELD_PREP(GENMASK(14, 8), x)
#define PCS_SWING_TX_SWING_LOW(x)		FIELD_PREP(GENMASK(6, 0), x)

/* PARF_PHY_CTRL register fields */
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		FIELD_PREP(PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK, x)
#define PHY_TEST_PWR_DOWN			BIT(0)

/* PARF_PHY_REFCLK register fields */
#define PHY_REFCLK_SSP_EN			BIT(16)
#define PHY_REFCLK_USE_PAD			BIT(12)

/* PARF_CONFIG_BITS register fields */
#define PHY_RX0_EQ(x)				FIELD_PREP(GENMASK(26, 24), x)

/* PARF_SLV_ADDR_SPACE_SIZE register value */
#define SLV_ADDR_SPACE_SZ			0x80000000

/* PARF_MHI_CLOCK_RESET_CTRL register fields */
#define AHB_CLK_EN				BIT(0)
#define MSTR_AXI_CLK_EN				BIT(1)
#define BYPASS					BIT(4)

/* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
#define EN					BIT(31)

/* PARF_LTSSM register fields */
#define LTSSM_EN				BIT(8)

/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
#define PARF_INT_ALL_LINK_UP			BIT(13)
#define PARF_INT_MSI_DEV_0_7			GENMASK(30, 23)

/* PARF_NO_SNOOP_OVERRIDE register fields */
#define WR_NO_SNOOP_OVERRIDE_EN			BIT(1)
#define RD_NO_SNOOP_OVERRIDE_EN			BIT(3)

/* PARF_DEVICE_TYPE register fields */
#define DEVICE_TYPE_RC				0x4

/* PARF_BDF_TO_SID_CFG fields */
#define BDF_TO_SID_BYPASS			BIT(0)

/* ELBI_SYS_CTRL register fields */
#define ELBI_SYS_CTRL_LT_ENABLE			BIT(0)

/* AXI_MSTR_RESP_COMP_CTRL0 register fields */
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5

/* AXI_MSTR_RESP_COMP_CTRL1 register fields */
#define CFG_BRIDGE_SB_INIT			BIT(0)

/* PCI_EXP_SLTCAP register fields */
#define PCIE_CAP_SLOT_POWER_LIMIT_VAL		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250)
#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1)
#define PCIE_CAP_SLOT_VAL			(PCI_EXP_SLTCAP_ABP | \
						PCI_EXP_SLTCAP_PCP | \
						PCI_EXP_SLTCAP_MRLSP | \
						PCI_EXP_SLTCAP_AIP | \
						PCI_EXP_SLTCAP_PIP | \
						PCI_EXP_SLTCAP_HPS | \
						PCI_EXP_SLTCAP_EIP | \
						PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
						PCIE_CAP_SLOT_POWER_LIMIT_SCALE)

#define PERST_DELAY_US				1000

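/*
 * CRC8 polynomial (0x07) used to build the lookup table that hashes a BDF
 * into an index of the BDF-to-SID table (see qcom_pcie_config_sid_1_9_0()).
 */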
#define QCOM_PCIE_CRC8_POLYNOMIAL		(BIT(2) | BIT(1) | BIT(0))

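/*
 * Convert a link-speed index from the Link Status register into an
 * interconnect bandwidth vote. Note that PCIE_SPEED2MBS_ENC() already
 * accounts for encoding overhead, e.g. a 2.5 GT/s (Gen 1) lane becomes
 * 2000 Mbps after 8b/10b encoding.
 */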
#define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
		Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))

struct qcom_pcie_resources_1_0_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_1_0_MAX_RESETS		6
#define QCOM_PCIE_2_1_0_MAX_SUPPLY		3
struct qcom_pcie_resources_2_1_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
	int num_resets;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY		2
struct qcom_pcie_resources_2_3_2 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_3_3_MAX_RESETS		7
struct qcom_pcie_resources_2_3_3 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];
};

#define QCOM_PCIE_2_4_0_MAX_RESETS		12
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
	int num_resets;
};

#define QCOM_PCIE_2_7_0_MAX_SUPPLIES		2
struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];
	struct reset_control *rst;
};

struct qcom_pcie_resources_2_9_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control *rst;
};

union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
	struct qcom_pcie_resources_2_9_0 v2_9_0;
};

struct qcom_pcie;

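/*
 * Per-IP-revision callbacks. During host initialization they are invoked in
 * the order init() -> PHY power-on -> post_init() -> PERST# deassert ->
 * config_sid() (see qcom_pcie_host_init()); ltssm_enable() runs later, from
 * qcom_pcie_start_link().
 */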
struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
	int (*config_sid)(struct qcom_pcie *pcie);
};

/**
 * struct qcom_pcie_cfg - Per SoC config struct
 * @ops: qcom PCIe ops structure
 * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache
 * snooping
 * @firmware_managed: Set if the Root Complex is firmware managed
 * @no_l0s: Set if ASPM L0s is not supported and must be cleared from the
 * advertised link capabilities (see qcom_pcie_clear_aspm_l0s())
 */
struct qcom_pcie_cfg {
	const struct qcom_pcie_ops *ops;
	bool override_no_snoop;
	bool firmware_managed;
	bool no_l0s;
};

struct qcom_pcie_port {
	struct list_head list;
	struct gpio_desc *reset;
	struct phy *phy;
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *mhi;
	union qcom_pcie_resources res;
	struct icc_path *icc_mem;
	struct icc_path *icc_cpu;
	const struct qcom_pcie_cfg *cfg;
	struct dentry *debugfs;
	struct list_head ports;
	bool suspended;
	bool use_pm_opp;
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)

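/*
 * Drive PERST# on every Root Port. gpiod_set_value_cansleep() applies the
 * polarity from the DT GPIO flags, so 'assert' here means logically
 * asserting PERST# regardless of the physical active level.
 */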
static void qcom_perst_assert(struct qcom_pcie *pcie, bool assert)
{
	struct qcom_pcie_port *port;
	int val = assert ? 1 : 0;

	list_for_each_entry(port, &pcie->ports, list)
		gpiod_set_value_cansleep(port->reset, val);

	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	qcom_perst_assert(pcie, true);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms */
	msleep(PCIE_T_PVPERL_MS);
	qcom_perst_assert(pcie, false);
}

static int qcom_pcie_start_link(struct dw_pcie *pci)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	qcom_pcie_common_set_equalization(pci);

	if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT)
		qcom_pcie_common_set_16gt_lane_margining(pci);

	/* Enable Link Training state machine */
	if (pcie->cfg->ops->ltssm_enable)
		pcie->cfg->ops->ltssm_enable(pcie);

	return 0;
}

static void qcom_pcie_clear_aspm_l0s(struct dw_pcie *pci)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	u16 offset;
	u32 val;

	if (!pcie->cfg->no_l0s)
		return;

	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);

	dw_pcie_dbi_ro_wr_en(pci);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPM_L0S;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	dw_pcie_dbi_ro_wr_dis(pci);
}

static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	dw_pcie_dbi_ro_wr_en(pci);

	val = readl(pci->dbi_base + offset + PCI_EXP_SLTCAP);
	val &= ~PCI_EXP_SLTCAP_HPC;
	writel(val, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	dw_pcie_dbi_ro_wr_dis(pci);
}

static void qcom_pcie_configure_dbi_base(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (pci->dbi_phys_addr) {
		/*
		 * The PARF_DBI_BASE_ADDR register is in the CPU domain and
		 * must be programmed with the CPU physical address.
		 */
		writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
							PARF_DBI_BASE_ADDR);
		writel(SLV_ADDR_SPACE_SZ, pcie->parf +
						PARF_SLV_ADDR_SPACE_SIZE);
	}
}

static void qcom_pcie_configure_dbi_atu_base(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (pci->dbi_phys_addr) {
		/*
		 * The PARF_DBI_BASE_ADDR_V2 and PARF_ATU_BASE_ADDR registers
		 * are in the CPU domain and must be programmed with CPU
		 * physical addresses.
		 */
		writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
							PARF_DBI_BASE_ADDR_V2);
		writel(upper_32_bits(pci->dbi_phys_addr), pcie->parf +
						PARF_DBI_BASE_ADDR_V2_HI);

		if (pci->atu_phys_addr) {
			writel(lower_32_bits(pci->atu_phys_addr), pcie->parf +
							PARF_ATU_BASE_ADDR);
			writel(upper_32_bits(pci->atu_phys_addr), pcie->parf +
							PARF_ATU_BASE_ADDR_HI);
		}

		writel(0x0, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_V2);
		writel(SLV_ADDR_SPACE_SZ, pcie->parf +
					PARF_SLV_ADDR_SPACE_SIZE_V2_HI);
	}
}

static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u32 val;

	if (!pci->elbi_base) {
		dev_err(pci->dev, "ELBI is not present\n");
		return;
	}
	/* enable link training */
	val = readl(pci->elbi_base + ELBI_SYS_CTRL);
	val |= ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pci->elbi_base + ELBI_SYS_CTRL);
}

static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_apq = of_device_is_compatible(dev->of_node, "qcom,pcie-apq8064");
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->resets[0].id = "pci";
	res->resets[1].id = "axi";
	res->resets[2].id = "ahb";
	res->resets[3].id = "por";
	res->resets[4].id = "phy";
	res->resets[5].id = "ext";

	/* ext is optional on APQ8064 */
	res->num_resets = is_apq ? 5 : 6;
	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
	if (ret < 0)
		return ret;

	return 0;
}

static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
	reset_control_bulk_assert(res->num_resets, res->resets);

	writel(1, pcie->parf + PARF_PHY_CTRL);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	/* Reset the PCIe interface, as U-Boot can leave it in an undefined state */
	ret = reset_control_bulk_assert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
		return ret;
	}

	return 0;
}

static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	/* Take the PHY out of test power-down before enabling the PCIe clocks */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		return ret;

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
			       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
			       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
			       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS);
	}

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		/* set TX termination offset */
		val = readl(pcie->parf + PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PARF_PHY_CTRL);
	}

	/* enable external reference clock */
	val = readl(pcie->parf + PARF_PHY_REFCLK);
	/* USE_PAD is required only for ipq806x */
	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
		val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PARF_PHY_REFCLK);

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the max TLP size to 2K, instead of using the default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1);

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		goto err_assert_reset;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_disable_clks;
	}

	return 0;

err_disable_clks:
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_assert_reset:
	reset_control_assert(res->core);

	return ret;
}

static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
{
	qcom_pcie_configure_dbi_base(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= EN;
		writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PARF_LTSSM);
	val |= LTSSM_EN;
	writel(val, pcie->parf + PARF_LTSSM);
}

static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	return 0;
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
		return ret;
	}

	return 0;
}

static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	u32 val;

	/* Take the PHY out of test power-down */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	qcom_pcie_configure_dbi_base(pcie);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PARF_SYS_CTRL);
	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
	writel(val, pcie->parf + PARF_SYS_CTRL);

	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	val |= BYPASS;
	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= EN;
	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->resets[0].id = "axi_m";
	res->resets[1].id = "axi_s";
	res->resets[2].id = "axi_m_sticky";
	res->resets[3].id = "pipe_sticky";
	res->resets[4].id = "pwr";
	res->resets[5].id = "ahb";
	res->resets[6].id = "pipe";
	res->resets[7].id = "axi_m_vmid";
	res->resets[8].id = "axi_s_xpu";
	res->resets[9].id = "parf";
	res->resets[10].id = "phy";
	res->resets[11].id = "phy_ahb";

	res->num_resets = is_ipq ? 12 : 6;

	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
	if (ret < 0)
		return ret;

	return 0;
}

static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_bulk_assert(res->num_resets, res->resets);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_bulk_assert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		reset_control_bulk_assert(res->num_resets, res->resets);
		return ret;
	}

	return 0;
}

static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->rst[0].id = "axi_m";
	res->rst[1].id = "axi_s";
	res->rst[2].id = "pipe";
	res->rst[3].id = "axi_m_sticky";
	res->rst[4].id = "sticky";
	res->rst[5].id = "ahb";
	res->rst[6].id = "sleep";

	ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0)
		return ret;

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	usleep_range(2000, 2500);

	ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		return ret;
	}

	/*
	 * There is no way to check whether the reset has completed, so just
	 * wait for a while.
	 */
	usleep_range(2000, 2500);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		goto err_assert_resets;
	}

	return 0;

err_assert_resets:
	/*
	 * The result is not checked here, since the original failure in
	 * 'ret' is returned anyway.
	 */
	reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);

	return ret;
}

static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	qcom_pcie_configure_dbi_atu_base(pcie);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PARF_SYS_CTRL);
	writel(0, pcie->parf + PARF_Q2A_FLUSH);

	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);

	dw_pcie_dbi_ro_wr_en(pci);

	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
		PCI_EXP_DEVCTL2);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->rst = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(res->rst))
		return PTR_ERR(res->rst);

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	return 0;
}

static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret < 0)
		goto err_disable_regulators;

	ret = reset_control_assert(res->rst);
	if (ret) {
		dev_err(dev, "reset assert failed (%d)\n", ret);
		goto err_disable_clocks;
	}

	usleep_range(1000, 1500);

	ret = reset_control_deassert(res->rst);
	if (ret) {
		dev_err(dev, "reset deassert failed (%d)\n", ret);
		goto err_disable_clocks;
	}

	/* Wait for reset to complete, required on SM8450 */
	usleep_range(1000, 1500);

	/* configure PCIe to RC mode */
	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);

	/* Take the PHY out of test power-down */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	qcom_pcie_configure_dbi_atu_base(pcie);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PARF_SYS_CTRL);
	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
	writel(val, pcie->parf + PARF_SYS_CTRL);

	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	val |= BYPASS;
	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);

	/* Enable L1 and L1SS */
	val = readl(pcie->parf + PARF_PM_CTRL);
	val &= ~REQ_NOT_ENTR_L1;
	writel(val, pcie->parf + PARF_PM_CTRL);

	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= EN;
	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;
err_disable_clocks:
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_disable_regulators:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
	const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg;

	if (pcie_cfg->override_no_snoop)
		writel(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
				pcie->parf + PARF_NO_SNOOP_OVERRIDE);

	qcom_pcie_clear_aspm_l0s(pcie->pci);
	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

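/*
 * Map PCIe BDFs to SMMU stream IDs using the 256-entry hash table at
 * PARF_BDF_TO_SID_TABLE_N. Each 32-bit entry encodes BDF[31:16], SID[15:8]
 * and NEXT[7:0]; hash collisions are chained through the NEXT field, which
 * holds the index of the following entry.
 */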
static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie)
{
	/* iommu map structure */
	struct {
		u32 bdf;
		u32 phandle;
		u32 smmu_sid;
		u32 smmu_sid_len;
	} *map;
	void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N;
	struct device *dev = pcie->pci->dev;
	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
	int i, nr_map, size = 0;
	u32 smmu_sid_base;
	u32 val;

	of_get_property(dev->of_node, "iommu-map", &size);
	if (!size)
		return 0;

	/* Enable BDF to SID translation by disabling bypass mode (default) */
	val = readl(pcie->parf + PARF_BDF_TO_SID_CFG);
	val &= ~BDF_TO_SID_BYPASS;
	writel(val, pcie->parf + PARF_BDF_TO_SID_CFG);

	map = kzalloc(size, GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map,
				   size / sizeof(u32));

	nr_map = size / (sizeof(*map));

	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);

	/* The registers need to be zeroed out first */
	memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));

	/* Extract the SMMU SID base from the first entry of iommu-map */
	smmu_sid_base = map[0].smmu_sid;

	/* Look for an available entry to hold the mapping */
	for (i = 0; i < nr_map; i++) {
		__be16 bdf_be = cpu_to_be16(map[i].bdf);
		u32 val;
		u8 hash;

		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 0);

		val = readl(bdf_to_sid_base + hash * sizeof(u32));

		/* If the register is already populated, look for next available entry */
		while (val) {
			u8 current_hash = hash++;
			u8 next_mask = 0xff;

			/* If NEXT field is NULL then update it with next hash */
			if (!(val & next_mask)) {
				val |= (u32)hash;
				writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
			}

			val = readl(bdf_to_sid_base + hash * sizeof(u32));
		}

		/* BDF [31:16] | SID [15:8] | NEXT [7:0] */
		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
		writel(val, bdf_to_sid_base + hash * sizeof(u32));
	}

	kfree(map);

	return 0;
}

static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->rst = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(res->rst))
		return PTR_ERR(res->rst);

	return 0;
}

static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct device *dev = pcie->pci->dev;
	int ret;

	ret = reset_control_assert(res->rst);
	if (ret) {
		dev_err(dev, "reset assert failed (%d)\n", ret);
		return ret;
	}

	/*
	 * The delay periods before and after the reset deassert are working
	 * values taken from the downstream CodeAurora kernel.
	 */
	usleep_range(2000, 2500);

	ret = reset_control_deassert(res->rst);
	if (ret) {
		dev_err(dev, "reset deassert failed (%d)\n", ret);
		return ret;
	}

	usleep_range(2000, 2500);

	return clk_bulk_prepare_enable(res->num_clks, res->clks);
}

static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int i;

	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	qcom_pcie_configure_dbi_atu_base(pcie);

	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
	writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
		pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
		GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
		pci->dbi_base + GEN3_RELATED_OFF);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
		SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PARF_SYS_CTRL);

	writel(0, pcie->parf + PARF_Q2A_FLUSH);

	dw_pcie_dbi_ro_wr_en(pci);

	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
			PCI_EXP_DEVCTL2);

	dw_pcie_dbi_ro_wr_dis(pci);

	for (i = 0; i < 256; i++)
		writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i));

	return 0;
}

static bool qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	return val & PCI_EXP_LNKSTA_DLLLA;
}

static void qcom_pcie_phy_power_off(struct qcom_pcie *pcie)
{
	struct qcom_pcie_port *port;

	list_for_each_entry(port, &pcie->ports, list)
		phy_power_off(port->phy);
}

static int qcom_pcie_phy_power_on(struct qcom_pcie *pcie)
{
	struct qcom_pcie_port *port;
	int ret;

	list_for_each_entry(port, &pcie->ports, list) {
		ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
		if (ret)
			return ret;

		ret = phy_power_on(port->phy);
		if (ret) {
			qcom_pcie_phy_power_off(pcie);
			return ret;
		}
	}

	return 0;
}

static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->cfg->ops->init(pcie);
	if (ret)
		return ret;

	ret = qcom_pcie_phy_power_on(pcie);
	if (ret)
		goto err_deinit;

	if (pcie->cfg->ops->post_init) {
		ret = pcie->cfg->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	qcom_ep_reset_deassert(pcie);

	if (pcie->cfg->ops->config_sid) {
		ret = pcie->cfg->ops->config_sid(pcie);
		if (ret)
			goto err_assert_reset;
	}

	return 0;

err_assert_reset:
	qcom_ep_reset_assert(pcie);
err_disable_phy:
	qcom_pcie_phy_power_off(pcie);
err_deinit:
	pcie->cfg->ops->deinit(pcie);

	return ret;
}

static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	qcom_ep_reset_assert(pcie);
	qcom_pcie_phy_power_off(pcie);
	pcie->cfg->ops->deinit(pcie);
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.init		= qcom_pcie_host_init,
	.deinit		= qcom_pcie_host_deinit,
};

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.post_init = qcom_pcie_post_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.post_init = qcom_pcie_post_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.post_init = qcom_pcie_post_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 1.9.0 */
static const struct qcom_pcie_ops ops_1_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.config_sid = qcom_pcie_config_sid_1_9_0,
};

/* Qcom IP rev.: 1.21.0  Synopsys IP rev.: 5.60a */
static const struct qcom_pcie_ops ops_1_21_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.9.0  Synopsys IP rev.: 5.00a */
static const struct qcom_pcie_ops ops_2_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_9_0,
	.init = qcom_pcie_init_2_9_0,
	.post_init = qcom_pcie_post_init_2_9_0,
	.deinit = qcom_pcie_deinit_2_9_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

static const struct qcom_pcie_cfg cfg_1_0_0 = {
	.ops = &ops_1_0_0,
};

static const struct qcom_pcie_cfg cfg_1_9_0 = {
	.ops = &ops_1_9_0,
};

static const struct qcom_pcie_cfg cfg_1_34_0 = {
	.ops = &ops_1_9_0,
	.override_no_snoop = true,
};

static const struct qcom_pcie_cfg cfg_2_1_0 = {
	.ops = &ops_2_1_0,
};

static const struct qcom_pcie_cfg cfg_2_3_2 = {
	.ops = &ops_2_3_2,
};

static const struct qcom_pcie_cfg cfg_2_3_3 = {
	.ops = &ops_2_3_3,
};

static const struct qcom_pcie_cfg cfg_2_4_0 = {
	.ops = &ops_2_4_0,
};

static const struct qcom_pcie_cfg cfg_2_7_0 = {
	.ops = &ops_2_7_0,
};

static const struct qcom_pcie_cfg cfg_2_9_0 = {
	.ops = &ops_2_9_0,
};

static const struct qcom_pcie_cfg cfg_sc8280xp = {
	.ops = &ops_1_21_0,
	.no_l0s = true,
};

static const struct qcom_pcie_cfg cfg_fw_managed = {
	.firmware_managed = true,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
	.start_link = qcom_pcie_start_link,
};

static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	int ret;

	pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem");
	if (IS_ERR(pcie->icc_mem))
		return PTR_ERR(pcie->icc_mem);

	pcie->icc_cpu = devm_of_icc_get(pci->dev, "cpu-pcie");
	if (IS_ERR(pcie->icc_cpu))
		return PTR_ERR(pcie->icc_cpu);
	/*
	 * Some Qualcomm platforms require interconnect bandwidth constraints
	 * to be set before enabling interconnect clocks.
	 *
	 * Set an initial peak bandwidth corresponding to single-lane Gen 1
	 * for the pcie-mem path.
	 */
	ret = icc_set_bw(pcie->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));
	if (ret) {
		dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
			ret);
		return ret;
	}

	/*
	 * Since the CPU-PCIe path is only used for activities like register
	 * access of the host controller and endpoint Config/BAR space access,
	 * the HW team recommends using a minimal bandwidth of 1 KBps just to
	 * keep the path active.
	 */
	ret = icc_set_bw(pcie->icc_cpu, 0, kBps_to_icc(1));
	if (ret) {
		dev_err(pci->dev, "Failed to set bandwidth for CPU-PCIe interconnect path: %d\n",
			ret);
		icc_set_bw(pcie->icc_mem, 0, 0);
		return ret;
	}

	return 0;
}

static void qcom_pcie_icc_opp_update(struct qcom_pcie *pcie)
{
	u32 offset, status, width, speed;
	struct dw_pcie *pci = pcie->pci;
	unsigned long freq_kbps;
	struct dev_pm_opp *opp;
	int ret, freq_mbps;

	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	/* Only update constraints if link is up. */
	if (!(status & PCI_EXP_LNKSTA_DLLLA))
		return;

	speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
	width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);

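	/*
	 * Scale the per-lane bandwidth by the negotiated link width, so e.g.
	 * a x2 link votes for twice the per-lane bandwidth of its speed.
	 */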
	if (pcie->icc_mem) {
		ret = icc_set_bw(pcie->icc_mem, 0,
				 width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
		if (ret) {
			dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
				ret);
		}
	} else if (pcie->use_pm_opp) {
		freq_mbps = pcie_dev_speed_mbps(pcie_link_speed[speed]);
		if (freq_mbps < 0)
			return;

		freq_kbps = freq_mbps * KILO;
		opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width,
						 true);
		if (!IS_ERR(opp)) {
			ret = dev_pm_opp_set_opp(pci->dev, opp);
			if (ret)
				dev_err(pci->dev, "Failed to set OPP for freq (%lu): %d\n",
					freq_kbps * width, ret);
			dev_pm_opp_put(opp);
		}
	}
}

static int qcom_pcie_link_transition_count(struct seq_file *s, void *data)
{
	struct qcom_pcie *pcie = (struct qcom_pcie *)dev_get_drvdata(s->private);

	seq_printf(s, "L0s transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L0S));

	seq_printf(s, "L1 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L1));

	seq_printf(s, "L1.1 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));

	seq_printf(s, "L1.2 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));

	seq_printf(s, "L2 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L2));

	return 0;
}

static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	char *name;

	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
	if (!name)
		return;

	pcie->debugfs = debugfs_create_dir(name, NULL);
	debugfs_create_devm_seqfile(dev, "link_transition_count", pcie->debugfs,
				    qcom_pcie_link_transition_count);
}

static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
{
	struct qcom_pcie *pcie = data;
	struct dw_pcie_rp *pp = &pcie->pci->pp;
	struct device *dev = pcie->pci->dev;
	u32 status = readl_relaxed(pcie->parf + PARF_INT_ALL_STATUS);

	writel_relaxed(status, pcie->parf + PARF_INT_ALL_CLEAR);

	if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
		msleep(PCIE_RESET_CONFIG_WAIT_MS);
		dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
		/* Rescan the bus to enumerate endpoint devices */
		pci_lock_rescan_remove();
		pci_rescan_bus(pp->bridge->bus);
		pci_unlock_rescan_remove();

		qcom_pcie_icc_opp_update(pcie);
	} else {
		dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
			      status);
	}

	return IRQ_HANDLED;
}

static void qcom_pci_free_msi(void *ptr)
{
	struct dw_pcie_rp *pp = (struct dw_pcie_rp *)ptr;

	if (pp && pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);
}

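/*
 * For firmware-managed controllers the firmware is assumed to have already
 * brought the link up; config space is exposed through ECAM, so the DWC
 * glue below is only needed to set up MSI handling.
 */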
static int qcom_pcie_ecam_host_init(struct pci_config_window *cfg)
{
	struct device *dev = cfg->parent;
	struct dw_pcie_rp *pp;
	struct dw_pcie *pci;
	int ret;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pp = &pci->pp;
	pci->dbi_base = cfg->win;
	pp->num_vectors = MSI_DEF_NUM_VECTORS;

	ret = dw_pcie_msi_host_init(pp);
	if (ret)
		return ret;

	pp->has_msi_ctrl = true;
	dw_pcie_msi_init(pp);

	return devm_add_action_or_reset(dev, qcom_pci_free_msi, pp);
}

static const struct pci_ecam_ops pci_qcom_ecam_ops = {
	.init		= qcom_pcie_ecam_host_init,
	.pci_ops	= {
		.map_bus	= pci_ecam_map_bus,
		.read		= pci_generic_config_read,
		.write		= pci_generic_config_write,
	}
};

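/*
 * Parse a per-Root-Port child node. A minimal sketch of the DT fragment this
 * expects (property names inferred from the lookups below; the GPIO and PHY
 * phandles are only illustrative):
 *
 *	pcie@0 {
 *		device_type = "pci";
 *		reset-gpios = <&tlmm 94 GPIO_ACTIVE_LOW>;
 *		phys = <&pcie0_phy>;
 *	};
 */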
static int qcom_pcie_parse_port(struct qcom_pcie *pcie, struct device_node *node)
{
	struct device *dev = pcie->pci->dev;
	struct qcom_pcie_port *port;
	struct gpio_desc *reset;
	struct phy *phy;
	int ret;

	reset = devm_fwnode_gpiod_get(dev, of_fwnode_handle(node),
				      "reset", GPIOD_OUT_HIGH, "PERST#");
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	phy = devm_of_phy_get(dev, node, NULL);
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	ret = phy_init(phy);
	if (ret)
		return ret;

	port->reset = reset;
	port->phy = phy;
	INIT_LIST_HEAD(&port->list);
	list_add_tail(&port->list, &pcie->ports);

	return 0;
}

static int qcom_pcie_parse_ports(struct qcom_pcie *pcie)
{
	struct device *dev = pcie->pci->dev;
	struct qcom_pcie_port *port, *tmp;
	int ret = -ENOENT;

	for_each_available_child_of_node_scoped(dev->of_node, of_port) {
		if (!of_node_is_type(of_port, "pci"))
			continue;
		ret = qcom_pcie_parse_port(pcie, of_port);
		if (ret)
			goto err_port_del;
	}

	return ret;

err_port_del:
	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		phy_exit(port->phy);
		list_del(&port->list);
	}

	return ret;
}

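/*
 * Legacy binding: PERST# ("perst-gpios") and the PHY ("pciephy") are
 * described on the host bridge node itself rather than on a Root Port
 * child node.
 */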
static int qcom_pcie_parse_legacy_binding(struct qcom_pcie *pcie)
{
	struct device *dev = pcie->pci->dev;
	struct qcom_pcie_port *port;
	struct gpio_desc *reset;
	struct phy *phy;
	int ret;

	phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	ret = phy_init(phy);
	if (ret)
		return ret;

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->reset = reset;
	port->phy = phy;
	INIT_LIST_HEAD(&port->list);
	list_add_tail(&port->list, &pcie->ports);

	return 0;
}

static int qcom_pcie_probe(struct platform_device *pdev)
{
	const struct qcom_pcie_cfg *pcie_cfg;
	unsigned long max_freq = ULONG_MAX;
	struct qcom_pcie_port *port, *tmp;
	struct device *dev = &pdev->dev;
	struct dev_pm_opp *opp;
	struct qcom_pcie *pcie;
	struct dw_pcie_rp *pp;
	struct resource *res;
	struct dw_pcie *pci;
	int ret, irq;
	char *name;

	pcie_cfg = of_device_get_match_data(dev);
	if (!pcie_cfg) {
		dev_err(dev, "No platform data\n");
		return -ENODATA;
	}

	if (!pcie_cfg->firmware_managed && !pcie_cfg->ops) {
		dev_err(dev, "No platform ops\n");
		return -ENODATA;
	}

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm_runtime_put;

	if (pcie_cfg->firmware_managed) {
		struct pci_host_bridge *bridge;
		struct pci_config_window *cfg;

		bridge = devm_pci_alloc_host_bridge(dev, 0);
		if (!bridge) {
			ret = -ENOMEM;
			goto err_pm_runtime_put;
		}

		/* Parse and map our ECAM configuration space area */
		cfg = pci_host_common_ecam_create(dev, bridge,
				&pci_qcom_ecam_ops);
		if (IS_ERR(cfg)) {
			ret = PTR_ERR(cfg);
			goto err_pm_runtime_put;
		}

		bridge->sysdata = cfg;
		bridge->ops = (struct pci_ops *)&pci_qcom_ecam_ops.pci_ops;
		bridge->msi_domain = true;

		ret = pci_host_probe(bridge);
		if (ret)
			goto err_pm_runtime_put;

		return 0;
	}

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie) {
		ret = -ENOMEM;
		goto err_pm_runtime_put;
	}

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci) {
		ret = -ENOMEM;
		goto err_pm_runtime_put;
	}

	INIT_LIST_HEAD(&pcie->ports);

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->cfg = pcie_cfg;

	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	/* MHI region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi");
	if (res) {
		pcie->mhi = devm_ioremap_resource(dev, res);
		if (IS_ERR(pcie->mhi)) {
			ret = PTR_ERR(pcie->mhi);
			goto err_pm_runtime_put;
		}
	}

	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(dev);
	if (ret && ret != -ENODEV) {
		dev_err_probe(dev, ret, "Failed to add OPP table\n");
		goto err_pm_runtime_put;
	}

	/*
	 * Before the PCIe link is initialized, vote for the highest OPP in
	 * the OPP table, so that the maximum voltage corner is chosen and
	 * the link can come up at the maximum supported speed. At the end of
	 * probe(), the OPP will be updated using qcom_pcie_icc_opp_update().
	 */
	if (!ret) {
		opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			dev_err_probe(pci->dev, ret,
				      "Unable to find max freq OPP\n");
			goto err_pm_runtime_put;
		} else {
			ret = dev_pm_opp_set_opp(dev, opp);
		}

		dev_pm_opp_put(opp);
		if (ret) {
			dev_err_probe(pci->dev, ret,
				      "Failed to set OPP for freq %lu\n",
				      max_freq);
			goto err_pm_runtime_put;
		}

		pcie->use_pm_opp = true;
	} else {
		/* Skip ICC init if OPP is supported, as bandwidth is then handled by OPP */
		ret = qcom_pcie_icc_init(pcie);
		if (ret)
			goto err_pm_runtime_put;
	}

	ret = pcie->cfg->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	ret = qcom_pcie_parse_ports(pcie);
	if (ret) {
		if (ret != -ENOENT) {
			dev_err_probe(pci->dev, ret,
				      "Failed to parse Root Port: %d\n", ret);
			goto err_pm_runtime_put;
		}

		/*
		 * If the properties are not populated in the Root Port node,
		 * fall back to the legacy method of parsing the Host Bridge
		 * node. This maintains DT backwards compatibility.
		 */
1892 		if (ret)
1893 			goto err_pm_runtime_put;
1894 	}
1895 
1896 	platform_set_drvdata(pdev, pcie);
1897 
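	/*
	 * If the optional "global" IRQ is present, it can signal the link-up
	 * event, so the DWC core does not have to poll for the link.
	 */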
1898 	irq = platform_get_irq_byname_optional(pdev, "global");
1899 	if (irq > 0)
1900 		pp->use_linkup_irq = true;
1901 
1902 	ret = dw_pcie_host_init(pp);
1903 	if (ret) {
		dev_err_probe(dev, ret, "Failed to initialize host\n");
1905 		goto err_phy_exit;
1906 	}
1907 
	if (irq > 0) {
		name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_global_irq%d",
				      pci_domain_nr(pp->bridge->bus));
		if (!name) {
			ret = -ENOMEM;
			goto err_host_deinit;
		}

		ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
						qcom_pcie_global_irq_thread,
						IRQF_ONESHOT, name, pcie);
		if (ret) {
			dev_err_probe(&pdev->dev, ret,
				      "Failed to request Global IRQ\n");
			goto err_host_deinit;
		}
1924 
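		/*
		 * Enable the link-up and device 0-7 MSI events in PARF so
		 * that the Global IRQ fires for them.
		 */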
1925 		writel_relaxed(PARF_INT_ALL_LINK_UP | PARF_INT_MSI_DEV_0_7,
1926 			       pcie->parf + PARF_INT_ALL_MASK);
1927 	}
1928 
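	/* Adjust the ICC/OPP votes to match the negotiated link speed and width. */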
1929 	qcom_pcie_icc_opp_update(pcie);
1930 
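	/* The debugfs link-state counters live in the MHI register space. */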
1931 	if (pcie->mhi)
1932 		qcom_pcie_init_debugfs(pcie);
1933 
1934 	return 0;
1935 
1936 err_host_deinit:
1937 	dw_pcie_host_deinit(pp);
1938 err_phy_exit:
1939 	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
1940 		phy_exit(port->phy);
1941 		list_del(&port->list);
1942 	}
1943 err_pm_runtime_put:
1944 	pm_runtime_put(dev);
1945 	pm_runtime_disable(dev);
1946 
1947 	return ret;
1948 }
1949 
1950 static int qcom_pcie_suspend_noirq(struct device *dev)
1951 {
1952 	struct qcom_pcie *pcie;
1953 	int ret = 0;
1954 
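	/* Firmware-managed hosts do not set drvdata and need no handling here. */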
1955 	pcie = dev_get_drvdata(dev);
1956 	if (!pcie)
1957 		return 0;
1958 
	/*
	 * Set the minimum bandwidth required to keep the data path functional
	 * during suspend.
	 */
1963 	if (pcie->icc_mem) {
1964 		ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
1965 		if (ret) {
1966 			dev_err(dev,
1967 				"Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
1968 				ret);
1969 			return ret;
1970 		}
1971 	}
1972 
	/*
	 * Turn OFF the resources only for controllers without active PCIe
	 * devices. For controllers with active devices, the resources are kept
	 * ON and the link is expected to be in L0/L1 (sub)states.
	 *
	 * Turning OFF the resources for controllers with active PCIe devices
	 * would trigger an access violation at the end of the suspend cycle,
	 * when the kernel accesses the devices' config space to mask MSIs.
	 *
	 * It is also not desirable to put the link into the L2/L3 states, as
	 * that implies the VDD supply will be removed and the devices may go
	 * into a powerdown state. This would affect the lifetime of storage
	 * devices like NVMe.
	 */
1988 	if (!dw_pcie_link_up(pcie->pci)) {
1989 		qcom_pcie_host_deinit(&pcie->pci->pp);
1990 		pcie->suspended = true;
1991 	}
1992 
	/*
	 * Only disable the CPU-PCIe interconnect path if the suspend is not
	 * Suspend-to-RAM (S2RAM). On some platforms, DBI access can happen
	 * very late during S2RAM, and an inactive CPU-PCIe interconnect path
	 * may then lead to a NoC error.
	 */
1999 	if (pm_suspend_target_state != PM_SUSPEND_MEM) {
2000 		ret = icc_disable(pcie->icc_cpu);
2001 		if (ret)
2002 			dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret);
2003 
2004 		if (pcie->use_pm_opp)
2005 			dev_pm_opp_set_opp(pcie->pci->dev, NULL);
2006 	}

	return ret;
2008 }
2009 
2010 static int qcom_pcie_resume_noirq(struct device *dev)
2011 {
2012 	struct qcom_pcie *pcie;
2013 	int ret;
2014 
2015 	pcie = dev_get_drvdata(dev);
2016 	if (!pcie)
2017 		return 0;
2018 
2019 	if (pm_suspend_target_state != PM_SUSPEND_MEM) {
2020 		ret = icc_enable(pcie->icc_cpu);
2021 		if (ret) {
2022 			dev_err(dev, "Failed to enable CPU-PCIe interconnect path: %d\n", ret);
2023 			return ret;
2024 		}
2025 	}
2026 
2027 	if (pcie->suspended) {
2028 		ret = qcom_pcie_host_init(&pcie->pci->pp);
2029 		if (ret)
2030 			return ret;
2031 
2032 		pcie->suspended = false;
2033 	}
2034 
2035 	qcom_pcie_icc_opp_update(pcie);
2036 
2037 	return 0;
2038 }
2039 
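/* Per-SoC controller configuration, selected by the compatible string. */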
2040 static const struct of_device_id qcom_pcie_match[] = {
2041 	{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
2042 	{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
2043 	{ .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
2044 	{ .compatible = "qcom,pcie-ipq5018", .data = &cfg_2_9_0 },
2045 	{ .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
2046 	{ .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
2047 	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
2048 	{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
2049 	{ .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
2050 	{ .compatible = "qcom,pcie-ipq9574", .data = &cfg_2_9_0 },
2051 	{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
2052 	{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
2053 	{ .compatible = "qcom,pcie-sa8255p", .data = &cfg_fw_managed },
2054 	{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
	{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0 },
2056 	{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
2057 	{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
2058 	{ .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },
2059 	{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
2060 	{ .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },
2061 	{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
2062 	{ .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
2063 	{ .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 },
2064 	{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
2065 	{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
2066 	{ .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
2067 	{ .compatible = "qcom,pcie-x1e80100", .data = &cfg_sc8280xp },
2068 	{ }
2069 };
2070 
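/*
 * These root ports are known to advertise an incorrect class code, so
 * override it early so that the PCI core enumerates them as PCI-to-PCI
 * bridges.
 */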
2071 static void qcom_fixup_class(struct pci_dev *dev)
2072 {
2073 	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
2074 }
2075 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
2076 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
2077 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
2078 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
2079 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
2080 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
2081 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
2082 
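/*
 * Use the noirq phase so the controller goes down only after all config
 * space accesses from client device drivers have completed.
 */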
2083 static const struct dev_pm_ops qcom_pcie_pm_ops = {
2084 	NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
2085 };
2086 
2087 static struct platform_driver qcom_pcie_driver = {
2088 	.probe = qcom_pcie_probe,
2089 	.driver = {
2090 		.name = "qcom-pcie",
2091 		.suppress_bind_attrs = true,
2092 		.of_match_table = qcom_pcie_match,
2093 		.pm = &qcom_pcie_pm_ops,
2094 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
2095 	},
2096 };
2097 builtin_platform_driver(qcom_pcie_driver);
2098