xref: /linux/drivers/pci/controller/dwc/pcie-qcom.c (revision 2f2c7254931f41b5736e3ba12aaa9ac1bbeeeb92)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Qualcomm PCIe root complex driver
4  *
5  * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
6  * Copyright 2015 Linaro Limited.
7  *
8  * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
9  */
10 
11 #include <linux/clk.h>
12 #include <linux/crc8.h>
13 #include <linux/debugfs.h>
14 #include <linux/delay.h>
15 #include <linux/gpio/consumer.h>
16 #include <linux/interconnect.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/iopoll.h>
20 #include <linux/kernel.h>
21 #include <linux/limits.h>
22 #include <linux/init.h>
23 #include <linux/of.h>
24 #include <linux/of_pci.h>
25 #include <linux/pci.h>
26 #include <linux/pci-ecam.h>
27 #include <linux/pm_opp.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/platform_device.h>
30 #include <linux/phy/pcie.h>
31 #include <linux/phy/phy.h>
32 #include <linux/regulator/consumer.h>
33 #include <linux/reset.h>
34 #include <linux/slab.h>
35 #include <linux/types.h>
36 #include <linux/units.h>
37 
38 #include "../../pci.h"
39 #include "../pci-host-common.h"
40 #include "pcie-designware.h"
41 #include "pcie-qcom-common.h"
42 
43 /* PARF registers */
44 #define PARF_SYS_CTRL				0x00
45 #define PARF_PM_CTRL				0x20
46 #define PARF_PCS_DEEMPH				0x34
47 #define PARF_PCS_SWING				0x38
48 #define PARF_PHY_CTRL				0x40
49 #define PARF_PHY_REFCLK				0x4c
50 #define PARF_CONFIG_BITS			0x50
51 #define PARF_DBI_BASE_ADDR			0x168
52 #define PARF_SLV_ADDR_SPACE_SIZE		0x16c
53 #define PARF_MHI_CLOCK_RESET_CTRL		0x174
54 #define PARF_AXI_MSTR_WR_ADDR_HALT		0x178
55 #define PARF_AXI_MSTR_WR_ADDR_HALT_V2		0x1a8
56 #define PARF_Q2A_FLUSH				0x1ac
57 #define PARF_LTSSM				0x1b0
58 #define PARF_SLV_DBI_ELBI			0x1b4
59 #define PARF_INT_ALL_STATUS			0x224
60 #define PARF_INT_ALL_CLEAR			0x228
61 #define PARF_INT_ALL_MASK			0x22c
62 #define PARF_SID_OFFSET				0x234
63 #define PARF_BDF_TRANSLATE_CFG			0x24c
64 #define PARF_DBI_BASE_ADDR_V2			0x350
65 #define PARF_DBI_BASE_ADDR_V2_HI		0x354
66 #define PARF_SLV_ADDR_SPACE_SIZE_V2		0x358
67 #define PARF_SLV_ADDR_SPACE_SIZE_V2_HI		0x35c
68 #define PARF_BLOCK_SLV_AXI_WR_BASE		0x360
69 #define PARF_BLOCK_SLV_AXI_WR_BASE_HI		0x364
70 #define PARF_BLOCK_SLV_AXI_WR_LIMIT		0x368
71 #define PARF_BLOCK_SLV_AXI_WR_LIMIT_HI		0x36c
72 #define PARF_BLOCK_SLV_AXI_RD_BASE		0x370
73 #define PARF_BLOCK_SLV_AXI_RD_BASE_HI		0x374
74 #define PARF_BLOCK_SLV_AXI_RD_LIMIT		0x378
75 #define PARF_BLOCK_SLV_AXI_RD_LIMIT_HI		0x37c
76 #define PARF_ECAM_BASE				0x380
77 #define PARF_ECAM_BASE_HI			0x384
78 #define PARF_NO_SNOOP_OVERRIDE			0x3d4
79 #define PARF_ATU_BASE_ADDR			0x634
80 #define PARF_ATU_BASE_ADDR_HI			0x638
81 #define PARF_DEVICE_TYPE			0x1000
82 #define PARF_BDF_TO_SID_TABLE_N			0x2000
83 #define PARF_BDF_TO_SID_CFG			0x2c00
84 
85 /* ELBI registers */
86 #define ELBI_SYS_CTRL				0x04
87 
88 /* DBI registers */
89 #define AXI_MSTR_RESP_COMP_CTRL0		0x818
90 #define AXI_MSTR_RESP_COMP_CTRL1		0x81c
91 
92 /* MHI registers */
93 #define PARF_DEBUG_CNT_PM_LINKST_IN_L2		0xc04
94 #define PARF_DEBUG_CNT_PM_LINKST_IN_L1		0xc0c
95 #define PARF_DEBUG_CNT_PM_LINKST_IN_L0S		0xc10
96 #define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1	0xc84
97 #define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2	0xc88
98 
99 /* PARF_SYS_CTRL register fields */
100 #define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN	BIT(29)
101 #define PCIE_ECAM_BLOCKER_EN			BIT(26)
102 #define MST_WAKEUP_EN				BIT(13)
103 #define SLV_WAKEUP_EN				BIT(12)
104 #define MSTR_ACLK_CGC_DIS			BIT(10)
105 #define SLV_ACLK_CGC_DIS			BIT(9)
106 #define CORE_CLK_CGC_DIS			BIT(6)
107 #define AUX_PWR_DET				BIT(4)
108 #define L23_CLK_RMV_DIS				BIT(2)
109 #define L1_CLK_RMV_DIS				BIT(1)
110 
111 /* PARF_PM_CTRL register fields */
112 #define REQ_NOT_ENTR_L1				BIT(5)
113 
114 /* PARF_PCS_DEEMPH register fields */
115 #define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		FIELD_PREP(GENMASK(21, 16), x)
116 #define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	FIELD_PREP(GENMASK(13, 8), x)
117 #define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	FIELD_PREP(GENMASK(5, 0), x)
118 
119 /* PARF_PCS_SWING register fields */
120 #define PCS_SWING_TX_SWING_FULL(x)		FIELD_PREP(GENMASK(14, 8), x)
121 #define PCS_SWING_TX_SWING_LOW(x)		FIELD_PREP(GENMASK(6, 0), x)
122 
123 /* PARF_PHY_CTRL register fields */
124 #define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
125 #define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		FIELD_PREP(PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK, x)
126 #define PHY_TEST_PWR_DOWN			BIT(0)
127 
128 /* PARF_PHY_REFCLK register fields */
129 #define PHY_REFCLK_SSP_EN			BIT(16)
130 #define PHY_REFCLK_USE_PAD			BIT(12)
131 
132 /* PARF_CONFIG_BITS register fields */
133 #define PHY_RX0_EQ(x)				FIELD_PREP(GENMASK(26, 24), x)
134 
135 /* PARF_SLV_ADDR_SPACE_SIZE register value */
136 #define SLV_ADDR_SPACE_SZ			0x80000000
137 
138 /* PARF_MHI_CLOCK_RESET_CTRL register fields */
139 #define AHB_CLK_EN				BIT(0)
140 #define MSTR_AXI_CLK_EN				BIT(1)
141 #define BYPASS					BIT(4)
142 
143 /* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
144 #define EN					BIT(31)
145 
146 /* PARF_LTSSM register fields */
147 #define LTSSM_EN				BIT(8)
148 
149 /* PARF_SLV_DBI_ELBI */
150 #define SLV_DBI_ELBI_ADDR_BASE			GENMASK(11, 0)
151 
152 /* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
153 #define PARF_INT_ALL_LINK_UP			BIT(13)
154 #define PARF_INT_MSI_DEV_0_7			GENMASK(30, 23)
155 
156 /* PARF_NO_SNOOP_OVERRIDE register fields */
157 #define WR_NO_SNOOP_OVERRIDE_EN			BIT(1)
158 #define RD_NO_SNOOP_OVERRIDE_EN			BIT(3)
159 
160 /* PARF_DEVICE_TYPE register fields */
161 #define DEVICE_TYPE_RC				0x4
162 
163 /* PARF_BDF_TO_SID_CFG fields */
164 #define BDF_TO_SID_BYPASS			BIT(0)
165 
166 /* ELBI_SYS_CTRL register fields */
167 #define ELBI_SYS_CTRL_LT_ENABLE			BIT(0)
168 
169 /* AXI_MSTR_RESP_COMP_CTRL0 register fields */
170 #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
171 #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
172 
173 /* AXI_MSTR_RESP_COMP_CTRL1 register fields */
174 #define CFG_BRIDGE_SB_INIT			BIT(0)
175 
176 /* PCI_EXP_SLTCAP register fields */
177 #define PCIE_CAP_SLOT_POWER_LIMIT_VAL		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250)
178 #define PCIE_CAP_SLOT_POWER_LIMIT_SCALE		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1)
179 #define PCIE_CAP_SLOT_VAL			(PCI_EXP_SLTCAP_ABP | \
180 						PCI_EXP_SLTCAP_PCP | \
181 						PCI_EXP_SLTCAP_MRLSP | \
182 						PCI_EXP_SLTCAP_AIP | \
183 						PCI_EXP_SLTCAP_PIP | \
184 						PCI_EXP_SLTCAP_HPS | \
185 						PCI_EXP_SLTCAP_EIP | \
186 						PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
187 						PCIE_CAP_SLOT_POWER_LIMIT_SCALE)
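/*
 * Note: with SPLV = 250 and SPLS = 1 (the 0.1x scale), the advertised
 * slot power limit above works out to 250 * 0.1 = 25.0 W.
 */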
188 
189 #define PERST_DELAY_US				1000
190 
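/*
 * BIT(2) | BIT(1) | BIT(0) evaluates to 0x07, i.e. the CRC-8 polynomial
 * x^8 + x^2 + x + 1. It is used to hash a 16-bit BDF into the 256-entry
 * PARF_BDF_TO_SID_TABLE_N table (see qcom_pcie_config_sid_1_9_0()).
 */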
191 #define QCOM_PCIE_CRC8_POLYNOMIAL		(BIT(2) | BIT(1) | BIT(0))
192 
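/*
 * Converts a Link Speed field value (1 = Gen 1, 2 = Gen 2, ...) into an
 * interconnect bandwidth request. PCIE_SPEED2MBS_ENC() already accounts
 * for encoding overhead, e.g. Gen 1 (2.5 GT/s with 8b/10b encoding)
 * yields 2000 Mb/s of usable bandwidth per lane.
 */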
193 #define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
194 		Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))
195 
196 struct qcom_pcie_resources_1_0_0 {
197 	struct clk_bulk_data *clks;
198 	int num_clks;
199 	struct reset_control *core;
200 	struct regulator *vdda;
201 };
202 
203 #define QCOM_PCIE_2_1_0_MAX_RESETS		6
204 #define QCOM_PCIE_2_1_0_MAX_SUPPLY		3
205 struct qcom_pcie_resources_2_1_0 {
206 	struct clk_bulk_data *clks;
207 	int num_clks;
208 	struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
209 	int num_resets;
210 	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
211 };
212 
213 #define QCOM_PCIE_2_3_2_MAX_SUPPLY		2
214 struct qcom_pcie_resources_2_3_2 {
215 	struct clk_bulk_data *clks;
216 	int num_clks;
217 	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
218 };
219 
220 #define QCOM_PCIE_2_3_3_MAX_RESETS		7
221 struct qcom_pcie_resources_2_3_3 {
222 	struct clk_bulk_data *clks;
223 	int num_clks;
224 	struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];
225 };
226 
227 #define QCOM_PCIE_2_4_0_MAX_RESETS		12
228 struct qcom_pcie_resources_2_4_0 {
229 	struct clk_bulk_data *clks;
230 	int num_clks;
231 	struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
232 	int num_resets;
233 };
234 
235 #define QCOM_PCIE_2_7_0_MAX_SUPPLIES		2
236 struct qcom_pcie_resources_2_7_0 {
237 	struct clk_bulk_data *clks;
238 	int num_clks;
239 	struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];
240 	struct reset_control *rst;
241 };
242 
243 struct qcom_pcie_resources_2_9_0 {
244 	struct clk_bulk_data *clks;
245 	int num_clks;
246 	struct reset_control *rst;
247 };
248 
249 union qcom_pcie_resources {
250 	struct qcom_pcie_resources_1_0_0 v1_0_0;
251 	struct qcom_pcie_resources_2_1_0 v2_1_0;
252 	struct qcom_pcie_resources_2_3_2 v2_3_2;
253 	struct qcom_pcie_resources_2_3_3 v2_3_3;
254 	struct qcom_pcie_resources_2_4_0 v2_4_0;
255 	struct qcom_pcie_resources_2_7_0 v2_7_0;
256 	struct qcom_pcie_resources_2_9_0 v2_9_0;
257 };
258 
259 struct qcom_pcie;
260 
261 struct qcom_pcie_ops {
262 	int (*get_resources)(struct qcom_pcie *pcie);
263 	int (*init)(struct qcom_pcie *pcie);
264 	int (*post_init)(struct qcom_pcie *pcie);
265 	void (*deinit)(struct qcom_pcie *pcie);
266 	void (*ltssm_enable)(struct qcom_pcie *pcie);
267 	int (*config_sid)(struct qcom_pcie *pcie);
268 };
269 
270 /**
271  * struct qcom_pcie_cfg - Per SoC config struct
272  * @ops: qcom PCIe ops structure
273  * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache snooping
274  * @firmware_managed: Set if the Root Complex is firmware managed
275  * @no_l0s: Set if ASPM L0s is unsupported and must not be advertised
276  */
277 struct qcom_pcie_cfg {
278 	const struct qcom_pcie_ops *ops;
279 	bool override_no_snoop;
280 	bool firmware_managed;
281 	bool no_l0s;
282 };
283 
284 struct qcom_pcie_port {
285 	struct list_head list;
286 	struct gpio_desc *reset;
287 	struct phy *phy;
288 };
289 
290 struct qcom_pcie {
291 	struct dw_pcie *pci;
292 	void __iomem *parf;			/* DT parf */
293 	void __iomem *mhi;
294 	union qcom_pcie_resources res;
295 	struct icc_path *icc_mem;
296 	struct icc_path *icc_cpu;
297 	const struct qcom_pcie_cfg *cfg;
298 	struct dentry *debugfs;
299 	struct list_head ports;
300 	bool suspended;
301 	bool use_pm_opp;
302 };
303 
304 #define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)
305 
306 static void qcom_perst_assert(struct qcom_pcie *pcie, bool assert)
307 {
308 	struct qcom_pcie_port *port;
309 	int val = assert ? 1 : 0;
310 
311 	list_for_each_entry(port, &pcie->ports, list)
312 		gpiod_set_value_cansleep(port->reset, val);
313 
314 	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
315 }
316 
317 static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
318 {
319 	qcom_perst_assert(pcie, true);
320 }
321 
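/*
 * PCIE_T_PVPERL_MS is the Tpvperl parameter from the PCIe CEM spec:
 * power must be stable for at least 100 ms before PERST# is deasserted.
 */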
322 static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
323 {
324 	/* Ensure that PERST has been asserted for at least 100 ms */
325 	msleep(PCIE_T_PVPERL_MS);
326 	qcom_perst_assert(pcie, false);
327 }
328 
329 static void qcom_pci_config_ecam(struct dw_pcie_rp *pp)
330 {
331 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
332 	struct qcom_pcie *pcie = to_qcom_pcie(pci);
333 	u64 addr, addr_end;
334 	u32 val;
335 
336 	writel_relaxed(lower_32_bits(pci->dbi_phys_addr), pcie->parf + PARF_ECAM_BASE);
337 	writel_relaxed(upper_32_bits(pci->dbi_phys_addr), pcie->parf + PARF_ECAM_BASE_HI);
338 
339 	/*
340 	 * The only device on the root bus is a single Root Port. If we try to
341 	 * access any devices other than Device/Function 00.0 on Bus 0, the TLP
342 	 * will go outside of the controller to the PCI bus. But with CFG Shift
343 	 * Feature (ECAM) enabled in iATU, there is no guarantee that the
344 	 * response is going to be all F's. Hence, to make sure that the
345 	 * requester gets all F's response for accesses other than the Root
346 	 * Port, configure iATU to block the transactions starting from
347 	 * function 1 of the root bus to the end of the root bus (i.e., from
348 	 * dbi_base + 4KB to dbi_base + 1MB).
349 	 */
350 	addr = pci->dbi_phys_addr + SZ_4K;
351 	writel_relaxed(lower_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_WR_BASE);
352 	writel_relaxed(upper_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_WR_BASE_HI);
353 
354 	writel_relaxed(lower_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_RD_BASE);
355 	writel_relaxed(upper_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_RD_BASE_HI);
356 
357 	addr_end = pci->dbi_phys_addr + SZ_1M - 1;
358 
359 	writel_relaxed(lower_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_WR_LIMIT);
360 	writel_relaxed(upper_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_WR_LIMIT_HI);
361 
362 	writel_relaxed(lower_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_RD_LIMIT);
363 	writel_relaxed(upper_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_RD_LIMIT_HI);
364 
365 	val = readl_relaxed(pcie->parf + PARF_SYS_CTRL);
366 	val |= PCIE_ECAM_BLOCKER_EN;
367 	writel_relaxed(val, pcie->parf + PARF_SYS_CTRL);
368 }
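/*
 * The resulting ECAM window layout (as configured above): dbi_base +
 * 0x0..0xfff is the Root Port's (00:00.0) own config space, dbi_base +
 * 0x1000..0xfffff (the rest of bus 0) is blocked so reads return all
 * F's, and config accesses to downstream buses are translated by the
 * iATU CFG Shift Feature.
 */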
369 
370 static int qcom_pcie_start_link(struct dw_pcie *pci)
371 {
372 	struct qcom_pcie *pcie = to_qcom_pcie(pci);
373 
374 	qcom_pcie_common_set_equalization(pci);
375 
376 	if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT)
377 		qcom_pcie_common_set_16gt_lane_margining(pci);
378 
379 	/* Enable Link Training state machine */
380 	if (pcie->cfg->ops->ltssm_enable)
381 		pcie->cfg->ops->ltssm_enable(pcie);
382 
383 	return 0;
384 }
385 
386 static void qcom_pcie_clear_aspm_l0s(struct dw_pcie *pci)
387 {
388 	struct qcom_pcie *pcie = to_qcom_pcie(pci);
389 	u16 offset;
390 	u32 val;
391 
392 	if (!pcie->cfg->no_l0s)
393 		return;
394 
395 	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
396 
397 	dw_pcie_dbi_ro_wr_en(pci);
398 
399 	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
400 	val &= ~PCI_EXP_LNKCAP_ASPM_L0S;
401 	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
402 
403 	dw_pcie_dbi_ro_wr_dis(pci);
404 }
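/*
 * Clearing PCI_EXP_LNKCAP_ASPM_L0S hides L0s from the ASPM core, so L0s
 * is never enabled on platforms that set cfg->no_l0s (e.g. cfg_sc8280xp).
 */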
405 
406 static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
407 {
408 	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
409 	u32 val;
410 
411 	dw_pcie_dbi_ro_wr_en(pci);
412 
413 	val = readl(pci->dbi_base + offset + PCI_EXP_SLTCAP);
414 	val &= ~PCI_EXP_SLTCAP_HPC;
415 	writel(val, pci->dbi_base + offset + PCI_EXP_SLTCAP);
416 
417 	dw_pcie_dbi_ro_wr_dis(pci);
418 }
419 
420 static void qcom_pcie_configure_dbi_base(struct qcom_pcie *pcie)
421 {
422 	struct dw_pcie *pci = pcie->pci;
423 
424 	if (pci->dbi_phys_addr) {
425 		/*
426 		 * The PARF_DBI_BASE_ADDR register is in the CPU domain and must be
427 		 * programmed with the CPU physical address.
428 		 */
429 		writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
430 							PARF_DBI_BASE_ADDR);
431 		writel(SLV_ADDR_SPACE_SZ, pcie->parf +
432 						PARF_SLV_ADDR_SPACE_SIZE);
433 	}
434 }
435 
436 static void qcom_pcie_configure_dbi_atu_base(struct qcom_pcie *pcie)
437 {
438 	struct dw_pcie *pci = pcie->pci;
439 
440 	if (pci->dbi_phys_addr) {
441 		/*
442 		 * The PARF_DBI_BASE_ADDR_V2 and PARF_ATU_BASE_ADDR registers are
443 		 * in the CPU domain and must be programmed with CPU physical
444 		 * addresses.
445 		 */
446 		writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
447 							PARF_DBI_BASE_ADDR_V2);
448 		writel(upper_32_bits(pci->dbi_phys_addr), pcie->parf +
449 						PARF_DBI_BASE_ADDR_V2_HI);
450 
451 		if (pci->atu_phys_addr) {
452 			writel(lower_32_bits(pci->atu_phys_addr), pcie->parf +
453 							PARF_ATU_BASE_ADDR);
454 			writel(upper_32_bits(pci->atu_phys_addr), pcie->parf +
455 							PARF_ATU_BASE_ADDR_HI);
456 		}
457 
458 		writel(0x0, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_V2);
459 		writel(SLV_ADDR_SPACE_SZ, pcie->parf +
460 					PARF_SLV_ADDR_SPACE_SIZE_V2_HI);
461 	}
462 }
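/*
 * Note: the V2 slave address space size is a 64-bit value split across
 * two registers. Writing 0 to the low half and SLV_ADDR_SPACE_SZ
 * (0x80000000) to the high half programs a size of 2^63 bytes.
 */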
463 
464 static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
465 {
466 	struct dw_pcie *pci = pcie->pci;
467 	u32 val;
468 
469 	if (!pci->elbi_base) {
470 		dev_err(pci->dev, "ELBI is not present\n");
471 		return;
472 	}
473 	/* enable link training */
474 	val = readl(pci->elbi_base + ELBI_SYS_CTRL);
475 	val |= ELBI_SYS_CTRL_LT_ENABLE;
476 	writel(val, pci->elbi_base + ELBI_SYS_CTRL);
477 }
478 
479 static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
480 {
481 	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
482 	struct dw_pcie *pci = pcie->pci;
483 	struct device *dev = pci->dev;
484 	bool is_apq = of_device_is_compatible(dev->of_node, "qcom,pcie-apq8064");
485 	int ret;
486 
487 	res->supplies[0].supply = "vdda";
488 	res->supplies[1].supply = "vdda_phy";
489 	res->supplies[2].supply = "vdda_refclk";
490 	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
491 				      res->supplies);
492 	if (ret)
493 		return ret;
494 
495 	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
496 	if (res->num_clks < 0) {
497 		dev_err(dev, "Failed to get clocks\n");
498 		return res->num_clks;
499 	}
500 
501 	res->resets[0].id = "pci";
502 	res->resets[1].id = "axi";
503 	res->resets[2].id = "ahb";
504 	res->resets[3].id = "por";
505 	res->resets[4].id = "phy";
506 	res->resets[5].id = "ext";
507 
508 	/* ext is optional on APQ8016 */
509 	res->num_resets = is_apq ? 5 : 6;
510 	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
511 	if (ret < 0)
512 		return ret;
513 
514 	return 0;
515 }
516 
517 static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
518 {
519 	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
520 
521 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
522 	reset_control_bulk_assert(res->num_resets, res->resets);
523 
524 	writel(1, pcie->parf + PARF_PHY_CTRL);
525 
526 	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
527 }
528 
529 static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
530 {
531 	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
532 	struct dw_pcie *pci = pcie->pci;
533 	struct device *dev = pci->dev;
534 	int ret;
535 
536 	/* Reset the PCIe interface, as U-Boot can leave it in an undefined state */
537 	ret = reset_control_bulk_assert(res->num_resets, res->resets);
538 	if (ret < 0) {
539 		dev_err(dev, "cannot assert resets\n");
540 		return ret;
541 	}
542 
543 	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
544 	if (ret < 0) {
545 		dev_err(dev, "cannot enable regulators\n");
546 		return ret;
547 	}
548 
549 	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
550 	if (ret < 0) {
551 		dev_err(dev, "cannot deassert resets\n");
552 		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
553 		return ret;
554 	}
555 
556 	return 0;
557 }
558 
559 static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
560 {
561 	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
562 	struct dw_pcie *pci = pcie->pci;
563 	struct device *dev = pci->dev;
564 	struct device_node *node = dev->of_node;
565 	u32 val;
566 	int ret;
567 
568 	/* enable PCIe clocks and resets */
569 	val = readl(pcie->parf + PARF_PHY_CTRL);
570 	val &= ~PHY_TEST_PWR_DOWN;
571 	writel(val, pcie->parf + PARF_PHY_CTRL);
572 
573 	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
574 	if (ret)
575 		return ret;
576 
577 	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
578 	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
579 		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
580 			       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
581 			       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
582 		       pcie->parf + PARF_PCS_DEEMPH);
583 		writel(PCS_SWING_TX_SWING_FULL(120) |
584 			       PCS_SWING_TX_SWING_LOW(120),
585 		       pcie->parf + PARF_PCS_SWING);
586 		writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS);
587 	}
588 
589 	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
590 		/* set TX termination offset */
591 		val = readl(pcie->parf + PARF_PHY_CTRL);
592 		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
593 		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
594 		writel(val, pcie->parf + PARF_PHY_CTRL);
595 	}
596 
597 	/* enable external reference clock */
598 	val = readl(pcie->parf + PARF_PHY_REFCLK);
599 	/* USE_PAD is required only for ipq806x */
600 	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
601 		val &= ~PHY_REFCLK_USE_PAD;
602 	val |= PHY_REFCLK_SSP_EN;
603 	writel(val, pcie->parf + PARF_PHY_REFCLK);
604 
605 	/* wait for clock acquisition */
606 	usleep_range(1000, 1500);
607 
608 	/* Set the Max TLP size to 2K, instead of using the default of 4K */
609 	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
610 	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0);
611 	writel(CFG_BRIDGE_SB_INIT,
612 	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1);
613 
614 	qcom_pcie_clear_hpc(pcie->pci);
615 
616 	return 0;
617 }
618 
619 static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
620 {
621 	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
622 	struct dw_pcie *pci = pcie->pci;
623 	struct device *dev = pci->dev;
624 
625 	res->vdda = devm_regulator_get(dev, "vdda");
626 	if (IS_ERR(res->vdda))
627 		return PTR_ERR(res->vdda);
628 
629 	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
630 	if (res->num_clks < 0) {
631 		dev_err(dev, "Failed to get clocks\n");
632 		return res->num_clks;
633 	}
634 
635 	res->core = devm_reset_control_get_exclusive(dev, "core");
636 	return PTR_ERR_OR_ZERO(res->core);
637 }
638 
639 static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
640 {
641 	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
642 
643 	reset_control_assert(res->core);
644 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
645 	regulator_disable(res->vdda);
646 }
647 
648 static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
649 {
650 	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
651 	struct dw_pcie *pci = pcie->pci;
652 	struct device *dev = pci->dev;
653 	int ret;
654 
655 	ret = reset_control_deassert(res->core);
656 	if (ret) {
657 		dev_err(dev, "cannot deassert core reset\n");
658 		return ret;
659 	}
660 
661 	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
662 	if (ret) {
663 		dev_err(dev, "cannot prepare/enable clocks\n");
664 		goto err_assert_reset;
665 	}
666 
667 	ret = regulator_enable(res->vdda);
668 	if (ret) {
669 		dev_err(dev, "cannot enable vdda regulator\n");
670 		goto err_disable_clks;
671 	}
672 
673 	return 0;
674 
675 err_disable_clks:
676 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
677 err_assert_reset:
678 	reset_control_assert(res->core);
679 
680 	return ret;
681 }
682 
683 static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
684 {
685 	qcom_pcie_configure_dbi_base(pcie);
686 
687 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
688 		u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
689 
690 		val |= EN;
691 		writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
692 	}
693 
694 	qcom_pcie_clear_hpc(pcie->pci);
695 
696 	return 0;
697 }
698 
699 static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
700 {
701 	u32 val;
702 
703 	/* enable link training */
704 	val = readl(pcie->parf + PARF_LTSSM);
705 	val |= LTSSM_EN;
706 	writel(val, pcie->parf + PARF_LTSSM);
707 }
708 
709 static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
710 {
711 	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
712 	struct dw_pcie *pci = pcie->pci;
713 	struct device *dev = pci->dev;
714 	int ret;
715 
716 	res->supplies[0].supply = "vdda";
717 	res->supplies[1].supply = "vddpe-3v3";
718 	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
719 				      res->supplies);
720 	if (ret)
721 		return ret;
722 
723 	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
724 	if (res->num_clks < 0) {
725 		dev_err(dev, "Failed to get clocks\n");
726 		return res->num_clks;
727 	}
728 
729 	return 0;
730 }
731 
732 static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
733 {
734 	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
735 
736 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
737 	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
738 }
739 
740 static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
741 {
742 	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
743 	struct dw_pcie *pci = pcie->pci;
744 	struct device *dev = pci->dev;
745 	int ret;
746 
747 	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
748 	if (ret < 0) {
749 		dev_err(dev, "cannot enable regulators\n");
750 		return ret;
751 	}
752 
753 	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
754 	if (ret) {
755 		dev_err(dev, "cannot prepare/enable clocks\n");
756 		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
757 		return ret;
758 	}
759 
760 	return 0;
761 }
762 
763 static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
764 {
765 	u32 val;
766 
767 	/* enable PCIe clocks and resets */
768 	val = readl(pcie->parf + PARF_PHY_CTRL);
769 	val &= ~PHY_TEST_PWR_DOWN;
770 	writel(val, pcie->parf + PARF_PHY_CTRL);
771 
772 	qcom_pcie_configure_dbi_base(pcie);
773 
774 	/* MAC PHY_POWERDOWN MUX DISABLE  */
775 	val = readl(pcie->parf + PARF_SYS_CTRL);
776 	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
777 	writel(val, pcie->parf + PARF_SYS_CTRL);
778 
779 	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
780 	val |= BYPASS;
781 	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
782 
783 	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
784 	val |= EN;
785 	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
786 
787 	qcom_pcie_clear_hpc(pcie->pci);
788 
789 	return 0;
790 }
791 
792 static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
793 {
794 	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
795 	struct dw_pcie *pci = pcie->pci;
796 	struct device *dev = pci->dev;
797 	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
798 	int ret;
799 
800 	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
801 	if (res->num_clks < 0) {
802 		dev_err(dev, "Failed to get clocks\n");
803 		return res->num_clks;
804 	}
805 
806 	res->resets[0].id = "axi_m";
807 	res->resets[1].id = "axi_s";
808 	res->resets[2].id = "axi_m_sticky";
809 	res->resets[3].id = "pipe_sticky";
810 	res->resets[4].id = "pwr";
811 	res->resets[5].id = "ahb";
812 	res->resets[6].id = "pipe";
813 	res->resets[7].id = "axi_m_vmid";
814 	res->resets[8].id = "axi_s_xpu";
815 	res->resets[9].id = "parf";
816 	res->resets[10].id = "phy";
817 	res->resets[11].id = "phy_ahb";
818 
819 	res->num_resets = is_ipq ? 12 : 6;
820 
821 	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
822 	if (ret < 0)
823 		return ret;
824 
825 	return 0;
826 }
827 
828 static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
829 {
830 	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
831 
832 	reset_control_bulk_assert(res->num_resets, res->resets);
833 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
834 }
835 
836 static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
837 {
838 	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
839 	struct dw_pcie *pci = pcie->pci;
840 	struct device *dev = pci->dev;
841 	int ret;
842 
843 	ret = reset_control_bulk_assert(res->num_resets, res->resets);
844 	if (ret < 0) {
845 		dev_err(dev, "cannot assert resets\n");
846 		return ret;
847 	}
848 
849 	usleep_range(10000, 12000);
850 
851 	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
852 	if (ret < 0) {
853 		dev_err(dev, "cannot deassert resets\n");
854 		return ret;
855 	}
856 
857 	usleep_range(10000, 12000);
858 
859 	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
860 	if (ret) {
861 		reset_control_bulk_assert(res->num_resets, res->resets);
862 		return ret;
863 	}
864 
865 	return 0;
866 }
867 
868 static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
869 {
870 	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
871 	struct dw_pcie *pci = pcie->pci;
872 	struct device *dev = pci->dev;
873 	int ret;
874 
875 	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
876 	if (res->num_clks < 0) {
877 		dev_err(dev, "Failed to get clocks\n");
878 		return res->num_clks;
879 	}
880 
881 	res->rst[0].id = "axi_m";
882 	res->rst[1].id = "axi_s";
883 	res->rst[2].id = "pipe";
884 	res->rst[3].id = "axi_m_sticky";
885 	res->rst[4].id = "sticky";
886 	res->rst[5].id = "ahb";
887 	res->rst[6].id = "sleep";
888 
889 	ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst), res->rst);
890 	if (ret < 0)
891 		return ret;
892 
893 	return 0;
894 }
895 
896 static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
897 {
898 	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
899 
900 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
901 }
902 
903 static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
904 {
905 	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
906 	struct dw_pcie *pci = pcie->pci;
907 	struct device *dev = pci->dev;
908 	int ret;
909 
910 	ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
911 	if (ret < 0) {
912 		dev_err(dev, "cannot assert resets\n");
913 		return ret;
914 	}
915 
916 	usleep_range(2000, 2500);
917 
918 	ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst);
919 	if (ret < 0) {
920 		dev_err(dev, "cannot deassert resets\n");
921 		return ret;
922 	}
923 
924 	/*
925 	 * There is no way to check whether the reset has completed, so
926 	 * wait for some time.
927 	 */
928 	usleep_range(2000, 2500);
929 
930 	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
931 	if (ret) {
932 		dev_err(dev, "cannot prepare/enable clocks\n");
933 		goto err_assert_resets;
934 	}
935 
936 	return 0;
937 
938 err_assert_resets:
939 	/*
940 	 * Failures are not checked here, as the original failure is
941 	 * returned in 'ret' anyway.
942 	 */
943 	reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
944 
945 	return ret;
946 }
947 
948 static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
949 {
950 	struct dw_pcie *pci = pcie->pci;
951 	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
952 	u32 val;
953 
954 	val = readl(pcie->parf + PARF_PHY_CTRL);
955 	val &= ~PHY_TEST_PWR_DOWN;
956 	writel(val, pcie->parf + PARF_PHY_CTRL);
957 
958 	qcom_pcie_configure_dbi_atu_base(pcie);
959 
960 	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
961 		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
962 		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
963 		pcie->parf + PARF_SYS_CTRL);
964 	writel(0, pcie->parf + PARF_Q2A_FLUSH);
965 
966 	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
967 
968 	dw_pcie_dbi_ro_wr_en(pci);
969 
970 	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
971 
972 	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
973 	val &= ~PCI_EXP_LNKCAP_ASPMS;
974 	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
975 
976 	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
977 		PCI_EXP_DEVCTL2);
978 
979 	dw_pcie_dbi_ro_wr_dis(pci);
980 
981 	return 0;
982 }
983 
984 static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
985 {
986 	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
987 	struct dw_pcie *pci = pcie->pci;
988 	struct device *dev = pci->dev;
989 	int ret;
990 
991 	res->rst = devm_reset_control_array_get_exclusive(dev);
992 	if (IS_ERR(res->rst))
993 		return PTR_ERR(res->rst);
994 
995 	res->supplies[0].supply = "vdda";
996 	res->supplies[1].supply = "vddpe-3v3";
997 	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
998 				      res->supplies);
999 	if (ret)
1000 		return ret;
1001 
1002 	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
1003 	if (res->num_clks < 0) {
1004 		dev_err(dev, "Failed to get clocks\n");
1005 		return res->num_clks;
1006 	}
1007 
1008 	return 0;
1009 }
1010 
1011 static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
1012 {
1013 	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
1014 	struct dw_pcie *pci = pcie->pci;
1015 	struct device *dev = pci->dev;
1016 	u32 val;
1017 	int ret;
1018 
1019 	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
1020 	if (ret < 0) {
1021 		dev_err(dev, "cannot enable regulators\n");
1022 		return ret;
1023 	}
1024 
1025 	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
1026 	if (ret < 0)
1027 		goto err_disable_regulators;
1028 
1029 	ret = reset_control_assert(res->rst);
1030 	if (ret) {
1031 		dev_err(dev, "reset assert failed (%d)\n", ret);
1032 		goto err_disable_clocks;
1033 	}
1034 
1035 	usleep_range(1000, 1500);
1036 
1037 	ret = reset_control_deassert(res->rst);
1038 	if (ret) {
1039 		dev_err(dev, "reset deassert failed (%d)\n", ret);
1040 		goto err_disable_clocks;
1041 	}
1042 
1043 	/* Wait for reset to complete, required on SM8450 */
1044 	usleep_range(1000, 1500);
1045 
1046 	/* configure PCIe to RC mode */
1047 	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
1048 
1049 	/* enable PCIe clocks and resets */
1050 	val = readl(pcie->parf + PARF_PHY_CTRL);
1051 	val &= ~PHY_TEST_PWR_DOWN;
1052 	writel(val, pcie->parf + PARF_PHY_CTRL);
1053 
1054 	qcom_pcie_configure_dbi_atu_base(pcie);
1055 
1056 	/* MAC PHY_POWERDOWN MUX DISABLE  */
1057 	val = readl(pcie->parf + PARF_SYS_CTRL);
1058 	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
1059 	writel(val, pcie->parf + PARF_SYS_CTRL);
1060 
1061 	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
1062 	val |= BYPASS;
1063 	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
1064 
1065 	/* Enable L1 and L1SS */
1066 	val = readl(pcie->parf + PARF_PM_CTRL);
1067 	val &= ~REQ_NOT_ENTR_L1;
1068 	writel(val, pcie->parf + PARF_PM_CTRL);
1069 
1070 	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
1071 	val |= EN;
1072 	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
1073 
1074 	return 0;
1075 err_disable_clocks:
1076 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
1077 err_disable_regulators:
1078 	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
1079 
1080 	return ret;
1081 }
1082 
1083 static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
1084 {
1085 	const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg;
1086 
1087 	if (pcie_cfg->override_no_snoop)
1088 		writel(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
1089 				pcie->parf + PARF_NO_SNOOP_OVERRIDE);
1090 
1091 	qcom_pcie_clear_aspm_l0s(pcie->pci);
1092 	qcom_pcie_clear_hpc(pcie->pci);
1093 
1094 	return 0;
1095 }
1096 
1097 static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
1098 {
1099 	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
1100 
1101 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
1102 
1103 	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
1104 }
1105 
1106 static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie)
1107 {
1108 	/* iommu map structure */
1109 	struct {
1110 		u32 bdf;
1111 		u32 phandle;
1112 		u32 smmu_sid;
1113 		u32 smmu_sid_len;
1114 	} *map;
1115 	void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N;
1116 	struct device *dev = pcie->pci->dev;
1117 	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
1118 	int i, nr_map, size = 0;
1119 	u32 smmu_sid_base;
1120 	u32 val;
1121 
1122 	of_get_property(dev->of_node, "iommu-map", &size);
1123 	if (!size)
1124 		return 0;
1125 
1126 	/* Enable BDF to SID translation by disabling bypass mode (default) */
1127 	val = readl(pcie->parf + PARF_BDF_TO_SID_CFG);
1128 	val &= ~BDF_TO_SID_BYPASS;
1129 	writel(val, pcie->parf + PARF_BDF_TO_SID_CFG);
1130 
1131 	map = kzalloc(size, GFP_KERNEL);
1132 	if (!map)
1133 		return -ENOMEM;
1134 
1135 	of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map,
1136 				   size / sizeof(u32));
1137 
1138 	nr_map = size / (sizeof(*map));
1139 
1140 	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);
1141 
1142 	/* The registers need to be zeroed out first */
1143 	memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));
1144 
1145 	/* Extract the SMMU SID base from the first entry of iommu-map */
1146 	smmu_sid_base = map[0].smmu_sid;
1147 
1148 	/* Look for an available entry to hold the mapping */
1149 	for (i = 0; i < nr_map; i++) {
1150 		__be16 bdf_be = cpu_to_be16(map[i].bdf);
1151 		u32 val;
1152 		u8 hash;
1153 
1154 		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 0);
1155 
1156 		val = readl(bdf_to_sid_base + hash * sizeof(u32));
1157 
1158 		/* If the register is already populated, look for next available entry */
1159 		while (val) {
1160 			u8 current_hash = hash++;
1161 			u8 next_mask = 0xff;
1162 
1163 			/* If NEXT field is NULL then update it with next hash */
1164 			if (!(val & next_mask)) {
1165 				val |= (u32)hash;
1166 				writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
1167 			}
1168 
1169 			val = readl(bdf_to_sid_base + hash * sizeof(u32));
1170 		}
1171 
1172 		/* BDF [31:16] | SID [15:8] | NEXT [7:0] */
1173 		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
1174 		writel(val, bdf_to_sid_base + hash * sizeof(u32));
1175 	}
1176 
1177 	kfree(map);
1178 
1179 	return 0;
1180 }
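/*
 * Worked example (hypothetical values): each table entry is
 * BDF[31:16] | SID[15:8] | NEXT[7:0]. If two BDFs hash to the same
 * index H, the first occupies slot H with NEXT = 0; for the second, the
 * while loop above walks forward from H, links the chain by writing the
 * next index into the NEXT field of the occupied slot, and stores the
 * new BDF/SID pair in the first free slot it finds.
 */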
1181 
1182 static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
1183 {
1184 	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
1185 	struct dw_pcie *pci = pcie->pci;
1186 	struct device *dev = pci->dev;
1187 
1188 	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
1189 	if (res->num_clks < 0) {
1190 		dev_err(dev, "Failed to get clocks\n");
1191 		return res->num_clks;
1192 	}
1193 
1194 	res->rst = devm_reset_control_array_get_exclusive(dev);
1195 	if (IS_ERR(res->rst))
1196 		return PTR_ERR(res->rst);
1197 
1198 	return 0;
1199 }
1200 
1201 static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
1202 {
1203 	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
1204 
1205 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
1206 }
1207 
1208 static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
1209 {
1210 	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
1211 	struct device *dev = pcie->pci->dev;
1212 	int ret;
1213 
1214 	ret = reset_control_assert(res->rst);
1215 	if (ret) {
1216 		dev_err(dev, "reset assert failed (%d)\n", ret);
1217 		return ret;
1218 	}
1219 
1220 	/*
1221 	 * The delay periods before and after reset deassert are working
1222 	 * values taken from the downstream CodeAurora kernel.
1223 	 */
1224 	usleep_range(2000, 2500);
1225 
1226 	ret = reset_control_deassert(res->rst);
1227 	if (ret) {
1228 		dev_err(dev, "reset deassert failed (%d)\n", ret);
1229 		return ret;
1230 	}
1231 
1232 	usleep_range(2000, 2500);
1233 
1234 	return clk_bulk_prepare_enable(res->num_clks, res->clks);
1235 }
1236 
1237 static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
1238 {
1239 	struct dw_pcie *pci = pcie->pci;
1240 	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
1241 	u32 val;
1242 	int i;
1243 
1244 	val = readl(pcie->parf + PARF_PHY_CTRL);
1245 	val &= ~PHY_TEST_PWR_DOWN;
1246 	writel(val, pcie->parf + PARF_PHY_CTRL);
1247 
1248 	qcom_pcie_configure_dbi_atu_base(pcie);
1249 
1250 	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
1251 	writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
1252 		pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
1253 	writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
1254 		GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
1255 		pci->dbi_base + GEN3_RELATED_OFF);
1256 
1257 	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
1258 		SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
1259 		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
1260 		pcie->parf + PARF_SYS_CTRL);
1261 
1262 	writel(0, pcie->parf + PARF_Q2A_FLUSH);
1263 
1264 	dw_pcie_dbi_ro_wr_en(pci);
1265 
1266 	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
1267 
1268 	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
1269 	val &= ~PCI_EXP_LNKCAP_ASPMS;
1270 	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
1271 
1272 	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
1273 			PCI_EXP_DEVCTL2);
1274 
1275 	dw_pcie_dbi_ro_wr_dis(pci);
1276 
1277 	for (i = 0; i < 256; i++)
1278 		writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i));
1279 
1280 	return 0;
1281 }
1282 
1283 static bool qcom_pcie_link_up(struct dw_pcie *pci)
1284 {
1285 	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
1286 	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
1287 
1288 	return val & PCI_EXP_LNKSTA_DLLLA;
1289 }
1290 
1291 static void qcom_pcie_phy_power_off(struct qcom_pcie *pcie)
1292 {
1293 	struct qcom_pcie_port *port;
1294 
1295 	list_for_each_entry(port, &pcie->ports, list)
1296 		phy_power_off(port->phy);
1297 }
1298 
1299 static int qcom_pcie_phy_power_on(struct qcom_pcie *pcie)
1300 {
1301 	struct qcom_pcie_port *port;
1302 	int ret;
1303 
1304 	list_for_each_entry(port, &pcie->ports, list) {
1305 		ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
1306 		if (ret)
1307 			return ret;
1308 
1309 		ret = phy_power_on(port->phy);
1310 		if (ret) {
1311 			qcom_pcie_phy_power_off(pcie);
1312 			return ret;
1313 		}
1314 	}
1315 
1316 	return 0;
1317 }
1318 
1319 static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
1320 {
1321 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
1322 	struct qcom_pcie *pcie = to_qcom_pcie(pci);
1323 	u16 offset;
1324 	int ret;
1325 
1326 	qcom_ep_reset_assert(pcie);
1327 
1328 	ret = pcie->cfg->ops->init(pcie);
1329 	if (ret)
1330 		return ret;
1331 
1332 	if (pp->ecam_enabled) {
1333 		/*
1334 		 * Override ELBI when ECAM is enabled, since ELBI then moves
1335 		 * under the 'config' space.
1336 		 */
1337 		offset = FIELD_GET(SLV_DBI_ELBI_ADDR_BASE, readl(pcie->parf + PARF_SLV_DBI_ELBI));
1338 		pci->elbi_base = pci->dbi_base + offset;
1339 
1340 		qcom_pci_config_ecam(pp);
1341 	}
1342 
1343 	ret = qcom_pcie_phy_power_on(pcie);
1344 	if (ret)
1345 		goto err_deinit;
1346 
1347 	if (pcie->cfg->ops->post_init) {
1348 		ret = pcie->cfg->ops->post_init(pcie);
1349 		if (ret)
1350 			goto err_disable_phy;
1351 	}
1352 
1353 	qcom_ep_reset_deassert(pcie);
1354 
1355 	if (pcie->cfg->ops->config_sid) {
1356 		ret = pcie->cfg->ops->config_sid(pcie);
1357 		if (ret)
1358 			goto err_assert_reset;
1359 	}
1360 
1361 	return 0;
1362 
1363 err_assert_reset:
1364 	qcom_ep_reset_assert(pcie);
1365 err_disable_phy:
1366 	qcom_pcie_phy_power_off(pcie);
1367 err_deinit:
1368 	pcie->cfg->ops->deinit(pcie);
1369 
1370 	return ret;
1371 }
1372 
1373 static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
1374 {
1375 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
1376 	struct qcom_pcie *pcie = to_qcom_pcie(pci);
1377 
1378 	qcom_ep_reset_assert(pcie);
1379 	qcom_pcie_phy_power_off(pcie);
1380 	pcie->cfg->ops->deinit(pcie);
1381 }
1382 
1383 static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
1384 	.init		= qcom_pcie_host_init,
1385 	.deinit		= qcom_pcie_host_deinit,
1386 };
1387 
1388 /* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
1389 static const struct qcom_pcie_ops ops_2_1_0 = {
1390 	.get_resources = qcom_pcie_get_resources_2_1_0,
1391 	.init = qcom_pcie_init_2_1_0,
1392 	.post_init = qcom_pcie_post_init_2_1_0,
1393 	.deinit = qcom_pcie_deinit_2_1_0,
1394 	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
1395 };
1396 
1397 /* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
1398 static const struct qcom_pcie_ops ops_1_0_0 = {
1399 	.get_resources = qcom_pcie_get_resources_1_0_0,
1400 	.init = qcom_pcie_init_1_0_0,
1401 	.post_init = qcom_pcie_post_init_1_0_0,
1402 	.deinit = qcom_pcie_deinit_1_0_0,
1403 	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
1404 };
1405 
1406 /* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
1407 static const struct qcom_pcie_ops ops_2_3_2 = {
1408 	.get_resources = qcom_pcie_get_resources_2_3_2,
1409 	.init = qcom_pcie_init_2_3_2,
1410 	.post_init = qcom_pcie_post_init_2_3_2,
1411 	.deinit = qcom_pcie_deinit_2_3_2,
1412 	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
1413 };
1414 
1415 /* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
1416 static const struct qcom_pcie_ops ops_2_4_0 = {
1417 	.get_resources = qcom_pcie_get_resources_2_4_0,
1418 	.init = qcom_pcie_init_2_4_0,
1419 	.post_init = qcom_pcie_post_init_2_3_2,
1420 	.deinit = qcom_pcie_deinit_2_4_0,
1421 	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
1422 };
1423 
1424 /* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
1425 static const struct qcom_pcie_ops ops_2_3_3 = {
1426 	.get_resources = qcom_pcie_get_resources_2_3_3,
1427 	.init = qcom_pcie_init_2_3_3,
1428 	.post_init = qcom_pcie_post_init_2_3_3,
1429 	.deinit = qcom_pcie_deinit_2_3_3,
1430 	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
1431 };
1432 
1433 /* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
1434 static const struct qcom_pcie_ops ops_2_7_0 = {
1435 	.get_resources = qcom_pcie_get_resources_2_7_0,
1436 	.init = qcom_pcie_init_2_7_0,
1437 	.post_init = qcom_pcie_post_init_2_7_0,
1438 	.deinit = qcom_pcie_deinit_2_7_0,
1439 	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
1440 };
1441 
1442 /* Qcom IP rev.: 1.9.0 */
1443 static const struct qcom_pcie_ops ops_1_9_0 = {
1444 	.get_resources = qcom_pcie_get_resources_2_7_0,
1445 	.init = qcom_pcie_init_2_7_0,
1446 	.post_init = qcom_pcie_post_init_2_7_0,
1447 	.deinit = qcom_pcie_deinit_2_7_0,
1448 	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
1449 	.config_sid = qcom_pcie_config_sid_1_9_0,
1450 };
1451 
1452 /* Qcom IP rev.: 1.21.0  Synopsys IP rev.: 5.60a */
1453 static const struct qcom_pcie_ops ops_1_21_0 = {
1454 	.get_resources = qcom_pcie_get_resources_2_7_0,
1455 	.init = qcom_pcie_init_2_7_0,
1456 	.post_init = qcom_pcie_post_init_2_7_0,
1457 	.deinit = qcom_pcie_deinit_2_7_0,
1458 	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
1459 };
1460 
1461 /* Qcom IP rev.: 2.9.0  Synopsys IP rev.: 5.00a */
1462 static const struct qcom_pcie_ops ops_2_9_0 = {
1463 	.get_resources = qcom_pcie_get_resources_2_9_0,
1464 	.init = qcom_pcie_init_2_9_0,
1465 	.post_init = qcom_pcie_post_init_2_9_0,
1466 	.deinit = qcom_pcie_deinit_2_9_0,
1467 	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
1468 };
1469 
1470 static const struct qcom_pcie_cfg cfg_1_0_0 = {
1471 	.ops = &ops_1_0_0,
1472 };
1473 
1474 static const struct qcom_pcie_cfg cfg_1_9_0 = {
1475 	.ops = &ops_1_9_0,
1476 };
1477 
1478 static const struct qcom_pcie_cfg cfg_1_34_0 = {
1479 	.ops = &ops_1_9_0,
1480 	.override_no_snoop = true,
1481 };
1482 
1483 static const struct qcom_pcie_cfg cfg_2_1_0 = {
1484 	.ops = &ops_2_1_0,
1485 };
1486 
1487 static const struct qcom_pcie_cfg cfg_2_3_2 = {
1488 	.ops = &ops_2_3_2,
1489 };
1490 
1491 static const struct qcom_pcie_cfg cfg_2_3_3 = {
1492 	.ops = &ops_2_3_3,
1493 };
1494 
1495 static const struct qcom_pcie_cfg cfg_2_4_0 = {
1496 	.ops = &ops_2_4_0,
1497 };
1498 
1499 static const struct qcom_pcie_cfg cfg_2_7_0 = {
1500 	.ops = &ops_2_7_0,
1501 };
1502 
1503 static const struct qcom_pcie_cfg cfg_2_9_0 = {
1504 	.ops = &ops_2_9_0,
1505 };
1506 
1507 static const struct qcom_pcie_cfg cfg_sc8280xp = {
1508 	.ops = &ops_1_21_0,
1509 	.no_l0s = true,
1510 };
1511 
1512 static const struct qcom_pcie_cfg cfg_fw_managed = {
1513 	.firmware_managed = true,
1514 };
1515 
1516 static const struct dw_pcie_ops dw_pcie_ops = {
1517 	.link_up = qcom_pcie_link_up,
1518 	.start_link = qcom_pcie_start_link,
1519 };
1520 
1521 static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
1522 {
1523 	struct dw_pcie *pci = pcie->pci;
1524 	int ret;
1525 
1526 	pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem");
1527 	if (IS_ERR(pcie->icc_mem))
1528 		return PTR_ERR(pcie->icc_mem);
1529 
1530 	pcie->icc_cpu = devm_of_icc_get(pci->dev, "cpu-pcie");
1531 	if (IS_ERR(pcie->icc_cpu))
1532 		return PTR_ERR(pcie->icc_cpu);
1533 	/*
1534 	 * Some Qualcomm platforms require interconnect bandwidth constraints
1535 	 * to be set before enabling interconnect clocks.
1536 	 *
1537 	 * Set an initial peak bandwidth corresponding to single-lane Gen 1
1538 	 * for the pcie-mem path.
1539 	 */
1540 	ret = icc_set_bw(pcie->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));
1541 	if (ret) {
1542 		dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
1543 			ret);
1544 		return ret;
1545 	}
1546 
1547 	/*
1548 	 * Since the CPU-PCIe path is only used for activities like register
1549 	 * access of the host controller and endpoint Config/BAR space access,
1550 	 * the HW team recommends using a minimal bandwidth of 1 KBps just to
1551 	 * keep the path active.
1552 	 */
1553 	ret = icc_set_bw(pcie->icc_cpu, 0, kBps_to_icc(1));
1554 	if (ret) {
1555 		dev_err(pci->dev, "Failed to set bandwidth for CPU-PCIe interconnect path: %d\n",
1556 			ret);
1557 		icc_set_bw(pcie->icc_mem, 0, 0);
1558 		return ret;
1559 	}
1560 
1561 	return 0;
1562 }
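/*
 * The two paths above come from the standard interconnect binding, e.g.
 * (illustrative only; the endpoint specifiers vary per SoC):
 *
 *	interconnects = <&noc MASTER_PCIE 0 &noc SLAVE_EBI 0>,
 *			<&noc MASTER_APPSS 0 &noc SLAVE_PCIE 0>;
 *	interconnect-names = "pcie-mem", "cpu-pcie";
 */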
1563 
1564 static void qcom_pcie_icc_opp_update(struct qcom_pcie *pcie)
1565 {
1566 	u32 offset, status, width, speed;
1567 	struct dw_pcie *pci = pcie->pci;
1568 	unsigned long freq_kbps;
1569 	struct dev_pm_opp *opp;
1570 	int ret, freq_mbps;
1571 
1572 	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
1573 	status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
1574 
1575 	/* Only update constraints if link is up. */
1576 	if (!(status & PCI_EXP_LNKSTA_DLLLA))
1577 		return;
1578 
1579 	speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
1580 	width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);
1581 
1582 	if (pcie->icc_mem) {
1583 		ret = icc_set_bw(pcie->icc_mem, 0,
1584 				 width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
1585 		if (ret) {
1586 			dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
1587 				ret);
1588 		}
1589 	} else if (pcie->use_pm_opp) {
1590 		freq_mbps = pcie_dev_speed_mbps(pcie_link_speed[speed]);
1591 		if (freq_mbps < 0)
1592 			return;
1593 
1594 		freq_kbps = freq_mbps * KILO;
1595 		opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width,
1596 						 true);
1597 		if (!IS_ERR(opp)) {
1598 			ret = dev_pm_opp_set_opp(pci->dev, opp);
1599 			if (ret)
1600 				dev_err(pci->dev, "Failed to set OPP for freq (%lu): %d\n",
1601 					freq_kbps * width, ret);
1602 			dev_pm_opp_put(opp);
1603 		}
1604 	}
1605 }
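/*
 * In the OPP case the table is keyed by total link bandwidth in kbps:
 * per-lane rate in Mb/s * KILO * negotiated lane count. A hypothetical
 * Gen 2 x2 link (5000 Mb/s per lane) would look up the OPP at
 * 5000 * 1000 * 2 kbps.
 */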
1606 
1607 static int qcom_pcie_link_transition_count(struct seq_file *s, void *data)
1608 {
1609 	struct qcom_pcie *pcie = (struct qcom_pcie *)dev_get_drvdata(s->private);
1610 
1611 	seq_printf(s, "L0s transition count: %u\n",
1612 		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L0S));
1613 
1614 	seq_printf(s, "L1 transition count: %u\n",
1615 		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L1));
1616 
1617 	seq_printf(s, "L1.1 transition count: %u\n",
1618 		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));
1619 
1620 	seq_printf(s, "L1.2 transition count: %u\n",
1621 		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));
1622 
1623 	seq_printf(s, "L2 transition count: %u\n",
1624 		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L2));
1625 
1626 	return 0;
1627 }
1628 
1629 static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
1630 {
1631 	struct dw_pcie *pci = pcie->pci;
1632 	struct device *dev = pci->dev;
1633 	char *name;
1634 
1635 	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
1636 	if (!name)
1637 		return;
1638 
1639 	pcie->debugfs = debugfs_create_dir(name, NULL);
1640 	debugfs_create_devm_seqfile(dev, "link_transition_count", pcie->debugfs,
1641 				    qcom_pcie_link_transition_count);
1642 }
1643 
1644 static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
1645 {
1646 	struct qcom_pcie *pcie = data;
1647 	struct dw_pcie_rp *pp = &pcie->pci->pp;
1648 	struct device *dev = pcie->pci->dev;
1649 	u32 status = readl_relaxed(pcie->parf + PARF_INT_ALL_STATUS);
1650 
1651 	writel_relaxed(status, pcie->parf + PARF_INT_ALL_CLEAR);
1652 
1653 	if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
1654 		msleep(PCIE_RESET_CONFIG_WAIT_MS);
1655 		dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
1656 		/* Rescan the bus to enumerate endpoint devices */
1657 		pci_lock_rescan_remove();
1658 		pci_rescan_bus(pp->bridge->bus);
1659 		pci_unlock_rescan_remove();
1660 
1661 		qcom_pcie_icc_opp_update(pcie);
1662 	} else {
1663 		dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
1664 			      status);
1665 	}
1666 
1667 	return IRQ_HANDLED;
1668 }
1669 
1670 static void qcom_pci_free_msi(void *ptr)
1671 {
1672 	struct dw_pcie_rp *pp = (struct dw_pcie_rp *)ptr;
1673 
1674 	if (pp && pp->has_msi_ctrl)
1675 		dw_pcie_free_msi(pp);
1676 }
1677 
1678 static int qcom_pcie_ecam_host_init(struct pci_config_window *cfg)
1679 {
1680 	struct device *dev = cfg->parent;
1681 	struct dw_pcie_rp *pp;
1682 	struct dw_pcie *pci;
1683 	int ret;
1684 
1685 	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
1686 	if (!pci)
1687 		return -ENOMEM;
1688 
1689 	pci->dev = dev;
1690 	pp = &pci->pp;
1691 	pci->dbi_base = cfg->win;
1692 	pp->num_vectors = MSI_DEF_NUM_VECTORS;
1693 
1694 	ret = dw_pcie_msi_host_init(pp);
1695 	if (ret)
1696 		return ret;
1697 
1698 	pp->has_msi_ctrl = true;
1699 	dw_pcie_msi_init(pp);
1700 
1701 	return devm_add_action_or_reset(dev, qcom_pci_free_msi, pp);
1702 }
1703 
1704 static const struct pci_ecam_ops pci_qcom_ecam_ops = {
1705 	.init		= qcom_pcie_ecam_host_init,
1706 	.pci_ops	= {
1707 		.map_bus	= pci_ecam_map_bus,
1708 		.read		= pci_generic_config_read,
1709 		.write		= pci_generic_config_write,
1710 	}
1711 };
1712 
1713 static int qcom_pcie_parse_port(struct qcom_pcie *pcie, struct device_node *node)
1714 {
1715 	struct device *dev = pcie->pci->dev;
1716 	struct qcom_pcie_port *port;
1717 	struct gpio_desc *reset;
1718 	struct phy *phy;
1719 	int ret;
1720 
1721 	reset = devm_fwnode_gpiod_get(dev, of_fwnode_handle(node),
1722 				      "reset", GPIOD_OUT_HIGH, "PERST#");
1723 	if (IS_ERR(reset))
1724 		return PTR_ERR(reset);
1725 
1726 	phy = devm_of_phy_get(dev, node, NULL);
1727 	if (IS_ERR(phy))
1728 		return PTR_ERR(phy);
1729 
1730 	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
1731 	if (!port)
1732 		return -ENOMEM;
1733 
1734 	ret = phy_init(phy);
1735 	if (ret)
1736 		return ret;
1737 
1738 	port->reset = reset;
1739 	port->phy = phy;
1740 	INIT_LIST_HEAD(&port->list);
1741 	list_add_tail(&port->list, &pcie->ports);
1742 
1743 	return 0;
1744 }
1745 
1746 static int qcom_pcie_parse_ports(struct qcom_pcie *pcie)
1747 {
1748 	struct device *dev = pcie->pci->dev;
1749 	struct qcom_pcie_port *port, *tmp;
1750 	int ret = -ENOENT;
1751 
1752 	for_each_available_child_of_node_scoped(dev->of_node, of_port) {
1753 		if (!of_node_is_type(of_port, "pci"))
1754 			continue;
1755 		ret = qcom_pcie_parse_port(pcie, of_port);
1756 		if (ret)
1757 			goto err_port_del;
1758 	}
1759 
1760 	return ret;
1761 
1762 err_port_del:
1763 	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
1764 		phy_exit(port->phy);
1765 		list_del(&port->list);
1766 	}
1767 
1768 	return ret;
1769 }
1770 
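/*
 * Legacy binding: the PHY and PERST# GPIO live in the Host Bridge node
 * itself rather than in a Root Port child node. Both are optional here.
 */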
static int qcom_pcie_parse_legacy_binding(struct qcom_pcie *pcie)
{
	struct device *dev = pcie->pci->dev;
	struct qcom_pcie_port *port;
	struct gpio_desc *reset;
	struct phy *phy;
	int ret;

	phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	ret = phy_init(phy);
	if (ret)
		return ret;

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->reset = reset;
	port->phy = phy;
	INIT_LIST_HEAD(&port->list);
	list_add_tail(&port->list, &pcie->ports);

	return 0;
}

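/*
 * Probe the controller. Firmware-managed instances only need a generic
 * ECAM host bridge; otherwise map PARF (and the optional MHI region),
 * vote for the highest OPP (or initialize ICC) before link training,
 * acquire the SoC-specific resources, parse the Root Port(s) and bring
 * up the DesignWare host.
 */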
static int qcom_pcie_probe(struct platform_device *pdev)
{
	const struct qcom_pcie_cfg *pcie_cfg;
	unsigned long max_freq = ULONG_MAX;
	struct qcom_pcie_port *port, *tmp;
	struct device *dev = &pdev->dev;
	struct dev_pm_opp *opp;
	struct qcom_pcie *pcie;
	struct dw_pcie_rp *pp;
	struct resource *res;
	struct dw_pcie *pci;
	int ret, irq;
	char *name;

	pcie_cfg = of_device_get_match_data(dev);
	if (!pcie_cfg) {
		dev_err(dev, "No platform data\n");
		return -ENODATA;
	}

	if (!pcie_cfg->firmware_managed && !pcie_cfg->ops) {
		dev_err(dev, "No platform ops\n");
		return -ENODATA;
	}

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm_runtime_put;

	if (pcie_cfg->firmware_managed) {
		struct pci_host_bridge *bridge;
		struct pci_config_window *cfg;

		bridge = devm_pci_alloc_host_bridge(dev, 0);
		if (!bridge) {
			ret = -ENOMEM;
			goto err_pm_runtime_put;
		}

		/* Parse and map our ECAM configuration space area */
		cfg = pci_host_common_ecam_create(dev, bridge,
				&pci_qcom_ecam_ops);
		if (IS_ERR(cfg)) {
			ret = PTR_ERR(cfg);
			goto err_pm_runtime_put;
		}

		bridge->sysdata = cfg;
		bridge->ops = (struct pci_ops *)&pci_qcom_ecam_ops.pci_ops;
		bridge->msi_domain = true;

		ret = pci_host_probe(bridge);
		if (ret)
			goto err_pm_runtime_put;

		return 0;
	}

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie) {
		ret = -ENOMEM;
		goto err_pm_runtime_put;
	}

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci) {
		ret = -ENOMEM;
		goto err_pm_runtime_put;
	}

	INIT_LIST_HEAD(&pcie->ports);

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->cfg = pcie_cfg;

	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	/* MHI region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi");
	if (res) {
		pcie->mhi = devm_ioremap_resource(dev, res);
		if (IS_ERR(pcie->mhi)) {
			ret = PTR_ERR(pcie->mhi);
			goto err_pm_runtime_put;
		}
	}

	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(dev);
	if (ret && ret != -ENODEV) {
		dev_err_probe(dev, ret, "Failed to add OPP table\n");
		goto err_pm_runtime_put;
	}

	/*
	 * Before the PCIe link is initialized, vote for the highest OPP in
	 * the OPP table, so that the maximum voltage corner is requested and
	 * the link can come up at the maximum supported speed. At the end of
	 * probe(), the OPP will be updated using qcom_pcie_icc_opp_update().
	 */
	if (!ret) {
		opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			dev_err_probe(pci->dev, ret,
				      "Unable to find max freq OPP\n");
			goto err_pm_runtime_put;
		} else {
			ret = dev_pm_opp_set_opp(dev, opp);
		}

		dev_pm_opp_put(opp);
		if (ret) {
			dev_err_probe(pci->dev, ret,
				      "Failed to set OPP for freq %lu\n",
				      max_freq);
			goto err_pm_runtime_put;
		}

		pcie->use_pm_opp = true;
	} else {
		/* Skip ICC init if OPP is supported, as it is handled by OPP */
		ret = qcom_pcie_icc_init(pcie);
		if (ret)
			goto err_pm_runtime_put;
	}

	ret = pcie->cfg->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	ret = qcom_pcie_parse_ports(pcie);
	if (ret) {
		if (ret != -ENOENT) {
			dev_err_probe(pci->dev, ret,
				      "Failed to parse Root Port: %d\n", ret);
			goto err_pm_runtime_put;
		}

		/*
		 * If the properties are not populated in the Root Port node,
		 * fall back to the legacy method of parsing the Host Bridge
		 * node. This maintains DT backwards compatibility.
		 */
		ret = qcom_pcie_parse_legacy_binding(pcie);
		if (ret)
			goto err_pm_runtime_put;
	}

	platform_set_drvdata(pdev, pcie);

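	/*
	 * The "global" IRQ is optional. When present, link-up is signaled by
	 * the interrupt, so the DWC core need not wait for the link during
	 * host init; enumeration happens in qcom_pcie_global_irq_thread().
	 */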
	irq = platform_get_irq_byname_optional(pdev, "global");
	if (irq > 0)
		pp->use_linkup_irq = true;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		goto err_phy_exit;
	}

	name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_global_irq%d",
			      pci_domain_nr(pp->bridge->bus));
	if (!name) {
		ret = -ENOMEM;
		goto err_host_deinit;
	}

	if (irq > 0) {
		ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
						qcom_pcie_global_irq_thread,
						IRQF_ONESHOT, name, pcie);
		if (ret) {
			dev_err_probe(&pdev->dev, ret,
				      "Failed to request Global IRQ\n");
			goto err_host_deinit;
		}

		writel_relaxed(PARF_INT_ALL_LINK_UP | PARF_INT_MSI_DEV_0_7,
			       pcie->parf + PARF_INT_ALL_MASK);
	}

	qcom_pcie_icc_opp_update(pcie);

	if (pcie->mhi)
		qcom_pcie_init_debugfs(pcie);

	return 0;

err_host_deinit:
	dw_pcie_host_deinit(pp);
err_phy_exit:
	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		phy_exit(port->phy);
		list_del(&port->list);
	}
err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}

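/*
 * On suspend, keep a minimal interconnect vote and power the controller
 * down only when no link is up; see the comments below for the rationale.
 */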
static int qcom_pcie_suspend_noirq(struct device *dev)
{
	struct qcom_pcie *pcie;
	int ret = 0;

	pcie = dev_get_drvdata(dev);
	if (!pcie)
		return 0;

	/*
	 * Set the minimum bandwidth required to keep the data path functional
	 * during suspend.
	 */
	if (pcie->icc_mem) {
		ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
		if (ret) {
			dev_err(dev,
				"Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
				ret);
			return ret;
		}
	}

	/*
	 * Turn OFF the resources only for controllers without active PCIe
	 * devices. For controllers with active devices, the resources are
	 * kept ON and the link is expected to be in L0/L1 (sub)states.
	 *
	 * Turning OFF the resources for controllers with active PCIe devices
	 * will trigger an access violation at the end of the suspend cycle,
	 * as the kernel tries to access the config space of PCIe devices to
	 * mask the MSIs.
	 *
	 * Also, it is not desirable to put the link into L2/L3 state, as that
	 * implies the VDD supply will be removed and the devices may go into
	 * a powerdown state. This would affect the lifetime of storage
	 * devices like NVMe.
	 */
	if (!dw_pcie_link_up(pcie->pci)) {
		qcom_pcie_host_deinit(&pcie->pci->pp);
		pcie->suspended = true;
	}

	/*
	 * Only disable the CPU-PCIe interconnect path if the suspend is
	 * non-S2RAM, because on some platforms DBI access can happen very
	 * late during S2RAM and a non-active CPU-PCIe interconnect path may
	 * lead to a NoC error.
	 */
	if (pm_suspend_target_state != PM_SUSPEND_MEM) {
		ret = icc_disable(pcie->icc_cpu);
		if (ret)
			dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret);

		if (pcie->use_pm_opp)
			dev_pm_opp_set_opp(pcie->pci->dev, NULL);
	}
	return ret;
}

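/*
 * Mirror of suspend: re-enable the CPU-PCIe interconnect path (unless
 * resuming from S2RAM, where it was never disabled), re-initialize the
 * host if it was powered down, and restore the bandwidth/OPP votes.
 */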
static int qcom_pcie_resume_noirq(struct device *dev)
{
	struct qcom_pcie *pcie;
	int ret;

	pcie = dev_get_drvdata(dev);
	if (!pcie)
		return 0;

	if (pm_suspend_target_state != PM_SUSPEND_MEM) {
		ret = icc_enable(pcie->icc_cpu);
		if (ret) {
			dev_err(dev, "Failed to enable CPU-PCIe interconnect path: %d\n", ret);
			return ret;
		}
	}

	if (pcie->suspended) {
		ret = qcom_pcie_host_init(&pcie->pci->pp);
		if (ret)
			return ret;

		pcie->suspended = false;
	}

	qcom_pcie_icc_opp_update(pcie);

	return 0;
}

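/* Map each supported SoC compatible to its configuration data */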
static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-ipq5018", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
	{ .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq9574", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
	{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-sa8255p", .data = &cfg_fw_managed },
	{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
	{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0 },
	{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },
	{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
	{ .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-x1e80100", .data = &cfg_sc8280xp },
	{ }
};

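/*
 * Qualcomm Root Ports are known to advertise an incorrect device class;
 * force it to PCI-to-PCI bridge so the PCI core enumerates them properly.
 */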
static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);

static const struct dev_pm_ops qcom_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
};

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
		.pm = &qcom_pcie_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
builtin_platform_driver(qcom_pcie_driver);