// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "pcie-designware.h"

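/*
 * Register layout notes (as used by this driver): the PCIE20_PARF_* offsets
 * below index Qualcomm's PCIe wrapper/glue register space, mapped from the
 * DT "parf" reg entry, while the remaining PCIE20_* offsets index the
 * DesignWare core's DBI or ELBI space. Exact register semantics vary per IP
 * revision; the names follow Qualcomm's conventions.
 */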
#define PCIE20_PARF_SYS_CTRL			0x00
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

#define PCIE20_COMMAND_STATUS			0x04
#define CMD_BME_VAL				0x4
#define PCIE20_DEVICE_CONTROL2_STATUS2		0x98
#define PCIE_CAP_CPL_TIMEOUT_DISABLE		0x10

#define PCIE20_PARF_PHY_CTRL			0x40
#define PCIE20_PARF_PHY_REFCLK			0x4C
#define PCIE20_PARF_DBI_BASE_ADDR		0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
#define PCIE20_PARF_LTSSM			0x1B0
#define PCIE20_PARF_SID_OFFSET			0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C

#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)

#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0		0x818
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c
#define CFG_BRIDGE_SB_INIT			BIT(0)

#define PCIE20_CAP				0x70
#define PCIE20_CAP_LINK_CAPABILITIES		(PCIE20_CAP + 0xC)
#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT	(BIT(10) | BIT(11))
#define PCIE20_CAP_LINK_1			(PCIE20_CAP + 0x14)
#define PCIE_CAP_LINK1_VAL			0x2FD7F

#define PCIE20_PARF_Q2A_FLUSH			0x1AC

#define PCIE20_MISC_CONTROL_1_REG		0x8BC
#define DBI_RO_WR_EN				1

#define PERST_DELAY_US				1000

#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE	0x358
#define SLV_ADDR_SPACE_SZ			0x10000000

#define QCOM_PCIE_2_1_0_MAX_SUPPLY	3
struct qcom_pcie_resources_2_1_0 {
	struct clk *iface_clk;
	struct clk *core_clk;
	struct clk *phy_clk;
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY	2
struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_4_0_MAX_CLOCKS	4
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
	int num_clks;
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};

struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	struct reset_control *rst[7];
};

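/*
 * Only one of these resource sets is live at runtime; which one is selected
 * by the per-SoC match data, so a union keeps struct qcom_pcie small.
 */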
union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
};

struct qcom_pcie;

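/*
 * Per-IP-revision callbacks, invoked from probe and host init in this order:
 * get_resources() at probe time, then init() -> phy_power_on() -> post_init()
 * during host bring-up, with post_deinit()/deinit() undoing them on failure.
 * ltssm_enable() kicks off link training.
 */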
struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*post_deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	const struct qcom_pcie_ops *ops;
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)

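/*
 * pcie->reset drives the endpoint's PERST# line; gpiod value 1 means
 * "assert reset" (the GPIO is requested with GPIOD_OUT_HIGH in probe, so the
 * endpoint starts out held in reset). PERST# is active-low at the connector,
 * with the polarity inversion expected to be described in DT.
 */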
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms */
	msleep(100);
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

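/*
 * Kick off link training and wait for the data link to come up. Where the
 * LTSSM enable bit lives differs per IP revision (ELBI for 2.1.0/1.0.0,
 * PARF for the later revisions), hence the ltssm_enable() callback.
 */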
static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (dw_pcie_link_up(pci))
		return 0;

	/* Enable Link Training state machine */
	if (pcie->ops->ltssm_enable)
		pcie->ops->ltssm_enable(pcie);

	return dw_pcie_wait_for_link(pci);
}

static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
}

static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface_clk))
		return PTR_ERR(res->iface_clk);

	res->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(res->core_clk))
		return PTR_ERR(res->core_clk);

	res->phy_clk = devm_clk_get(dev, "phy");
	if (IS_ERR(res->phy_clk))
		return PTR_ERR(res->phy_clk);

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}

static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->phy_reset);
	clk_disable_unprepare(res->iface_clk);
	clk_disable_unprepare(res->core_clk);
	clk_disable_unprepare(res->phy_clk);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		goto err_assert_ahb;
	}

	ret = clk_prepare_enable(res->iface_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_assert_ahb;
	}

	ret = clk_prepare_enable(res->phy_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable phy clock\n");
		goto err_clk_phy;
	}

	ret = clk_prepare_enable(res->core_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable core clock\n");
		goto err_clk_core;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* enable external reference clock */
	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
	val |= BIT(16);
	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		goto err_deassert_ahb;
	}

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the Max TLP size to 2K, instead of using default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);

	return 0;

err_deassert_ahb:
	clk_disable_unprepare(res->core_clk);
err_clk_core:
	clk_disable_unprepare(res->phy_clk);
err_clk_phy:
	clk_disable_unprepare(res->iface_clk);
err_assert_ahb:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}

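/*
 * On 2.3.2 and the later revisions the LTSSM enable bit lives in the PARF
 * wrapper (PCIE20_PARF_LTSSM bit 8) instead of the ELBI block.
 */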
static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
	val |= BIT(8);
	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
}

static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->pipe_clk);
}

static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = clk_prepare_enable(res->pipe_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable pipe clock\n");
		return ret;
	}

	return 0;
}

static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "master_bus";
	res->clks[2].id = "slave_bus";
	res->clks[3].id = "iface";

	/* qcom,pcie-ipq4019 is defined without "iface" */
	res->num_clks = is_ipq ? 3 : 4;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	if (is_ipq) {
		/*
		 * These resources relate to the PHY or are secure clocks, but
		 * are controlled here for IPQ4019.
		 */
		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
		if (IS_ERR(res->pipe_reset))
			return PTR_ERR(res->pipe_reset);

		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
									 "axi_m_vmid");
		if (IS_ERR(res->axi_m_vmid_reset))
			return PTR_ERR(res->axi_m_vmid_reset);

		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
									"axi_s_xpu");
		if (IS_ERR(res->axi_s_xpu_reset))
			return PTR_ERR(res->axi_s_xpu_reset);

		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
		if (IS_ERR(res->parf_reset))
			return PTR_ERR(res->parf_reset);

		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
		if (IS_ERR(res->phy_reset))
			return PTR_ERR(res->phy_reset);
	}

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	if (is_ipq) {
		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
		if (IS_ERR(res->phy_ahb_reset))
			return PTR_ERR(res->phy_ahb_reset);
	}

	return 0;
}

static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = reset_control_assert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master reset\n");
		return ret;
	}

	ret = reset_control_assert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi slave reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot assert power reset\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy ahb reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_rst_phy;
	}

	ret = reset_control_deassert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe reset\n");
		goto err_rst_pipe;
	}

	ret = reset_control_deassert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe sticky reset\n");
		goto err_rst_pipe_sticky;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master reset\n");
		goto err_rst_axi_m;
	}

	ret = reset_control_deassert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master sticky reset\n");
		goto err_rst_axi_m_sticky;
	}

	ret = reset_control_deassert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi slave reset\n");
		goto err_rst_axi_s;
	}

	ret = reset_control_deassert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot deassert power reset\n");
		goto err_rst_pwr;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_rst_ahb;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		goto err_clks;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_clks:
	reset_control_assert(res->ahb_reset);
err_rst_ahb:
	reset_control_assert(res->pwr_reset);
err_rst_pwr:
	reset_control_assert(res->axi_s_reset);
err_rst_axi_s:
	reset_control_assert(res->axi_m_sticky_reset);
err_rst_axi_m_sticky:
	reset_control_assert(res->axi_m_reset);
err_rst_axi_m:
	reset_control_assert(res->pipe_sticky_reset);
err_rst_pipe_sticky:
	reset_control_assert(res->pipe_reset);
err_rst_pipe:
	reset_control_assert(res->phy_reset);
err_rst_phy:
	reset_control_assert(res->phy_ahb_reset);
	return ret;
}

static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i;
	const char *rst_names[] = { "axi_m", "axi_s", "pipe",
				    "axi_m_sticky", "sticky",
				    "ahb", "sleep", };

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->axi_m_clk = devm_clk_get(dev, "axi_m");
	if (IS_ERR(res->axi_m_clk))
		return PTR_ERR(res->axi_m_clk);

	res->axi_s_clk = devm_clk_get(dev, "axi_s");
	if (IS_ERR(res->axi_s_clk))
		return PTR_ERR(res->axi_s_clk);

	res->ahb_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(res->ahb_clk))
		return PTR_ERR(res->ahb_clk);

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
		res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
		if (IS_ERR(res->rst[i]))
			return PTR_ERR(res->rst[i]);
	}

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->axi_m_clk);
	clk_disable_unprepare(res->axi_s_clk);
	clk_disable_unprepare(res->ahb_clk);
	clk_disable_unprepare(res->aux_clk);
}

static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i, ret;
	u32 val;

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_assert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
			return ret;
		}
	}

	usleep_range(2000, 2500);

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_deassert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d deassert failed (%d)\n", i,
				ret);
			return ret;
		}
	}

	/*
	 * There is no way to check whether the resets have completed,
	 * so wait for some time.
	 */
	usleep_range(2000, 2500);

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_clk_iface;
	}

	ret = clk_prepare_enable(res->axi_m_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi master clock\n");
		goto err_clk_axi_m;
	}

	ret = clk_prepare_enable(res->axi_s_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi slave clock\n");
		goto err_clk_axi_s;
	}

	ret = clk_prepare_enable(res->ahb_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable ahb clock\n");
		goto err_clk_ahb;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_clk_aux;
	}

	writel(SLV_ADDR_SPACE_SZ,
		pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PCIE20_PARF_SYS_CTRL);
	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);

	writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS);
	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
	writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);

	val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
	val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT;
	writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);

	writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base +
		PCIE20_DEVICE_CONTROL2_STATUS2);

	return 0;

err_clk_aux:
	clk_disable_unprepare(res->ahb_clk);
err_clk_ahb:
	clk_disable_unprepare(res->axi_s_clk);
err_clk_axi_s:
	clk_disable_unprepare(res->axi_m_clk);
err_clk_axi_m:
	clk_disable_unprepare(res->iface);
err_clk_iface:
	/*
	 * Not checking for failure, will anyway return
	 * the original failure in 'ret'.
	 */
	for (i = 0; i < ARRAY_SIZE(res->rst); i++)
		reset_control_assert(res->rst[i]);

	return ret;
}

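/*
 * Report link state by reading the Data Link Layer Link Active bit in the
 * standard Link Status register (PCIe capability at DBI offset PCIE20_CAP).
 */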
static int qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

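/*
 * Host bring-up order: hold the endpoint in reset, power up the controller
 * (init), then the PHY, run any post_init fixups, configure the DesignWare
 * root complex, release PERST# and finally train the link.
 */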
static int qcom_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->ops->post_init) {
		ret = pcie->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	dw_pcie_setup_rc(pp);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	qcom_ep_reset_deassert(pcie);

	ret = qcom_pcie_establish_link(pcie);
	if (ret)
		goto err;

	return 0;
err:
	qcom_ep_reset_assert(pcie);
	if (pcie->ops->post_deinit)
		pcie->ops->post_deinit(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->ops->deinit(pcie);

	return ret;
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
};

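/*
 * One ops table per Qualcomm controller revision; the pairing with SoCs
 * comes from qcom_pcie_match[] at the bottom of this file.
 */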
/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.post_deinit = qcom_pcie_post_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
};

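/*
 * For orientation, a minimal sketch of the DT node this probe routine
 * expects; the names come from the lookups below, while the node address,
 * GPIO specifier and placeholder reg values are purely illustrative:
 *
 *	pcie@1b500000 {
 *		compatible = "qcom,pcie-apq8084";
 *		reg-names = "parf", "dbi", "elbi";
 *		reg = <...>, <...>, <...>;
 *		interrupt-names = "msi";
 *		phys = <&pciephy>;
 *		phy-names = "pciephy";
 *		perst-gpios = <&tlmm 70 GPIO_ACTIVE_LOW>;
 *	};
 *
 * plus the per-revision clocks, resets and regulators requested by the
 * matching get_resources() callback.
 */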
static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct pcie_port *pp;
	struct dw_pcie *pci;
	struct qcom_pcie *pcie;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pm_runtime_enable(dev);
	/*
	 * pm_runtime_get_sync() raises the usage count even on failure, so
	 * drop it again on the way out.
	 */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm_runtime_put;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->ops = of_device_get_match_data(dev);

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->reset)) {
		ret = PTR_ERR(pcie->reset);
		goto err_pm_runtime_put;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
	pcie->parf = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pci->dbi_base)) {
		ret = PTR_ERR(pci->dbi_base);
		goto err_pm_runtime_put;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
	pcie->elbi = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->elbi)) {
		ret = PTR_ERR(pcie->elbi);
		goto err_pm_runtime_put;
	}

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		goto err_pm_runtime_put;
	}

	ret = pcie->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq < 0) {
			ret = pp->msi_irq;
			goto err_pm_runtime_put;
		}
	}

	ret = phy_init(pcie->phy);
	if (ret)
		goto err_pm_runtime_put;

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		goto err_pm_runtime_put;
	}

	return 0;

err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
	{ }
};

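/*
 * The root port's config space reports a non-bridge class code; force it to
 * PCI-to-PCI bridge so the PCI core enumerates the port normally. Note this
 * matches every device with the Qualcomm vendor ID, not just this root
 * complex.
 */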
static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, PCI_ANY_ID, qcom_fixup_class);

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);