xref: /linux/drivers/pci/controller/pcie-mediatek-gen3.c (revision cdd30ebb1b9f36159d66f088b61aee264e649d7a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * MediaTek PCIe host controller driver.
4  *
5  * Copyright (c) 2020 MediaTek Inc.
6  * Author: Jianjun Wang <jianjun.wang@mediatek.com>
7  */
8 
9 #include <linux/bitfield.h>
10 #include <linux/clk.h>
11 #include <linux/clk-provider.h>
12 #include <linux/delay.h>
13 #include <linux/iopoll.h>
14 #include <linux/irq.h>
15 #include <linux/irqchip/chained_irq.h>
16 #include <linux/irqdomain.h>
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/msi.h>
20 #include <linux/of_device.h>
21 #include <linux/of_pci.h>
22 #include <linux/pci.h>
23 #include <linux/phy/phy.h>
24 #include <linux/platform_device.h>
25 #include <linux/pm_domain.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/reset.h>
28 
29 #include "../pci.h"
30 
31 #define PCIE_BASE_CFG_REG		0x14
32 #define PCIE_BASE_CFG_SPEED		GENMASK(15, 8)
33 
34 #define PCIE_SETTING_REG		0x80
35 #define PCIE_SETTING_LINK_WIDTH		GENMASK(11, 8)
36 #define PCIE_SETTING_GEN_SUPPORT	GENMASK(14, 12)
37 #define PCIE_PCI_IDS_1			0x9c
38 #define PCI_CLASS(class)		(class << 8)
39 #define PCIE_RC_MODE			BIT(0)
40 
41 #define PCIE_EQ_PRESET_01_REG		0x100
42 #define PCIE_VAL_LN0_DOWNSTREAM		GENMASK(6, 0)
43 #define PCIE_VAL_LN0_UPSTREAM		GENMASK(14, 8)
44 #define PCIE_VAL_LN1_DOWNSTREAM		GENMASK(22, 16)
45 #define PCIE_VAL_LN1_UPSTREAM		GENMASK(30, 24)
46 
47 #define PCIE_CFGNUM_REG			0x140
48 #define PCIE_CFG_DEVFN(devfn)		((devfn) & GENMASK(7, 0))
49 #define PCIE_CFG_BUS(bus)		(((bus) << 8) & GENMASK(15, 8))
50 #define PCIE_CFG_BYTE_EN(bytes)		(((bytes) << 16) & GENMASK(19, 16))
51 #define PCIE_CFG_FORCE_BYTE_EN		BIT(20)
52 #define PCIE_CFG_OFFSET_ADDR		0x1000
53 #define PCIE_CFG_HEADER(bus, devfn) \
54 	(PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn))
55 
56 #define PCIE_RST_CTRL_REG		0x148
57 #define PCIE_MAC_RSTB			BIT(0)
58 #define PCIE_PHY_RSTB			BIT(1)
59 #define PCIE_BRG_RSTB			BIT(2)
60 #define PCIE_PE_RSTB			BIT(3)
61 
62 #define PCIE_LTSSM_STATUS_REG		0x150
63 #define PCIE_LTSSM_STATE_MASK		GENMASK(28, 24)
64 #define PCIE_LTSSM_STATE(val)		((val & PCIE_LTSSM_STATE_MASK) >> 24)
65 #define PCIE_LTSSM_STATE_L2_IDLE	0x14
66 
67 #define PCIE_LINK_STATUS_REG		0x154
68 #define PCIE_PORT_LINKUP		BIT(8)
69 
70 #define PCIE_MSI_SET_NUM		8
71 #define PCIE_MSI_IRQS_PER_SET		32
72 #define PCIE_MSI_IRQS_NUM \
73 	(PCIE_MSI_IRQS_PER_SET * PCIE_MSI_SET_NUM)
74 
75 #define PCIE_INT_ENABLE_REG		0x180
76 #define PCIE_MSI_ENABLE			GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
77 #define PCIE_MSI_SHIFT			8
78 #define PCIE_INTX_SHIFT			24
79 #define PCIE_INTX_ENABLE \
80 	GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)
81 
82 #define PCIE_INT_STATUS_REG		0x184
83 #define PCIE_MSI_SET_ENABLE_REG		0x190
84 #define PCIE_MSI_SET_ENABLE		GENMASK(PCIE_MSI_SET_NUM - 1, 0)
85 
86 #define PCIE_PIPE4_PIE8_REG		0x338
87 #define PCIE_K_FINETUNE_MAX		GENMASK(5, 0)
88 #define PCIE_K_FINETUNE_ERR		GENMASK(7, 6)
89 #define PCIE_K_PRESET_TO_USE		GENMASK(18, 8)
90 #define PCIE_K_PHYPARAM_QUERY		BIT(19)
91 #define PCIE_K_QUERY_TIMEOUT		BIT(20)
92 #define PCIE_K_PRESET_TO_USE_16G	GENMASK(31, 21)
93 
94 #define PCIE_MSI_SET_BASE_REG		0xc00
95 #define PCIE_MSI_SET_OFFSET		0x10
96 #define PCIE_MSI_SET_STATUS_OFFSET	0x04
97 #define PCIE_MSI_SET_ENABLE_OFFSET	0x08
98 
99 #define PCIE_MSI_SET_ADDR_HI_BASE	0xc80
100 #define PCIE_MSI_SET_ADDR_HI_OFFSET	0x04
101 
102 #define PCIE_ICMD_PM_REG		0x198
103 #define PCIE_TURN_OFF_LINK		BIT(4)
104 
105 #define PCIE_MISC_CTRL_REG		0x348
106 #define PCIE_DISABLE_DVFSRC_VLT_REQ	BIT(1)
107 
108 #define PCIE_TRANS_TABLE_BASE_REG	0x800
109 #define PCIE_ATR_SRC_ADDR_MSB_OFFSET	0x4
110 #define PCIE_ATR_TRSL_ADDR_LSB_OFFSET	0x8
111 #define PCIE_ATR_TRSL_ADDR_MSB_OFFSET	0xc
112 #define PCIE_ATR_TRSL_PARAM_OFFSET	0x10
113 #define PCIE_ATR_TLB_SET_OFFSET		0x20
114 
115 #define PCIE_MAX_TRANS_TABLES		8
116 #define PCIE_ATR_EN			BIT(0)
117 #define PCIE_ATR_SIZE(size) \
118 	(((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
119 #define PCIE_ATR_ID(id)			((id) & GENMASK(3, 0))
120 #define PCIE_ATR_TYPE_MEM		PCIE_ATR_ID(0)
121 #define PCIE_ATR_TYPE_IO		PCIE_ATR_ID(1)
122 #define PCIE_ATR_TLP_TYPE(type)		(((type) << 16) & GENMASK(18, 16))
123 #define PCIE_ATR_TLP_TYPE_MEM		PCIE_ATR_TLP_TYPE(0)
124 #define PCIE_ATR_TLP_TYPE_IO		PCIE_ATR_TLP_TYPE(2)
125 
126 #define MAX_NUM_PHY_RESETS		3
127 
128 /* Time in ms needed to complete PCIe reset on EN7581 SoC */
129 #define PCIE_EN7581_RESET_TIME_MS	100
130 
131 struct mtk_gen3_pcie;
132 
133 #define PCIE_CONF_LINK2_CTL_STS		(PCIE_CFG_OFFSET_ADDR + 0xb0)
134 #define PCIE_CONF_LINK2_LCR2_LINK_SPEED	GENMASK(3, 0)
135 
/**
 * struct mtk_gen3_pcie_pdata - differentiate between host generations
 * @power_up: pcie power_up callback
 * @phy_resets: phy reset lines SoC data.
 */
struct mtk_gen3_pcie_pdata {
	int (*power_up)(struct mtk_gen3_pcie *pcie);
	struct {
		const char *id[MAX_NUM_PHY_RESETS];	/* reset line names */
		int num_resets;				/* entries used in @id */
	} phy_resets;
};
148 
/**
 * struct mtk_msi_set - MSI information for each set
 * @base: IO mapped register base (points into the port's MAC register space)
 * @msg_addr: MSI message address (physical address the EP writes to)
 * @saved_irq_state: IRQ enable state saved at suspend time
 */
struct mtk_msi_set {
	void __iomem *base;
	phys_addr_t msg_addr;
	u32 saved_irq_state;
};
160 
/**
 * struct mtk_gen3_pcie - PCIe port information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base
 * @reg_base: physical register base
 * @mac_reset: MAC reset control
 * @phy_resets: PHY reset controllers
 * @phy: PHY controller block
 * @clks: PCIe clocks
 * @num_clks: PCIe clocks count for this port
 * @max_link_speed: Maximum link speed (PCIe Gen) for this port
 * @num_lanes: Number of PCIe lanes for this port
 * @irq: PCIe controller interrupt number
 * @saved_irq_state: IRQ enable state saved at suspend time
 * @irq_lock: lock protecting IRQ register access
 * @intx_domain: legacy INTx IRQ domain
 * @msi_domain: MSI IRQ domain
 * @msi_bottom_domain: MSI IRQ bottom domain
 * @msi_sets: MSI sets information
 * @lock: lock protecting IRQ bit map
 * @msi_irq_in_use: bit map for assigned MSI IRQ
 * @soc: pointer to SoC-dependent operations
 */
struct mtk_gen3_pcie {
	struct device *dev;
	void __iomem *base;
	phys_addr_t reg_base;
	struct reset_control *mac_reset;
	struct reset_control_bulk_data phy_resets[MAX_NUM_PHY_RESETS];
	struct phy *phy;
	struct clk_bulk_data *clks;
	int num_clks;
	u8 max_link_speed;	/* 0 means no restriction imposed */
	u8 num_lanes;		/* 0 means use controller default */

	int irq;
	u32 saved_irq_state;
	raw_spinlock_t irq_lock;
	struct irq_domain *intx_domain;
	struct irq_domain *msi_domain;
	struct irq_domain *msi_bottom_domain;
	struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
	struct mutex lock;
	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);

	const struct mtk_gen3_pcie_pdata *soc;
};
208 
/*
 * LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24]; indexed by the raw
 * hardware state value, used only for link-down diagnostics.
 */
static const char *const ltssm_str[] = {
	"detect.quiet",			/* 0x00 */
	"detect.active",		/* 0x01 */
	"polling.active",		/* 0x02 */
	"polling.compliance",		/* 0x03 */
	"polling.configuration",	/* 0x04 */
	"config.linkwidthstart",	/* 0x05 */
	"config.linkwidthaccept",	/* 0x06 */
	"config.lanenumwait",		/* 0x07 */
	"config.lanenumaccept",		/* 0x08 */
	"config.complete",		/* 0x09 */
	"config.idle",			/* 0x0A */
	"recovery.receiverlock",	/* 0x0B */
	"recovery.equalization",	/* 0x0C */
	"recovery.speed",		/* 0x0D */
	"recovery.receiverconfig",	/* 0x0E */
	"recovery.idle",		/* 0x0F */
	"L0",				/* 0x10 */
	"L0s",				/* 0x11 */
	"L1.entry",			/* 0x12 */
	"L1.idle",			/* 0x13 */
	"L2.idle",			/* 0x14 */
	"L2.transmitwake",		/* 0x15 */
	"disable",			/* 0x16 */
	"loopback.entry",		/* 0x17 */
	"loopback.active",		/* 0x18 */
	"loopback.exit",		/* 0x19 */
	"hotreset",			/* 0x1A */
};
239 
/**
 * mtk_pcie_config_tlp_header() - Configure a configuration TLP header
 * @bus: PCI bus to query
 * @devfn: device/function number
 * @where: offset in config space
 * @size: data size in TLP header
 *
 * Set byte enable field and device information in configuration TLP header.
 */
static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
					int where, int size)
{
	struct mtk_gen3_pcie *pcie = bus->sysdata;
	int bytes;
	u32 val;

	/*
	 * Build the 4-bit first-DW byte-enable mask: 'size' contiguous bits,
	 * shifted to the byte lane selected by the low two bits of 'where'.
	 */
	bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);

	val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
	      PCIE_CFG_HEADER(bus->number, devfn);

	writel_relaxed(val, pcie->base + PCIE_CFGNUM_REG);
}
263 
/* Map a config-space offset to its window in the controller's MMIO space */
static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
				      int where)
{
	struct mtk_gen3_pcie *pcie = bus->sysdata;

	/* Target bus/devfn are selected via PCIE_CFGNUM_REG, not the address */
	return pcie->base + PCIE_CFG_OFFSET_ADDR + where;
}
271 
/* Config read: program the TLP header first, then do a 32-bit window read */
static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 *val)
{
	mtk_pcie_config_tlp_header(bus, devfn, where, size);

	return pci_generic_config_read32(bus, devfn, where, size, val);
}
279 
/* Config write: byte enables (set via the TLP header) select the lanes */
static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	mtk_pcie_config_tlp_header(bus, devfn, where, size);

	/*
	 * Sub-word writes must be positioned in the correct byte lane of the
	 * 32-bit access; the hardware honors only the enabled bytes.
	 */
	if (size <= 2)
		val <<= (where & 0x3) * 8;

	return pci_generic_config_write32(bus, devfn, where, 4, val);
}
290 
/* Config-space accessors handed to the PCI core */
static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read  = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};
296 
/*
 * Program address-translation windows covering [cpu_addr, cpu_addr + size)
 * onto pci_addr. Each hardware window must be a power-of-two size aligned to
 * its CPU address, so large/unaligned ranges are split across several tables.
 * *num is the next free table index and is advanced for each window used.
 *
 * Returns 0 on success or -EINVAL if a computed window would be < 4KiB;
 * running out of tables only logs a warning (the tail is left untranslated).
 *
 * NOTE(review): fls()/ffs() take an int, while resource_size_t may be 64-bit;
 * looks like ranges/addresses beyond 32 bits would be mishandled here — TODO
 * confirm whether such ranges can reach this driver.
 */
static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
				    resource_size_t cpu_addr,
				    resource_size_t pci_addr,
				    resource_size_t size,
				    unsigned long type, int *num)
{
	resource_size_t remaining = size;
	resource_size_t table_size;
	resource_size_t addr_align;
	const char *range_type;
	void __iomem *table;
	u32 val;

	while (remaining && (*num < PCIE_MAX_TRANS_TABLES)) {
		/* Table size needs to be a power of 2 */
		table_size = BIT(fls(remaining) - 1);

		/* Window size may not exceed the CPU address alignment */
		if (cpu_addr > 0) {
			addr_align = BIT(ffs(cpu_addr) - 1);
			table_size = min(table_size, addr_align);
		}

		/* Minimum size of translate table is 4KiB */
		if (table_size < 0x1000) {
			dev_err(pcie->dev, "illegal table size %#llx\n",
				(unsigned long long)table_size);
			return -EINVAL;
		}

		table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + *num * PCIE_ATR_TLB_SET_OFFSET;
		/* PCIE_ATR_SIZE encodes log2(table_size) and sets the enable bit */
		writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(table_size) - 1), table);
		writel_relaxed(upper_32_bits(cpu_addr), table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
		writel_relaxed(lower_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
		writel_relaxed(upper_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);

		if (type == IORESOURCE_IO) {
			val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
			range_type = "IO";
		} else {
			val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
			range_type = "MEM";
		}

		writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);

		dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
			range_type, *num, (unsigned long long)cpu_addr,
			(unsigned long long)pci_addr, (unsigned long long)table_size);

		cpu_addr += table_size;
		pci_addr += table_size;
		remaining -= table_size;
		(*num)++;
	}

	if (remaining)
		dev_warn(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
			 (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);

	return 0;
}
358 
/*
 * Initialize all MSI sets: compute each set's register base and capture
 * address, program the capture addresses into the hardware, then enable
 * all sets and the port-level MSI interrupt.
 */
static void mtk_pcie_enable_msi(struct mtk_gen3_pcie *pcie)
{
	int i;
	u32 val;

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

		msi_set->base = pcie->base + PCIE_MSI_SET_BASE_REG +
				i * PCIE_MSI_SET_OFFSET;
		/* msg_addr mirrors the same offset in physical address space */
		msi_set->msg_addr = pcie->reg_base + PCIE_MSI_SET_BASE_REG +
				    i * PCIE_MSI_SET_OFFSET;

		/* Configure the MSI capture address */
		writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
		writel_relaxed(upper_32_bits(msi_set->msg_addr),
			       pcie->base + PCIE_MSI_SET_ADDR_HI_BASE +
			       i * PCIE_MSI_SET_ADDR_HI_OFFSET);
	}

	/* Enable all PCIE_MSI_SET_NUM sets */
	val = readl_relaxed(pcie->base + PCIE_MSI_SET_ENABLE_REG);
	val |= PCIE_MSI_SET_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_MSI_SET_ENABLE_REG);

	/* Unmask the per-set MSI lines at the port interrupt controller */
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val |= PCIE_MSI_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
}
387 
/*
 * Bring up the PCIe port: configure RC mode, speed/width restrictions and
 * class code, mask INTx, toggle the reset signals with the spec-mandated
 * delay, wait for link-up, then enable MSI and program the translation
 * windows for every bridge resource.
 *
 * Returns 0 on success, or the readl_poll_timeout()/translation-table error.
 * The register write ordering here follows the hardware bring-up sequence
 * and must not be rearranged.
 */
static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
{
	struct resource_entry *entry;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	unsigned int table_index = 0;
	int err;
	u32 val;

	/* Set as RC mode and set controller PCIe Gen speed restriction, if any */
	val = readl_relaxed(pcie->base + PCIE_SETTING_REG);
	val |= PCIE_RC_MODE;
	if (pcie->max_link_speed) {
		val &= ~PCIE_SETTING_GEN_SUPPORT;

		/* Can enable link speed support only from Gen2 onwards */
		if (pcie->max_link_speed >= 2)
			/* One bit per supported Gen above Gen1 */
			val |= FIELD_PREP(PCIE_SETTING_GEN_SUPPORT,
					  GENMASK(pcie->max_link_speed - 2, 0));
	}
	if (pcie->num_lanes) {
		val &= ~PCIE_SETTING_LINK_WIDTH;

		/* Zero means one lane, each bit activates x2/x4/x8/x16 */
		if (pcie->num_lanes > 1)
			val |= FIELD_PREP(PCIE_SETTING_LINK_WIDTH,
					  GENMASK(fls(pcie->num_lanes >> 2), 0));
	}
	writel_relaxed(val, pcie->base + PCIE_SETTING_REG);

	/* Set Link Control 2 (LNKCTL2) speed restriction, if any */
	if (pcie->max_link_speed) {
		val = readl_relaxed(pcie->base + PCIE_CONF_LINK2_CTL_STS);
		val &= ~PCIE_CONF_LINK2_LCR2_LINK_SPEED;
		val |= FIELD_PREP(PCIE_CONF_LINK2_LCR2_LINK_SPEED, pcie->max_link_speed);
		writel_relaxed(val, pcie->base + PCIE_CONF_LINK2_CTL_STS);
	}

	/* Set class code */
	val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
	val &= ~GENMASK(31, 8);
	val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL);
	writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1);

	/* Mask all INTx interrupts */
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val &= ~PCIE_INTX_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);

	/* Disable DVFSRC voltage request */
	val = readl_relaxed(pcie->base + PCIE_MISC_CTRL_REG);
	val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
	writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);

	/* Assert all reset signals */
	val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
	val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

	/*
	 * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
	 * and 2.2.1 (Initial Power-Up (G3 to S0)).
	 * The deassertion of PERST# should be delayed 100ms (TPVPERL)
	 * for the power and clock to become stable.
	 */
	msleep(100);

	/* De-assert reset signals */
	val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

	/* Check if the link is up or not */
	err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
	if (err) {
		const char *ltssm_state;
		int ltssm_index;

		/* Report the LTSSM state to help diagnose the link failure */
		val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG);
		ltssm_index = PCIE_LTSSM_STATE(val);
		ltssm_state = ltssm_index >= ARRAY_SIZE(ltssm_str) ?
			      "Unknown state" : ltssm_str[ltssm_index];
		dev_err(pcie->dev,
			"PCIe link down, current LTSSM state: %s (%#x)\n",
			ltssm_state, val);
		return err;
	}

	mtk_pcie_enable_msi(pcie);

	/* Set PCIe translation windows */
	resource_list_for_each_entry(entry, &host->windows) {
		struct resource *res = entry->res;
		unsigned long type = resource_type(res);
		resource_size_t cpu_addr;
		resource_size_t pci_addr;
		resource_size_t size;

		if (type == IORESOURCE_IO)
			cpu_addr = pci_pio_to_address(res->start);
		else if (type == IORESOURCE_MEM)
			cpu_addr = res->start;
		else
			continue;

		pci_addr = res->start - entry->offset;
		size = resource_size(res);
		err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size,
					       type, &table_index);
		if (err)
			return err;
	}

	return 0;
}
503 
/* Mask at both the PCI device (MSI capability) and the parent domain */
static void mtk_pcie_msi_irq_mask(struct irq_data *data)
{
	pci_msi_mask_irq(data);
	irq_chip_mask_parent(data);
}
509 
/* Unmask at both the PCI device (MSI capability) and the parent domain */
static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
{
	pci_msi_unmask_irq(data);
	irq_chip_unmask_parent(data);
}
515 
/* Top-level (PCI/MSI) irq_chip; acks are delegated to the bottom domain */
static struct irq_chip mtk_msi_irq_chip = {
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = mtk_pcie_msi_irq_mask,
	.irq_unmask = mtk_pcie_msi_irq_unmask,
	.name = "MSI",
};

static struct msi_domain_info mtk_msi_domain_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
		  MSI_FLAG_MULTI_PCI_MSI,
	.chip	= &mtk_msi_irq_chip,
};
529 
/*
 * Compose the MSI message: address is the set's capture address, data is the
 * interrupt's index within its set (hwirq modulo the per-set count).
 */
static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq;

	hwirq =	data->hwirq % PCIE_MSI_IRQS_PER_SET;

	msg->address_hi = upper_32_bits(msi_set->msg_addr);
	msg->address_lo = lower_32_bits(msi_set->msg_addr);
	msg->data = hwirq;
	dev_dbg(pcie->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
		hwirq, msg->address_hi, msg->address_lo, msg->data);
}
544 
/* Ack an MSI by writing its bit to the set's write-1-to-clear status register */
static void mtk_msi_bottom_irq_ack(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	unsigned long hwirq;

	hwirq =	data->hwirq % PCIE_MSI_IRQS_PER_SET;

	writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET);
}
554 
/* Clear this MSI's enable bit; irq_lock serializes the read-modify-write */
static void mtk_msi_bottom_irq_mask(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq, flags;
	u32 val;

	hwirq =	data->hwirq % PCIE_MSI_IRQS_PER_SET;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	val &= ~BIT(hwirq);
	writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
570 
/* Set this MSI's enable bit; irq_lock serializes the read-modify-write */
static void mtk_msi_bottom_irq_unmask(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq, flags;
	u32 val;

	hwirq =	data->hwirq % PCIE_MSI_IRQS_PER_SET;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	val |= BIT(hwirq);
	writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
586 
/* Bottom-domain irq_chip operating directly on the controller's MSI sets */
static struct irq_chip mtk_msi_bottom_irq_chip = {
	.irq_ack		= mtk_msi_bottom_irq_ack,
	.irq_mask		= mtk_msi_bottom_irq_mask,
	.irq_unmask		= mtk_msi_bottom_irq_unmask,
	.irq_compose_msi_msg	= mtk_compose_msi_msg,
	.name			= "MSI",
};
594 
/*
 * Allocate nr_irqs (rounded up to a power of two, as multi-MSI requires)
 * contiguous hwirqs from the bitmap and bind them to the owning MSI set.
 *
 * Returns 0 on success or -ENOSPC when the bitmap has no suitable region.
 */
static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *arg)
{
	struct mtk_gen3_pcie *pcie = domain->host_data;
	struct mtk_msi_set *msi_set;
	int i, hwirq, set_idx;

	mutex_lock(&pcie->lock);

	hwirq = bitmap_find_free_region(pcie->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
					order_base_2(nr_irqs));

	mutex_unlock(&pcie->lock);

	if (hwirq < 0)
		return -ENOSPC;

	/* All IRQs of one allocation live in the same set */
	set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;
	msi_set = &pcie->msi_sets[set_idx];

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &mtk_msi_bottom_irq_chip, msi_set,
				    handle_edge_irq, NULL, NULL);

	return 0;
}
623 
/* Release the hwirq region claimed by mtk_msi_bottom_domain_alloc() */
static void mtk_msi_bottom_domain_free(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs)
{
	struct mtk_gen3_pcie *pcie = domain->host_data;
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);

	mutex_lock(&pcie->lock);

	bitmap_release_region(pcie->msi_irq_in_use, data->hwirq,
			      order_base_2(nr_irqs));

	mutex_unlock(&pcie->lock);

	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}
639 
/* Ops for the hwirq-level MSI domain parenting the PCI/MSI domain */
static const struct irq_domain_ops mtk_msi_bottom_domain_ops = {
	.alloc = mtk_msi_bottom_domain_alloc,
	.free = mtk_msi_bottom_domain_free,
};
644 
/* Mask one INTx line in the port interrupt-enable register */
static void mtk_intx_mask(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
657 
/* Unmask one INTx line in the port interrupt-enable register */
static void mtk_intx_unmask(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
670 
/**
 * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt
 * @data: pointer to chip specific data
 *
 * As an emulated level IRQ, its interrupt status will remain
 * until the corresponding de-assert message is received; hence that
 * the status can only be cleared when the interrupt has been serviced.
 */
static void mtk_intx_eoi(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long hwirq;

	/* INTx status bits sit above PCIE_INTX_SHIFT in the status register */
	hwirq = data->hwirq + PCIE_INTX_SHIFT;
	writel_relaxed(BIT(hwirq), pcie->base + PCIE_INT_STATUS_REG);
}
687 
/* irq_chip for the emulated-level INTx lines (fasteoi flow) */
static struct irq_chip mtk_intx_irq_chip = {
	.irq_mask		= mtk_intx_mask,
	.irq_unmask		= mtk_intx_unmask,
	.irq_eoi		= mtk_intx_eoi,
	.name			= "INTx",
};
694 
/* Map one INTx hwirq: attach chip data and the fasteoi handler */
static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, domain->host_data);
	irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip,
				      handle_fasteoi_irq, "INTx");
	return 0;
}
703 
/* Ops for the legacy INTx IRQ domain */
static const struct irq_domain_ops intx_domain_ops = {
	.map = mtk_pcie_intx_map,
};
707 
/*
 * Create the IRQ domains: a linear INTx domain from the port's
 * "interrupt-controller" child node, plus the MSI bottom domain and the
 * PCI/MSI domain stacked on top of it.
 *
 * Returns 0 on success or -ENODEV; on failure all previously created
 * domains are torn down in reverse order (goto cleanup).
 */
static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *intc_node, *node = dev->of_node;
	int ret;

	raw_spin_lock_init(&pcie->irq_lock);

	/* Setup INTx */
	intc_node = of_get_child_by_name(node, "interrupt-controller");
	if (!intc_node) {
		dev_err(dev, "missing interrupt-controller node\n");
		return -ENODEV;
	}

	pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
						  &intx_domain_ops, pcie);
	if (!pcie->intx_domain) {
		dev_err(dev, "failed to create INTx IRQ domain\n");
		ret = -ENODEV;
		goto out_put_node;
	}

	/* Setup MSI */
	mutex_init(&pcie->lock);

	pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
				  &mtk_msi_bottom_domain_ops, pcie);
	if (!pcie->msi_bottom_domain) {
		dev_err(dev, "failed to create MSI bottom domain\n");
		ret = -ENODEV;
		goto err_msi_bottom_domain;
	}

	pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
						     &mtk_msi_domain_info,
						     pcie->msi_bottom_domain);
	if (!pcie->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		ret = -ENODEV;
		goto err_msi_domain;
	}

	/* intc_node reference is only needed while creating the INTx domain */
	of_node_put(intc_node);
	return 0;

err_msi_domain:
	irq_domain_remove(pcie->msi_bottom_domain);
err_msi_bottom_domain:
	irq_domain_remove(pcie->intx_domain);
out_put_node:
	of_node_put(intc_node);
	return ret;
}
762 
/* Detach the chained handler, then remove any domains that were created */
static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie)
{
	irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);

	if (pcie->intx_domain)
		irq_domain_remove(pcie->intx_domain);

	if (pcie->msi_domain)
		irq_domain_remove(pcie->msi_domain);

	if (pcie->msi_bottom_domain)
		irq_domain_remove(pcie->msi_bottom_domain);

	irq_dispose_mapping(pcie->irq);
}
778 
/*
 * Demultiplex all pending, enabled MSIs of one set, re-reading the status
 * register until no enabled MSI remains pending.
 */
static void mtk_pcie_msi_handler(struct mtk_gen3_pcie *pcie, int set_idx)
{
	struct mtk_msi_set *msi_set = &pcie->msi_sets[set_idx];
	unsigned long msi_enable, msi_status;
	irq_hw_number_t bit, hwirq;

	msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);

	do {
		msi_status = readl_relaxed(msi_set->base +
					   PCIE_MSI_SET_STATUS_OFFSET);
		/* Only service sources that are currently enabled */
		msi_status &= msi_enable;
		if (!msi_status)
			break;

		for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
			/* Translate per-set bit to the global hwirq number */
			hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
			generic_handle_domain_irq(pcie->msi_bottom_domain, hwirq);
		}
	} while (true);
}
800 
/*
 * Chained handler for the port interrupt: dispatch pending INTx lines to
 * the INTx domain, then walk the per-set MSI bits, demultiplexing each
 * set and acking its summary bit in the port status register.
 */
static void mtk_pcie_irq_handler(struct irq_desc *desc)
{
	struct mtk_gen3_pcie *pcie = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long status;
	irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;

	chained_irq_enter(irqchip, desc);

	status = readl_relaxed(pcie->base + PCIE_INT_STATUS_REG);
	for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
			      PCIE_INTX_SHIFT)
		generic_handle_domain_irq(pcie->intx_domain,
					  irq_bit - PCIE_INTX_SHIFT);

	irq_bit = PCIE_MSI_SHIFT;
	for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
			      PCIE_MSI_SHIFT) {
		mtk_pcie_msi_handler(pcie, irq_bit - PCIE_MSI_SHIFT);

		/* Ack the set's summary bit after draining the set */
		writel_relaxed(BIT(irq_bit), pcie->base + PCIE_INT_STATUS_REG);
	}

	chained_irq_exit(irqchip, desc);
}
826 
/*
 * Create the IRQ domains and install the chained handler on the port's
 * platform interrupt. Returns 0 or a negative errno.
 */
static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int err;

	err = mtk_pcie_init_irq_domains(pcie);
	if (err)
		return err;

	pcie->irq = platform_get_irq(pdev, 0);
	if (pcie->irq < 0)
		return pcie->irq;

	irq_set_chained_handler_and_data(pcie->irq, mtk_pcie_irq_handler, pcie);

	return 0;
}
845 
846 static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
847 {
848 	int i, ret, num_resets = pcie->soc->phy_resets.num_resets;
849 	struct device *dev = pcie->dev;
850 	struct platform_device *pdev = to_platform_device(dev);
851 	struct resource *regs;
852 	u32 num_lanes;
853 
854 	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
855 	if (!regs)
856 		return -EINVAL;
857 	pcie->base = devm_ioremap_resource(dev, regs);
858 	if (IS_ERR(pcie->base)) {
859 		dev_err(dev, "failed to map register base\n");
860 		return PTR_ERR(pcie->base);
861 	}
862 
863 	pcie->reg_base = regs->start;
864 
865 	for (i = 0; i < num_resets; i++)
866 		pcie->phy_resets[i].id = pcie->soc->phy_resets.id[i];
867 
868 	ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets, pcie->phy_resets);
869 	if (ret) {
870 		dev_err(dev, "failed to get PHY bulk reset\n");
871 		return ret;
872 	}
873 
874 	pcie->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
875 	if (IS_ERR(pcie->mac_reset)) {
876 		ret = PTR_ERR(pcie->mac_reset);
877 		if (ret != -EPROBE_DEFER)
878 			dev_err(dev, "failed to get MAC reset\n");
879 
880 		return ret;
881 	}
882 
883 	pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
884 	if (IS_ERR(pcie->phy)) {
885 		ret = PTR_ERR(pcie->phy);
886 		if (ret != -EPROBE_DEFER)
887 			dev_err(dev, "failed to get PHY\n");
888 
889 		return ret;
890 	}
891 
892 	pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks);
893 	if (pcie->num_clks < 0) {
894 		dev_err(dev, "failed to get clocks\n");
895 		return pcie->num_clks;
896 	}
897 
898        ret = of_property_read_u32(dev->of_node, "num-lanes", &num_lanes);
899        if (ret == 0) {
900 	       if (num_lanes == 0 || num_lanes > 16 || (num_lanes != 1 && num_lanes % 2))
901 			dev_warn(dev, "invalid num-lanes, using controller defaults\n");
902 	       else
903 			pcie->num_lanes = num_lanes;
904        }
905 
906 	return 0;
907 }
908 
909 static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
910 {
911 	struct device *dev = pcie->dev;
912 	int err;
913 	u32 val;
914 
915 	/*
916 	 * Wait for the time needed to complete the bulk assert in
917 	 * mtk_pcie_setup for EN7581 SoC.
918 	 */
919 	mdelay(PCIE_EN7581_RESET_TIME_MS);
920 
921 	err = phy_init(pcie->phy);
922 	if (err) {
923 		dev_err(dev, "failed to initialize PHY\n");
924 		return err;
925 	}
926 
927 	err = phy_power_on(pcie->phy);
928 	if (err) {
929 		dev_err(dev, "failed to power on PHY\n");
930 		goto err_phy_on;
931 	}
932 
933 	err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
934 	if (err) {
935 		dev_err(dev, "failed to deassert PHYs\n");
936 		goto err_phy_deassert;
937 	}
938 
939 	/*
940 	 * Wait for the time needed to complete the bulk de-assert above.
941 	 * This time is specific for EN7581 SoC.
942 	 */
943 	mdelay(PCIE_EN7581_RESET_TIME_MS);
944 
945 	pm_runtime_enable(dev);
946 	pm_runtime_get_sync(dev);
947 
948 	err = clk_bulk_prepare(pcie->num_clks, pcie->clks);
949 	if (err) {
950 		dev_err(dev, "failed to prepare clock\n");
951 		goto err_clk_prepare;
952 	}
953 
954 	val = FIELD_PREP(PCIE_VAL_LN0_DOWNSTREAM, 0x47) |
955 	      FIELD_PREP(PCIE_VAL_LN1_DOWNSTREAM, 0x47) |
956 	      FIELD_PREP(PCIE_VAL_LN0_UPSTREAM, 0x41) |
957 	      FIELD_PREP(PCIE_VAL_LN1_UPSTREAM, 0x41);
958 	writel_relaxed(val, pcie->base + PCIE_EQ_PRESET_01_REG);
959 
960 	val = PCIE_K_PHYPARAM_QUERY | PCIE_K_QUERY_TIMEOUT |
961 	      FIELD_PREP(PCIE_K_PRESET_TO_USE_16G, 0x80) |
962 	      FIELD_PREP(PCIE_K_PRESET_TO_USE, 0x2) |
963 	      FIELD_PREP(PCIE_K_FINETUNE_MAX, 0xf);
964 	writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG);
965 
966 	err = clk_bulk_enable(pcie->num_clks, pcie->clks);
967 	if (err) {
968 		dev_err(dev, "failed to prepare clock\n");
969 		goto err_clk_enable;
970 	}
971 
972 	return 0;
973 
974 err_clk_enable:
975 	clk_bulk_unprepare(pcie->num_clks, pcie->clks);
976 err_clk_prepare:
977 	pm_runtime_put_sync(dev);
978 	pm_runtime_disable(dev);
979 	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
980 err_phy_deassert:
981 	phy_power_off(pcie->phy);
982 err_phy_on:
983 	phy_exit(pcie->phy);
984 
985 	return err;
986 }
987 
/**
 * mtk_pcie_power_up() - Default controller power-up sequence (e.g. MT8192)
 * @pcie: PCIe port information
 *
 * Ordering matters here: PHY resets are deasserted first, then the PHY is
 * initialized and powered on, then the MAC reset is released and the bus
 * clocks enabled. The error path unwinds in exact reverse order.
 *
 * Return: 0 on success or a negative errno on failure.
 */
static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	/* PHY power on and enable pipe clock */
	err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
	if (err) {
		dev_err(dev, "failed to deassert PHYs\n");
		return err;
	}

	err = phy_init(pcie->phy);
	if (err) {
		dev_err(dev, "failed to initialize PHY\n");
		goto err_phy_init;
	}

	err = phy_power_on(pcie->phy);
	if (err) {
		dev_err(dev, "failed to power on PHY\n");
		goto err_phy_on;
	}

	/* MAC power on and enable transaction layer clocks */
	reset_control_deassert(pcie->mac_reset);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
	if (err) {
		dev_err(dev, "failed to enable clocks\n");
		goto err_clk_init;
	}

	return 0;

err_clk_init:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	reset_control_assert(pcie->mac_reset);
	phy_power_off(pcie->phy);
err_phy_on:
	phy_exit(pcie->phy);
err_phy_init:
	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);

	return err;
}
1038 
/**
 * mtk_pcie_power_down() - Power down the controller
 * @pcie: PCIe port information
 *
 * Reverse of mtk_pcie_power_up(): disable clocks, drop the runtime PM
 * reference, assert the MAC reset, power off and exit the PHY, and finally
 * assert the (possibly shared) PHY reset lines.
 */
static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
{
	clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);

	pm_runtime_put_sync(pcie->dev);
	pm_runtime_disable(pcie->dev);
	reset_control_assert(pcie->mac_reset);

	phy_power_off(pcie->phy);
	phy_exit(pcie->phy);
	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
}
1051 
1052 static int mtk_pcie_get_controller_max_link_speed(struct mtk_gen3_pcie *pcie)
1053 {
1054 	u32 val;
1055 	int ret;
1056 
1057 	val = readl_relaxed(pcie->base + PCIE_BASE_CFG_REG);
1058 	val = FIELD_GET(PCIE_BASE_CFG_SPEED, val);
1059 	ret = fls(val);
1060 
1061 	return ret > 0 ? ret : -EINVAL;
1062 }
1063 
/**
 * mtk_pcie_setup() - One-time controller bring-up at probe time
 * @pcie: PCIe port information
 *
 * Parses DT resources, puts the controller into a clean reset state,
 * powers it up via the SoC-specific hook, optionally applies the DT
 * "max-link-speed" limit, then brings the link up and wires the IRQs.
 *
 * Return: 0 on success or a negative errno on failure.
 */
static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
{
	int err, max_speed;

	err = mtk_pcie_parse_port(pcie);
	if (err)
		return err;

	/*
	 * Deassert the line in order to avoid unbalance in deassert_count
	 * counter since the bulk is shared.
	 */
	reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
	/*
	 * The controller may have been left out of reset by the bootloader
	 * so make sure that we get a clean start by asserting resets here.
	 */
	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);

	reset_control_assert(pcie->mac_reset);
	usleep_range(10, 20);

	/* Don't touch the hardware registers before power up */
	err = pcie->soc->power_up(pcie);
	if (err)
		return err;

	/* err > 0 is the DT "max-link-speed" value; err < 0 means absent/invalid */
	err = of_pci_get_max_link_speed(pcie->dev->of_node);
	if (err) {
		/* Get the maximum speed supported by the controller */
		max_speed = mtk_pcie_get_controller_max_link_speed(pcie);

		/* Set max_link_speed only if the controller supports it */
		/*
		 * NOTE(review): "max_speed <= err" accepts a DT speed at or
		 * above what the controller advertises, which contradicts the
		 * comment above — confirm the comparison direction is intended.
		 */
		if (max_speed >= 0 && max_speed <= err) {
			pcie->max_link_speed = err;
			dev_info(pcie->dev,
				 "maximum controller link speed Gen%d, overriding to Gen%u",
				 max_speed, pcie->max_link_speed);
		}
	}

	/* Try link up */
	err = mtk_pcie_startup_port(pcie);
	if (err)
		goto err_setup;

	err = mtk_pcie_setup_irq(pcie);
	if (err)
		goto err_setup;

	return 0;

err_setup:
	mtk_pcie_power_down(pcie);

	return err;
}
1121 
1122 static int mtk_pcie_probe(struct platform_device *pdev)
1123 {
1124 	struct device *dev = &pdev->dev;
1125 	struct mtk_gen3_pcie *pcie;
1126 	struct pci_host_bridge *host;
1127 	int err;
1128 
1129 	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
1130 	if (!host)
1131 		return -ENOMEM;
1132 
1133 	pcie = pci_host_bridge_priv(host);
1134 
1135 	pcie->dev = dev;
1136 	pcie->soc = device_get_match_data(dev);
1137 	platform_set_drvdata(pdev, pcie);
1138 
1139 	err = mtk_pcie_setup(pcie);
1140 	if (err)
1141 		return err;
1142 
1143 	host->ops = &mtk_pcie_ops;
1144 	host->sysdata = pcie;
1145 
1146 	err = pci_host_probe(host);
1147 	if (err) {
1148 		mtk_pcie_irq_teardown(pcie);
1149 		mtk_pcie_power_down(pcie);
1150 		return err;
1151 	}
1152 
1153 	return 0;
1154 }
1155 
/*
 * Platform remove: stop and remove the root bus under the rescan/remove
 * lock, then tear down the IRQ domains and power the controller off.
 */
static void mtk_pcie_remove(struct platform_device *pdev)
{
	struct mtk_gen3_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);

	pci_lock_rescan_remove();
	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);
	pci_unlock_rescan_remove();

	mtk_pcie_irq_teardown(pcie);
	mtk_pcie_power_down(pcie);
}
1169 
1170 static void mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
1171 {
1172 	int i;
1173 
1174 	raw_spin_lock(&pcie->irq_lock);
1175 
1176 	pcie->saved_irq_state = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
1177 
1178 	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
1179 		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];
1180 
1181 		msi_set->saved_irq_state = readl_relaxed(msi_set->base +
1182 					   PCIE_MSI_SET_ENABLE_OFFSET);
1183 	}
1184 
1185 	raw_spin_unlock(&pcie->irq_lock);
1186 }
1187 
1188 static void mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
1189 {
1190 	int i;
1191 
1192 	raw_spin_lock(&pcie->irq_lock);
1193 
1194 	writel_relaxed(pcie->saved_irq_state, pcie->base + PCIE_INT_ENABLE_REG);
1195 
1196 	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
1197 		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];
1198 
1199 		writel_relaxed(msi_set->saved_irq_state,
1200 			       msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
1201 	}
1202 
1203 	raw_spin_unlock(&pcie->irq_lock);
1204 }
1205 
1206 static int mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
1207 {
1208 	u32 val;
1209 
1210 	val = readl_relaxed(pcie->base + PCIE_ICMD_PM_REG);
1211 	val |= PCIE_TURN_OFF_LINK;
1212 	writel_relaxed(val, pcie->base + PCIE_ICMD_PM_REG);
1213 
1214 	/* Check the link is L2 */
1215 	return readl_poll_timeout(pcie->base + PCIE_LTSSM_STATUS_REG, val,
1216 				  (PCIE_LTSSM_STATE(val) ==
1217 				   PCIE_LTSSM_STATE_L2_IDLE), 20,
1218 				   50 * USEC_PER_MSEC);
1219 }
1220 
1221 static int mtk_pcie_suspend_noirq(struct device *dev)
1222 {
1223 	struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
1224 	int err;
1225 	u32 val;
1226 
1227 	/* Trigger link to L2 state */
1228 	err = mtk_pcie_turn_off_link(pcie);
1229 	if (err) {
1230 		dev_err(pcie->dev, "cannot enter L2 state\n");
1231 		return err;
1232 	}
1233 
1234 	/* Pull down the PERST# pin */
1235 	val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
1236 	val |= PCIE_PE_RSTB;
1237 	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
1238 
1239 	dev_dbg(pcie->dev, "entered L2 states successfully");
1240 
1241 	mtk_pcie_irq_save(pcie);
1242 	mtk_pcie_power_down(pcie);
1243 
1244 	return 0;
1245 }
1246 
1247 static int mtk_pcie_resume_noirq(struct device *dev)
1248 {
1249 	struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
1250 	int err;
1251 
1252 	err = pcie->soc->power_up(pcie);
1253 	if (err)
1254 		return err;
1255 
1256 	err = mtk_pcie_startup_port(pcie);
1257 	if (err) {
1258 		mtk_pcie_power_down(pcie);
1259 		return err;
1260 	}
1261 
1262 	mtk_pcie_irq_restore(pcie);
1263 
1264 	return 0;
1265 }
1266 
/* Suspend/resume only in the noirq phase; no regular-phase PM callbacks */
static const struct dev_pm_ops mtk_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
				  mtk_pcie_resume_noirq)
};
1271 
/* MT8192: standard power-up sequence, single "phy" reset line */
static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_mt8192 = {
	.power_up = mtk_pcie_power_up,
	.phy_resets = {
		.id[0] = "phy",
		.num_resets = 1,
	},
};
1279 
/* EN7581: SoC-specific power-up sequence, one reset line per PHY lane */
static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = {
	.power_up = mtk_pcie_en7581_power_up,
	.phy_resets = {
		.id[0] = "phy-lane0",
		.id[1] = "phy-lane1",
		.id[2] = "phy-lane2",
		.num_resets = 3,
	},
};
1289 
/* DT match table; .data selects the per-SoC pdata used throughout */
static const struct of_device_id mtk_pcie_of_match[] = {
	{ .compatible = "airoha,en7581-pcie", .data = &mtk_pcie_soc_en7581 },
	{ .compatible = "mediatek,mt8192-pcie", .data = &mtk_pcie_soc_mt8192 },
	{},
};
MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);
1296 
/* Platform driver glue; PM ops cover system sleep in the noirq phase */
static struct platform_driver mtk_pcie_driver = {
	.probe = mtk_pcie_probe,
	.remove = mtk_pcie_remove,
	.driver = {
		.name = "mtk-pcie-gen3",
		.of_match_table = mtk_pcie_of_match,
		.pm = &mtk_pcie_pm_ops,
	},
};

module_platform_driver(mtk_pcie_driver);
MODULE_DESCRIPTION("MediaTek Gen3 PCIe host controller driver");
MODULE_LICENSE("GPL v2");
1310