// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek PCIe host controller driver.
 *
 * Copyright (c) 2020 MediaTek Inc.
 * Author: Jianjun Wang <jianjun.wang@mediatek.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include "../pci.h"

#define PCIE_BASE_CFG_REG		0x14
#define PCIE_BASE_CFG_SPEED		GENMASK(15, 8)

#define PCIE_SETTING_REG		0x80
#define PCIE_SETTING_LINK_WIDTH		GENMASK(11, 8)
#define PCIE_SETTING_GEN_SUPPORT	GENMASK(14, 12)
#define PCIE_PCI_IDS_1			0x9c
#define PCI_CLASS(class)		((class) << 8)
#define PCIE_RC_MODE			BIT(0)

#define PCIE_EQ_PRESET_01_REG		0x100
#define PCIE_VAL_LN0_DOWNSTREAM		GENMASK(6, 0)
#define PCIE_VAL_LN0_UPSTREAM		GENMASK(14, 8)
#define PCIE_VAL_LN1_DOWNSTREAM		GENMASK(22, 16)
#define PCIE_VAL_LN1_UPSTREAM		GENMASK(30, 24)

#define PCIE_CFGNUM_REG			0x140
#define PCIE_CFG_DEVFN(devfn)		((devfn) & GENMASK(7, 0))
#define PCIE_CFG_BUS(bus)		(((bus) << 8) & GENMASK(15, 8))
#define PCIE_CFG_BYTE_EN(bytes)		(((bytes) << 16) & GENMASK(19, 16))
#define PCIE_CFG_FORCE_BYTE_EN		BIT(20)
#define PCIE_CFG_OFFSET_ADDR		0x1000
#define PCIE_CFG_HEADER(bus, devfn) \
	(PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn))

#define PCIE_RST_CTRL_REG		0x148
#define PCIE_MAC_RSTB			BIT(0)
#define PCIE_PHY_RSTB			BIT(1)
#define PCIE_BRG_RSTB			BIT(2)
#define PCIE_PE_RSTB			BIT(3)

#define PCIE_LTSSM_STATUS_REG		0x150
#define PCIE_LTSSM_STATE_MASK		GENMASK(28, 24)
#define PCIE_LTSSM_STATE(val)		(((val) & PCIE_LTSSM_STATE_MASK) >> 24)
#define PCIE_LTSSM_STATE_L2_IDLE	0x14

#define PCIE_LINK_STATUS_REG		0x154
#define PCIE_PORT_LINKUP		BIT(8)

#define PCIE_MSI_SET_NUM		8
#define PCIE_MSI_IRQS_PER_SET		32
#define PCIE_MSI_IRQS_NUM \
	(PCIE_MSI_IRQS_PER_SET * PCIE_MSI_SET_NUM)
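/* i.e. 8 sets x 32 vectors per set = 256 MSI interrupts in total */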

#define PCIE_INT_ENABLE_REG		0x180
#define PCIE_MSI_ENABLE			GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
#define PCIE_MSI_SHIFT			8
#define PCIE_INTX_SHIFT			24
#define PCIE_INTX_ENABLE \
	GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)

#define PCIE_INT_STATUS_REG		0x184
#define PCIE_MSI_SET_ENABLE_REG		0x190
#define PCIE_MSI_SET_ENABLE		GENMASK(PCIE_MSI_SET_NUM - 1, 0)

#define PCIE_PIPE4_PIE8_REG		0x338
#define PCIE_K_FINETUNE_MAX		GENMASK(5, 0)
#define PCIE_K_FINETUNE_ERR		GENMASK(7, 6)
#define PCIE_K_PRESET_TO_USE		GENMASK(18, 8)
#define PCIE_K_PHYPARAM_QUERY		BIT(19)
#define PCIE_K_QUERY_TIMEOUT		BIT(20)
#define PCIE_K_PRESET_TO_USE_16G	GENMASK(31, 21)

#define PCIE_MSI_SET_BASE_REG		0xc00
#define PCIE_MSI_SET_OFFSET		0x10
#define PCIE_MSI_SET_STATUS_OFFSET	0x04
#define PCIE_MSI_SET_ENABLE_OFFSET	0x08
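/*
 * Per-set register layout (see mtk_pcie_enable_msi()): each of the eight
 * sets occupies a 0x10 stride at PCIE_MSI_SET_BASE_REG, with the MSI
 * capture (message) address at +0x00, the status register at +0x04 and
 * the enable register at +0x08.
 */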

#define PCIE_MSI_SET_ADDR_HI_BASE	0xc80
#define PCIE_MSI_SET_ADDR_HI_OFFSET	0x04

#define PCIE_ICMD_PM_REG		0x198
#define PCIE_TURN_OFF_LINK		BIT(4)

#define PCIE_MISC_CTRL_REG		0x348
#define PCIE_DISABLE_DVFSRC_VLT_REQ	BIT(1)

#define PCIE_TRANS_TABLE_BASE_REG	0x800
#define PCIE_ATR_SRC_ADDR_MSB_OFFSET	0x4
#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET	0x8
#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET	0xc
#define PCIE_ATR_TRSL_PARAM_OFFSET	0x10
#define PCIE_ATR_TLB_SET_OFFSET		0x20

#define PCIE_MAX_TRANS_TABLES		8
#define PCIE_ATR_EN			BIT(0)
#define PCIE_ATR_SIZE(size) \
	(((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
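/*
 * The argument to PCIE_ATR_SIZE() is log2 of the window size in bytes;
 * the value placed in bits [6:1] is that minus one, e.g. a 4 KiB window
 * is PCIE_ATR_SIZE(12), i.e. 11 in the size field plus the enable bit.
 */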
#define PCIE_ATR_ID(id)			((id) & GENMASK(3, 0))
#define PCIE_ATR_TYPE_MEM		PCIE_ATR_ID(0)
#define PCIE_ATR_TYPE_IO		PCIE_ATR_ID(1)
#define PCIE_ATR_TLP_TYPE(type)		(((type) << 16) & GENMASK(18, 16))
#define PCIE_ATR_TLP_TYPE_MEM		PCIE_ATR_TLP_TYPE(0)
#define PCIE_ATR_TLP_TYPE_IO		PCIE_ATR_TLP_TYPE(2)

#define MAX_NUM_PHY_RESETS		3

#define PCIE_MTK_RESET_TIME_US		10

/* Time in ms needed to complete PCIe reset on EN7581 SoC */
#define PCIE_EN7581_RESET_TIME_MS	100

struct mtk_gen3_pcie;

#define PCIE_CONF_LINK2_CTL_STS		(PCIE_CFG_OFFSET_ADDR + 0xb0)
#define PCIE_CONF_LINK2_LCR2_LINK_SPEED	GENMASK(3, 0)

enum mtk_gen3_pcie_flags {
	SKIP_PCIE_RSTB	= BIT(0), /* Skip PERST# assertion during device
				   * probing or suspend/resume phase to
				   * avoid hw bugs/issues.
				   */
};

/**
 * struct mtk_gen3_pcie_pdata - differentiate between host generations
 * @power_up: pcie power_up callback
 * @phy_resets: SoC-specific PHY reset line data.
 * @flags: pcie device flags.
 */
struct mtk_gen3_pcie_pdata {
	int (*power_up)(struct mtk_gen3_pcie *pcie);
	struct {
		const char *id[MAX_NUM_PHY_RESETS];
		int num_resets;
	} phy_resets;
	u32 flags;
};

/**
 * struct mtk_msi_set - MSI information for each set
 * @base: IO mapped register base
 * @msg_addr: MSI message address
 * @saved_irq_state: IRQ enable state saved at suspend time
 */
struct mtk_msi_set {
	void __iomem *base;
	phys_addr_t msg_addr;
	u32 saved_irq_state;
};

/**
 * struct mtk_gen3_pcie - PCIe port information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base
 * @reg_base: physical register base
 * @mac_reset: MAC reset control
 * @phy_resets: PHY reset controllers
 * @phy: PHY controller block
 * @clks: PCIe clocks
 * @num_clks: PCIe clocks count for this port
 * @max_link_speed: Maximum link speed (PCIe Gen) for this port
 * @num_lanes: Number of PCIe lanes for this port
 * @irq: PCIe controller interrupt number
 * @saved_irq_state: IRQ enable state saved at suspend time
 * @irq_lock: lock protecting IRQ register access
 * @intx_domain: legacy INTx IRQ domain
 * @msi_domain: MSI IRQ domain
 * @msi_bottom_domain: MSI IRQ bottom domain
 * @msi_sets: MSI sets information
 * @lock: lock protecting IRQ bit map
 * @msi_irq_in_use: bit map for assigned MSI IRQ
 * @soc: pointer to SoC-dependent operations
 */
struct mtk_gen3_pcie {
	struct device *dev;
	void __iomem *base;
	phys_addr_t reg_base;
	struct reset_control *mac_reset;
	struct reset_control_bulk_data phy_resets[MAX_NUM_PHY_RESETS];
	struct phy *phy;
	struct clk_bulk_data *clks;
	int num_clks;
	u8 max_link_speed;
	u8 num_lanes;

	int irq;
	u32 saved_irq_state;
	raw_spinlock_t irq_lock;
	struct irq_domain *intx_domain;
	struct irq_domain *msi_domain;
	struct irq_domain *msi_bottom_domain;
	struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
	struct mutex lock;
	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);

	const struct mtk_gen3_pcie_pdata *soc;
};

/* LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24] */
static const char *const ltssm_str[] = {
	"detect.quiet",			/* 0x00 */
	"detect.active",		/* 0x01 */
	"polling.active",		/* 0x02 */
	"polling.compliance",		/* 0x03 */
	"polling.configuration",	/* 0x04 */
	"config.linkwidthstart",	/* 0x05 */
	"config.linkwidthaccept",	/* 0x06 */
	"config.lanenumwait",		/* 0x07 */
	"config.lanenumaccept",		/* 0x08 */
	"config.complete",		/* 0x09 */
	"config.idle",			/* 0x0A */
	"recovery.receiverlock",	/* 0x0B */
	"recovery.equalization",	/* 0x0C */
	"recovery.speed",		/* 0x0D */
	"recovery.receiverconfig",	/* 0x0E */
	"recovery.idle",		/* 0x0F */
	"L0",				/* 0x10 */
	"L0s",				/* 0x11 */
	"L1.entry",			/* 0x12 */
	"L1.idle",			/* 0x13 */
	"L2.idle",			/* 0x14 */
	"L2.transmitwake",		/* 0x15 */
	"disable",			/* 0x16 */
	"loopback.entry",		/* 0x17 */
	"loopback.active",		/* 0x18 */
	"loopback.exit",		/* 0x19 */
	"hotreset",			/* 0x1A */
};

/**
 * mtk_pcie_config_tlp_header() - Configure a configuration TLP header
 * @bus: PCI bus to query
 * @devfn: device/function number
 * @where: offset in config space
 * @size: data size in TLP header
 *
 * Set byte enable field and device information in configuration TLP header.
 */
static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
					int where, int size)
{
	struct mtk_gen3_pcie *pcie = bus->sysdata;
	int bytes;
	u32 val;

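	/*
	 * Build the 4-bit byte-enable mask: GENMASK(size - 1, 0) selects
	 * 'size' bytes and the shift moves the mask to the byte lane given
	 * by the offset, e.g. a 2-byte access at offset 2 yields 0xc.
	 */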
	bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);

	val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
	      PCIE_CFG_HEADER(bus->number, devfn);

	writel_relaxed(val, pcie->base + PCIE_CFGNUM_REG);
}

static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
				      int where)
{
	struct mtk_gen3_pcie *pcie = bus->sysdata;

	return pcie->base + PCIE_CFG_OFFSET_ADDR + where;
}

static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 *val)
{
	mtk_pcie_config_tlp_header(bus, devfn, where, size);

	return pci_generic_config_read32(bus, devfn, where, size, val);
}

static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	mtk_pcie_config_tlp_header(bus, devfn, where, size);

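	/*
	 * The hardware only performs 32-bit config writes, with the byte
	 * enables programmed by mtk_pcie_config_tlp_header() above, so
	 * shift sub-word data onto its byte lane within the 32-bit word.
	 */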
	if (size <= 2)
		val <<= (where & 0x3) * 8;

	return pci_generic_config_write32(bus, devfn, where, 4, val);
}

static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read  = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};

static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
				    resource_size_t cpu_addr,
				    resource_size_t pci_addr,
				    resource_size_t size,
				    unsigned long type, int *num)
{
	resource_size_t remaining = size;
	resource_size_t table_size;
	resource_size_t addr_align;
	const char *range_type;
	void __iomem *table;
	u32 val;

	while (remaining && (*num < PCIE_MAX_TRANS_TABLES)) {
		/* Table size needs to be a power of 2 */
		table_size = BIT(fls(remaining) - 1);

		if (cpu_addr > 0) {
			addr_align = BIT(ffs(cpu_addr) - 1);
			table_size = min(table_size, addr_align);
		}
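		/*
		 * e.g. remaining = 0x30000 at cpu_addr 0x20000000 gives a
		 * first window of 0x20000: the largest power of two not
		 * exceeding both the remaining size and the address
		 * alignment.
		 */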

		/* The minimum size of a translation table is 4 KiB */
		if (table_size < 0x1000) {
			dev_err(pcie->dev, "illegal table size %#llx\n",
				(unsigned long long)table_size);
			return -EINVAL;
		}

		table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + *num * PCIE_ATR_TLB_SET_OFFSET;
		writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(table_size) - 1), table);
		writel_relaxed(upper_32_bits(cpu_addr), table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
		writel_relaxed(lower_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
		writel_relaxed(upper_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);

		if (type == IORESOURCE_IO) {
			val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
			range_type = "IO";
		} else {
			val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
			range_type = "MEM";
		}

		writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);

		dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
			range_type, *num, (unsigned long long)cpu_addr,
			(unsigned long long)pci_addr, (unsigned long long)table_size);

		cpu_addr += table_size;
		pci_addr += table_size;
		remaining -= table_size;
		(*num)++;
	}

	if (remaining)
		dev_warn(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
			 (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);

	return 0;
}

static void mtk_pcie_enable_msi(struct mtk_gen3_pcie *pcie)
{
	int i;
	u32 val;

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

		msi_set->base = pcie->base + PCIE_MSI_SET_BASE_REG +
				i * PCIE_MSI_SET_OFFSET;
		msi_set->msg_addr = pcie->reg_base + PCIE_MSI_SET_BASE_REG +
				    i * PCIE_MSI_SET_OFFSET;

		/* Configure the MSI capture address */
		writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
		writel_relaxed(upper_32_bits(msi_set->msg_addr),
			       pcie->base + PCIE_MSI_SET_ADDR_HI_BASE +
			       i * PCIE_MSI_SET_ADDR_HI_OFFSET);
	}

	val = readl_relaxed(pcie->base + PCIE_MSI_SET_ENABLE_REG);
	val |= PCIE_MSI_SET_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_MSI_SET_ENABLE_REG);

	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val |= PCIE_MSI_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
}

static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
{
	struct resource_entry *entry;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	unsigned int table_index = 0;
	int err;
	u32 val;

	/* Set as RC mode and set controller PCIe Gen speed restriction, if any */
	val = readl_relaxed(pcie->base + PCIE_SETTING_REG);
	val |= PCIE_RC_MODE;
	if (pcie->max_link_speed) {
		val &= ~PCIE_SETTING_GEN_SUPPORT;

		/* Can enable link speed support only from Gen2 onwards */
		if (pcie->max_link_speed >= 2)
			val |= FIELD_PREP(PCIE_SETTING_GEN_SUPPORT,
					  GENMASK(pcie->max_link_speed - 2, 0));
	}
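	/*
	 * e.g. with max_link_speed == 3 the code above programs
	 * GENMASK(1, 0) = 0x3 into PCIE_SETTING_GEN_SUPPORT, which enables
	 * Gen2 and Gen3 on top of the baseline Gen1.
	 */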
	if (pcie->num_lanes) {
		val &= ~PCIE_SETTING_LINK_WIDTH;

		/* Zero means one lane, each bit activates x2/x4/x8/x16 */
		if (pcie->num_lanes > 1)
			val |= FIELD_PREP(PCIE_SETTING_LINK_WIDTH,
					  GENMASK(fls(pcie->num_lanes >> 2), 0));
	}
	writel_relaxed(val, pcie->base + PCIE_SETTING_REG);

	/* Set Link Control 2 (LNKCTL2) speed restriction, if any */
	if (pcie->max_link_speed) {
		val = readl_relaxed(pcie->base + PCIE_CONF_LINK2_CTL_STS);
		val &= ~PCIE_CONF_LINK2_LCR2_LINK_SPEED;
		val |= FIELD_PREP(PCIE_CONF_LINK2_LCR2_LINK_SPEED, pcie->max_link_speed);
		writel_relaxed(val, pcie->base + PCIE_CONF_LINK2_CTL_STS);
	}

	/* Set class code */
	val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
	val &= ~GENMASK(31, 8);
	val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL);
	writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1);

	/* Mask all INTx interrupts */
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val &= ~PCIE_INTX_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);

	/* Disable DVFSRC voltage request */
	val = readl_relaxed(pcie->base + PCIE_MISC_CTRL_REG);
	val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
	writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);

	/*
	 * Airoha EN7581 has a hw bug when asserting/releasing the
	 * PCIE_PE_RSTB signal, causing occasional PCIe link-down events.
	 * To work around the issue, the PCIE_RSTB signals are not
	 * asserted/released at this stage and the PCIe block is reset
	 * using en7523_reset_assert() and en7581_pci_enable().
	 */
	if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
		/* Assert all reset signals */
		val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
		val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
		       PCIE_PE_RSTB;
		writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

		/*
		 * Described in PCIe CEM specification revision 6.0.
		 *
		 * The deassertion of PERST# should be delayed 100ms (TPVPERL)
		 * for the power and clock to become stable.
		 */
		msleep(PCIE_T_PVPERL_MS);

		/* De-assert reset signals */
		val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
			 PCIE_PE_RSTB);
		writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
	}

	/* Check if the link is up or not */
	err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
	if (err) {
		const char *ltssm_state;
		int ltssm_index;

		val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG);
		ltssm_index = PCIE_LTSSM_STATE(val);
		ltssm_state = ltssm_index >= ARRAY_SIZE(ltssm_str) ?
			      "Unknown state" : ltssm_str[ltssm_index];
		dev_err(pcie->dev,
			"PCIe link down, current LTSSM state: %s (%#x)\n",
			ltssm_state, val);
		return err;
	}

	mtk_pcie_enable_msi(pcie);

	/* Set PCIe translation windows */
	resource_list_for_each_entry(entry, &host->windows) {
		struct resource *res = entry->res;
		unsigned long type = resource_type(res);
		resource_size_t cpu_addr;
		resource_size_t pci_addr;
		resource_size_t size;

		if (type == IORESOURCE_IO)
			cpu_addr = pci_pio_to_address(res->start);
		else if (type == IORESOURCE_MEM)
			cpu_addr = res->start;
		else
			continue;

		pci_addr = res->start - entry->offset;
		size = resource_size(res);
		err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size,
					       type, &table_index);
		if (err)
			return err;
	}

	return 0;
}

static void mtk_pcie_msi_irq_mask(struct irq_data *data)
{
	pci_msi_mask_irq(data);
	irq_chip_mask_parent(data);
}

static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
{
	pci_msi_unmask_irq(data);
	irq_chip_unmask_parent(data);
}

static struct irq_chip mtk_msi_irq_chip = {
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = mtk_pcie_msi_irq_mask,
	.irq_unmask = mtk_pcie_msi_irq_unmask,
	.name = "MSI",
};

static struct msi_domain_info mtk_msi_domain_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
		  MSI_FLAG_MULTI_PCI_MSI,
	.chip	= &mtk_msi_irq_chip,
};

static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	msg->address_hi = upper_32_bits(msi_set->msg_addr);
	msg->address_lo = lower_32_bits(msi_set->msg_addr);
	msg->data = hwirq;
	dev_dbg(pcie->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
		hwirq, msg->address_hi, msg->address_lo, msg->data);
}

static void mtk_msi_bottom_irq_ack(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	unsigned long hwirq;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET);
}

static void mtk_msi_bottom_irq_mask(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq, flags;
	u32 val;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	val &= ~BIT(hwirq);
	writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static void mtk_msi_bottom_irq_unmask(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq, flags;
	u32 val;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	val |= BIT(hwirq);
	writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static struct irq_chip mtk_msi_bottom_irq_chip = {
	.irq_ack		= mtk_msi_bottom_irq_ack,
	.irq_mask		= mtk_msi_bottom_irq_mask,
	.irq_unmask		= mtk_msi_bottom_irq_unmask,
	.irq_compose_msi_msg	= mtk_compose_msi_msg,
	.name			= "MSI",
};

static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *arg)
{
	struct mtk_gen3_pcie *pcie = domain->host_data;
	struct mtk_msi_set *msi_set;
	int i, hwirq, set_idx;

	mutex_lock(&pcie->lock);

	hwirq = bitmap_find_free_region(pcie->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
					order_base_2(nr_irqs));

	mutex_unlock(&pcie->lock);

	if (hwirq < 0)
		return -ENOSPC;

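	/*
	 * bitmap_find_free_region() returns a region aligned to its
	 * power-of-two size, so an allocation of up to 32 vectors never
	 * crosses a set boundary and a single msi_set covers all of them.
	 */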
	set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;
	msi_set = &pcie->msi_sets[set_idx];

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &mtk_msi_bottom_irq_chip, msi_set,
				    handle_edge_irq, NULL, NULL);

	return 0;
}

static void mtk_msi_bottom_domain_free(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs)
{
	struct mtk_gen3_pcie *pcie = domain->host_data;
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);

	mutex_lock(&pcie->lock);

	bitmap_release_region(pcie->msi_irq_in_use, data->hwirq,
			      order_base_2(nr_irqs));

	mutex_unlock(&pcie->lock);

	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static const struct irq_domain_ops mtk_msi_bottom_domain_ops = {
	.alloc = mtk_msi_bottom_domain_alloc,
	.free = mtk_msi_bottom_domain_free,
};

static void mtk_intx_mask(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static void mtk_intx_unmask(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

/**
 * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt
 * @data: pointer to chip specific data
 *
 * As an emulated level IRQ, its interrupt status remains asserted until
 * the corresponding de-assert message is received; hence, the status can
 * only be cleared after the interrupt has been serviced.
 */
static void mtk_intx_eoi(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long hwirq;

	hwirq = data->hwirq + PCIE_INTX_SHIFT;
	writel_relaxed(BIT(hwirq), pcie->base + PCIE_INT_STATUS_REG);
}

static struct irq_chip mtk_intx_irq_chip = {
	.irq_mask		= mtk_intx_mask,
	.irq_unmask		= mtk_intx_unmask,
	.irq_eoi		= mtk_intx_eoi,
	.name			= "INTx",
};

static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, domain->host_data);
	irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip,
				      handle_fasteoi_irq, "INTx");
	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = mtk_pcie_intx_map,
};

static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *intc_node, *node = dev->of_node;
	int ret;

	raw_spin_lock_init(&pcie->irq_lock);

	/* Setup INTx */
	intc_node = of_get_child_by_name(node, "interrupt-controller");
	if (!intc_node) {
		dev_err(dev, "missing interrupt-controller node\n");
		return -ENODEV;
	}

	pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
						  &intx_domain_ops, pcie);
	if (!pcie->intx_domain) {
		dev_err(dev, "failed to create INTx IRQ domain\n");
		ret = -ENODEV;
		goto out_put_node;
	}

	/* Setup MSI */
	mutex_init(&pcie->lock);

	pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
				  &mtk_msi_bottom_domain_ops, pcie);
	if (!pcie->msi_bottom_domain) {
		dev_err(dev, "failed to create MSI bottom domain\n");
		ret = -ENODEV;
		goto err_msi_bottom_domain;
	}

	pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
						     &mtk_msi_domain_info,
						     pcie->msi_bottom_domain);
	if (!pcie->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		ret = -ENODEV;
		goto err_msi_domain;
	}

	of_node_put(intc_node);
	return 0;

err_msi_domain:
	irq_domain_remove(pcie->msi_bottom_domain);
err_msi_bottom_domain:
	irq_domain_remove(pcie->intx_domain);
out_put_node:
	of_node_put(intc_node);
	return ret;
}

static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie)
{
	irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);

	if (pcie->intx_domain)
		irq_domain_remove(pcie->intx_domain);

	if (pcie->msi_domain)
		irq_domain_remove(pcie->msi_domain);

	if (pcie->msi_bottom_domain)
		irq_domain_remove(pcie->msi_bottom_domain);

	irq_dispose_mapping(pcie->irq);
}

static void mtk_pcie_msi_handler(struct mtk_gen3_pcie *pcie, int set_idx)
{
	struct mtk_msi_set *msi_set = &pcie->msi_sets[set_idx];
	unsigned long msi_enable, msi_status;
	irq_hw_number_t bit, hwirq;

	msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);

	do {
		msi_status = readl_relaxed(msi_set->base +
					   PCIE_MSI_SET_STATUS_OFFSET);
		msi_status &= msi_enable;
		if (!msi_status)
			break;

		for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
			hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
			generic_handle_domain_irq(pcie->msi_bottom_domain, hwirq);
		}
	} while (true);
}

static void mtk_pcie_irq_handler(struct irq_desc *desc)
{
	struct mtk_gen3_pcie *pcie = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long status;
	irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;

	chained_irq_enter(irqchip, desc);

	status = readl_relaxed(pcie->base + PCIE_INT_STATUS_REG);
	for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
			      PCIE_INTX_SHIFT)
		generic_handle_domain_irq(pcie->intx_domain,
					  irq_bit - PCIE_INTX_SHIFT);

	irq_bit = PCIE_MSI_SHIFT;
	for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
			      PCIE_MSI_SHIFT) {
		mtk_pcie_msi_handler(pcie, irq_bit - PCIE_MSI_SHIFT);

		writel_relaxed(BIT(irq_bit), pcie->base + PCIE_INT_STATUS_REG);
	}

	chained_irq_exit(irqchip, desc);
}

static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int err;

	err = mtk_pcie_init_irq_domains(pcie);
	if (err)
		return err;

	pcie->irq = platform_get_irq(pdev, 0);
	if (pcie->irq < 0)
		return pcie->irq;

	irq_set_chained_handler_and_data(pcie->irq, mtk_pcie_irq_handler, pcie);

	return 0;
}

static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
{
	int i, ret, num_resets = pcie->soc->phy_resets.num_resets;
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *regs;
	u32 num_lanes;

	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
	if (!regs)
		return -EINVAL;
	pcie->base = devm_ioremap_resource(dev, regs);
	if (IS_ERR(pcie->base)) {
		dev_err(dev, "failed to map register base\n");
		return PTR_ERR(pcie->base);
	}

	pcie->reg_base = regs->start;

	for (i = 0; i < num_resets; i++)
		pcie->phy_resets[i].id = pcie->soc->phy_resets.id[i];

	ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets, pcie->phy_resets);
	if (ret) {
		dev_err(dev, "failed to get PHY bulk reset\n");
		return ret;
	}

	pcie->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
	if (IS_ERR(pcie->mac_reset)) {
		ret = PTR_ERR(pcie->mac_reset);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get MAC reset\n");

		return ret;
	}

	pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get PHY\n");

		return ret;
	}

	pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks);
	if (pcie->num_clks < 0) {
		dev_err(dev, "failed to get clocks\n");
		return pcie->num_clks;
	}

	ret = of_property_read_u32(dev->of_node, "num-lanes", &num_lanes);
	if (ret == 0) {
		if (num_lanes == 0 || num_lanes > 16 || (num_lanes != 1 && num_lanes % 2))
			dev_warn(dev, "invalid num-lanes, using controller defaults\n");
		else
			pcie->num_lanes = num_lanes;
	}

	return 0;
}

static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;
	u32 val;

	/*
	 * The controller may have been left out of reset by the bootloader
	 * so make sure that we get a clean start by asserting resets here.
	 */
	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
				  pcie->phy_resets);
	reset_control_assert(pcie->mac_reset);

	/* Wait for the assertion of the reset lines to complete. */
	msleep(PCIE_EN7581_RESET_TIME_MS);

	/*
	 * Unlike the other MediaTek Gen3 controllers, the Airoha EN7581
	 * requires PHY initialization and power-on before PHY reset deassert.
	 */
	err = phy_init(pcie->phy);
	if (err) {
		dev_err(dev, "failed to initialize PHY\n");
		return err;
	}

	err = phy_power_on(pcie->phy);
	if (err) {
		dev_err(dev, "failed to power on PHY\n");
		goto err_phy_on;
	}

	err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
	if (err) {
		dev_err(dev, "failed to deassert PHYs\n");
		goto err_phy_deassert;
	}

	/*
	 * Wait for the time needed to complete the bulk de-assert above.
	 * This time is specific to the EN7581 SoC.
	 */
	msleep(PCIE_EN7581_RESET_TIME_MS);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	val = FIELD_PREP(PCIE_VAL_LN0_DOWNSTREAM, 0x47) |
	      FIELD_PREP(PCIE_VAL_LN1_DOWNSTREAM, 0x47) |
	      FIELD_PREP(PCIE_VAL_LN0_UPSTREAM, 0x41) |
	      FIELD_PREP(PCIE_VAL_LN1_UPSTREAM, 0x41);
	writel_relaxed(val, pcie->base + PCIE_EQ_PRESET_01_REG);

	val = PCIE_K_PHYPARAM_QUERY | PCIE_K_QUERY_TIMEOUT |
	      FIELD_PREP(PCIE_K_PRESET_TO_USE_16G, 0x80) |
	      FIELD_PREP(PCIE_K_PRESET_TO_USE, 0x2) |
	      FIELD_PREP(PCIE_K_FINETUNE_MAX, 0xf);
	writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG);

	err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
	if (err) {
		dev_err(dev, "failed to prepare clock\n");
		goto err_clk_prepare_enable;
	}

	/*
	 * Airoha EN7581 performs the PCIe reset via clk callbacks since it
	 * has a hw issue with the PCIE_PE_RSTB signal. Wait for the time
	 * needed to complete the PCIe reset.
	 */
	msleep(PCIE_T_PVPERL_MS);

	return 0;

err_clk_prepare_enable:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
err_phy_deassert:
	phy_power_off(pcie->phy);
err_phy_on:
	phy_exit(pcie->phy);

	return err;
}

static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	/*
	 * The controller may have been left out of reset by the bootloader
	 * so make sure that we get a clean start by asserting resets here.
	 */
	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
				  pcie->phy_resets);
	reset_control_assert(pcie->mac_reset);
	usleep_range(PCIE_MTK_RESET_TIME_US, 2 * PCIE_MTK_RESET_TIME_US);

	/* PHY power on and enable pipe clock */
	err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
	if (err) {
		dev_err(dev, "failed to deassert PHYs\n");
		return err;
	}

	err = phy_init(pcie->phy);
	if (err) {
		dev_err(dev, "failed to initialize PHY\n");
		goto err_phy_init;
	}

	err = phy_power_on(pcie->phy);
	if (err) {
		dev_err(dev, "failed to power on PHY\n");
		goto err_phy_on;
	}

	/* MAC power on and enable transaction layer clocks */
	reset_control_deassert(pcie->mac_reset);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
	if (err) {
		dev_err(dev, "failed to enable clocks\n");
		goto err_clk_init;
	}

	return 0;

err_clk_init:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	reset_control_assert(pcie->mac_reset);
	phy_power_off(pcie->phy);
err_phy_on:
	phy_exit(pcie->phy);
err_phy_init:
	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);

	return err;
}

static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
{
	clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);

	pm_runtime_put_sync(pcie->dev);
	pm_runtime_disable(pcie->dev);
	reset_control_assert(pcie->mac_reset);

	phy_power_off(pcie->phy);
	phy_exit(pcie->phy);
	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
}

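/*
 * PCIE_BASE_CFG_SPEED appears to hold one bit per supported link speed
 * (bit 0 = Gen1, bit 1 = Gen2, ...); fls() of the field thus yields the
 * highest supported generation, e.g. a value of 0x7 maps to Gen3.
 */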
static int mtk_pcie_get_controller_max_link_speed(struct mtk_gen3_pcie *pcie)
{
	u32 val;
	int ret;

	val = readl_relaxed(pcie->base + PCIE_BASE_CFG_REG);
	val = FIELD_GET(PCIE_BASE_CFG_SPEED, val);
	ret = fls(val);

	return ret > 0 ? ret : -EINVAL;
}

static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
{
	int err, max_speed;

	err = mtk_pcie_parse_port(pcie);
	if (err)
		return err;

	/*
	 * Deassert the lines to avoid an unbalanced deassert_count, since
	 * the bulk reset is shared.
	 */
	reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);

	/* Don't touch the hardware registers before power up */
	err = pcie->soc->power_up(pcie);
	if (err)
		return err;

	err = of_pci_get_max_link_speed(pcie->dev->of_node);
	if (err) {
		/* Get the maximum speed supported by the controller */
		max_speed = mtk_pcie_get_controller_max_link_speed(pcie);

		/* Set max_link_speed only if the controller supports it */
		if (max_speed >= 0 && max_speed <= err) {
			pcie->max_link_speed = err;
			dev_info(pcie->dev,
				 "maximum controller link speed Gen%d, overriding to Gen%u\n",
				 max_speed, pcie->max_link_speed);
		}
	}

	/* Try link up */
	err = mtk_pcie_startup_port(pcie);
	if (err)
		goto err_setup;

	err = mtk_pcie_setup_irq(pcie);
	if (err)
		goto err_setup;

	return 0;

err_setup:
	mtk_pcie_power_down(pcie);

	return err;
}

static int mtk_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_gen3_pcie *pcie;
	struct pci_host_bridge *host;
	int err;

	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!host)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(host);

	pcie->dev = dev;
	pcie->soc = device_get_match_data(dev);
	platform_set_drvdata(pdev, pcie);

	err = mtk_pcie_setup(pcie);
	if (err)
		return err;

	host->ops = &mtk_pcie_ops;
	host->sysdata = pcie;

	err = pci_host_probe(host);
	if (err) {
		mtk_pcie_irq_teardown(pcie);
		mtk_pcie_power_down(pcie);
		return err;
	}

	return 0;
}

static void mtk_pcie_remove(struct platform_device *pdev)
{
	struct mtk_gen3_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);

	pci_lock_rescan_remove();
	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);
	pci_unlock_rescan_remove();

	mtk_pcie_irq_teardown(pcie);
	mtk_pcie_power_down(pcie);
}

static void mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
{
	int i;

	raw_spin_lock(&pcie->irq_lock);

	pcie->saved_irq_state = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

		msi_set->saved_irq_state = readl_relaxed(msi_set->base +
					   PCIE_MSI_SET_ENABLE_OFFSET);
	}

	raw_spin_unlock(&pcie->irq_lock);
}

static void mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
{
	int i;

	raw_spin_lock(&pcie->irq_lock);

	writel_relaxed(pcie->saved_irq_state, pcie->base + PCIE_INT_ENABLE_REG);

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

		writel_relaxed(msi_set->saved_irq_state,
			       msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	}

	raw_spin_unlock(&pcie->irq_lock);
}

static int mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
{
	u32 val;

	val = readl_relaxed(pcie->base + PCIE_ICMD_PM_REG);
	val |= PCIE_TURN_OFF_LINK;
	writel_relaxed(val, pcie->base + PCIE_ICMD_PM_REG);

	/* Check that the link has entered the L2 state */
	return readl_poll_timeout(pcie->base + PCIE_LTSSM_STATUS_REG, val,
				  (PCIE_LTSSM_STATE(val) ==
				   PCIE_LTSSM_STATE_L2_IDLE), 20,
				   50 * USEC_PER_MSEC);
}

static int mtk_pcie_suspend_noirq(struct device *dev)
{
	struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
	int err;
	u32 val;

	/* Trigger link to L2 state */
	err = mtk_pcie_turn_off_link(pcie);
	if (err) {
		dev_err(pcie->dev, "cannot enter L2 state\n");
		return err;
	}

	if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
		/* Assert the PERST# pin */
		val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
		val |= PCIE_PE_RSTB;
		writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
	}

	dev_dbg(pcie->dev, "entered L2 state successfully\n");

	mtk_pcie_irq_save(pcie);
	mtk_pcie_power_down(pcie);

	return 0;
}

static int mtk_pcie_resume_noirq(struct device *dev)
{
	struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
	int err;

	err = pcie->soc->power_up(pcie);
	if (err)
		return err;

	err = mtk_pcie_startup_port(pcie);
	if (err) {
		mtk_pcie_power_down(pcie);
		return err;
	}

	mtk_pcie_irq_restore(pcie);

	return 0;
}

static const struct dev_pm_ops mtk_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
				  mtk_pcie_resume_noirq)
};

static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_mt8192 = {
	.power_up = mtk_pcie_power_up,
	.phy_resets = {
		.id[0] = "phy",
		.num_resets = 1,
	},
};

static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = {
	.power_up = mtk_pcie_en7581_power_up,
	.phy_resets = {
		.id[0] = "phy-lane0",
		.id[1] = "phy-lane1",
		.id[2] = "phy-lane2",
		.num_resets = 3,
	},
	.flags = SKIP_PCIE_RSTB,
};

static const struct of_device_id mtk_pcie_of_match[] = {
	{ .compatible = "airoha,en7581-pcie", .data = &mtk_pcie_soc_en7581 },
	{ .compatible = "mediatek,mt8192-pcie", .data = &mtk_pcie_soc_mt8192 },
	{},
};
MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);
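
/*
 * A minimal, hypothetical devicetree fragment for reference; the node
 * name, unit address and register values are illustrative only, not
 * taken from a real board. The "pcie-mac" reg name and the
 * interrupt-controller child node match what the driver looks up in
 * mtk_pcie_parse_port() and mtk_pcie_init_irq_domains():
 *
 *	pcie@11230000 {
 *		compatible = "mediatek,mt8192-pcie";
 *		reg = <0x11230000 0x4000>;
 *		reg-names = "pcie-mac";
 *		...
 *		interrupt-controller {
 *			#interrupt-cells = <1>;
 *			interrupt-controller;
 *		};
 *	};
 */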

static struct platform_driver mtk_pcie_driver = {
	.probe = mtk_pcie_probe,
	.remove = mtk_pcie_remove,
	.driver = {
		.name = "mtk-pcie-gen3",
		.of_match_table = mtk_pcie_of_match,
		.pm = &mtk_pcie_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

module_platform_driver(mtk_pcie_driver);
MODULE_DESCRIPTION("MediaTek Gen3 PCIe host controller driver");
MODULE_LICENSE("GPL v2");