// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek PCIe host controller driver.
 *
 * Copyright (c) 2020 MediaTek Inc.
 * Author: Jianjun Wang <jianjun.wang@mediatek.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include "../pci.h"

#define PCIE_SETTING_REG		0x80
#define PCIE_PCI_IDS_1			0x9c
#define PCI_CLASS(class)		((class) << 8)
#define PCIE_RC_MODE			BIT(0)

#define PCIE_EQ_PRESET_01_REG		0x100
#define PCIE_VAL_LN0_DOWNSTREAM		GENMASK(6, 0)
#define PCIE_VAL_LN0_UPSTREAM		GENMASK(14, 8)
#define PCIE_VAL_LN1_DOWNSTREAM		GENMASK(22, 16)
#define PCIE_VAL_LN1_UPSTREAM		GENMASK(30, 24)

#define PCIE_CFGNUM_REG			0x140
#define PCIE_CFG_DEVFN(devfn)		((devfn) & GENMASK(7, 0))
#define PCIE_CFG_BUS(bus)		(((bus) << 8) & GENMASK(15, 8))
#define PCIE_CFG_BYTE_EN(bytes)		(((bytes) << 16) & GENMASK(19, 16))
#define PCIE_CFG_FORCE_BYTE_EN		BIT(20)
#define PCIE_CFG_OFFSET_ADDR		0x1000
#define PCIE_CFG_HEADER(bus, devfn) \
	(PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn))

#define PCIE_RST_CTRL_REG		0x148
#define PCIE_MAC_RSTB			BIT(0)
#define PCIE_PHY_RSTB			BIT(1)
#define PCIE_BRG_RSTB			BIT(2)
#define PCIE_PE_RSTB			BIT(3)

#define PCIE_LTSSM_STATUS_REG		0x150
#define PCIE_LTSSM_STATE_MASK		GENMASK(28, 24)
#define PCIE_LTSSM_STATE(val)		(((val) & PCIE_LTSSM_STATE_MASK) >> 24)
#define PCIE_LTSSM_STATE_L2_IDLE	0x14

#define PCIE_LINK_STATUS_REG		0x154
#define PCIE_PORT_LINKUP		BIT(8)

#define PCIE_MSI_SET_NUM		8
#define PCIE_MSI_IRQS_PER_SET		32
#define PCIE_MSI_IRQS_NUM \
	(PCIE_MSI_IRQS_PER_SET * PCIE_MSI_SET_NUM)

#define PCIE_INT_ENABLE_REG		0x180
#define PCIE_MSI_ENABLE			GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
#define PCIE_MSI_SHIFT			8
#define PCIE_INTX_SHIFT			24
#define PCIE_INTX_ENABLE \
	GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)

#define PCIE_INT_STATUS_REG		0x184
#define PCIE_MSI_SET_ENABLE_REG		0x190
#define PCIE_MSI_SET_ENABLE		GENMASK(PCIE_MSI_SET_NUM - 1, 0)

#define PCIE_PIPE4_PIE8_REG		0x338
#define PCIE_K_FINETUNE_MAX		GENMASK(5, 0)
#define PCIE_K_FINETUNE_ERR		GENMASK(7, 6)
#define PCIE_K_PRESET_TO_USE		GENMASK(18, 8)
#define PCIE_K_PHYPARAM_QUERY		BIT(19)
#define PCIE_K_QUERY_TIMEOUT		BIT(20)
#define PCIE_K_PRESET_TO_USE_16G	GENMASK(31, 21)

#define PCIE_MSI_SET_BASE_REG		0xc00
#define PCIE_MSI_SET_OFFSET		0x10
#define PCIE_MSI_SET_STATUS_OFFSET	0x04
#define PCIE_MSI_SET_ENABLE_OFFSET	0x08

#define PCIE_MSI_SET_ADDR_HI_BASE	0xc80
#define PCIE_MSI_SET_ADDR_HI_OFFSET	0x04

#define PCIE_ICMD_PM_REG		0x198
#define PCIE_TURN_OFF_LINK		BIT(4)

#define PCIE_MISC_CTRL_REG		0x348
#define PCIE_DISABLE_DVFSRC_VLT_REQ	BIT(1)

#define PCIE_TRANS_TABLE_BASE_REG	0x800
#define PCIE_ATR_SRC_ADDR_MSB_OFFSET	0x4
#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET	0x8
#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET	0xc
#define PCIE_ATR_TRSL_PARAM_OFFSET	0x10
#define PCIE_ATR_TLB_SET_OFFSET		0x20

#define PCIE_MAX_TRANS_TABLES		8
#define PCIE_ATR_EN			BIT(0)
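/* The ATR window size field holds log2(window size in bytes) - 1 in bits [6:1] */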
#define PCIE_ATR_SIZE(size) \
	(((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
#define PCIE_ATR_ID(id)			((id) & GENMASK(3, 0))
#define PCIE_ATR_TYPE_MEM		PCIE_ATR_ID(0)
#define PCIE_ATR_TYPE_IO		PCIE_ATR_ID(1)
#define PCIE_ATR_TLP_TYPE(type)		(((type) << 16) & GENMASK(18, 16))
#define PCIE_ATR_TLP_TYPE_MEM		PCIE_ATR_TLP_TYPE(0)
#define PCIE_ATR_TLP_TYPE_IO		PCIE_ATR_TLP_TYPE(2)

#define MAX_NUM_PHY_RESETS		3

/* Time in ms needed to complete PCIe reset on EN7581 SoC */
#define PCIE_EN7581_RESET_TIME_MS	100

struct mtk_gen3_pcie;

/**
 * struct mtk_gen3_pcie_pdata - differentiate between host generations
 * @power_up: pcie power_up callback
 * @phy_resets: SoC-specific PHY reset line data
 */
struct mtk_gen3_pcie_pdata {
	int (*power_up)(struct mtk_gen3_pcie *pcie);
	struct {
		const char *id[MAX_NUM_PHY_RESETS];
		int num_resets;
	} phy_resets;
};

/**
 * struct mtk_msi_set - MSI information for each set
 * @base: IO mapped register base
 * @msg_addr: MSI message address
 * @saved_irq_state: IRQ enable state saved at suspend time
 */
struct mtk_msi_set {
	void __iomem *base;
	phys_addr_t msg_addr;
	u32 saved_irq_state;
};

/**
 * struct mtk_gen3_pcie - PCIe port information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base
 * @reg_base: physical register base
 * @mac_reset: MAC reset control
 * @phy_resets: PHY reset controllers
 * @phy: PHY controller block
 * @clks: PCIe clocks
 * @num_clks: PCIe clocks count for this port
 * @irq: PCIe controller interrupt number
 * @saved_irq_state: IRQ enable state saved at suspend time
 * @irq_lock: lock protecting IRQ register access
 * @intx_domain: legacy INTx IRQ domain
 * @msi_domain: MSI IRQ domain
 * @msi_bottom_domain: MSI IRQ bottom domain
 * @msi_sets: MSI sets information
 * @lock: lock protecting IRQ bit map
 * @msi_irq_in_use: bit map for assigned MSI IRQ
 * @soc: pointer to SoC-dependent operations
 */
struct mtk_gen3_pcie {
	struct device *dev;
	void __iomem *base;
	phys_addr_t reg_base;
	struct reset_control *mac_reset;
	struct reset_control_bulk_data phy_resets[MAX_NUM_PHY_RESETS];
	struct phy *phy;
	struct clk_bulk_data *clks;
	int num_clks;

	int irq;
	u32 saved_irq_state;
	raw_spinlock_t irq_lock;
	struct irq_domain *intx_domain;
	struct irq_domain *msi_domain;
	struct irq_domain *msi_bottom_domain;
	struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
	struct mutex lock;
	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);

	const struct mtk_gen3_pcie_pdata *soc;
};

/* LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24] */
static const char *const ltssm_str[] = {
	"detect.quiet",			/* 0x00 */
	"detect.active",		/* 0x01 */
	"polling.active",		/* 0x02 */
	"polling.compliance",		/* 0x03 */
	"polling.configuration",	/* 0x04 */
	"config.linkwidthstart",	/* 0x05 */
	"config.linkwidthaccept",	/* 0x06 */
	"config.lanenumwait",		/* 0x07 */
	"config.lanenumaccept",		/* 0x08 */
	"config.complete",		/* 0x09 */
	"config.idle",			/* 0x0A */
	"recovery.receiverlock",	/* 0x0B */
	"recovery.equalization",	/* 0x0C */
	"recovery.speed",		/* 0x0D */
	"recovery.receiverconfig",	/* 0x0E */
	"recovery.idle",		/* 0x0F */
	"L0",				/* 0x10 */
	"L0s",				/* 0x11 */
	"L1.entry",			/* 0x12 */
	"L1.idle",			/* 0x13 */
	"L2.idle",			/* 0x14 */
	"L2.transmitwake",		/* 0x15 */
	"disable",			/* 0x16 */
	"loopback.entry",		/* 0x17 */
	"loopback.active",		/* 0x18 */
	"loopback.exit",		/* 0x19 */
	"hotreset",			/* 0x1A */
};

/**
 * mtk_pcie_config_tlp_header() - Configure a configuration TLP header
 * @bus: PCI bus to query
 * @devfn: device/function number
 * @where: offset in config space
 * @size: data size in TLP header
 *
 * Set byte enable field and device information in configuration TLP header.
 */
static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
				       int where, int size)
{
	struct mtk_gen3_pcie *pcie = bus->sysdata;
	int bytes;
	u32 val;

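	/*
	 * Build the byte-enable mask: 'size' consecutive ones, shifted to
	 * the byte lane selected by the low two bits of 'where'.
	 */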
	bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);

	val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
	      PCIE_CFG_HEADER(bus->number, devfn);

	writel_relaxed(val, pcie->base + PCIE_CFGNUM_REG);
}

static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
				      int where)
{
	struct mtk_gen3_pcie *pcie = bus->sysdata;

	return pcie->base + PCIE_CFG_OFFSET_ADDR + where;
}

static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 *val)
{
	mtk_pcie_config_tlp_header(bus, devfn, where, size);

	return pci_generic_config_read32(bus, devfn, where, size, val);
}

static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	mtk_pcie_config_tlp_header(bus, devfn, where, size);

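	/*
	 * The hardware only performs 32-bit config writes: move the value to
	 * the byte lane selected by 'where' and let the byte-enable mask set
	 * up above qualify the access.
	 */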
	if (size <= 2)
		val <<= (where & 0x3) * 8;

	return pci_generic_config_write32(bus, devfn, where, 4, val);
}

static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};

static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
				    resource_size_t cpu_addr,
				    resource_size_t pci_addr,
				    resource_size_t size,
				    unsigned long type, int *num)
{
	resource_size_t remaining = size;
	resource_size_t table_size;
	resource_size_t addr_align;
	const char *range_type;
	void __iomem *table;
	u32 val;

	while (remaining && (*num < PCIE_MAX_TRANS_TABLES)) {
		/* Table size needs to be a power of 2 */
		table_size = BIT(fls(remaining) - 1);

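		/*
		 * The source address must be aligned to the window size:
		 * shrink the window to the largest power of two that keeps
		 * cpu_addr naturally aligned.
		 */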
		if (cpu_addr > 0) {
			addr_align = BIT(ffs(cpu_addr) - 1);
			table_size = min(table_size, addr_align);
		}

		/* Minimum size of translate table is 4KiB */
		if (table_size < 0x1000) {
			dev_err(pcie->dev, "illegal table size %#llx\n",
				(unsigned long long)table_size);
			return -EINVAL;
		}

		table = pcie->base + PCIE_TRANS_TABLE_BASE_REG +
			*num * PCIE_ATR_TLB_SET_OFFSET;
		writel_relaxed(lower_32_bits(cpu_addr) |
			       PCIE_ATR_SIZE(fls(table_size) - 1), table);
		writel_relaxed(upper_32_bits(cpu_addr),
			       table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
		writel_relaxed(lower_32_bits(pci_addr),
			       table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
		writel_relaxed(upper_32_bits(pci_addr),
			       table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);

		if (type == IORESOURCE_IO) {
			val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
			range_type = "IO";
		} else {
			val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
			range_type = "MEM";
		}

		writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);

		dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
			range_type, *num, (unsigned long long)cpu_addr,
			(unsigned long long)pci_addr, (unsigned long long)table_size);

		cpu_addr += table_size;
		pci_addr += table_size;
		remaining -= table_size;
		(*num)++;
	}

	if (remaining)
		dev_warn(pcie->dev, "not enough translation windows for addr: %#llx, limited to [%d]\n",
			 (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);

	return 0;
}

static void mtk_pcie_enable_msi(struct mtk_gen3_pcie *pcie)
{
	int i;
	u32 val;

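	/*
	 * Each MSI set captures inbound writes to its own doorbell, which is
	 * the physical address of that set's register window.
	 */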
	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

		msi_set->base = pcie->base + PCIE_MSI_SET_BASE_REG +
				i * PCIE_MSI_SET_OFFSET;
		msi_set->msg_addr = pcie->reg_base + PCIE_MSI_SET_BASE_REG +
				    i * PCIE_MSI_SET_OFFSET;

		/* Configure the MSI capture address */
		writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
		writel_relaxed(upper_32_bits(msi_set->msg_addr),
			       pcie->base + PCIE_MSI_SET_ADDR_HI_BASE +
			       i * PCIE_MSI_SET_ADDR_HI_OFFSET);
	}

	val = readl_relaxed(pcie->base + PCIE_MSI_SET_ENABLE_REG);
	val |= PCIE_MSI_SET_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_MSI_SET_ENABLE_REG);

	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val |= PCIE_MSI_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
}

static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
{
	struct resource_entry *entry;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	unsigned int table_index = 0;
	int err;
	u32 val;

	/* Set as RC mode */
	val = readl_relaxed(pcie->base + PCIE_SETTING_REG);
	val |= PCIE_RC_MODE;
	writel_relaxed(val, pcie->base + PCIE_SETTING_REG);

	/* Set class code */
	val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
	val &= ~GENMASK(31, 8);
	val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL);
	writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1);

	/* Mask all INTx interrupts */
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val &= ~PCIE_INTX_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);

	/* Disable DVFSRC voltage request */
	val = readl_relaxed(pcie->base + PCIE_MISC_CTRL_REG);
	val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
	writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);

	/* Assert all reset signals */
	val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
	val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

	/*
	 * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
	 * and 2.2.1 (Initial Power-Up (G3 to S0)).
	 * The deassertion of PERST# should be delayed 100ms (TPVPERL)
	 * for the power and clock to become stable.
	 */
	msleep(100);

	/* De-assert reset signals */
	val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

	/* Check if the link is up or not */
	err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
	if (err) {
		const char *ltssm_state;
		int ltssm_index;

		val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG);
		ltssm_index = PCIE_LTSSM_STATE(val);
		ltssm_state = ltssm_index >= ARRAY_SIZE(ltssm_str) ?
			      "Unknown state" : ltssm_str[ltssm_index];
		dev_err(pcie->dev,
			"PCIe link down, current LTSSM state: %s (%#x)\n",
			ltssm_state, val);
		return err;
	}

	mtk_pcie_enable_msi(pcie);

	/* Set PCIe translation windows */
	resource_list_for_each_entry(entry, &host->windows) {
		struct resource *res = entry->res;
		unsigned long type = resource_type(res);
		resource_size_t cpu_addr;
		resource_size_t pci_addr;
		resource_size_t size;

		if (type == IORESOURCE_IO)
			cpu_addr = pci_pio_to_address(res->start);
		else if (type == IORESOURCE_MEM)
			cpu_addr = res->start;
		else
			continue;

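		/* entry->offset holds cpu_addr - pci_addr for this window */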
		pci_addr = res->start - entry->offset;
		size = resource_size(res);
		err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size,
					       type, &table_index);
		if (err)
			return err;
	}

	return 0;
}

static void mtk_pcie_msi_irq_mask(struct irq_data *data)
{
	pci_msi_mask_irq(data);
	irq_chip_mask_parent(data);
}

static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
{
	pci_msi_unmask_irq(data);
	irq_chip_unmask_parent(data);
}

static struct irq_chip mtk_msi_irq_chip = {
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = mtk_pcie_msi_irq_mask,
	.irq_unmask = mtk_pcie_msi_irq_unmask,
	.name = "MSI",
};

static struct msi_domain_info mtk_msi_domain_info = {
	.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		 MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
		 MSI_FLAG_MULTI_PCI_MSI,
	.chip = &mtk_msi_irq_chip,
};

static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq;

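	/*
	 * The hwirq number is global across all MSI sets; the MSI payload
	 * carries only the index within the owning set.
	 */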
	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	msg->address_hi = upper_32_bits(msi_set->msg_addr);
	msg->address_lo = lower_32_bits(msi_set->msg_addr);
	msg->data = hwirq;
	dev_dbg(pcie->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
		hwirq, msg->address_hi, msg->address_lo, msg->data);
}

static void mtk_msi_bottom_irq_ack(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	unsigned long hwirq;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET);
}

static void mtk_msi_bottom_irq_mask(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq, flags;
	u32 val;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	val &= ~BIT(hwirq);
	writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static void mtk_msi_bottom_irq_unmask(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq, flags;
	u32 val;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	val |= BIT(hwirq);
	writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static struct irq_chip mtk_msi_bottom_irq_chip = {
	.irq_ack = mtk_msi_bottom_irq_ack,
	.irq_mask = mtk_msi_bottom_irq_mask,
	.irq_unmask = mtk_msi_bottom_irq_unmask,
	.irq_compose_msi_msg = mtk_compose_msi_msg,
	.name = "MSI",
};

static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *arg)
{
	struct mtk_gen3_pcie *pcie = domain->host_data;
	struct mtk_msi_set *msi_set;
	int i, hwirq, set_idx;

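	/*
	 * Multi-MSI needs a naturally aligned power-of-two block of vectors,
	 * which is exactly what bitmap_find_free_region() hands out.
	 */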
	mutex_lock(&pcie->lock);

	hwirq = bitmap_find_free_region(pcie->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
					order_base_2(nr_irqs));

	mutex_unlock(&pcie->lock);

	if (hwirq < 0)
		return -ENOSPC;

	set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;
	msi_set = &pcie->msi_sets[set_idx];

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &mtk_msi_bottom_irq_chip, msi_set,
				    handle_edge_irq, NULL, NULL);

	return 0;
}

static void mtk_msi_bottom_domain_free(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs)
{
	struct mtk_gen3_pcie *pcie = domain->host_data;
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);

	mutex_lock(&pcie->lock);

	bitmap_release_region(pcie->msi_irq_in_use, data->hwirq,
			      order_base_2(nr_irqs));

	mutex_unlock(&pcie->lock);

	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static const struct irq_domain_ops mtk_msi_bottom_domain_ops = {
	.alloc = mtk_msi_bottom_domain_alloc,
	.free = mtk_msi_bottom_domain_free,
};

static void mtk_intx_mask(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static void mtk_intx_unmask(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

/**
 * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt
 * @data: pointer to chip specific data
 *
 * As an emulated level IRQ, its interrupt status will remain asserted
 * until the corresponding de-assert message is received; hence the
 * status can only be cleared when the interrupt has been serviced.
 */
static void mtk_intx_eoi(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long hwirq;

	hwirq = data->hwirq + PCIE_INTX_SHIFT;
	writel_relaxed(BIT(hwirq), pcie->base + PCIE_INT_STATUS_REG);
}

static struct irq_chip mtk_intx_irq_chip = {
	.irq_mask = mtk_intx_mask,
	.irq_unmask = mtk_intx_unmask,
	.irq_eoi = mtk_intx_eoi,
	.name = "INTx",
};

static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, domain->host_data);
	irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip,
				      handle_fasteoi_irq, "INTx");
	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = mtk_pcie_intx_map,
};

static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *intc_node, *node = dev->of_node;
	int ret;

	raw_spin_lock_init(&pcie->irq_lock);

	/* Setup INTx */
	intc_node = of_get_child_by_name(node, "interrupt-controller");
	if (!intc_node) {
		dev_err(dev, "missing interrupt-controller node\n");
		return -ENODEV;
	}

	pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
						  &intx_domain_ops, pcie);
	if (!pcie->intx_domain) {
		dev_err(dev, "failed to create INTx IRQ domain\n");
		ret = -ENODEV;
		goto out_put_node;
	}

	/* Setup MSI */
	mutex_init(&pcie->lock);

	pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
							&mtk_msi_bottom_domain_ops, pcie);
	if (!pcie->msi_bottom_domain) {
		dev_err(dev, "failed to create MSI bottom domain\n");
		ret = -ENODEV;
		goto err_msi_bottom_domain;
	}

	pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
						     &mtk_msi_domain_info,
						     pcie->msi_bottom_domain);
	if (!pcie->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		ret = -ENODEV;
		goto err_msi_domain;
	}

	of_node_put(intc_node);
	return 0;

err_msi_domain:
	irq_domain_remove(pcie->msi_bottom_domain);
err_msi_bottom_domain:
	irq_domain_remove(pcie->intx_domain);
out_put_node:
	of_node_put(intc_node);
	return ret;
}

static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie)
{
	irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);

	if (pcie->intx_domain)
		irq_domain_remove(pcie->intx_domain);

	if (pcie->msi_domain)
		irq_domain_remove(pcie->msi_domain);

	if (pcie->msi_bottom_domain)
		irq_domain_remove(pcie->msi_bottom_domain);

	irq_dispose_mapping(pcie->irq);
}

static void mtk_pcie_msi_handler(struct mtk_gen3_pcie *pcie, int set_idx)
{
	struct mtk_msi_set *msi_set = &pcie->msi_sets[set_idx];
	unsigned long msi_enable, msi_status;
	irq_hw_number_t bit, hwirq;

	msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);

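	/*
	 * Re-read the status until no enabled bit remains set, so MSIs that
	 * arrive while earlier ones are being handled are not lost.
	 */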
	do {
		msi_status = readl_relaxed(msi_set->base +
					   PCIE_MSI_SET_STATUS_OFFSET);
		msi_status &= msi_enable;
		if (!msi_status)
			break;

		for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
			hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
			generic_handle_domain_irq(pcie->msi_bottom_domain, hwirq);
		}
	} while (true);
}

static void mtk_pcie_irq_handler(struct irq_desc *desc)
{
	struct mtk_gen3_pcie *pcie = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long status;
	irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;

	chained_irq_enter(irqchip, desc);

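	/* INT_STATUS: MSI set bits start at bit 8, INTx bits at bit 24 */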
	status = readl_relaxed(pcie->base + PCIE_INT_STATUS_REG);
	for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
			      PCIE_INTX_SHIFT)
		generic_handle_domain_irq(pcie->intx_domain,
					  irq_bit - PCIE_INTX_SHIFT);

	irq_bit = PCIE_MSI_SHIFT;
	for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
			      PCIE_MSI_SHIFT) {
		mtk_pcie_msi_handler(pcie, irq_bit - PCIE_MSI_SHIFT);

		writel_relaxed(BIT(irq_bit), pcie->base + PCIE_INT_STATUS_REG);
	}

	chained_irq_exit(irqchip, desc);
}

static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int err;

	err = mtk_pcie_init_irq_domains(pcie);
	if (err)
		return err;

	pcie->irq = platform_get_irq(pdev, 0);
	if (pcie->irq < 0)
		return pcie->irq;

	irq_set_chained_handler_and_data(pcie->irq, mtk_pcie_irq_handler, pcie);

	return 0;
}

static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
{
	int i, ret, num_resets = pcie->soc->phy_resets.num_resets;
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *regs;

	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
	if (!regs)
		return -EINVAL;
	pcie->base = devm_ioremap_resource(dev, regs);
	if (IS_ERR(pcie->base)) {
		dev_err(dev, "failed to map register base\n");
		return PTR_ERR(pcie->base);
	}

	pcie->reg_base = regs->start;

	for (i = 0; i < num_resets; i++)
		pcie->phy_resets[i].id = pcie->soc->phy_resets.id[i];

	ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets,
							  pcie->phy_resets);
	if (ret) {
		dev_err(dev, "failed to get PHY bulk reset\n");
		return ret;
	}

	pcie->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
	if (IS_ERR(pcie->mac_reset)) {
		ret = PTR_ERR(pcie->mac_reset);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get MAC reset\n");

		return ret;
	}

	pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get PHY\n");

		return ret;
	}

	pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks);
	if (pcie->num_clks < 0) {
		dev_err(dev, "failed to get clocks\n");
		return pcie->num_clks;
	}

	return 0;
}

static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;
	u32 val;

	/*
	 * Wait for the time needed to complete the bulk assert in
	 * mtk_pcie_setup for EN7581 SoC.
	 */
	mdelay(PCIE_EN7581_RESET_TIME_MS);

	err = phy_init(pcie->phy);
	if (err) {
		dev_err(dev, "failed to initialize PHY\n");
		return err;
	}

	err = phy_power_on(pcie->phy);
	if (err) {
		dev_err(dev, "failed to power on PHY\n");
		goto err_phy_on;
	}

	err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
					  pcie->phy_resets);
	if (err) {
		dev_err(dev, "failed to deassert PHYs\n");
		goto err_phy_deassert;
	}

	/*
	 * Wait for the time needed to complete the bulk de-assert above.
	 * This time is specific for EN7581 SoC.
	 */
	mdelay(PCIE_EN7581_RESET_TIME_MS);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	err = clk_bulk_prepare(pcie->num_clks, pcie->clks);
	if (err) {
		dev_err(dev, "failed to prepare clocks\n");
		goto err_clk_prepare;
	}

	val = FIELD_PREP(PCIE_VAL_LN0_DOWNSTREAM, 0x47) |
	      FIELD_PREP(PCIE_VAL_LN1_DOWNSTREAM, 0x47) |
	      FIELD_PREP(PCIE_VAL_LN0_UPSTREAM, 0x41) |
	      FIELD_PREP(PCIE_VAL_LN1_UPSTREAM, 0x41);
	writel_relaxed(val, pcie->base + PCIE_EQ_PRESET_01_REG);

	val = PCIE_K_PHYPARAM_QUERY | PCIE_K_QUERY_TIMEOUT |
	      FIELD_PREP(PCIE_K_PRESET_TO_USE_16G, 0x80) |
	      FIELD_PREP(PCIE_K_PRESET_TO_USE, 0x2) |
	      FIELD_PREP(PCIE_K_FINETUNE_MAX, 0xf);
	writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG);

	err = clk_bulk_enable(pcie->num_clks, pcie->clks);
	if (err) {
		dev_err(dev, "failed to enable clocks\n");
		goto err_clk_enable;
	}

	return 0;

err_clk_enable:
	clk_bulk_unprepare(pcie->num_clks, pcie->clks);
err_clk_prepare:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
				  pcie->phy_resets);
err_phy_deassert:
	phy_power_off(pcie->phy);
err_phy_on:
	phy_exit(pcie->phy);

	return err;
}

static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	/* PHY power on and enable pipe clock */
	err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
					  pcie->phy_resets);
	if (err) {
		dev_err(dev, "failed to deassert PHYs\n");
		return err;
	}

	err = phy_init(pcie->phy);
	if (err) {
		dev_err(dev, "failed to initialize PHY\n");
		goto err_phy_init;
	}

	err = phy_power_on(pcie->phy);
	if (err) {
		dev_err(dev, "failed to power on PHY\n");
		goto err_phy_on;
	}

	/* MAC power on and enable transaction layer clocks */
	reset_control_deassert(pcie->mac_reset);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
	if (err) {
		dev_err(dev, "failed to enable clocks\n");
		goto err_clk_init;
	}

	return 0;

err_clk_init:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	reset_control_assert(pcie->mac_reset);
	phy_power_off(pcie->phy);
err_phy_on:
	phy_exit(pcie->phy);
err_phy_init:
	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
				  pcie->phy_resets);

	return err;
}

static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
{
	clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);

	pm_runtime_put_sync(pcie->dev);
	pm_runtime_disable(pcie->dev);
	reset_control_assert(pcie->mac_reset);

	phy_power_off(pcie->phy);
	phy_exit(pcie->phy);
	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
				  pcie->phy_resets);
}

static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
{
	int err;

	err = mtk_pcie_parse_port(pcie);
	if (err)
		return err;

	/*
	 * Deassert the line first in order to avoid an unbalanced
	 * deassert_count, since the bulk reset lines are shared.
	 */
	reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
				    pcie->phy_resets);
	/*
	 * The controller may have been left out of reset by the bootloader
	 * so make sure that we get a clean start by asserting resets here.
	 */
	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
				  pcie->phy_resets);

	reset_control_assert(pcie->mac_reset);
	usleep_range(10, 20);

	/* Don't touch the hardware registers before power up */
	err = pcie->soc->power_up(pcie);
	if (err)
		return err;

	/* Try link up */
	err = mtk_pcie_startup_port(pcie);
	if (err)
		goto err_setup;

	err = mtk_pcie_setup_irq(pcie);
	if (err)
		goto err_setup;

	return 0;

err_setup:
	mtk_pcie_power_down(pcie);

	return err;
}

static int mtk_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_gen3_pcie *pcie;
	struct pci_host_bridge *host;
	int err;

	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!host)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(host);

	pcie->dev = dev;
	pcie->soc = device_get_match_data(dev);
	platform_set_drvdata(pdev, pcie);

	err = mtk_pcie_setup(pcie);
	if (err)
		return err;

	host->ops = &mtk_pcie_ops;
	host->sysdata = pcie;

	err = pci_host_probe(host);
	if (err) {
		mtk_pcie_irq_teardown(pcie);
		mtk_pcie_power_down(pcie);
		return err;
	}

	return 0;
}

static void mtk_pcie_remove(struct platform_device *pdev)
{
	struct mtk_gen3_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);

	pci_lock_rescan_remove();
	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);
	pci_unlock_rescan_remove();

	mtk_pcie_irq_teardown(pcie);
	mtk_pcie_power_down(pcie);
}

static void mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
{
	int i;

	raw_spin_lock(&pcie->irq_lock);

	pcie->saved_irq_state = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

		msi_set->saved_irq_state = readl_relaxed(msi_set->base +
					   PCIE_MSI_SET_ENABLE_OFFSET);
	}

	raw_spin_unlock(&pcie->irq_lock);
}

static void mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
{
	int i;

	raw_spin_lock(&pcie->irq_lock);

	writel_relaxed(pcie->saved_irq_state, pcie->base + PCIE_INT_ENABLE_REG);

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

		writel_relaxed(msi_set->saved_irq_state,
			       msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	}

	raw_spin_unlock(&pcie->irq_lock);
}

static int mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
{
	u32 val;

	val = readl_relaxed(pcie->base + PCIE_ICMD_PM_REG);
	val |= PCIE_TURN_OFF_LINK;
	writel_relaxed(val, pcie->base + PCIE_ICMD_PM_REG);

	/* Check that the link has entered the L2 idle state */
	return readl_poll_timeout(pcie->base + PCIE_LTSSM_STATUS_REG, val,
				  (PCIE_LTSSM_STATE(val) ==
				   PCIE_LTSSM_STATE_L2_IDLE), 20,
				  50 * USEC_PER_MSEC);
}

static int mtk_pcie_suspend_noirq(struct device *dev)
{
	struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
	int err;
	u32 val;

	/* Trigger link to L2 state */
	err = mtk_pcie_turn_off_link(pcie);
	if (err) {
		dev_err(pcie->dev, "cannot enter L2 state\n");
		return err;
	}

	/* Pull down the PERST# pin */
	val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
	val |= PCIE_PE_RSTB;
	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

	dev_dbg(pcie->dev, "entered L2 state successfully\n");

	mtk_pcie_irq_save(pcie);
	mtk_pcie_power_down(pcie);

	return 0;
}

static int mtk_pcie_resume_noirq(struct device *dev)
{
	struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
	int err;

	err = pcie->soc->power_up(pcie);
	if (err)
		return err;

	err = mtk_pcie_startup_port(pcie);
	if (err) {
		mtk_pcie_power_down(pcie);
		return err;
	}

	mtk_pcie_irq_restore(pcie);

	return 0;
}

static const struct dev_pm_ops mtk_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
				  mtk_pcie_resume_noirq)
};

static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_mt8192 = {
	.power_up = mtk_pcie_power_up,
	.phy_resets = {
		.id[0] = "phy",
		.num_resets = 1,
	},
};

static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = {
	.power_up = mtk_pcie_en7581_power_up,
	.phy_resets = {
		.id[0] = "phy-lane0",
		.id[1] = "phy-lane1",
		.id[2] = "phy-lane2",
		.num_resets = 3,
	},
};

static const struct of_device_id mtk_pcie_of_match[] = {
	{ .compatible = "airoha,en7581-pcie", .data = &mtk_pcie_soc_en7581 },
	{ .compatible = "mediatek,mt8192-pcie", .data = &mtk_pcie_soc_mt8192 },
	{},
};
MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);

static struct platform_driver mtk_pcie_driver = {
	.probe = mtk_pcie_probe,
	.remove_new = mtk_pcie_remove,
	.driver = {
		.name = "mtk-pcie-gen3",
		.of_match_table = mtk_pcie_of_match,
		.pm = &mtk_pcie_pm_ops,
	},
};

module_platform_driver(mtk_pcie_driver);
MODULE_DESCRIPTION("MediaTek Gen3 PCIe host controller driver");
MODULE_LICENSE("GPL v2");