1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * MediaTek PCIe host controller driver.
4 *
5 * Copyright (c) 2017 MediaTek Inc.
6 * Author: Ryder Lee <ryder.lee@mediatek.com>
7 * Honghui Zhang <honghui.zhang@mediatek.com>
8 */
9
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/iopoll.h>
13 #include <linux/irq.h>
14 #include <linux/irqchip/chained_irq.h>
15 #include <linux/irqchip/irq-msi-lib.h>
16 #include <linux/irqdomain.h>
17 #include <linux/kernel.h>
18 #include <linux/mfd/syscon.h>
19 #include <linux/msi.h>
20 #include <linux/module.h>
21 #include <linux/of_address.h>
22 #include <linux/of_pci.h>
23 #include <linux/of_platform.h>
24 #include <linux/pci.h>
25 #include <linux/phy/phy.h>
26 #include <linux/platform_device.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/regmap.h>
29 #include <linux/reset.h>
30
31 #include "../pci.h"
32
/* PCIe shared registers */
#define PCIE_SYS_CFG		0x00
#define PCIE_INT_ENABLE		0x0c
#define PCIE_CFG_ADDR		0x20
#define PCIE_CFG_DATA		0x24

/* PCIe per port registers */
#define PCIE_BAR0_SETUP		0x10
#define PCIE_CLASS		0x34
#define PCIE_LINK_STATUS	0x50

#define PCIE_PORT_INT_EN(x)	BIT(20 + (x))
#define PCIE_PORT_PERST(x)	BIT(1 + (x))
#define PCIE_PORT_LINKUP	BIT(0)
#define PCIE_BAR_MAP_MAX	GENMASK(31, 16)

#define PCIE_BAR_ENABLE		BIT(0)
#define PCIE_REVISION_ID	BIT(0)
/* 0x060400 = PCI-to-PCI bridge class code, shifted into bits [31:8] */
#define PCIE_CLASS_CODE		(0x60400 << 8)
/* Build the legacy (v1) config address: register, function, device, bus */
#define PCIE_CONF_REG(regn)	(((regn) & GENMASK(7, 2)) | \
				((((regn) >> 8) & GENMASK(3, 0)) << 24))
#define PCIE_CONF_FUN(fun)	(((fun) << 8) & GENMASK(10, 8))
#define PCIE_CONF_DEV(dev)	(((dev) << 11) & GENMASK(15, 11))
#define PCIE_CONF_BUS(bus)	(((bus) << 16) & GENMASK(23, 16))
#define PCIE_CONF_ADDR(regn, fun, dev, bus) \
	(PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \
	 PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus))

/* MediaTek specific configuration registers */
#define PCIE_FTS_NUM		0x70c
#define PCIE_FTS_NUM_MASK	GENMASK(15, 8)
/*
 * NOTE(review): '&' binds looser than '<<', so this expands to
 * ((x) & 0xff00), not (((x) & 0xff) << 8). With the only caller passing
 * 0x50 the result is 0 — confirm whether that is the intended value
 * before changing the precedence.
 */
#define PCIE_FTS_NUM_L0(x)	((x) & 0xff << 8)

#define PCIE_FC_CREDIT		0x73c
#define PCIE_FC_CREDIT_MASK	(GENMASK(31, 31) | GENMASK(28, 16))
#define PCIE_FC_CREDIT_VAL(x)	((x) << 16)

/* PCIe V2 share registers */
#define PCIE_SYS_CFG_V2		0x0
#define PCIE_CSR_LTSSM_EN(x)	BIT(0 + (x) * 8)
#define PCIE_CSR_ASPM_L1_EN(x)	BIT(1 + (x) * 8)

/* PCIe V2 per-port registers */
#define PCIE_MSI_VECTOR		0x0c0

#define PCIE_CONF_VEND_ID	0x100
#define PCIE_CONF_DEVICE_ID	0x102
#define PCIE_CONF_CLASS_ID	0x106

#define PCIE_INT_MASK		0x420
#define INTX_MASK		GENMASK(19, 16)
#define INTX_SHIFT		16
#define PCIE_INT_STATUS		0x424
#define MSI_STATUS		BIT(23)
#define PCIE_IMSI_STATUS	0x42c
#define PCIE_IMSI_ADDR		0x430
#define MSI_MASK		BIT(23)
#define MTK_MSI_IRQS_NUM	32

#define PCIE_AHB_TRANS_BASE0_L	0x438
#define PCIE_AHB_TRANS_BASE0_H	0x43c
#define AHB2PCIE_SIZE(x)	((x) & GENMASK(4, 0))
#define PCIE_AXI_WINDOW0	0x448
#define WIN_ENABLE		BIT(7)
/*
 * Define PCIe to AHB window size as 2^33 to support max 8GB address space
 * translate, support least 4GB DRAM size access from EP DMA(physical DRAM
 * start from 0x40000000).
 */
#define PCIE2AHB_SIZE	0x21

/* PCIe V2 configuration transaction header */
#define PCIE_CFG_HEADER0	0x460
#define PCIE_CFG_HEADER1	0x464
#define PCIE_CFG_HEADER2	0x468
#define PCIE_CFG_WDATA		0x470
#define PCIE_APP_TLP_REQ	0x488
#define PCIE_CFG_RDATA		0x48c
#define APP_CFG_REQ		BIT(0)
#define APP_CPL_STATUS		GENMASK(7, 5)

/* TLP header DW0 type/format encodings for type-0 config read/write */
#define CFG_WRRD_TYPE_0		4
#define CFG_WR_FMT		2
#define CFG_RD_FMT		0

#define CFG_DW0_LENGTH(length)	((length) & GENMASK(9, 0))
#define CFG_DW0_TYPE(type)	(((type) << 24) & GENMASK(28, 24))
#define CFG_DW0_FMT(fmt)	(((fmt) << 29) & GENMASK(31, 29))
#define CFG_DW2_REGN(regn)	((regn) & GENMASK(11, 2))
#define CFG_DW2_FUN(fun)	(((fun) << 16) & GENMASK(18, 16))
#define CFG_DW2_DEV(dev)	(((dev) << 19) & GENMASK(23, 19))
#define CFG_DW2_BUS(bus)	(((bus) << 24) & GENMASK(31, 24))
#define CFG_HEADER_DW0(type, fmt) \
	(CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(type) | CFG_DW0_FMT(fmt))
/* First/last DW byte-enables derived from the access offset and size */
#define CFG_HEADER_DW1(where, size) \
	(GENMASK(((size) - 1), 0) << ((where) & 0x3))
#define CFG_HEADER_DW2(regn, fun, dev, bus) \
	(CFG_DW2_REGN(regn) | CFG_DW2_FUN(fun) | \
	CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus))

#define PCIE_RST_CTRL		0x510
#define PCIE_PHY_RSTB		BIT(0)
#define PCIE_PIPE_SRSTB		BIT(1)
#define PCIE_MAC_SRSTB		BIT(2)
#define PCIE_CRSTB		BIT(3)
#define PCIE_PERSTB		BIT(8)
#define PCIE_LINKDOWN_RST_EN	GENMASK(15, 13)
#define PCIE_LINK_STATUS_V2	0x804
#define PCIE_PORT_LINKUP_V2	BIT(10)
142
struct mtk_pcie_port;

/**
 * struct mtk_pcie_soc - differentiate between host generations
 * @need_fix_class_id: whether this host's class ID needs to be fixed up
 * @need_fix_device_id: whether this host's device ID needs to be fixed up
 * @no_msi: Bridge has no MSI support, and relies on an external block
 * @device_id: device ID to program when @need_fix_device_id is set
 * @ops: pointer to configuration access functions
 * @startup: pointer to controller setting functions
 * @setup_irq: pointer to initialize IRQ functions
 */
struct mtk_pcie_soc {
	bool need_fix_class_id;
	bool need_fix_device_id;
	bool no_msi;
	unsigned int device_id;
	struct pci_ops *ops;
	int (*startup)(struct mtk_pcie_port *port);
	int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
};
164
/**
 * struct mtk_pcie_port - PCIe port information
 * @base: IO mapped register base
 * @list: port list
 * @pcie: pointer to PCIe host info
 * @reset: pointer to port reset control
 * @sys_ck: pointer to transaction/data link layer clock
 * @ahb_ck: pointer to AHB slave interface operating clock for CSR access
 *          and RC initiated MMIO access
 * @axi_ck: pointer to application layer MMIO channel operating clock
 * @aux_ck: pointer to pe2_mac_bridge and pe2_mac_core operating clock
 *          when pcie_mac_ck/pcie_pipe_ck is turned off
 * @obff_ck: pointer to OBFF functional block operating clock
 * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock
 * @phy: pointer to PHY control block
 * @slot: port slot
 * @irq: GIC irq
 * @irq_domain: legacy INTx IRQ domain
 * @inner_domain: inner IRQ domain
 * @lock: protect the msi_irq_in_use bitmap
 * @msi_irq_in_use: bit map for assigned MSI IRQ
 */
struct mtk_pcie_port {
	void __iomem *base;
	struct list_head list;
	struct mtk_pcie *pcie;
	struct reset_control *reset;
	struct clk *sys_ck;
	struct clk *ahb_ck;
	struct clk *axi_ck;
	struct clk *aux_ck;
	struct clk *obff_ck;
	struct clk *pipe_ck;
	struct phy *phy;
	u32 slot;
	int irq;
	struct irq_domain *irq_domain;
	struct irq_domain *inner_domain;
	struct mutex lock;
	DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
};
206
/**
 * struct mtk_pcie - PCIe host information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base of the optional shared "subsys" region
 * @cfg: IO mapped register map for PCIe config (mediatek,generic-pciecfg)
 * @free_ck: free-run reference clock
 * @ports: pointer to PCIe port information
 * @soc: pointer to SoC-dependent operations
 */
struct mtk_pcie {
	struct device *dev;
	void __iomem *base;
	struct regmap *cfg;
	struct clk *free_ck;

	struct list_head ports;
	const struct mtk_pcie_soc *soc;
};
225
mtk_pcie_subsys_powerdown(struct mtk_pcie * pcie)226 static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
227 {
228 struct device *dev = pcie->dev;
229
230 clk_disable_unprepare(pcie->free_ck);
231
232 pm_runtime_put_sync(dev);
233 pm_runtime_disable(dev);
234 }
235
mtk_pcie_port_free(struct mtk_pcie_port * port)236 static void mtk_pcie_port_free(struct mtk_pcie_port *port)
237 {
238 struct mtk_pcie *pcie = port->pcie;
239 struct device *dev = pcie->dev;
240
241 devm_iounmap(dev, port->base);
242 list_del(&port->list);
243 devm_kfree(dev, port);
244 }
245
/*
 * Tear down every port (PHY first, then clocks in reverse enable order),
 * then power down the shared PCIe subsystem. The _safe iterator is
 * required because mtk_pcie_port_free() unlinks the port.
 */
static void mtk_pcie_put_resources(struct mtk_pcie *pcie)
{
	struct mtk_pcie_port *port, *tmp;

	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		phy_power_off(port->phy);
		phy_exit(port->phy);
		clk_disable_unprepare(port->pipe_ck);
		clk_disable_unprepare(port->obff_ck);
		clk_disable_unprepare(port->axi_ck);
		clk_disable_unprepare(port->aux_ck);
		clk_disable_unprepare(port->ahb_ck);
		clk_disable_unprepare(port->sys_ck);
		mtk_pcie_port_free(port);
	}

	mtk_pcie_subsys_powerdown(pcie);
}
264
mtk_pcie_check_cfg_cpld(struct mtk_pcie_port * port)265 static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port)
266 {
267 u32 val;
268 int err;
269
270 err = readl_poll_timeout_atomic(port->base + PCIE_APP_TLP_REQ, val,
271 !(val & APP_CFG_REQ), 10,
272 100 * USEC_PER_MSEC);
273 if (err)
274 return PCIBIOS_SET_FAILED;
275
276 if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS)
277 return PCIBIOS_SET_FAILED;
278
279 return PCIBIOS_SUCCESSFUL;
280 }
281
/*
 * Perform a config-space read by building a Cfgrd TLP header, triggering
 * the transaction, and extracting the byte/word from the 32-bit payload.
 * The header writes must precede setting APP_CFG_REQ.
 */
static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
			      int where, int size, u32 *val)
{
	u32 tmp;

	/* Write PCIe configuration transaction header for Cfgrd */
	writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT),
	       port->base + PCIE_CFG_HEADER0);
	writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
	writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
	       port->base + PCIE_CFG_HEADER2);

	/* Trigger h/w to transmit Cfgrd TLP */
	tmp = readl(port->base + PCIE_APP_TLP_REQ);
	tmp |= APP_CFG_REQ;
	writel(tmp, port->base + PCIE_APP_TLP_REQ);

	/* Check completion status */
	if (mtk_pcie_check_cfg_cpld(port))
		return PCIBIOS_SET_FAILED;

	/* Read cpld payload of Cfgrd */
	*val = readl(port->base + PCIE_CFG_RDATA);

	/* Payload is DW-aligned; shift/mask out the requested bytes */
	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;

	return PCIBIOS_SUCCESSFUL;
}
313
/*
 * Perform a config-space write: build the Cfgwr TLP header, position the
 * data within the DW according to the offset, trigger the transaction and
 * wait for completion.
 */
static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
			      int where, int size, u32 val)
{
	/* Write PCIe configuration transaction header for Cfgwr */
	writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_WR_FMT),
	       port->base + PCIE_CFG_HEADER0);
	writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
	writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
	       port->base + PCIE_CFG_HEADER2);

	/* Write Cfgwr data, shifted into the byte lanes selected by DW1 */
	val = val << 8 * (where & 3);
	writel(val, port->base + PCIE_CFG_WDATA);

	/* Trigger h/w to transmit Cfgwr TLP */
	val = readl(port->base + PCIE_APP_TLP_REQ);
	val |= APP_CFG_REQ;
	writel(val, port->base + PCIE_APP_TLP_REQ);

	/* Check completion status */
	return mtk_pcie_check_cfg_cpld(port);
}
336
mtk_pcie_find_port(struct pci_bus * bus,unsigned int devfn)337 static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus,
338 unsigned int devfn)
339 {
340 struct mtk_pcie *pcie = bus->sysdata;
341 struct mtk_pcie_port *port;
342 struct pci_dev *dev = NULL;
343
344 /*
345 * Walk the bus hierarchy to get the devfn value
346 * of the port in the root bus.
347 */
348 while (bus && bus->number) {
349 dev = bus->self;
350 bus = dev->bus;
351 devfn = dev->devfn;
352 }
353
354 list_for_each_entry(port, &pcie->ports, list)
355 if (port->slot == PCI_SLOT(devfn))
356 return port;
357
358 return NULL;
359 }
360
/* pci_ops.read callback: route the access to the owning port. */
static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 *val)
{
	struct mtk_pcie_port *port = mtk_pcie_find_port(bus, devfn);

	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return mtk_pcie_hw_rd_cfg(port, bus->number, devfn, where, size, val);
}
373
/* pci_ops.write callback: route the access to the owning port. */
static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	struct mtk_pcie_port *port = mtk_pcie_find_port(bus, devfn);

	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return mtk_pcie_hw_wr_cfg(port, bus->number, devfn, where, size, val);
}
386
/* Config accessors for v2 hosts (TLP-based, no map_bus) */
static struct pci_ops mtk_pcie_ops_v2 = {
	.read  = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};
391
/*
 * Compose the MSI message for a vector: the doorbell is the physical
 * address of the port's PCIE_MSI_VECTOR register, the payload is the
 * hardware IRQ number.
 */
static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
	phys_addr_t addr;

	/* MT2712/MT7622 only support 32-bit MSI addresses */
	addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
	msg->address_hi = 0;
	msg->address_lo = lower_32_bits(addr);

	msg->data = data->hwirq;

	dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}
407
mtk_msi_ack_irq(struct irq_data * data)408 static void mtk_msi_ack_irq(struct irq_data *data)
409 {
410 struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
411 u32 hwirq = data->hwirq;
412
413 writel(1 << hwirq, port->base + PCIE_IMSI_STATUS);
414 }
415
/* Bottom-level irq_chip for the per-port MSI vectors */
static struct irq_chip mtk_msi_bottom_irq_chip = {
	.name			= "MTK MSI",
	.irq_compose_msi_msg	= mtk_compose_msi_msg,
	.irq_ack		= mtk_msi_ack_irq,
};
421
/*
 * Allocate one MSI vector from the port's bitmap (protected by
 * port->lock) and bind it to @virq. Only single-vector allocation is
 * supported (nr_irqs must be 1).
 */
static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				     unsigned int nr_irqs, void *args)
{
	struct mtk_pcie_port *port = domain->host_data;
	unsigned long bit;

	WARN_ON(nr_irqs != 1);
	mutex_lock(&port->lock);

	bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
	if (bit >= MTK_MSI_IRQS_NUM) {
		mutex_unlock(&port->lock);
		return -ENOSPC;
	}

	__set_bit(bit, port->msi_irq_in_use);

	mutex_unlock(&port->lock);

	/* Edge handling: the ack callback clears the HW status bit */
	irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip,
			    domain->host_data, handle_edge_irq,
			    NULL, NULL);

	return 0;
}
447
/*
 * Return an MSI vector to the port's bitmap, warning about (but
 * tolerating) a double free, then let the parent domain release @virq.
 */
static void mtk_pcie_irq_domain_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d);

	mutex_lock(&port->lock);

	if (!test_bit(d->hwirq, port->msi_irq_in_use))
		dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n",
			d->hwirq);
	else
		__clear_bit(d->hwirq, port->msi_irq_in_use);

	mutex_unlock(&port->lock);

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
466
static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= mtk_pcie_irq_domain_alloc,
	.free	= mtk_pcie_irq_domain_free,
};

/* Flags every child MSI domain must carry */
#define MTK_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS |	\
				MSI_FLAG_USE_DEF_CHIP_OPS |	\
				MSI_FLAG_NO_AFFINITY)

/* Flags the parent domain can provide to children */
#define MTK_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK |	\
				 MSI_FLAG_PCI_MSIX)

static const struct msi_parent_ops mtk_msi_parent_ops = {
	.required_flags		= MTK_MSI_FLAGS_REQUIRED,
	.supported_flags	= MTK_MSI_FLAGS_SUPPORTED,
	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
	.chip_flags		= MSI_CHIP_FLAG_SET_ACK,
	.prefix			= "MTK-",
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};
487
mtk_pcie_allocate_msi_domains(struct mtk_pcie_port * port)488 static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
489 {
490 mutex_init(&port->lock);
491
492 struct irq_domain_info info = {
493 .fwnode = dev_fwnode(port->pcie->dev),
494 .ops = &msi_domain_ops,
495 .host_data = port,
496 .size = MTK_MSI_IRQS_NUM,
497 };
498
499 port->inner_domain = msi_create_parent_irq_domain(&info, &mtk_msi_parent_ops);
500 if (!port->inner_domain) {
501 dev_err(port->pcie->dev, "failed to create IRQ domain\n");
502 return -ENOMEM;
503 }
504
505 return 0;
506 }
507
/*
 * Program the MSI doorbell address into the port and unmask the MSI
 * summary interrupt in PCIE_INT_MASK.
 */
static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
{
	u32 val;
	phys_addr_t msg_addr;

	msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
	val = lower_32_bits(msg_addr);
	writel(val, port->base + PCIE_IMSI_ADDR);

	/* Clearing MSI_MASK unmasks the MSI interrupt */
	val = readl(port->base + PCIE_INT_MASK);
	val &= ~MSI_MASK;
	writel(val, port->base + PCIE_INT_MASK);
}
521
mtk_pcie_irq_teardown(struct mtk_pcie * pcie)522 static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie)
523 {
524 struct mtk_pcie_port *port, *tmp;
525
526 list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
527 irq_set_chained_handler_and_data(port->irq, NULL, NULL);
528
529 if (port->irq_domain)
530 irq_domain_remove(port->irq_domain);
531
532 if (IS_ENABLED(CONFIG_PCI_MSI)) {
533 if (port->inner_domain)
534 irq_domain_remove(port->inner_domain);
535 }
536
537 irq_dispose_mapping(port->irq);
538 }
539 }
540
/* Map one INTx hwirq onto a virq using the dummy chip (no HW masking). */
static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = mtk_pcie_intx_map,
};
553
/*
 * Create the port's INTx IRQ domain from the child interrupt-controller
 * node, and the MSI parent domain when CONFIG_PCI_MSI is enabled.
 */
static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
				    struct device_node *node)
{
	struct device *dev = port->pcie->dev;
	struct device_node *pcie_intc_node;
	int ret;

	/* Setup INTx */
	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "no PCIe Intc node found\n");
		return -ENODEV;
	}

	port->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
						    &intx_domain_ops, port);
	/* The domain holds its own reference to the fwnode if needed */
	of_node_put(pcie_intc_node);
	if (!port->irq_domain) {
		dev_err(dev, "failed to get INTx IRQ domain\n");
		return -ENODEV;
	}

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		ret = mtk_pcie_allocate_msi_domains(port);
		if (ret)
			return ret;
	}

	return 0;
}
584
/*
 * Chained handler for the port's GIC interrupt: demultiplex INTx and MSI
 * events. Each INTx bit is write-1-cleared before dispatch; the MSI
 * summary bit must be cleared *before* reading PCIE_IMSI_STATUS (see the
 * comment below on edge semantics).
 */
static void mtk_pcie_intr_handler(struct irq_desc *desc)
{
	struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long status;
	u32 bit = INTX_SHIFT;

	chained_irq_enter(irqchip, desc);

	status = readl(port->base + PCIE_INT_STATUS);
	if (status & INTX_MASK) {
		/* INTx bits live at [19:16]; iterate only that window */
		for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
			/* Clear the INTx */
			writel(1 << bit, port->base + PCIE_INT_STATUS);
			generic_handle_domain_irq(port->irq_domain,
						  bit - INTX_SHIFT);
		}
	}

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (status & MSI_STATUS){
			unsigned long imsi_status;

			/*
			 * The interrupt status can be cleared even if the
			 * MSI status remains pending. As such, given the
			 * edge-triggered interrupt type, its status should
			 * be cleared before being dispatched to the
			 * handler of the underlying device.
			 */
			writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
			/* Re-read until no MSI vectors remain pending */
			while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
				for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM)
					generic_handle_domain_irq(port->inner_domain, bit);
			}
		}
	}

	chained_irq_exit(irqchip, desc);
}
625
/*
 * Create the port's IRQ domains, look up its GIC interrupt (by name when
 * "interrupt-names" is present, by index = slot otherwise) and install
 * the chained demultiplexing handler.
 */
static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
			      struct device_node *node)
{
	struct mtk_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int err;

	err = mtk_pcie_init_irq_domain(port, node);
	if (err) {
		dev_err(dev, "failed to init PCIe IRQ domain\n");
		return err;
	}

	if (of_property_present(dev->of_node, "interrupt-names"))
		port->irq = platform_get_irq_byname(pdev, "pcie_irq");
	else
		port->irq = platform_get_irq(pdev, port->slot);

	if (port->irq < 0)
		return port->irq;

	irq_set_chained_handler_and_data(port->irq,
					 mtk_pcie_intr_handler, port);

	return 0;
}
653
/*
 * Bring up a v2 port: enable LTSSM/ASPM from the subsys (MMIO or regmap
 * variant), run the reset sequence with the CEM-mandated PERST# delay,
 * fix up IDs where the SoC requires it, wait for link-up, unmask INTx,
 * enable MSI, and program the AHB<->PCIe translation windows.
 * Returns 0 on success, -EINVAL without a MEM window, -ETIMEDOUT when
 * link training fails.
 */
static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct resource *mem = NULL;
	struct resource_entry *entry;
	const struct mtk_pcie_soc *soc = port->pcie->soc;
	u32 val;
	int err;

	entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
	if (entry)
		mem = entry->res;
	if (!mem)
		return -EINVAL;

	/* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
	if (pcie->base) {
		val = readl(pcie->base + PCIE_SYS_CFG_V2);
		val |= PCIE_CSR_LTSSM_EN(port->slot) |
		       PCIE_CSR_ASPM_L1_EN(port->slot);
		writel(val, pcie->base + PCIE_SYS_CFG_V2);
	} else if (pcie->cfg) {
		val = PCIE_CSR_LTSSM_EN(port->slot) |
		      PCIE_CSR_ASPM_L1_EN(port->slot);
		regmap_update_bits(pcie->cfg, PCIE_SYS_CFG_V2, val, val);
	}

	/* Assert all reset signals */
	writel(0, port->base + PCIE_RST_CTRL);

	/*
	 * Enable PCIe link down reset, if link status changed from link up to
	 * link down, this will reset MAC control registers and configuration
	 * space.
	 */
	writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);

	/*
	 * Described in PCIe CEM specification sections 2.2 (PERST# Signal) and
	 * 2.2.1 (Initial Power-Up (G3 to S0)). The deassertion of PERST# should
	 * be delayed 100ms (TPVPERL) for the power and clock to become stable.
	 */
	msleep(100);

	/* De-assert PHY, PE, PIPE, MAC and configuration reset	*/
	val = readl(port->base + PCIE_RST_CTRL);
	val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
	       PCIE_MAC_SRSTB | PCIE_CRSTB;
	writel(val, port->base + PCIE_RST_CTRL);

	/* Set up vendor ID and class code */
	if (soc->need_fix_class_id) {
		val = PCI_VENDOR_ID_MEDIATEK;
		writew(val, port->base + PCIE_CONF_VEND_ID);

		val = PCI_CLASS_BRIDGE_PCI;
		writew(val, port->base + PCIE_CONF_CLASS_ID);
	}

	if (soc->need_fix_device_id)
		writew(soc->device_id, port->base + PCIE_CONF_DEVICE_ID);

	/* 100ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
				 !!(val & PCIE_PORT_LINKUP_V2), 20,
				 100 * USEC_PER_MSEC);
	if (err)
		return -ETIMEDOUT;

	/* Set INTx mask */
	val = readl(port->base + PCIE_INT_MASK);
	val &= ~INTX_MASK;
	writel(val, port->base + PCIE_INT_MASK);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		mtk_pcie_enable_msi(port);

	/* Set AHB to PCIe translation windows */
	val = lower_32_bits(mem->start) |
	      AHB2PCIE_SIZE(fls(resource_size(mem)));
	writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);

	val = upper_32_bits(mem->start);
	writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);

	/* Set PCIe to AXI translation memory space.*/
	val = PCIE2AHB_SIZE | WIN_ENABLE;
	writel(val, port->base + PCIE_AXI_WINDOW0);

	return 0;
}
746
/*
 * v1 config access: latch the BDF/register address into PCIE_CFG_ADDR,
 * then hand back a pointer into the PCIE_CFG_DATA window for the generic
 * accessors to read/write.
 */
static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus,
				      unsigned int devfn, int where)
{
	struct mtk_pcie *pcie = bus->sysdata;

	writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn),
			      bus->number), pcie->base + PCIE_CFG_ADDR);

	return pcie->base + PCIE_CFG_DATA + (where & 3);
}

/* Config accessors for v1 hosts (address/data window pair) */
static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read  = pci_generic_config_read,
	.write = pci_generic_config_write,
};
763
/*
 * Bring up a v1 port: toggle PERST#, wait for link-up, enable the port
 * interrupt, open BAR0 to all of DDR, set the class/revision, and tune
 * FC credit / FTS count via the address/data config window.
 * Returns 0 on success or -ETIMEDOUT when link training fails.
 */
static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	u32 func = PCI_FUNC(port->slot);
	u32 slot = PCI_SLOT(port->slot << 3);
	u32 val;
	int err;

	/* assert port PERST_N */
	val = readl(pcie->base + PCIE_SYS_CFG);
	val |= PCIE_PORT_PERST(port->slot);
	writel(val, pcie->base + PCIE_SYS_CFG);

	/* de-assert port PERST_N */
	val = readl(pcie->base + PCIE_SYS_CFG);
	val &= ~PCIE_PORT_PERST(port->slot);
	writel(val, pcie->base + PCIE_SYS_CFG);

	/* 100ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 100 * USEC_PER_MSEC);
	if (err)
		return -ETIMEDOUT;

	/* enable interrupt */
	val = readl(pcie->base + PCIE_INT_ENABLE);
	val |= PCIE_PORT_INT_EN(port->slot);
	writel(val, pcie->base + PCIE_INT_ENABLE);

	/* map to all DDR region. We need to set it before cfg operation. */
	writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
	       port->base + PCIE_BAR0_SETUP);

	/* configure class code and revision ID */
	writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS);

	/* configure FC credit */
	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	val = readl(pcie->base + PCIE_CFG_DATA);
	val &= ~PCIE_FC_CREDIT_MASK;
	val |= PCIE_FC_CREDIT_VAL(0x806c);
	/* address is re-latched before the data write */
	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	writel(val, pcie->base + PCIE_CFG_DATA);

	/* configure RC FTS number to 250 when it leaves L0s */
	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	val = readl(pcie->base + PCIE_CFG_DATA);
	val &= ~PCIE_FTS_NUM_MASK;
	val |= PCIE_FTS_NUM_L0(0x50);
	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	writel(val, pcie->base + PCIE_CFG_DATA);

	return 0;
}
823
/*
 * Enable all clocks, pulse the port reset, power up the PHY and run the
 * SoC-specific startup. On any failure — including link-down from
 * startup() — everything is unwound via the goto chain and the port is
 * freed, so only linked-up ports remain on pcie->ports.
 */
static void mtk_pcie_enable_port(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;
	int err;

	err = clk_prepare_enable(port->sys_ck);
	if (err) {
		dev_err(dev, "failed to enable sys_ck%d clock\n", port->slot);
		goto err_sys_clk;
	}

	err = clk_prepare_enable(port->ahb_ck);
	if (err) {
		dev_err(dev, "failed to enable ahb_ck%d\n", port->slot);
		goto err_ahb_clk;
	}

	err = clk_prepare_enable(port->aux_ck);
	if (err) {
		dev_err(dev, "failed to enable aux_ck%d\n", port->slot);
		goto err_aux_clk;
	}

	err = clk_prepare_enable(port->axi_ck);
	if (err) {
		dev_err(dev, "failed to enable axi_ck%d\n", port->slot);
		goto err_axi_clk;
	}

	err = clk_prepare_enable(port->obff_ck);
	if (err) {
		dev_err(dev, "failed to enable obff_ck%d\n", port->slot);
		goto err_obff_clk;
	}

	err = clk_prepare_enable(port->pipe_ck);
	if (err) {
		dev_err(dev, "failed to enable pipe_ck%d\n", port->slot);
		goto err_pipe_clk;
	}

	/* Pulse the optional port reset line */
	reset_control_assert(port->reset);
	reset_control_deassert(port->reset);

	err = phy_init(port->phy);
	if (err) {
		dev_err(dev, "failed to initialize port%d phy\n", port->slot);
		goto err_phy_init;
	}

	err = phy_power_on(port->phy);
	if (err) {
		dev_err(dev, "failed to power on port%d phy\n", port->slot);
		goto err_phy_on;
	}

	if (!pcie->soc->startup(port))
		return;

	dev_info(dev, "Port%d link down\n", port->slot);

	phy_power_off(port->phy);
err_phy_on:
	phy_exit(port->phy);
err_phy_init:
	clk_disable_unprepare(port->pipe_ck);
err_pipe_clk:
	clk_disable_unprepare(port->obff_ck);
err_obff_clk:
	clk_disable_unprepare(port->axi_ck);
err_axi_clk:
	clk_disable_unprepare(port->aux_ck);
err_aux_clk:
	clk_disable_unprepare(port->ahb_ck);
err_ahb_clk:
	clk_disable_unprepare(port->sys_ck);
err_sys_clk:
	mtk_pcie_port_free(port);
}
904
/*
 * Parse one port's resources: register window, the mandatory sys_ck plus
 * the optional sub-clocks, optional reset and PHY. On success the port is
 * appended to pcie->ports. All resources are devm-managed.
 */
static int mtk_pcie_parse_port(struct mtk_pcie *pcie,
			       struct device_node *node,
			       int slot)
{
	struct mtk_pcie_port *port;
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	char name[10];
	int err;

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	snprintf(name, sizeof(name), "port%d", slot);
	port->base = devm_platform_ioremap_resource_byname(pdev, name);
	if (IS_ERR(port->base)) {
		dev_err(dev, "failed to map port%d base\n", slot);
		return PTR_ERR(port->base);
	}

	snprintf(name, sizeof(name), "sys_ck%d", slot);
	port->sys_ck = devm_clk_get(dev, name);
	if (IS_ERR(port->sys_ck)) {
		dev_err(dev, "failed to get sys_ck%d clock\n", slot);
		return PTR_ERR(port->sys_ck);
	}

	/* sys_ck might be divided into the following parts in some chips */
	snprintf(name, sizeof(name), "ahb_ck%d", slot);
	port->ahb_ck = devm_clk_get_optional(dev, name);
	if (IS_ERR(port->ahb_ck))
		return PTR_ERR(port->ahb_ck);

	snprintf(name, sizeof(name), "axi_ck%d", slot);
	port->axi_ck = devm_clk_get_optional(dev, name);
	if (IS_ERR(port->axi_ck))
		return PTR_ERR(port->axi_ck);

	snprintf(name, sizeof(name), "aux_ck%d", slot);
	port->aux_ck = devm_clk_get_optional(dev, name);
	if (IS_ERR(port->aux_ck))
		return PTR_ERR(port->aux_ck);

	snprintf(name, sizeof(name), "obff_ck%d", slot);
	port->obff_ck = devm_clk_get_optional(dev, name);
	if (IS_ERR(port->obff_ck))
		return PTR_ERR(port->obff_ck);

	snprintf(name, sizeof(name), "pipe_ck%d", slot);
	port->pipe_ck = devm_clk_get_optional(dev, name);
	if (IS_ERR(port->pipe_ck))
		return PTR_ERR(port->pipe_ck);

	snprintf(name, sizeof(name), "pcie-rst%d", slot);
	port->reset = devm_reset_control_get_optional_exclusive(dev, name);
	/* only defer is fatal; other errors mean "no reset line" here */
	if (PTR_ERR(port->reset) == -EPROBE_DEFER)
		return PTR_ERR(port->reset);

	/* some platforms may use default PHY setting */
	snprintf(name, sizeof(name), "pcie-phy%d", slot);
	port->phy = devm_phy_optional_get(dev, name);
	if (IS_ERR(port->phy))
		return PTR_ERR(port->phy);

	port->slot = slot;
	port->pcie = pcie;

	if (pcie->soc->setup_irq) {
		err = pcie->soc->setup_irq(port, node);
		if (err)
			return err;
	}

	INIT_LIST_HEAD(&port->list);
	list_add_tail(&port->list, &pcie->ports);

	return 0;
}
984
/*
 * Map the optional shared "subsys" registers (or the generic-pciecfg
 * syscon), grab the optional free_ck, then enable runtime PM and the
 * top-level clock. Mirrors mtk_pcie_subsys_powerdown() on failure.
 */
static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *regs;
	struct device_node *cfg_node;
	int err;

	/* get shared registers, which are optional */
	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "subsys");
	if (regs) {
		pcie->base = devm_ioremap_resource(dev, regs);
		if (IS_ERR(pcie->base))
			return PTR_ERR(pcie->base);
	}

	cfg_node = of_find_compatible_node(NULL, NULL,
					   "mediatek,generic-pciecfg");
	if (cfg_node) {
		pcie->cfg = syscon_node_to_regmap(cfg_node);
		of_node_put(cfg_node);
		if (IS_ERR(pcie->cfg))
			return PTR_ERR(pcie->cfg);
	}

	pcie->free_ck = devm_clk_get(dev, "free_ck");
	if (IS_ERR(pcie->free_ck)) {
		if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		/* clock is optional: treat any other error as "absent" */
		pcie->free_ck = NULL;
	}

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	/* enable top level clock */
	err = clk_prepare_enable(pcie->free_ck);
	if (err) {
		dev_err(dev, "failed to enable free_ck\n");
		goto err_free_ck;
	}

	return 0;

err_free_ck:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	return err;
}
1036
mtk_pcie_setup(struct mtk_pcie * pcie)1037 static int mtk_pcie_setup(struct mtk_pcie *pcie)
1038 {
1039 struct device *dev = pcie->dev;
1040 struct device_node *node = dev->of_node;
1041 struct mtk_pcie_port *port, *tmp;
1042 int err, slot;
1043
1044 slot = of_get_pci_domain_nr(dev->of_node);
1045 if (slot < 0) {
1046 for_each_available_child_of_node_scoped(node, child) {
1047 err = of_pci_get_devfn(child);
1048 if (err < 0)
1049 return dev_err_probe(dev, err, "failed to get devfn\n");
1050
1051 slot = PCI_SLOT(err);
1052
1053 err = mtk_pcie_parse_port(pcie, child, slot);
1054 if (err)
1055 return err;
1056 }
1057 } else {
1058 err = mtk_pcie_parse_port(pcie, node, slot);
1059 if (err)
1060 return err;
1061 }
1062
1063 err = mtk_pcie_subsys_powerup(pcie);
1064 if (err)
1065 return err;
1066
1067 /* enable each port, and then check link status */
1068 list_for_each_entry_safe(port, tmp, &pcie->ports, list)
1069 mtk_pcie_enable_port(port);
1070
1071 /* power down PCIe subsys if slots are all empty (link down) */
1072 if (list_empty(&pcie->ports))
1073 mtk_pcie_subsys_powerdown(pcie);
1074
1075 return 0;
1076 }
1077
mtk_pcie_probe(struct platform_device * pdev)1078 static int mtk_pcie_probe(struct platform_device *pdev)
1079 {
1080 struct device *dev = &pdev->dev;
1081 struct mtk_pcie *pcie;
1082 struct pci_host_bridge *host;
1083 int err;
1084
1085 host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
1086 if (!host)
1087 return -ENOMEM;
1088
1089 pcie = pci_host_bridge_priv(host);
1090
1091 pcie->dev = dev;
1092 pcie->soc = of_device_get_match_data(dev);
1093 platform_set_drvdata(pdev, pcie);
1094 INIT_LIST_HEAD(&pcie->ports);
1095
1096 err = mtk_pcie_setup(pcie);
1097 if (err)
1098 return err;
1099
1100 host->ops = pcie->soc->ops;
1101 host->sysdata = pcie;
1102 host->msi_domain = pcie->soc->no_msi;
1103
1104 err = pci_host_probe(host);
1105 if (err)
1106 goto put_resources;
1107
1108 return 0;
1109
1110 put_resources:
1111 if (!list_empty(&pcie->ports))
1112 mtk_pcie_put_resources(pcie);
1113
1114 return err;
1115 }
1116
1117
mtk_pcie_free_resources(struct mtk_pcie * pcie)1118 static void mtk_pcie_free_resources(struct mtk_pcie *pcie)
1119 {
1120 struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
1121 struct list_head *windows = &host->windows;
1122
1123 pci_free_resource_list(windows);
1124 }
1125
/*
 * Driver removal: stop and remove the root bus first so no config accesses
 * are in flight, free the bridge windows, tear down the IRQ domains and
 * finally release the per-port clocks/PHYs.  Order matters here.
 */
static void mtk_pcie_remove(struct platform_device *pdev)
{
	struct mtk_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);

	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);
	mtk_pcie_free_resources(pcie);

	mtk_pcie_irq_teardown(pcie);

	mtk_pcie_put_resources(pcie);
}
1139
mtk_pcie_suspend_noirq(struct device * dev)1140 static int mtk_pcie_suspend_noirq(struct device *dev)
1141 {
1142 struct mtk_pcie *pcie = dev_get_drvdata(dev);
1143 struct mtk_pcie_port *port;
1144
1145 if (list_empty(&pcie->ports))
1146 return 0;
1147
1148 list_for_each_entry(port, &pcie->ports, list) {
1149 clk_disable_unprepare(port->pipe_ck);
1150 clk_disable_unprepare(port->obff_ck);
1151 clk_disable_unprepare(port->axi_ck);
1152 clk_disable_unprepare(port->aux_ck);
1153 clk_disable_unprepare(port->ahb_ck);
1154 clk_disable_unprepare(port->sys_ck);
1155 phy_power_off(port->phy);
1156 phy_exit(port->phy);
1157 }
1158
1159 clk_disable_unprepare(pcie->free_ck);
1160
1161 return 0;
1162 }
1163
mtk_pcie_resume_noirq(struct device * dev)1164 static int mtk_pcie_resume_noirq(struct device *dev)
1165 {
1166 struct mtk_pcie *pcie = dev_get_drvdata(dev);
1167 struct mtk_pcie_port *port, *tmp;
1168
1169 if (list_empty(&pcie->ports))
1170 return 0;
1171
1172 clk_prepare_enable(pcie->free_ck);
1173
1174 list_for_each_entry_safe(port, tmp, &pcie->ports, list)
1175 mtk_pcie_enable_port(port);
1176
1177 /* In case of EP was removed while system suspend. */
1178 if (list_empty(&pcie->ports))
1179 clk_disable_unprepare(pcie->free_ck);
1180
1181 return 0;
1182 }
1183
/* Use noirq callbacks so the link is back up before device IRQs resume. */
static const struct dev_pm_ops mtk_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
				  mtk_pcie_resume_noirq)
};
1188
/* Generation 1 controllers (MT2701/MT7623): no MSI support. */
static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
	.no_msi = true,
	.ops = &mtk_pcie_ops,
	.startup = mtk_pcie_startup_port,
};

/* MT2712: v2 config access plus per-port IRQ setup. */
static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = {
	.ops = &mtk_pcie_ops_v2,
	.startup = mtk_pcie_startup_port_v2,
	.setup_irq = mtk_pcie_setup_irq,
};

/* MT7622: as MT2712, but the class ID must be fixed up at startup. */
static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = {
	.need_fix_class_id = true,
	.ops = &mtk_pcie_ops_v2,
	.startup = mtk_pcie_startup_port_v2,
	.setup_irq = mtk_pcie_setup_irq,
};

/* MT7629: needs both class-ID and device-ID fixups. */
static const struct mtk_pcie_soc mtk_pcie_soc_mt7629 = {
	.need_fix_class_id = true,
	.need_fix_device_id = true,
	.device_id = PCI_DEVICE_ID_MEDIATEK_7629,
	.ops = &mtk_pcie_ops_v2,
	.startup = mtk_pcie_startup_port_v2,
	.setup_irq = mtk_pcie_setup_irq,
};
1216
/* DT match table mapping each supported SoC to its mtk_pcie_soc data. */
static const struct of_device_id mtk_pcie_ids[] = {
	{ .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
	{ .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
	{ .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
	{ .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 },
	{ .compatible = "mediatek,mt7629-pcie", .data = &mtk_pcie_soc_mt7629 },
	{},
};
MODULE_DEVICE_TABLE(of, mtk_pcie_ids);
1226
/*
 * Platform driver registration.  Bind attributes are suppressed because
 * unbinding/rebinding a PCIe host at runtime is not supported.
 */
static struct platform_driver mtk_pcie_driver = {
	.probe = mtk_pcie_probe,
	.remove = mtk_pcie_remove,
	.driver = {
		.name = "mtk-pcie",
		.of_match_table = mtk_pcie_ids,
		.suppress_bind_attrs = true,
		.pm = &mtk_pcie_pm_ops,
	},
};
module_platform_driver(mtk_pcie_driver);
MODULE_DESCRIPTION("MediaTek PCIe host controller driver");
MODULE_LICENSE("GPL v2");
1240