// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host bridge driver for Apple system-on-chips.
 *
 * The HW is ECAM compliant, so once the controller is initialized,
 * the driver mostly deals with MSI mapping and handling of per-port
 * interrupts (INTx, management and error signals).
 *
 * Initialization requires enabling power and clocks, along with a
 * number of register pokes.
 *
 * Copyright (C) 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Copyright (C) 2021 Google LLC
 * Copyright (C) 2021 Corellium LLC
 * Copyright (C) 2021 Mark Kettenis <kettenis@openbsd.org>
 *
 * Author: Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Author: Marc Zyngier <maz@kernel.org>
 */

#include <linux/bitfield.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/pci-ecam.h>

#include "pci-host-common.h"

/* T8103 (original M1) and related SoCs */
#define CORE_RC_PHYIF_CTL		0x00024
#define CORE_RC_PHYIF_CTL_RUN		BIT(0)
#define CORE_RC_PHYIF_STAT		0x00028
#define CORE_RC_PHYIF_STAT_REFCLK	BIT(4)
#define CORE_RC_CTL			0x00050
#define CORE_RC_CTL_RUN			BIT(0)
#define CORE_RC_STAT			0x00058
#define CORE_RC_STAT_READY		BIT(0)
#define CORE_FABRIC_STAT		0x04000
#define CORE_FABRIC_STAT_MASK		0x001F001F

#define CORE_PHY_DEFAULT_BASE(port)	(0x84000 + 0x4000 * (port))

#define PHY_LANE_CFG			0x00000
#define PHY_LANE_CFG_REFCLK0REQ		BIT(0)
#define PHY_LANE_CFG_REFCLK1REQ		BIT(1)
#define PHY_LANE_CFG_REFCLK0ACK		BIT(2)
#define PHY_LANE_CFG_REFCLK1ACK		BIT(3)
#define PHY_LANE_CFG_REFCLKEN		(BIT(9) | BIT(10))
#define PHY_LANE_CFG_REFCLKCGEN		(BIT(30) | BIT(31))
#define PHY_LANE_CTL			0x00004
#define PHY_LANE_CTL_CFGACC		BIT(15)

#define PORT_LTSSMCTL			0x00080
#define PORT_LTSSMCTL_START		BIT(0)
#define PORT_INTSTAT			0x00100
#define PORT_INT_TUNNEL_ERR		31
#define PORT_INT_CPL_TIMEOUT		23
#define PORT_INT_RID2SID_MAPERR		22
#define PORT_INT_CPL_ABORT		21
#define PORT_INT_MSI_BAD_DATA		19
#define PORT_INT_MSI_ERR		18
#define PORT_INT_REQADDR_GT32		17
#define PORT_INT_AF_TIMEOUT		15
#define PORT_INT_LINK_DOWN		14
#define PORT_INT_LINK_UP		12
#define PORT_INT_LINK_BWMGMT		11
#define PORT_INT_AER_MASK		(15 << 4)
#define PORT_INT_PORT_ERR		4
#define PORT_INT_INTx(i)		i
#define PORT_INT_INTx_MASK		15
#define PORT_INTMSK			0x00104
#define PORT_INTMSKSET			0x00108
#define PORT_INTMSKCLR			0x0010c
#define PORT_MSICFG			0x00124
#define PORT_MSICFG_EN			BIT(0)
#define PORT_MSICFG_L2MSINUM_SHIFT	4
#define PORT_MSIBASE			0x00128
#define PORT_MSIBASE_1_SHIFT		16
#define PORT_MSIADDR			0x00168
#define PORT_LINKSTS			0x00208
#define PORT_LINKSTS_UP			BIT(0)
#define PORT_LINKSTS_BUSY		BIT(2)
#define PORT_LINKCMDSTS			0x00210
#define PORT_OUTS_NPREQS		0x00284
#define PORT_OUTS_NPREQS_REQ		BIT(24)
#define PORT_OUTS_NPREQS_CPL		BIT(16)
#define PORT_RXWR_FIFO			0x00288
#define PORT_RXWR_FIFO_HDR		GENMASK(15, 10)
#define PORT_RXWR_FIFO_DATA		GENMASK(9, 0)
#define PORT_RXRD_FIFO			0x0028C
#define PORT_RXRD_FIFO_REQ		GENMASK(6, 0)
#define PORT_OUTS_CPLS			0x00290
#define PORT_OUTS_CPLS_SHRD		GENMASK(14, 8)
#define PORT_OUTS_CPLS_WAIT		GENMASK(6, 0)
#define PORT_APPCLK			0x00800
#define PORT_APPCLK_EN			BIT(0)
#define PORT_APPCLK_CGDIS		BIT(8)
#define PORT_STATUS			0x00804
#define PORT_STATUS_READY		BIT(0)
#define PORT_REFCLK			0x00810
#define PORT_REFCLK_EN			BIT(0)
#define PORT_REFCLK_CGDIS		BIT(8)
#define PORT_PERST			0x00814
#define PORT_PERST_OFF			BIT(0)
#define PORT_RID2SID			0x00828
#define PORT_RID2SID_VALID		BIT(31)
#define PORT_RID2SID_SID_SHIFT		16
#define PORT_RID2SID_BUS_SHIFT		8
#define PORT_RID2SID_DEV_SHIFT		3
#define PORT_RID2SID_FUNC_SHIFT		0
#define PORT_OUTS_PREQS_HDR		0x00980
#define PORT_OUTS_PREQS_HDR_MASK	GENMASK(9, 0)
#define PORT_OUTS_PREQS_DATA		0x00984
#define PORT_OUTS_PREQS_DATA_MASK	GENMASK(15, 0)
#define PORT_TUNCTRL			0x00988
#define PORT_TUNCTRL_PERST_ON		BIT(0)
#define PORT_TUNCTRL_PERST_ACK_REQ	BIT(1)
#define PORT_TUNSTAT			0x0098c
#define PORT_TUNSTAT_PERST_ON		BIT(0)
#define PORT_TUNSTAT_PERST_ACK_PEND	BIT(1)
#define PORT_PREFMEM_ENABLE		0x00994

/* T602x (M2-pro and co) */
#define PORT_T602X_MSIADDR	0x016c
#define PORT_T602X_MSIADDR_HI	0x0170
#define PORT_T602X_PERST	0x082c
#define PORT_T602X_RID2SID	0x3000
#define PORT_T602X_MSIMAP	0x3800

#define PORT_MSIMAP_ENABLE	BIT(31)
#define PORT_MSIMAP_TARGET	GENMASK(7, 0)

/*
 * The doorbell address is set to 0xfffff000, which by convention
 * matches what MacOS does, and it is possible to use any other
 * address (in the bottom 4GB, as the base register is only 32bit).
 * However, it has to be excluded from the IOVA range, and the DART
 * driver has to know about it.
 */
#define DOORBELL_ADDR		CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR

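/*
 * Per-SoC register layout. A zero offset means the corresponding
 * register (or feature) is not present on that SoC generation, and the
 * code checks for that before touching it.
 */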
struct hw_info {
	u32 phy_lane_ctl;
	u32 port_msiaddr;
	u32 port_msiaddr_hi;
	u32 port_refclk;
	u32 port_perst;
	u32 port_rid2sid;
	u32 port_msimap;
	u32 max_rid2sid;
};

static const struct hw_info t8103_hw = {
	.phy_lane_ctl		= PHY_LANE_CTL,
	.port_msiaddr		= PORT_MSIADDR,
	.port_msiaddr_hi	= 0,
	.port_refclk		= PORT_REFCLK,
	.port_perst		= PORT_PERST,
	.port_rid2sid		= PORT_RID2SID,
	.port_msimap		= 0,
	.max_rid2sid		= 64,
};

static const struct hw_info t602x_hw = {
	.phy_lane_ctl		= 0,
	.port_msiaddr		= PORT_T602X_MSIADDR,
	.port_msiaddr_hi	= PORT_T602X_MSIADDR_HI,
	.port_refclk		= 0,
	.port_perst		= PORT_T602X_PERST,
	.port_rid2sid		= PORT_T602X_RID2SID,
	.port_msimap		= PORT_T602X_MSIMAP,
	/* 16 on t602x, guess for autodetect on future HW */
	.max_rid2sid		= 512,
};

struct apple_pcie {
	struct mutex		lock;
	struct device		*dev;
	void __iomem		*base;
	const struct hw_info	*hw;
	unsigned long		*bitmap;
	struct list_head	ports;
	struct completion	event;
	struct irq_fwspec	fwspec;
	u32			nvecs;
};

struct apple_pcie_port {
	raw_spinlock_t		lock;
	struct apple_pcie	*pcie;
	struct device_node	*np;
	void __iomem		*base;
	void __iomem		*phy;
	struct irq_domain	*domain;
	struct list_head	entry;
	unsigned long		*sid_map;
	int			sid_map_sz;
	int			idx;
};

static void rmw_set(u32 set, void __iomem *addr)
{
	writel_relaxed(readl_relaxed(addr) | set, addr);
}

static void rmw_clear(u32 clr, void __iomem *addr)
{
	writel_relaxed(readl_relaxed(addr) & ~clr, addr);
}

static void apple_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
	msg->address_hi = upper_32_bits(DOORBELL_ADDR);
	msg->address_lo = lower_32_bits(DOORBELL_ADDR);
	msg->data = data->hwirq;
}

static struct irq_chip apple_msi_bottom_chip = {
	.name			= "MSI",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_compose_msi_msg	= apple_msi_compose_msg,
};

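/*
 * Allocate an order-aligned block of MSI vectors from the shared
 * bitmap and hand them to the parent (wired) interrupt domain by
 * offsetting the base interrupt number encoded in the "msi-ranges"
 * fwspec.
 */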
static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *args)
{
	struct apple_pcie *pcie = domain->host_data;
	struct irq_fwspec fwspec = pcie->fwspec;
	unsigned int i;
	int ret, hwirq;

	mutex_lock(&pcie->lock);

	hwirq = bitmap_find_free_region(pcie->bitmap, pcie->nvecs,
					order_base_2(nr_irqs));

	mutex_unlock(&pcie->lock);

	if (hwirq < 0)
		return -ENOSPC;

	fwspec.param[fwspec.param_count - 2] += hwirq;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &fwspec);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &apple_msi_bottom_chip, pcie);
	}

	return 0;
}

static void apple_msi_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct apple_pcie *pcie = domain->host_data;

	mutex_lock(&pcie->lock);

	bitmap_release_region(pcie->bitmap, d->hwirq, order_base_2(nr_irqs));

	mutex_unlock(&pcie->lock);
}

static const struct irq_domain_ops apple_msi_domain_ops = {
	.alloc	= apple_msi_domain_alloc,
	.free	= apple_msi_domain_free,
};

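/*
 * Per-port interrupt masking: a bit set in PORT_INTMSK masks the
 * corresponding source, so mask/unmask respectively set and clear it.
 */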
static void apple_port_irq_mask(struct irq_data *data)
{
	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);

	guard(raw_spinlock_irqsave)(&port->lock);
	rmw_set(BIT(data->hwirq), port->base + PORT_INTMSK);
}

static void apple_port_irq_unmask(struct irq_data *data)
{
	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);

	guard(raw_spinlock_irqsave)(&port->lock);
	rmw_clear(BIT(data->hwirq), port->base + PORT_INTMSK);
}

static bool hwirq_is_intx(unsigned int hwirq)
{
	return BIT(hwirq) & PORT_INT_INTx_MASK;
}

static void apple_port_irq_ack(struct irq_data *data)
{
	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);

	if (!hwirq_is_intx(data->hwirq))
		writel_relaxed(BIT(data->hwirq), port->base + PORT_INTSTAT);
}

static int apple_port_irq_set_type(struct irq_data *data, unsigned int type)
{
	/*
	 * It doesn't seem that there is any way to configure the
	 * trigger, so assume INTx have to be level (as per the spec),
	 * and the rest is edge (which looks likely).
	 */
	if (hwirq_is_intx(data->hwirq) ^ !!(type & IRQ_TYPE_LEVEL_MASK))
		return -EINVAL;

	irqd_set_trigger_type(data, type);
	return 0;
}

static struct irq_chip apple_port_irqchip = {
	.name		= "PCIe",
	.irq_ack	= apple_port_irq_ack,
	.irq_mask	= apple_port_irq_mask,
	.irq_unmask	= apple_port_irq_unmask,
	.irq_set_type	= apple_port_irq_set_type,
};

static int apple_port_irq_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *args)
{
	struct apple_pcie_port *port = domain->host_data;
	struct irq_fwspec *fwspec = args;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_flow_handler_t flow = handle_edge_irq;
		unsigned int type = IRQ_TYPE_EDGE_RISING;

		if (hwirq_is_intx(fwspec->param[0] + i)) {
			flow = handle_level_irq;
			type = IRQ_TYPE_LEVEL_HIGH;
		}

		irq_domain_set_info(domain, virq + i, fwspec->param[0] + i,
				    &apple_port_irqchip, port, flow,
				    NULL, NULL);

		irq_set_irq_type(virq + i, type);
	}

	return 0;
}

static void apple_port_irq_domain_free(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops apple_port_irq_domain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= apple_port_irq_domain_alloc,
	.free		= apple_port_irq_domain_free,
};

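/*
 * Chained handler for the per-port summary interrupt: demultiplex
 * PORT_INTSTAT into the per-port IRQ domain.
 */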
static void apple_port_irq_handler(struct irq_desc *desc)
{
	struct apple_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long stat;
	int i;

	chained_irq_enter(chip, desc);

	stat = readl_relaxed(port->base + PORT_INTSTAT);

	for_each_set_bit(i, &stat, 32)
		generic_handle_domain_irq(port->domain, i);

	chained_irq_exit(chip, desc);
}

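/*
 * Set up the per-port IRQ plumbing: create the 32-bit linear domain,
 * mask and clear all port interrupt sources, install the chained
 * handler, program the MSI doorbell address, and either map the shared
 * MSI vectors one by one (SoCs with a MSIMAP table) or program the
 * vector count via PORT_MSIBASE/PORT_MSICFG (T8103 and friends).
 */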
static int apple_pcie_port_setup_irq(struct apple_pcie_port *port)
{
	struct fwnode_handle *fwnode = &port->np->fwnode;
	struct apple_pcie *pcie = port->pcie;
	unsigned int irq;
	u32 val = 0;

	/* FIXME: consider moving each interrupt under each port */
	irq = irq_of_parse_and_map(to_of_node(dev_fwnode(port->pcie->dev)),
				   port->idx);
	if (!irq)
		return -ENXIO;

	port->domain = irq_domain_create_linear(fwnode, 32,
						&apple_port_irq_domain_ops,
						port);
	if (!port->domain)
		return -ENOMEM;

	/* Disable all interrupts */
	writel_relaxed(~0, port->base + PORT_INTMSK);
	writel_relaxed(~0, port->base + PORT_INTSTAT);
	writel_relaxed(~0, port->base + PORT_LINKCMDSTS);

	irq_set_chained_handler_and_data(irq, apple_port_irq_handler, port);

	/* Configure MSI base address */
	BUILD_BUG_ON(upper_32_bits(DOORBELL_ADDR));
	writel_relaxed(lower_32_bits(DOORBELL_ADDR),
		       port->base + pcie->hw->port_msiaddr);
	if (pcie->hw->port_msiaddr_hi)
		writel_relaxed(0, port->base + pcie->hw->port_msiaddr_hi);

	/* Enable MSIs, shared between all ports */
	if (pcie->hw->port_msimap) {
		for (int i = 0; i < pcie->nvecs; i++)
			writel_relaxed(FIELD_PREP(PORT_MSIMAP_TARGET, i) |
				       PORT_MSIMAP_ENABLE,
				       port->base + pcie->hw->port_msimap + 4 * i);
	} else {
		writel_relaxed(0, port->base + PORT_MSIBASE);
		val = ilog2(pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT;
	}

	writel_relaxed(val | PORT_MSICFG_EN, port->base + PORT_MSICFG);
	return 0;
}

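/*
 * Handler for the "Link up"/"Link down" port interrupts registered
 * below: log the event and, on link up, complete the probe-time wait
 * in apple_pcie_setup_port().
 */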
static irqreturn_t apple_pcie_port_irq(int irq, void *data)
{
	struct apple_pcie_port *port = data;
	unsigned int hwirq = irq_domain_get_irq_data(port->domain, irq)->hwirq;

	switch (hwirq) {
	case PORT_INT_LINK_UP:
		dev_info_ratelimited(port->pcie->dev, "Link up on %pOF\n",
				     port->np);
		complete_all(&port->pcie->event);
		break;
	case PORT_INT_LINK_DOWN:
		dev_info_ratelimited(port->pcie->dev, "Link down on %pOF\n",
				     port->np);
		break;
	default:
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

static int apple_pcie_port_register_irqs(struct apple_pcie_port *port)
{
	static struct {
		unsigned int	hwirq;
		const char	*name;
	} port_irqs[] = {
		{ PORT_INT_LINK_UP,	"Link up", },
		{ PORT_INT_LINK_DOWN,	"Link down", },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(port_irqs); i++) {
		struct irq_fwspec fwspec = {
			.fwnode		= &port->np->fwnode,
			.param_count	= 1,
			.param		= {
				[0]	= port_irqs[i].hwirq,
			},
		};
		unsigned int irq;
		int ret;

		irq = irq_domain_alloc_irqs(port->domain, 1, NUMA_NO_NODE,
					    &fwspec);
		if (WARN_ON(!irq))
			continue;

		ret = request_irq(irq, apple_pcie_port_irq, 0,
				  port_irqs[i].name, port);
		WARN_ON(ret);
	}

	return 0;
}

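/*
 * Bring up the reference clock for a port: request REFCLK0/1 in the
 * PHY lane config register and wait for the corresponding ACK bits,
 * then enable the clock outputs (and the port-level REFCLK on SoCs
 * that have that register).
 */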
static int apple_pcie_setup_refclk(struct apple_pcie *pcie,
				   struct apple_pcie_port *port)
{
	u32 stat;
	int res;

	if (pcie->hw->phy_lane_ctl)
		rmw_set(PHY_LANE_CTL_CFGACC, port->phy + pcie->hw->phy_lane_ctl);

	rmw_set(PHY_LANE_CFG_REFCLK0REQ, port->phy + PHY_LANE_CFG);

	res = readl_relaxed_poll_timeout(port->phy + PHY_LANE_CFG,
					 stat, stat & PHY_LANE_CFG_REFCLK0ACK,
					 100, 50000);
	if (res < 0)
		return res;

	rmw_set(PHY_LANE_CFG_REFCLK1REQ, port->phy + PHY_LANE_CFG);
	res = readl_relaxed_poll_timeout(port->phy + PHY_LANE_CFG,
					 stat, stat & PHY_LANE_CFG_REFCLK1ACK,
					 100, 50000);

	if (res < 0)
		return res;

	if (pcie->hw->phy_lane_ctl)
		rmw_clear(PHY_LANE_CTL_CFGACC, port->phy + pcie->hw->phy_lane_ctl);

	rmw_set(PHY_LANE_CFG_REFCLKEN, port->phy + PHY_LANE_CFG);

	if (pcie->hw->port_refclk)
		rmw_set(PORT_REFCLK_EN, port->base + pcie->hw->port_refclk);

	return 0;
}

static void __iomem *port_rid2sid_addr(struct apple_pcie_port *port, int idx)
{
	return port->base + port->pcie->hw->port_rid2sid + 4 * idx;
}

static u32 apple_pcie_rid2sid_write(struct apple_pcie_port *port,
				    int idx, u32 val)
{
	writel_relaxed(val, port_rid2sid_addr(port, idx));
	/* Read back to ensure completion of the write */
	return readl_relaxed(port_rid2sid_addr(port, idx));
}

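/*
 * Bring up a single root port: enable the application clock, sequence
 * PERST# around the reference clock setup with the delays required by
 * the PCIe CEM/base specs, set up the port interrupts, size the
 * RID2SID table, and finally start link training (LTSSM).
 */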
static int apple_pcie_setup_port(struct apple_pcie *pcie,
				 struct device_node *np)
{
	struct platform_device *platform = to_platform_device(pcie->dev);
	struct apple_pcie_port *port;
	struct gpio_desc *reset;
	struct resource *res;
	char name[16];
	u32 stat, idx;
	int ret, i;

	reset = devm_fwnode_gpiod_get(pcie->dev, of_fwnode_handle(np), "reset",
				      GPIOD_OUT_LOW, "PERST#");
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	port = devm_kzalloc(pcie->dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->sid_map = devm_bitmap_zalloc(pcie->dev, pcie->hw->max_rid2sid, GFP_KERNEL);
	if (!port->sid_map)
		return -ENOMEM;

	ret = of_property_read_u32_index(np, "reg", 0, &idx);
	if (ret)
		return ret;

	/* Use the first reg entry to work out the port index */
	port->idx = idx >> 11;
	port->pcie = pcie;
	port->np = np;

	raw_spin_lock_init(&port->lock);

	snprintf(name, sizeof(name), "port%d", port->idx);
	res = platform_get_resource_byname(platform, IORESOURCE_MEM, name);
	if (!res)
		res = platform_get_resource(platform, IORESOURCE_MEM, port->idx + 2);

	port->base = devm_ioremap_resource(&platform->dev, res);
	if (IS_ERR(port->base))
		return PTR_ERR(port->base);

	snprintf(name, sizeof(name), "phy%d", port->idx);
	res = platform_get_resource_byname(platform, IORESOURCE_MEM, name);
	if (res)
		port->phy = devm_ioremap_resource(&platform->dev, res);
	else
		port->phy = pcie->base + CORE_PHY_DEFAULT_BASE(port->idx);

	rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);

	/* Assert PERST# before setting up the clock */
	gpiod_set_value_cansleep(reset, 1);

	ret = apple_pcie_setup_refclk(pcie, port);
	if (ret < 0)
		return ret;

	/* The minimal Tperst-clk value is 100us (PCIe CEM r5.0, 2.9.2) */
	usleep_range(100, 200);

	/* Deassert PERST# */
	rmw_set(PORT_PERST_OFF, port->base + pcie->hw->port_perst);
	gpiod_set_value_cansleep(reset, 0);

	/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
	msleep(100);

	ret = readl_relaxed_poll_timeout(port->base + PORT_STATUS, stat,
					 stat & PORT_STATUS_READY, 100, 250000);
	if (ret < 0) {
		dev_err(pcie->dev, "port %pOF ready wait timeout\n", np);
		return ret;
	}

	if (pcie->hw->port_refclk)
		rmw_clear(PORT_REFCLK_CGDIS, port->base + pcie->hw->port_refclk);
	else
		rmw_set(PHY_LANE_CFG_REFCLKCGEN, port->phy + PHY_LANE_CFG);

	rmw_clear(PORT_APPCLK_CGDIS, port->base + PORT_APPCLK);

	ret = apple_pcie_port_setup_irq(port);
	if (ret)
		return ret;

	/* Reset all RID/SID mappings, and check for RAZ/WI registers */
	for (i = 0; i < pcie->hw->max_rid2sid; i++) {
		if (apple_pcie_rid2sid_write(port, i, 0xbad1d) != 0xbad1d)
			break;
		apple_pcie_rid2sid_write(port, i, 0);
	}

	dev_dbg(pcie->dev, "%pOF: %d RID/SID mapping entries\n", np, i);

	port->sid_map_sz = i;

	list_add_tail(&port->entry, &pcie->ports);
	init_completion(&pcie->event);

	/* In the success path, we keep a reference to np around */
	of_node_get(np);

	ret = apple_pcie_port_register_irqs(port);
	WARN_ON(ret);

	writel_relaxed(PORT_LTSSMCTL_START, port->base + PORT_LTSSMCTL);

	if (!wait_for_completion_timeout(&pcie->event, HZ / 10))
		dev_warn(pcie->dev, "%pOF link didn't come up\n", np);

	return 0;
}

static const struct msi_parent_ops apple_msi_parent_ops = {
	.supported_flags	= (MSI_GENERIC_FLAGS_MASK	|
				   MSI_FLAG_PCI_MSIX		|
				   MSI_FLAG_MULTI_PCI_MSI),
	.required_flags		= (MSI_FLAG_USE_DEF_DOM_OPS	|
				   MSI_FLAG_USE_DEF_CHIP_OPS	|
				   MSI_FLAG_PCI_MSI_MASK_PARENT),
	.chip_flags		= MSI_CHIP_FLAG_SET_EOI,
	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

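/*
 * The "msi-ranges" DT property points at the block of parent (wired)
 * interrupts backing the MSIs: parse it to build the parent fwspec and
 * the vector count, then create the MSI parent irq domain on top of
 * the parent domain.
 */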
static int apple_msi_init(struct apple_pcie *pcie)
{
	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
	struct irq_domain_info info = {
		.fwnode		= fwnode,
		.ops		= &apple_msi_domain_ops,
		.size		= pcie->nvecs,
		.host_data	= pcie,
	};
	struct of_phandle_args args = {};
	int ret;

	ret = of_parse_phandle_with_args(to_of_node(fwnode), "msi-ranges",
					 "#interrupt-cells", 0, &args);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(to_of_node(fwnode), "msi-ranges",
					 args.args_count + 1, &pcie->nvecs);
	if (ret)
		return ret;

	of_phandle_args_to_fwspec(args.np, args.args, args.args_count,
				  &pcie->fwspec);

	pcie->bitmap = devm_bitmap_zalloc(pcie->dev, pcie->nvecs, GFP_KERNEL);
	if (!pcie->bitmap)
		return -ENOMEM;

	info.parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED);
	if (!info.parent) {
		dev_err(pcie->dev, "failed to find parent domain\n");
		return -ENXIO;
	}

	if (!msi_create_parent_irq_domain(&info, &apple_msi_parent_ops)) {
		dev_err(pcie->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}
	return 0;
}

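/*
 * Look up the apple_pcie_port a device sits behind, by matching the
 * root port's slot number against the port index. Returns NULL for the
 * root ports themselves.
 */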
static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev)
{
	struct pci_config_window *cfg = pdev->sysdata;
	struct apple_pcie *pcie = cfg->priv;
	struct pci_dev *port_pdev;
	struct apple_pcie_port *port;

	/* Find the root port this device is on */
	port_pdev = pcie_find_root_port(pdev);

	/* If finding the port itself, nothing to do */
	if (WARN_ON(!port_pdev) || pdev == port_pdev)
		return NULL;

	list_for_each_entry(port, &pcie->ports, entry) {
		if (port->idx == PCI_SLOT(port_pdev->devfn))
			return port;
	}

	return NULL;
}

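/*
 * Devices need a RID2SID mapping so their traffic is tagged with the
 * stream ID used by the DART IOMMU: translate the requester ID through
 * "iommu-map" and program the result into a free RID2SID slot.
 */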
static int apple_pcie_enable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
{
	u32 sid, rid = pci_dev_id(pdev);
	struct apple_pcie_port *port;
	int idx, err;

	port = apple_pcie_get_port(pdev);
	if (!port)
		return 0;

	dev_dbg(&pdev->dev, "added to bus %s, index %d\n",
		pci_name(pdev->bus->self), port->idx);

	err = of_map_id(port->pcie->dev->of_node, rid, "iommu-map",
			"iommu-map-mask", NULL, &sid);
	if (err)
		return err;

	mutex_lock(&port->pcie->lock);

	idx = bitmap_find_free_region(port->sid_map, port->sid_map_sz, 0);
	if (idx >= 0) {
		apple_pcie_rid2sid_write(port, idx,
					 PORT_RID2SID_VALID |
					 (sid << PORT_RID2SID_SID_SHIFT) | rid);

		dev_dbg(&pdev->dev, "mapping RID%x to SID%x (index %d)\n",
			rid, sid, idx);
	}

	mutex_unlock(&port->pcie->lock);

	return idx >= 0 ? 0 : -ENOSPC;
}

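/*
 * Undo apple_pcie_enable_device(): find the RID2SID entry matching the
 * device's requester ID, clear it and release its slot in the bitmap.
 */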
static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
{
	struct apple_pcie_port *port;
	u32 rid = pci_dev_id(pdev);
	int idx;

	port = apple_pcie_get_port(pdev);
	if (!port)
		return;

	mutex_lock(&port->pcie->lock);

	for_each_set_bit(idx, port->sid_map, port->sid_map_sz) {
		u32 val;

		val = readl_relaxed(port_rid2sid_addr(port, idx));
		if ((val & 0xffff) == rid) {
			apple_pcie_rid2sid_write(port, idx, 0);
			bitmap_release_region(port->sid_map, idx, 0);
			dev_dbg(&pdev->dev, "Released %x (%d)\n", val, idx);
			break;
		}
	}

	mutex_unlock(&port->pcie->lock);
}

static int apple_pcie_init(struct pci_config_window *cfg)
{
	struct apple_pcie *pcie = cfg->priv;
	struct device *dev = cfg->parent;
	int ret;

	for_each_available_child_of_node_scoped(dev->of_node, of_port) {
		ret = apple_pcie_setup_port(pcie, of_port);
		if (ret) {
			dev_err(dev, "Port %pOF setup fail: %d\n", of_port, ret);
			return ret;
		}
	}

	return 0;
}

static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
	.init		= apple_pcie_init,
	.enable_device	= apple_pcie_enable_device,
	.disable_device	= apple_pcie_disable_device,
	.pci_ops	= {
		.map_bus	= pci_ecam_map_bus,
		.read		= pci_generic_config_read,
		.write		= pci_generic_config_write,
	}
};

static int apple_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct apple_pcie *pcie;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pcie->dev = dev;
	pcie->hw = of_device_get_match_data(dev);
	if (!pcie->hw)
		return -ENODEV;
	pcie->base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	mutex_init(&pcie->lock);
	INIT_LIST_HEAD(&pcie->ports);
	dev_set_drvdata(dev, pcie);

	ret = apple_msi_init(pcie);
	if (ret)
		return ret;

	return pci_host_common_init(pdev, &apple_pcie_cfg_ecam_ops);
}

static const struct of_device_id apple_pcie_of_match[] = {
	{ .compatible = "apple,t6020-pcie", .data = &t602x_hw },
	{ .compatible = "apple,pcie", .data = &t8103_hw },
	{ }
};
MODULE_DEVICE_TABLE(of, apple_pcie_of_match);

static struct platform_driver apple_pcie_driver = {
	.probe	= apple_pcie_probe,
	.driver	= {
		.name			= "pcie-apple",
		.of_match_table		= apple_pcie_of_match,
		.suppress_bind_attrs	= true,
	},
};
module_platform_driver(apple_pcie_driver);

MODULE_DESCRIPTION("Apple PCIe host bridge driver");
MODULE_LICENSE("GPL v2");