xref: /linux/drivers/pci/controller/pcie-apple.c (revision 40840afa53bed05b990b201d749dfee3bd6e7e42)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * PCIe host bridge driver for Apple system-on-chips.
4  *
5  * The HW is ECAM compliant, so once the controller is initialized,
6  * the driver mostly deals MSI mapping and handling of per-port
7  * interrupts (INTx, management and error signals).
8  *
9  * Initialization requires enabling power and clocks, along with a
10  * number of register pokes.
11  *
12  * Copyright (C) 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io>
13  * Copyright (C) 2021 Google LLC
14  * Copyright (C) 2021 Corellium LLC
15  * Copyright (C) 2021 Mark Kettenis <kettenis@openbsd.org>
16  *
17  * Author: Alyssa Rosenzweig <alyssa@rosenzweig.io>
18  * Author: Marc Zyngier <maz@kernel.org>
19  */
20 
21 #include <linux/gpio/consumer.h>
22 #include <linux/kernel.h>
23 #include <linux/iopoll.h>
24 #include <linux/irqchip/chained_irq.h>
25 #include <linux/irqchip/irq-msi-lib.h>
26 #include <linux/irqdomain.h>
27 #include <linux/list.h>
28 #include <linux/module.h>
29 #include <linux/msi.h>
30 #include <linux/of_irq.h>
31 #include <linux/pci-ecam.h>
32 
/*
 * Shared "core" (root complex) register block, mapped from platform
 * resource 1 (see apple_pcie_init()).
 */
#define CORE_RC_PHYIF_CTL		0x00024
#define   CORE_RC_PHYIF_CTL_RUN		BIT(0)
#define CORE_RC_PHYIF_STAT		0x00028
#define   CORE_RC_PHYIF_STAT_REFCLK	BIT(4)
#define CORE_RC_CTL			0x00050
#define   CORE_RC_CTL_RUN		BIT(0)
#define CORE_RC_STAT			0x00058
#define   CORE_RC_STAT_READY		BIT(0)
#define CORE_FABRIC_STAT		0x04000
#define   CORE_FABRIC_STAT_MASK		0x001F001F
/* Per-lane configuration/control, indexed by root port number */
#define CORE_LANE_CFG(port)		(0x84000 + 0x4000 * (port))
#define   CORE_LANE_CFG_REFCLK0REQ	BIT(0)
#define   CORE_LANE_CFG_REFCLK1REQ	BIT(1)
#define   CORE_LANE_CFG_REFCLK0ACK	BIT(2)
#define   CORE_LANE_CFG_REFCLK1ACK	BIT(3)
#define   CORE_LANE_CFG_REFCLKEN	(BIT(9) | BIT(10))
#define CORE_LANE_CTL(port)		(0x84004 + 0x4000 * (port))
#define   CORE_LANE_CTL_CFGACC		BIT(15)
51 
/*
 * Per-port register block, mapped from platform resource (port index + 2)
 * (see apple_pcie_setup_port()).
 */
#define PORT_LTSSMCTL			0x00080
#define   PORT_LTSSMCTL_START		BIT(0)
/* Interrupt status; PORT_INT_* values below are bit numbers within it */
#define PORT_INTSTAT			0x00100
#define   PORT_INT_TUNNEL_ERR		31
#define   PORT_INT_CPL_TIMEOUT		23
#define   PORT_INT_RID2SID_MAPERR	22
#define   PORT_INT_CPL_ABORT		21
#define   PORT_INT_MSI_BAD_DATA		19
#define   PORT_INT_MSI_ERR		18
#define   PORT_INT_REQADDR_GT32		17
#define   PORT_INT_AF_TIMEOUT		15
#define   PORT_INT_LINK_DOWN		14
#define   PORT_INT_LINK_UP		12
#define   PORT_INT_LINK_BWMGMT		11
#define   PORT_INT_AER_MASK		(15 << 4)
#define   PORT_INT_PORT_ERR		4
#define   PORT_INT_INTx(i)		i
#define   PORT_INT_INTx_MASK		15
#define PORT_INTMSK			0x00104
#define PORT_INTMSKSET			0x00108
#define PORT_INTMSKCLR			0x0010c
#define PORT_MSICFG			0x00124
#define   PORT_MSICFG_EN		BIT(0)
#define   PORT_MSICFG_L2MSINUM_SHIFT	4
#define PORT_MSIBASE			0x00128
#define   PORT_MSIBASE_1_SHIFT		16
#define PORT_MSIADDR			0x00168
#define PORT_LINKSTS			0x00208
#define   PORT_LINKSTS_UP		BIT(0)
#define   PORT_LINKSTS_BUSY		BIT(2)
#define PORT_LINKCMDSTS			0x00210
#define PORT_OUTS_NPREQS		0x00284
#define   PORT_OUTS_NPREQS_REQ		BIT(24)
#define   PORT_OUTS_NPREQS_CPL		BIT(16)
#define PORT_RXWR_FIFO			0x00288
#define   PORT_RXWR_FIFO_HDR		GENMASK(15, 10)
#define   PORT_RXWR_FIFO_DATA		GENMASK(9, 0)
#define PORT_RXRD_FIFO			0x0028C
#define   PORT_RXRD_FIFO_REQ		GENMASK(6, 0)
#define PORT_OUTS_CPLS			0x00290
#define   PORT_OUTS_CPLS_SHRD		GENMASK(14, 8)
#define   PORT_OUTS_CPLS_WAIT		GENMASK(6, 0)
#define PORT_APPCLK			0x00800
#define   PORT_APPCLK_EN		BIT(0)
#define   PORT_APPCLK_CGDIS		BIT(8)
#define PORT_STATUS			0x00804
#define   PORT_STATUS_READY		BIT(0)
#define PORT_REFCLK			0x00810
#define   PORT_REFCLK_EN		BIT(0)
#define   PORT_REFCLK_CGDIS		BIT(8)
#define PORT_PERST			0x00814
#define   PORT_PERST_OFF		BIT(0)
/* RID-to-StreamID mapping table entries, indexed 0..MAX_RID2SID-1 */
#define PORT_RID2SID(i16)		(0x00828 + 4 * (i16))
#define   PORT_RID2SID_VALID		BIT(31)
#define   PORT_RID2SID_SID_SHIFT	16
#define   PORT_RID2SID_BUS_SHIFT	8
#define   PORT_RID2SID_DEV_SHIFT	3
#define   PORT_RID2SID_FUNC_SHIFT	0
#define PORT_OUTS_PREQS_HDR		0x00980
#define   PORT_OUTS_PREQS_HDR_MASK	GENMASK(9, 0)
#define PORT_OUTS_PREQS_DATA		0x00984
#define   PORT_OUTS_PREQS_DATA_MASK	GENMASK(15, 0)
#define PORT_TUNCTRL			0x00988
#define   PORT_TUNCTRL_PERST_ON		BIT(0)
#define   PORT_TUNCTRL_PERST_ACK_REQ	BIT(1)
#define PORT_TUNSTAT			0x0098c
#define   PORT_TUNSTAT_PERST_ON		BIT(0)
#define   PORT_TUNSTAT_PERST_ACK_PEND	BIT(1)
#define PORT_PREFMEM_ENABLE		0x00994

/* Maximum RID2SID table size; the real depth is probed at port setup */
#define MAX_RID2SID			64
123 
/*
 * The doorbell address is set to 0xfffff000, which by convention
 * matches what macOS does, and it is possible to use any other
 * address (in the bottom 4GB, as the base register is only 32bit).
 * However, it has to be excluded from the IOVA range, and the DART
 * IOMMU driver has to know about it.
 */
#define DOORBELL_ADDR		CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR
132 
/* Host-bridge-wide state, shared by all root ports */
struct apple_pcie {
	struct mutex		lock;	/* protects bitmap and the ports' sid_map */
	struct device		*dev;
	void __iomem            *base;	/* shared "core" register block */
	unsigned long		*bitmap;	/* MSI vector allocation bitmap */
	struct list_head	ports;	/* apple_pcie_port::entry list */
	struct completion	event;	/* completed on link-up interrupt */
	struct irq_fwspec	fwspec;	/* template fwspec for the parent domain */
	u32			nvecs;	/* number of MSI vectors ("msi-ranges") */
};
143 
/* Per-root-port state */
struct apple_pcie_port {
	struct apple_pcie	*pcie;	/* owning host bridge */
	struct device_node	*np;	/* port device-tree node */
	void __iomem		*base;	/* per-port register block */
	struct irq_domain	*domain;	/* INTx/management interrupt domain */
	struct list_head	entry;	/* linked on apple_pcie::ports */
	DECLARE_BITMAP(sid_map, MAX_RID2SID);	/* in-use RID2SID slots */
	int			sid_map_sz;	/* usable RID2SID entries (probed) */
	int			idx;	/* port index, from first "reg" entry */
};
154 
155 static void rmw_set(u32 set, void __iomem *addr)
156 {
157 	writel_relaxed(readl_relaxed(addr) | set, addr);
158 }
159 
160 static void rmw_clear(u32 clr, void __iomem *addr)
161 {
162 	writel_relaxed(readl_relaxed(addr) & ~clr, addr);
163 }
164 
165 static void apple_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
166 {
167 	msg->address_hi = upper_32_bits(DOORBELL_ADDR);
168 	msg->address_lo = lower_32_bits(DOORBELL_ADDR);
169 	msg->data = data->hwirq;
170 }
171 
/*
 * Bottom-level MSI chip: only message composition is local, everything
 * else is delegated to the parent (wired) interrupt chip.
 */
static struct irq_chip apple_msi_bottom_chip = {
	.name			= "MSI",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_compose_msi_msg	= apple_msi_compose_msg,
};
181 
182 static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
183 				  unsigned int nr_irqs, void *args)
184 {
185 	struct apple_pcie *pcie = domain->host_data;
186 	struct irq_fwspec fwspec = pcie->fwspec;
187 	unsigned int i;
188 	int ret, hwirq;
189 
190 	mutex_lock(&pcie->lock);
191 
192 	hwirq = bitmap_find_free_region(pcie->bitmap, pcie->nvecs,
193 					order_base_2(nr_irqs));
194 
195 	mutex_unlock(&pcie->lock);
196 
197 	if (hwirq < 0)
198 		return -ENOSPC;
199 
200 	fwspec.param[fwspec.param_count - 2] += hwirq;
201 
202 	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &fwspec);
203 	if (ret)
204 		return ret;
205 
206 	for (i = 0; i < nr_irqs; i++) {
207 		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
208 					      &apple_msi_bottom_chip, pcie);
209 	}
210 
211 	return 0;
212 }
213 
214 static void apple_msi_domain_free(struct irq_domain *domain, unsigned int virq,
215 				  unsigned int nr_irqs)
216 {
217 	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
218 	struct apple_pcie *pcie = domain->host_data;
219 
220 	mutex_lock(&pcie->lock);
221 
222 	bitmap_release_region(pcie->bitmap, d->hwirq, order_base_2(nr_irqs));
223 
224 	mutex_unlock(&pcie->lock);
225 }
226 
/* Inner MSI domain ops; the PCI/MSI layer sits on top via the parent ops */
static const struct irq_domain_ops apple_msi_domain_ops = {
	.alloc	= apple_msi_domain_alloc,
	.free	= apple_msi_domain_free,
};
231 
232 static void apple_port_irq_mask(struct irq_data *data)
233 {
234 	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
235 
236 	writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKSET);
237 }
238 
239 static void apple_port_irq_unmask(struct irq_data *data)
240 {
241 	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
242 
243 	writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKCLR);
244 }
245 
246 static bool hwirq_is_intx(unsigned int hwirq)
247 {
248 	return BIT(hwirq) & PORT_INT_INTx_MASK;
249 }
250 
251 static void apple_port_irq_ack(struct irq_data *data)
252 {
253 	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
254 
255 	if (!hwirq_is_intx(data->hwirq))
256 		writel_relaxed(BIT(data->hwirq), port->base + PORT_INTSTAT);
257 }
258 
259 static int apple_port_irq_set_type(struct irq_data *data, unsigned int type)
260 {
261 	/*
262 	 * It doesn't seem that there is any way to configure the
263 	 * trigger, so assume INTx have to be level (as per the spec),
264 	 * and the rest is edge (which looks likely).
265 	 */
266 	if (hwirq_is_intx(data->hwirq) ^ !!(type & IRQ_TYPE_LEVEL_MASK))
267 		return -EINVAL;
268 
269 	irqd_set_trigger_type(data, type);
270 	return 0;
271 }
272 
/* Per-port chip, backed by PORT_INTMSK{SET,CLR} and PORT_INTSTAT */
static struct irq_chip apple_port_irqchip = {
	.name		= "PCIe",
	.irq_ack	= apple_port_irq_ack,
	.irq_mask	= apple_port_irq_mask,
	.irq_unmask	= apple_port_irq_unmask,
	.irq_set_type	= apple_port_irq_set_type,
};
280 
281 static int apple_port_irq_domain_alloc(struct irq_domain *domain,
282 				       unsigned int virq, unsigned int nr_irqs,
283 				       void *args)
284 {
285 	struct apple_pcie_port *port = domain->host_data;
286 	struct irq_fwspec *fwspec = args;
287 	int i;
288 
289 	for (i = 0; i < nr_irqs; i++) {
290 		irq_flow_handler_t flow = handle_edge_irq;
291 		unsigned int type = IRQ_TYPE_EDGE_RISING;
292 
293 		if (hwirq_is_intx(fwspec->param[0] + i)) {
294 			flow = handle_level_irq;
295 			type = IRQ_TYPE_LEVEL_HIGH;
296 		}
297 
298 		irq_domain_set_info(domain, virq + i, fwspec->param[0] + i,
299 				    &apple_port_irqchip, port, flow,
300 				    NULL, NULL);
301 
302 		irq_set_irq_type(virq + i, type);
303 	}
304 
305 	return 0;
306 }
307 
308 static void apple_port_irq_domain_free(struct irq_domain *domain,
309 				       unsigned int virq, unsigned int nr_irqs)
310 {
311 	int i;
312 
313 	for (i = 0; i < nr_irqs; i++) {
314 		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
315 
316 		irq_set_handler(virq + i, NULL);
317 		irq_domain_reset_irq_data(d);
318 	}
319 }
320 
/* One-cell translation: the cell is the bit number in PORT_INTSTAT */
static const struct irq_domain_ops apple_port_irq_domain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= apple_port_irq_domain_alloc,
	.free		= apple_port_irq_domain_free,
};
326 
327 static void apple_port_irq_handler(struct irq_desc *desc)
328 {
329 	struct apple_pcie_port *port = irq_desc_get_handler_data(desc);
330 	struct irq_chip *chip = irq_desc_get_chip(desc);
331 	unsigned long stat;
332 	int i;
333 
334 	chained_irq_enter(chip, desc);
335 
336 	stat = readl_relaxed(port->base + PORT_INTSTAT);
337 
338 	for_each_set_bit(i, &stat, 32)
339 		generic_handle_domain_irq(port->domain, i);
340 
341 	chained_irq_exit(chip, desc);
342 }
343 
/*
 * Set up the per-port interrupt hierarchy: a 32-entry linear domain
 * covering PORT_INTSTAT, demultiplexed from the port's wired interrupt,
 * plus the port's MSI doorbell configuration.
 *
 * Returns 0 on success, -ENXIO if the wired interrupt cannot be mapped,
 * -ENOMEM if the domain cannot be created.
 */
static int apple_pcie_port_setup_irq(struct apple_pcie_port *port)
{
	struct fwnode_handle *fwnode = &port->np->fwnode;
	unsigned int irq;

	/* FIXME: consider moving each interrupt under each port */
	irq = irq_of_parse_and_map(to_of_node(dev_fwnode(port->pcie->dev)),
				   port->idx);
	if (!irq)
		return -ENXIO;

	port->domain = irq_domain_create_linear(fwnode, 32,
						&apple_port_irq_domain_ops,
						port);
	if (!port->domain)
		return -ENOMEM;

	/* Disable all interrupts */
	writel_relaxed(~0, port->base + PORT_INTMSKSET);
	/* Clear stale status (write-1-to-clear, as in apple_port_irq_ack()) */
	writel_relaxed(~0, port->base + PORT_INTSTAT);

	irq_set_chained_handler_and_data(irq, apple_port_irq_handler, port);

	/* Configure MSI base address */
	BUILD_BUG_ON(upper_32_bits(DOORBELL_ADDR));
	writel_relaxed(lower_32_bits(DOORBELL_ADDR), port->base + PORT_MSIADDR);

	/* Enable MSIs, shared between all ports */
	writel_relaxed(0, port->base + PORT_MSIBASE);
	writel_relaxed((ilog2(port->pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT) |
		       PORT_MSICFG_EN, port->base + PORT_MSICFG);

	return 0;
}
378 
379 static irqreturn_t apple_pcie_port_irq(int irq, void *data)
380 {
381 	struct apple_pcie_port *port = data;
382 	unsigned int hwirq = irq_domain_get_irq_data(port->domain, irq)->hwirq;
383 
384 	switch (hwirq) {
385 	case PORT_INT_LINK_UP:
386 		dev_info_ratelimited(port->pcie->dev, "Link up on %pOF\n",
387 				     port->np);
388 		complete_all(&port->pcie->event);
389 		break;
390 	case PORT_INT_LINK_DOWN:
391 		dev_info_ratelimited(port->pcie->dev, "Link down on %pOF\n",
392 				     port->np);
393 		break;
394 	default:
395 		return IRQ_NONE;
396 	}
397 
398 	return IRQ_HANDLED;
399 }
400 
401 static int apple_pcie_port_register_irqs(struct apple_pcie_port *port)
402 {
403 	static struct {
404 		unsigned int	hwirq;
405 		const char	*name;
406 	} port_irqs[] = {
407 		{ PORT_INT_LINK_UP,	"Link up",	},
408 		{ PORT_INT_LINK_DOWN,	"Link down",	},
409 	};
410 	int i;
411 
412 	for (i = 0; i < ARRAY_SIZE(port_irqs); i++) {
413 		struct irq_fwspec fwspec = {
414 			.fwnode		= &port->np->fwnode,
415 			.param_count	= 1,
416 			.param		= {
417 				[0]	= port_irqs[i].hwirq,
418 			},
419 		};
420 		unsigned int irq;
421 		int ret;
422 
423 		irq = irq_domain_alloc_irqs(port->domain, 1, NUMA_NO_NODE,
424 					    &fwspec);
425 		if (WARN_ON(!irq))
426 			continue;
427 
428 		ret = request_irq(irq, apple_pcie_port_irq, 0,
429 				  port_irqs[i].name, port);
430 		WARN_ON(ret);
431 	}
432 
433 	return 0;
434 }
435 
/*
 * Enable the reference clocks for a port: wait for the PHY refclk to be
 * reported as running, then perform the request/ack handshake for both
 * lane refclks through a CFGACC access window, and finally enable the
 * clocks at both the core and port level.
 *
 * Returns 0 on success or -ETIMEDOUT if any handshake times out.
 */
static int apple_pcie_setup_refclk(struct apple_pcie *pcie,
				   struct apple_pcie_port *port)
{
	u32 stat;
	int res;

	/* The PHY reference clock must be running before anything else */
	res = readl_relaxed_poll_timeout(pcie->base + CORE_RC_PHYIF_STAT, stat,
					 stat & CORE_RC_PHYIF_STAT_REFCLK,
					 100, 50000);
	if (res < 0)
		return res;

	/* Open the lane configuration access window for this port */
	rmw_set(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));
	/* Request refclk 0 and wait for the hardware to ack it */
	rmw_set(CORE_LANE_CFG_REFCLK0REQ, pcie->base + CORE_LANE_CFG(port->idx));

	res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
					 stat, stat & CORE_LANE_CFG_REFCLK0ACK,
					 100, 50000);
	if (res < 0)
		return res;

	/* Same request/ack handshake for refclk 1 */
	rmw_set(CORE_LANE_CFG_REFCLK1REQ, pcie->base + CORE_LANE_CFG(port->idx));
	res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
					 stat, stat & CORE_LANE_CFG_REFCLK1ACK,
					 100, 50000);

	if (res < 0)
		return res;

	/* Close the configuration access window again */
	rmw_clear(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));

	/* Finally enable the clocks at both core and port level */
	rmw_set(CORE_LANE_CFG_REFCLKEN, pcie->base + CORE_LANE_CFG(port->idx));
	rmw_set(PORT_REFCLK_EN, port->base + PORT_REFCLK);

	return 0;
}
472 
473 static u32 apple_pcie_rid2sid_write(struct apple_pcie_port *port,
474 				    int idx, u32 val)
475 {
476 	writel_relaxed(val, port->base + PORT_RID2SID(idx));
477 	/* Read back to ensure completion of the write */
478 	return readl_relaxed(port->base + PORT_RID2SID(idx));
479 }
480 
/*
 * Bring up one root port described by DT node @np: map its registers,
 * sequence PERST#/clocks per the PCIe CEM timing rules, probe the
 * RID2SID table depth, set up interrupts and start link training.
 *
 * Returns 0 on success (even if the link does not come up, which is
 * only warned about), or a negative errno on setup failure.
 */
static int apple_pcie_setup_port(struct apple_pcie *pcie,
				 struct device_node *np)
{
	struct platform_device *platform = to_platform_device(pcie->dev);
	struct apple_pcie_port *port;
	struct gpio_desc *reset;
	u32 stat, idx;
	int ret, i;

	/* PERST# GPIO, requested deasserted (OUT_LOW) until we assert it */
	reset = devm_fwnode_gpiod_get(pcie->dev, of_fwnode_handle(np), "reset",
				      GPIOD_OUT_LOW, "PERST#");
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	port = devm_kzalloc(pcie->dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	ret = of_property_read_u32_index(np, "reg", 0, &idx);
	if (ret)
		return ret;

	/* Use the first reg entry to work out the port index */
	port->idx = idx >> 11;
	port->pcie = pcie;
	port->np = np;

	/* Resources 0/1 are ECAM and core; ports start at index 2 */
	port->base = devm_platform_ioremap_resource(platform, port->idx + 2);
	if (IS_ERR(port->base))
		return PTR_ERR(port->base);

	rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);

	/* Assert PERST# before setting up the clock */
	gpiod_set_value(reset, 1);

	ret = apple_pcie_setup_refclk(pcie, port);
	if (ret < 0)
		return ret;

	/* The minimal Tperst-clk value is 100us (PCIe CEM r5.0, 2.9.2) */
	usleep_range(100, 200);

	/* Deassert PERST# */
	rmw_set(PORT_PERST_OFF, port->base + PORT_PERST);
	gpiod_set_value(reset, 0);

	/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
	msleep(100);

	ret = readl_relaxed_poll_timeout(port->base + PORT_STATUS, stat,
					 stat & PORT_STATUS_READY, 100, 250000);
	if (ret < 0) {
		dev_err(pcie->dev, "port %pOF ready wait timeout\n", np);
		return ret;
	}

	/* Port is up; allow the clocks to be gated again */
	rmw_clear(PORT_REFCLK_CGDIS, port->base + PORT_REFCLK);
	rmw_clear(PORT_APPCLK_CGDIS, port->base + PORT_APPCLK);

	ret = apple_pcie_port_setup_irq(port);
	if (ret)
		return ret;

	/* Reset all RID/SID mappings, and check for RAZ/WI registers */
	for (i = 0; i < MAX_RID2SID; i++) {
		if (apple_pcie_rid2sid_write(port, i, 0xbad1d) != 0xbad1d)
			break;
		apple_pcie_rid2sid_write(port, i, 0);
	}

	dev_dbg(pcie->dev, "%pOF: %d RID/SID mapping entries\n", np, i);

	port->sid_map_sz = i;

	list_add_tail(&port->entry, &pcie->ports);
	init_completion(&pcie->event);

	ret = apple_pcie_port_register_irqs(port);
	WARN_ON(ret);

	/* Start link training; the link-up IRQ completes pcie->event */
	writel_relaxed(PORT_LTSSMCTL_START, port->base + PORT_LTSSMCTL);

	if (!wait_for_completion_timeout(&pcie->event, HZ / 10))
		dev_warn(pcie->dev, "%pOF link didn't come up\n", np);

	return 0;
}
569 
/*
 * Capabilities advertised to the generic PCI/MSI layer: MSI-X and
 * multi-MSI are supported, masking is handled by the parent chip.
 */
static const struct msi_parent_ops apple_msi_parent_ops = {
	.supported_flags	= (MSI_GENERIC_FLAGS_MASK	|
				   MSI_FLAG_PCI_MSIX		|
				   MSI_FLAG_MULTI_PCI_MSI),
	.required_flags		= (MSI_FLAG_USE_DEF_DOM_OPS	|
				   MSI_FLAG_USE_DEF_CHIP_OPS	|
				   MSI_FLAG_PCI_MSI_MASK_PARENT),
	.chip_flags		= MSI_CHIP_FLAG_SET_EOI,
	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};
581 
582 static int apple_msi_init(struct apple_pcie *pcie)
583 {
584 	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
585 	struct irq_domain_info info = {
586 		.fwnode		= fwnode,
587 		.ops		= &apple_msi_domain_ops,
588 		.size		= pcie->nvecs,
589 		.host_data	= pcie,
590 	};
591 	struct of_phandle_args args = {};
592 	int ret;
593 
594 	ret = of_parse_phandle_with_args(to_of_node(fwnode), "msi-ranges",
595 					 "#interrupt-cells", 0, &args);
596 	if (ret)
597 		return ret;
598 
599 	ret = of_property_read_u32_index(to_of_node(fwnode), "msi-ranges",
600 					 args.args_count + 1, &pcie->nvecs);
601 	if (ret)
602 		return ret;
603 
604 	of_phandle_args_to_fwspec(args.np, args.args, args.args_count,
605 				  &pcie->fwspec);
606 
607 	pcie->bitmap = devm_bitmap_zalloc(pcie->dev, pcie->nvecs, GFP_KERNEL);
608 	if (!pcie->bitmap)
609 		return -ENOMEM;
610 
611 	info.parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED);
612 	if (!info.parent) {
613 		dev_err(pcie->dev, "failed to find parent domain\n");
614 		return -ENXIO;
615 	}
616 
617 	if (!msi_create_parent_irq_domain(&info, &apple_msi_parent_ops)) {
618 		dev_err(pcie->dev, "failed to create IRQ domain\n");
619 		return -ENOMEM;
620 	}
621 	return 0;
622 }
623 
624 static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev)
625 {
626 	struct pci_config_window *cfg = pdev->sysdata;
627 	struct apple_pcie *pcie = cfg->priv;
628 	struct pci_dev *port_pdev;
629 	struct apple_pcie_port *port;
630 
631 	/* Find the root port this device is on */
632 	port_pdev = pcie_find_root_port(pdev);
633 
634 	/* If finding the port itself, nothing to do */
635 	if (WARN_ON(!port_pdev) || pdev == port_pdev)
636 		return NULL;
637 
638 	list_for_each_entry(port, &pcie->ports, entry) {
639 		if (port->idx == PCI_SLOT(port_pdev->devfn))
640 			return port;
641 	}
642 
643 	return NULL;
644 }
645 
646 static int apple_pcie_enable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
647 {
648 	u32 sid, rid = pci_dev_id(pdev);
649 	struct apple_pcie_port *port;
650 	int idx, err;
651 
652 	port = apple_pcie_get_port(pdev);
653 	if (!port)
654 		return 0;
655 
656 	dev_dbg(&pdev->dev, "added to bus %s, index %d\n",
657 		pci_name(pdev->bus->self), port->idx);
658 
659 	err = of_map_id(port->pcie->dev->of_node, rid, "iommu-map",
660 			"iommu-map-mask", NULL, &sid);
661 	if (err)
662 		return err;
663 
664 	mutex_lock(&port->pcie->lock);
665 
666 	idx = bitmap_find_free_region(port->sid_map, port->sid_map_sz, 0);
667 	if (idx >= 0) {
668 		apple_pcie_rid2sid_write(port, idx,
669 					 PORT_RID2SID_VALID |
670 					 (sid << PORT_RID2SID_SID_SHIFT) | rid);
671 
672 		dev_dbg(&pdev->dev, "mapping RID%x to SID%x (index %d)\n",
673 			rid, sid, idx);
674 	}
675 
676 	mutex_unlock(&port->pcie->lock);
677 
678 	return idx >= 0 ? 0 : -ENOSPC;
679 }
680 
681 static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
682 {
683 	struct apple_pcie_port *port;
684 	u32 rid = pci_dev_id(pdev);
685 	int idx;
686 
687 	port = apple_pcie_get_port(pdev);
688 	if (!port)
689 		return;
690 
691 	mutex_lock(&port->pcie->lock);
692 
693 	for_each_set_bit(idx, port->sid_map, port->sid_map_sz) {
694 		u32 val;
695 
696 		val = readl_relaxed(port->base + PORT_RID2SID(idx));
697 		if ((val & 0xffff) == rid) {
698 			apple_pcie_rid2sid_write(port, idx, 0);
699 			bitmap_release_region(port->sid_map, idx, 0);
700 			dev_dbg(&pdev->dev, "Released %x (%d)\n", val, idx);
701 			break;
702 		}
703 	}
704 
705 	mutex_unlock(&port->pcie->lock);
706 }
707 
/*
 * ECAM init callback, invoked from the generic host probe path:
 * allocate the bridge state, map the shared register block, set up the
 * MSI infrastructure and then bring up every port listed in DT.
 *
 * Returns 0 on success or a negative errno.
 */
static int apple_pcie_init(struct pci_config_window *cfg)
{
	struct device *dev = cfg->parent;
	struct platform_device *platform = to_platform_device(dev);
	struct apple_pcie *pcie;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pcie->dev = dev;

	mutex_init(&pcie->lock);

	/* Platform resource 1 holds the shared "core" register block */
	pcie->base = devm_platform_ioremap_resource(platform, 1);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	cfg->priv = pcie;
	INIT_LIST_HEAD(&pcie->ports);

	/* MSIs are shared between ports, so set them up before the ports */
	ret = apple_msi_init(pcie);
	if (ret)
		return ret;

	for_each_child_of_node_scoped(dev->of_node, of_port) {
		ret = apple_pcie_setup_port(pcie, of_port);
		if (ret) {
			dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret);
			return ret;
		}
	}

	return 0;
}
744 
/* ECAM-compliant config space; only init and device hooks are custom */
static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
	.init		= apple_pcie_init,
	.enable_device	= apple_pcie_enable_device,
	.disable_device	= apple_pcie_disable_device,
	.pci_ops	= {
		.map_bus	= pci_ecam_map_bus,
		.read		= pci_generic_config_read,
		.write		= pci_generic_config_write,
	}
};
755 
/* The ECAM ops are carried as match data for the common host probe */
static const struct of_device_id apple_pcie_of_match[] = {
	{ .compatible = "apple,pcie", .data = &apple_pcie_cfg_ecam_ops },
	{ }
};
MODULE_DEVICE_TABLE(of, apple_pcie_of_match);
761 
/* Probing is fully delegated to the generic ECAM host bridge code */
static struct platform_driver apple_pcie_driver = {
	.probe	= pci_host_common_probe,
	.driver	= {
		.name			= "pcie-apple",
		.of_match_table		= apple_pcie_of_match,
		.suppress_bind_attrs	= true,
	},
};
module_platform_driver(apple_pcie_driver);

MODULE_DESCRIPTION("Apple PCIe host bridge driver");
MODULE_LICENSE("GPL v2");
774