xref: /linux/drivers/pci/controller/pcie-apple.c (revision 379f604cc3dc2c865dc2b13d81faa166b6df59ec)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * PCIe host bridge driver for Apple system-on-chips.
4  *
5  * The HW is ECAM compliant, so once the controller is initialized,
6  * the driver mostly deals MSI mapping and handling of per-port
7  * interrupts (INTx, management and error signals).
8  *
9  * Initialization requires enabling power and clocks, along with a
10  * number of register pokes.
11  *
12  * Copyright (C) 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io>
13  * Copyright (C) 2021 Google LLC
14  * Copyright (C) 2021 Corellium LLC
15  * Copyright (C) 2021 Mark Kettenis <kettenis@openbsd.org>
16  *
17  * Author: Alyssa Rosenzweig <alyssa@rosenzweig.io>
18  * Author: Marc Zyngier <maz@kernel.org>
19  */
20 
21 #include <linux/bitfield.h>
22 #include <linux/gpio/consumer.h>
23 #include <linux/kernel.h>
24 #include <linux/iopoll.h>
25 #include <linux/irqchip/chained_irq.h>
26 #include <linux/irqchip/irq-msi-lib.h>
27 #include <linux/irqdomain.h>
28 #include <linux/list.h>
29 #include <linux/module.h>
30 #include <linux/msi.h>
31 #include <linux/of_irq.h>
32 #include <linux/pci-ecam.h>
33 
34 #include "pci-host-common.h"
35 
36 /* T8103 (original M1) and related SoCs */
37 #define CORE_RC_PHYIF_CTL		0x00024
38 #define   CORE_RC_PHYIF_CTL_RUN		BIT(0)
39 #define CORE_RC_PHYIF_STAT		0x00028
40 #define   CORE_RC_PHYIF_STAT_REFCLK	BIT(4)
41 #define CORE_RC_CTL			0x00050
42 #define   CORE_RC_CTL_RUN		BIT(0)
43 #define CORE_RC_STAT			0x00058
44 #define   CORE_RC_STAT_READY		BIT(0)
45 #define CORE_FABRIC_STAT		0x04000
46 #define   CORE_FABRIC_STAT_MASK		0x001F001F
47 
48 #define CORE_PHY_DEFAULT_BASE(port)	(0x84000 + 0x4000 * (port))
49 
50 #define PHY_LANE_CFG			0x00000
51 #define   PHY_LANE_CFG_REFCLK0REQ	BIT(0)
52 #define   PHY_LANE_CFG_REFCLK1REQ	BIT(1)
53 #define   PHY_LANE_CFG_REFCLK0ACK	BIT(2)
54 #define   PHY_LANE_CFG_REFCLK1ACK	BIT(3)
55 #define   PHY_LANE_CFG_REFCLKEN		(BIT(9) | BIT(10))
56 #define   PHY_LANE_CFG_REFCLKCGEN	(BIT(30) | BIT(31))
57 #define PHY_LANE_CTL			0x00004
58 #define   PHY_LANE_CTL_CFGACC		BIT(15)
59 
60 #define PORT_LTSSMCTL			0x00080
61 #define   PORT_LTSSMCTL_START		BIT(0)
62 #define PORT_INTSTAT			0x00100
63 #define   PORT_INT_TUNNEL_ERR		31
64 #define   PORT_INT_CPL_TIMEOUT		23
65 #define   PORT_INT_RID2SID_MAPERR	22
66 #define   PORT_INT_CPL_ABORT		21
67 #define   PORT_INT_MSI_BAD_DATA		19
68 #define   PORT_INT_MSI_ERR		18
69 #define   PORT_INT_REQADDR_GT32		17
70 #define   PORT_INT_AF_TIMEOUT		15
71 #define   PORT_INT_LINK_DOWN		14
72 #define   PORT_INT_LINK_UP		12
73 #define   PORT_INT_LINK_BWMGMT		11
74 #define   PORT_INT_AER_MASK		(15 << 4)
75 #define   PORT_INT_PORT_ERR		4
76 #define   PORT_INT_INTx(i)		i
77 #define   PORT_INT_INTx_MASK		15
78 #define PORT_INTMSK			0x00104
79 #define PORT_INTMSKSET			0x00108
80 #define PORT_INTMSKCLR			0x0010c
81 #define PORT_MSICFG			0x00124
82 #define   PORT_MSICFG_EN		BIT(0)
83 #define   PORT_MSICFG_L2MSINUM_SHIFT	4
84 #define PORT_MSIBASE			0x00128
85 #define   PORT_MSIBASE_1_SHIFT		16
86 #define PORT_MSIADDR			0x00168
87 #define PORT_LINKSTS			0x00208
88 #define   PORT_LINKSTS_UP		BIT(0)
89 #define   PORT_LINKSTS_BUSY		BIT(2)
90 #define PORT_LINKCMDSTS			0x00210
91 #define PORT_OUTS_NPREQS		0x00284
92 #define   PORT_OUTS_NPREQS_REQ		BIT(24)
93 #define   PORT_OUTS_NPREQS_CPL		BIT(16)
94 #define PORT_RXWR_FIFO			0x00288
95 #define   PORT_RXWR_FIFO_HDR		GENMASK(15, 10)
96 #define   PORT_RXWR_FIFO_DATA		GENMASK(9, 0)
97 #define PORT_RXRD_FIFO			0x0028C
98 #define   PORT_RXRD_FIFO_REQ		GENMASK(6, 0)
99 #define PORT_OUTS_CPLS			0x00290
100 #define   PORT_OUTS_CPLS_SHRD		GENMASK(14, 8)
101 #define   PORT_OUTS_CPLS_WAIT		GENMASK(6, 0)
102 #define PORT_APPCLK			0x00800
103 #define   PORT_APPCLK_EN		BIT(0)
104 #define   PORT_APPCLK_CGDIS		BIT(8)
105 #define PORT_STATUS			0x00804
106 #define   PORT_STATUS_READY		BIT(0)
107 #define PORT_REFCLK			0x00810
108 #define   PORT_REFCLK_EN		BIT(0)
109 #define   PORT_REFCLK_CGDIS		BIT(8)
110 #define PORT_PERST			0x00814
111 #define   PORT_PERST_OFF		BIT(0)
112 #define PORT_RID2SID			0x00828
113 #define   PORT_RID2SID_VALID		BIT(31)
114 #define   PORT_RID2SID_SID_SHIFT	16
115 #define   PORT_RID2SID_BUS_SHIFT	8
116 #define   PORT_RID2SID_DEV_SHIFT	3
117 #define   PORT_RID2SID_FUNC_SHIFT	0
118 #define PORT_OUTS_PREQS_HDR		0x00980
119 #define   PORT_OUTS_PREQS_HDR_MASK	GENMASK(9, 0)
120 #define PORT_OUTS_PREQS_DATA		0x00984
121 #define   PORT_OUTS_PREQS_DATA_MASK	GENMASK(15, 0)
122 #define PORT_TUNCTRL			0x00988
123 #define   PORT_TUNCTRL_PERST_ON		BIT(0)
124 #define   PORT_TUNCTRL_PERST_ACK_REQ	BIT(1)
125 #define PORT_TUNSTAT			0x0098c
126 #define   PORT_TUNSTAT_PERST_ON		BIT(0)
127 #define   PORT_TUNSTAT_PERST_ACK_PEND	BIT(1)
128 #define PORT_PREFMEM_ENABLE		0x00994
129 
130 /* T602x (M2-pro and co) */
131 #define PORT_T602X_MSIADDR	0x016c
132 #define PORT_T602X_MSIADDR_HI	0x0170
133 #define PORT_T602X_PERST	0x082c
134 #define PORT_T602X_RID2SID	0x3000
135 #define PORT_T602X_MSIMAP	0x3800
136 
137 #define PORT_MSIMAP_ENABLE	BIT(31)
138 #define PORT_MSIMAP_TARGET	GENMASK(7, 0)
139 
140 /*
141  * The doorbell address is set to 0xfffff000, which by convention
142  * matches what MacOS does, and it is possible to use any other
143  * address (in the bottom 4GB, as the base register is only 32bit).
144  * However, it has to be excluded from the IOVA range, and the DART
145  * driver has to know about it.
146  */
147 #define DOORBELL_ADDR		CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR
148 
/*
 * Per-SoC-generation register layout. A zero offset means the register
 * does not exist on that generation and the code path using it is skipped.
 */
struct hw_info {
	u32 phy_lane_ctl;	/* PHY lane control register, 0 if absent */
	u32 port_msiaddr;	/* MSI doorbell address (low 32 bits) */
	u32 port_msiaddr_hi;	/* MSI doorbell address high register, 0 if absent */
	u32 port_refclk;	/* per-port refclk control register, 0 if absent */
	u32 port_perst;		/* per-port PERST# control register */
	u32 port_rid2sid;	/* base of the RID -> SID mapping table */
	u32 port_msimap;	/* base of the MSI mapping table, 0 if absent */
	u32 max_rid2sid;	/* upper bound of RID2SID entries to probe for */
};
159 
/* Register layout for T8103 (original M1) and related SoCs */
static const struct hw_info t8103_hw = {
	.phy_lane_ctl		= PHY_LANE_CTL,
	.port_msiaddr		= PORT_MSIADDR,
	.port_msiaddr_hi	= 0,
	.port_refclk		= PORT_REFCLK,
	.port_perst		= PORT_PERST,
	.port_rid2sid		= PORT_RID2SID,
	.port_msimap		= 0,
	.max_rid2sid		= 64,
};
170 
/* Register layout for T602x (M2-pro and co) */
static const struct hw_info t602x_hw = {
	.phy_lane_ctl		= 0,
	.port_msiaddr		= PORT_T602X_MSIADDR,
	.port_msiaddr_hi	= PORT_T602X_MSIADDR_HI,
	.port_refclk		= 0,
	.port_perst		= PORT_T602X_PERST,
	.port_rid2sid		= PORT_T602X_RID2SID,
	.port_msimap		= PORT_T602X_MSIMAP,
	/* 16 on t602x, guess for autodetect on future HW */
	.max_rid2sid		= 512,
};
182 
/* Per-host-bridge state */
struct apple_pcie {
	struct mutex		lock;	/* protects bitmap and RID2SID updates */
	struct device		*dev;
	void __iomem            *base;	/* RC/core register block */
	const struct hw_info	*hw;	/* SoC-specific register layout */
	unsigned long		*bitmap;	/* MSI vector allocator, nvecs bits */
	struct list_head	ports;	/* attached apple_pcie_port entries */
	struct list_head	entry;	/* link on the global pcie_list */
	struct completion	event;	/* signalled on link-up interrupt */
	struct irq_fwspec	fwspec;	/* template for the parent wired IRQ range */
	u32			nvecs;	/* number of MSI vectors available */
};
195 
/* Per-root-port state */
struct apple_pcie_port {
	raw_spinlock_t		lock;	/* serializes PORT_INTMSK RMW */
	struct apple_pcie	*pcie;	/* owning host bridge */
	struct device_node	*np;	/* port DT node (refcounted on success) */
	void __iomem		*base;	/* port register block */
	void __iomem		*phy;	/* PHY register block */
	struct irq_domain	*domain;	/* per-port INTx/event IRQ domain */
	struct list_head	entry;	/* link on pcie->ports */
	unsigned long		*sid_map;	/* allocator for RID2SID table slots */
	int			sid_map_sz;	/* usable RID2SID entries (probed) */
	int			idx;	/* port index from the first "reg" cell */
};
208 
/* All registered host bridges, protected by pcie_list_lock */
static LIST_HEAD(pcie_list);
static DEFINE_MUTEX(pcie_list_lock);
211 
/* Read-modify-write helper: OR @set into the register at @addr */
static void rmw_set(u32 set, void __iomem *addr)
{
	u32 val = readl_relaxed(addr);

	writel_relaxed(val | set, addr);
}
216 
/* Read-modify-write helper: clear the @clr bits in the register at @addr */
static void rmw_clear(u32 clr, void __iomem *addr)
{
	u32 val = readl_relaxed(addr);

	writel_relaxed(val & ~clr, addr);
}
221 
apple_msi_compose_msg(struct irq_data * data,struct msi_msg * msg)222 static void apple_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
223 {
224 	msg->address_hi = upper_32_bits(DOORBELL_ADDR);
225 	msg->address_lo = lower_32_bits(DOORBELL_ADDR);
226 	msg->data = data->hwirq;
227 }
228 
/*
 * Bottom-level MSI irqchip: message composition is handled here, while
 * mask/unmask/eoi/affinity/type are all delegated to the parent (wired)
 * interrupt domain.
 */
static struct irq_chip apple_msi_bottom_chip = {
	.name			= "MSI",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_compose_msi_msg	= apple_msi_compose_msg,
};
238 
apple_msi_domain_alloc(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs,void * args)239 static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
240 				  unsigned int nr_irqs, void *args)
241 {
242 	struct apple_pcie *pcie = domain->host_data;
243 	struct irq_fwspec fwspec = pcie->fwspec;
244 	unsigned int i;
245 	int ret, hwirq;
246 
247 	mutex_lock(&pcie->lock);
248 
249 	hwirq = bitmap_find_free_region(pcie->bitmap, pcie->nvecs,
250 					order_base_2(nr_irqs));
251 
252 	mutex_unlock(&pcie->lock);
253 
254 	if (hwirq < 0)
255 		return -ENOSPC;
256 
257 	fwspec.param[fwspec.param_count - 2] += hwirq;
258 
259 	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &fwspec);
260 	if (ret)
261 		return ret;
262 
263 	for (i = 0; i < nr_irqs; i++) {
264 		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
265 					      &apple_msi_bottom_chip, pcie);
266 	}
267 
268 	return 0;
269 }
270 
apple_msi_domain_free(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs)271 static void apple_msi_domain_free(struct irq_domain *domain, unsigned int virq,
272 				  unsigned int nr_irqs)
273 {
274 	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
275 	struct apple_pcie *pcie = domain->host_data;
276 
277 	mutex_lock(&pcie->lock);
278 
279 	bitmap_release_region(pcie->bitmap, d->hwirq, order_base_2(nr_irqs));
280 
281 	mutex_unlock(&pcie->lock);
282 }
283 
/* Ops for the MSI middle domain sitting between PCI-MSI and the wired domain */
static const struct irq_domain_ops apple_msi_domain_ops = {
	.alloc	= apple_msi_domain_alloc,
	.free	= apple_msi_domain_free,
};
288 
apple_port_irq_mask(struct irq_data * data)289 static void apple_port_irq_mask(struct irq_data *data)
290 {
291 	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
292 
293 	guard(raw_spinlock_irqsave)(&port->lock);
294 	rmw_set(BIT(data->hwirq), port->base + PORT_INTMSK);
295 }
296 
apple_port_irq_unmask(struct irq_data * data)297 static void apple_port_irq_unmask(struct irq_data *data)
298 {
299 	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
300 
301 	guard(raw_spinlock_irqsave)(&port->lock);
302 	rmw_clear(BIT(data->hwirq), port->base + PORT_INTMSK);
303 }
304 
hwirq_is_intx(unsigned int hwirq)305 static bool hwirq_is_intx(unsigned int hwirq)
306 {
307 	return BIT(hwirq) & PORT_INT_INTx_MASK;
308 }
309 
apple_port_irq_ack(struct irq_data * data)310 static void apple_port_irq_ack(struct irq_data *data)
311 {
312 	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
313 
314 	if (!hwirq_is_intx(data->hwirq))
315 		writel_relaxed(BIT(data->hwirq), port->base + PORT_INTSTAT);
316 }
317 
static int apple_port_irq_set_type(struct irq_data *data, unsigned int type)
{
	/*
	 * It doesn't seem that there is any way to configure the
	 * trigger, so assume INTx have to be level (as per the spec),
	 * and the rest is edge (which looks likely).
	 *
	 * The XOR rejects exactly the mismatched combinations: an edge
	 * type requested for an INTx line, or a level type requested
	 * for any other (edge) line.
	 */
	if (hwirq_is_intx(data->hwirq) ^ !!(type & IRQ_TYPE_LEVEL_MASK))
		return -EINVAL;

	irqd_set_trigger_type(data, type);
	return 0;
}
331 
/* irqchip for the per-port interrupts (INTx, link events, errors) */
static struct irq_chip apple_port_irqchip = {
	.name		= "PCIe",
	.irq_ack	= apple_port_irq_ack,
	.irq_mask	= apple_port_irq_mask,
	.irq_unmask	= apple_port_irq_unmask,
	.irq_set_type	= apple_port_irq_set_type,
};
339 
apple_port_irq_domain_alloc(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs,void * args)340 static int apple_port_irq_domain_alloc(struct irq_domain *domain,
341 				       unsigned int virq, unsigned int nr_irqs,
342 				       void *args)
343 {
344 	struct apple_pcie_port *port = domain->host_data;
345 	struct irq_fwspec *fwspec = args;
346 	int i;
347 
348 	for (i = 0; i < nr_irqs; i++) {
349 		irq_flow_handler_t flow = handle_edge_irq;
350 		unsigned int type = IRQ_TYPE_EDGE_RISING;
351 
352 		if (hwirq_is_intx(fwspec->param[0] + i)) {
353 			flow = handle_level_irq;
354 			type = IRQ_TYPE_LEVEL_HIGH;
355 		}
356 
357 		irq_domain_set_info(domain, virq + i, fwspec->param[0] + i,
358 				    &apple_port_irqchip, port, flow,
359 				    NULL, NULL);
360 
361 		irq_set_irq_type(virq + i, type);
362 	}
363 
364 	return 0;
365 }
366 
apple_port_irq_domain_free(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs)367 static void apple_port_irq_domain_free(struct irq_domain *domain,
368 				       unsigned int virq, unsigned int nr_irqs)
369 {
370 	int i;
371 
372 	for (i = 0; i < nr_irqs; i++) {
373 		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
374 
375 		irq_set_handler(virq + i, NULL);
376 		irq_domain_reset_irq_data(d);
377 	}
378 }
379 
/* Ops for the per-port interrupt domain (single-cell hwirq encoding) */
static const struct irq_domain_ops apple_port_irq_domain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= apple_port_irq_domain_alloc,
	.free		= apple_port_irq_domain_free,
};
385 
/*
 * Chained handler for the port's summary interrupt: demultiplex
 * PORT_INTSTAT and dispatch each pending bit into the port domain.
 */
static void apple_port_irq_handler(struct irq_desc *desc)
{
	struct apple_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long stat;
	int i;

	chained_irq_enter(chip, desc);

	stat = readl_relaxed(port->base + PORT_INTSTAT);

	/* The status register is 32 bits wide */
	for_each_set_bit(i, &stat, 32)
		generic_handle_domain_irq(port->domain, i);

	chained_irq_exit(chip, desc);
}
402 
/*
 * Wire up a port's interrupt infrastructure: map the port's summary
 * interrupt, create the per-port IRQ domain, quiesce the hardware, and
 * configure MSI delivery to the doorbell address.
 *
 * Returns 0 on success, -ENXIO if the summary interrupt cannot be
 * mapped, or -ENOMEM if the domain cannot be created.
 */
static int apple_pcie_port_setup_irq(struct apple_pcie_port *port)
{
	struct fwnode_handle *fwnode = &port->np->fwnode;
	struct apple_pcie *pcie = port->pcie;
	unsigned int irq;
	u32 val = 0;

	/* FIXME: consider moving each interrupt under each port */
	irq = irq_of_parse_and_map(to_of_node(dev_fwnode(port->pcie->dev)),
				   port->idx);
	if (!irq)
		return -ENXIO;

	/* One domain entry per PORT_INTSTAT bit */
	port->domain = irq_domain_create_linear(fwnode, 32,
						&apple_port_irq_domain_ops,
						port);
	if (!port->domain)
		return -ENOMEM;

	/* Disable all interrupts */
	writel_relaxed(~0, port->base + PORT_INTMSK);
	writel_relaxed(~0, port->base + PORT_INTSTAT);
	writel_relaxed(~0, port->base + PORT_LINKCMDSTS);

	irq_set_chained_handler_and_data(irq, apple_port_irq_handler, port);

	/* Configure MSI base address */
	BUILD_BUG_ON(upper_32_bits(DOORBELL_ADDR));
	writel_relaxed(lower_32_bits(DOORBELL_ADDR),
		       port->base + pcie->hw->port_msiaddr);
	if (pcie->hw->port_msiaddr_hi)
		writel_relaxed(0, port->base + pcie->hw->port_msiaddr_hi);

	/* Enable MSIs, shared between all ports */
	if (pcie->hw->port_msimap) {
		/* Identity-map each MSI onto its target vector */
		for (int i = 0; i < pcie->nvecs; i++)
			writel_relaxed(FIELD_PREP(PORT_MSIMAP_TARGET, i) |
				       PORT_MSIMAP_ENABLE,
				       port->base + pcie->hw->port_msimap + 4 * i);
	} else {
		/* Older HW: program base vector and log2 vector count */
		writel_relaxed(0, port->base + PORT_MSIBASE);
		val = ilog2(pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT;
	}

	writel_relaxed(val | PORT_MSICFG_EN, port->base + PORT_MSICFG);
	return 0;
}
450 
apple_pcie_port_irq(int irq,void * data)451 static irqreturn_t apple_pcie_port_irq(int irq, void *data)
452 {
453 	struct apple_pcie_port *port = data;
454 	unsigned int hwirq = irq_domain_get_irq_data(port->domain, irq)->hwirq;
455 
456 	switch (hwirq) {
457 	case PORT_INT_LINK_UP:
458 		dev_info_ratelimited(port->pcie->dev, "Link up on %pOF\n",
459 				     port->np);
460 		complete_all(&port->pcie->event);
461 		break;
462 	case PORT_INT_LINK_DOWN:
463 		dev_info_ratelimited(port->pcie->dev, "Link down on %pOF\n",
464 				     port->np);
465 		break;
466 	default:
467 		return IRQ_NONE;
468 	}
469 
470 	return IRQ_HANDLED;
471 }
472 
apple_pcie_port_register_irqs(struct apple_pcie_port * port)473 static int apple_pcie_port_register_irqs(struct apple_pcie_port *port)
474 {
475 	static struct {
476 		unsigned int	hwirq;
477 		const char	*name;
478 	} port_irqs[] = {
479 		{ PORT_INT_LINK_UP,	"Link up",	},
480 		{ PORT_INT_LINK_DOWN,	"Link down",	},
481 	};
482 	int i;
483 
484 	for (i = 0; i < ARRAY_SIZE(port_irqs); i++) {
485 		struct irq_fwspec fwspec = {
486 			.fwnode		= &port->np->fwnode,
487 			.param_count	= 1,
488 			.param		= {
489 				[0]	= port_irqs[i].hwirq,
490 			},
491 		};
492 		unsigned int irq;
493 		int ret;
494 
495 		irq = irq_domain_alloc_irqs(port->domain, 1, NUMA_NO_NODE,
496 					    &fwspec);
497 		if (WARN_ON(!irq))
498 			continue;
499 
500 		ret = request_irq(irq, apple_pcie_port_irq, 0,
501 				  port_irqs[i].name, port);
502 		WARN_ON(ret);
503 	}
504 
505 	return 0;
506 }
507 
/*
 * Bring up the reference clocks for a port: request REFCLK0/1 from the
 * PHY with a req/ack handshake, then enable the clock outputs. On SoCs
 * with a PHY_LANE_CTL register, the handshake must be bracketed by
 * setting/clearing the CFGACC bit.
 *
 * Returns 0 on success or -ETIMEDOUT if an ack never arrives.
 */
static int apple_pcie_setup_refclk(struct apple_pcie *pcie,
				   struct apple_pcie_port *port)
{
	u32 stat;
	int res;

	if (pcie->hw->phy_lane_ctl)
		rmw_set(PHY_LANE_CTL_CFGACC, port->phy + pcie->hw->phy_lane_ctl);

	/* Request REFCLK0 and wait for the PHY to acknowledge */
	rmw_set(PHY_LANE_CFG_REFCLK0REQ, port->phy + PHY_LANE_CFG);

	res = readl_relaxed_poll_timeout(port->phy + PHY_LANE_CFG,
					 stat, stat & PHY_LANE_CFG_REFCLK0ACK,
					 100, 50000);
	if (res < 0)
		return res;

	/* Same handshake for REFCLK1 */
	rmw_set(PHY_LANE_CFG_REFCLK1REQ, port->phy + PHY_LANE_CFG);
	res = readl_relaxed_poll_timeout(port->phy + PHY_LANE_CFG,
					 stat, stat & PHY_LANE_CFG_REFCLK1ACK,
					 100, 50000);

	if (res < 0)
		return res;

	if (pcie->hw->phy_lane_ctl)
		rmw_clear(PHY_LANE_CTL_CFGACC, port->phy + pcie->hw->phy_lane_ctl);

	rmw_set(PHY_LANE_CFG_REFCLKEN, port->phy + PHY_LANE_CFG);

	if (pcie->hw->port_refclk)
		rmw_set(PORT_REFCLK_EN, port->base + pcie->hw->port_refclk);

	return 0;
}
543 
port_rid2sid_addr(struct apple_pcie_port * port,int idx)544 static void __iomem *port_rid2sid_addr(struct apple_pcie_port *port, int idx)
545 {
546 	return port->base + port->pcie->hw->port_rid2sid + 4 * idx;
547 }
548 
/*
 * Write a RID2SID table entry and return what the hardware holds
 * afterwards; the read-back also ensures the write has completed.
 */
static u32 apple_pcie_rid2sid_write(struct apple_pcie_port *port,
				    int idx, u32 val)
{
	void __iomem *addr = port_rid2sid_addr(port, idx);

	writel_relaxed(val, addr);
	return readl_relaxed(addr);
}
556 
/*
 * Bring up one root port described by DT node @np: map its registers
 * and PHY, sequence PERST#/refclk per the PCIe CEM timing rules, set up
 * interrupts, size the RID2SID table, and start link training.
 *
 * Returns 0 on success (even if the link does not come up, which is
 * only warned about) or a negative errno on a hard failure.
 */
static int apple_pcie_setup_port(struct apple_pcie *pcie,
				 struct device_node *np)
{
	struct platform_device *platform = to_platform_device(pcie->dev);
	struct apple_pcie_port *port;
	struct gpio_desc *reset;
	struct resource *res;
	char name[16];
	u32 stat, idx;
	int ret, i;

	reset = devm_fwnode_gpiod_get(pcie->dev, of_fwnode_handle(np), "reset",
				      GPIOD_OUT_LOW, "PERST#");
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	port = devm_kzalloc(pcie->dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->sid_map = devm_bitmap_zalloc(pcie->dev, pcie->hw->max_rid2sid, GFP_KERNEL);
	if (!port->sid_map)
		return -ENOMEM;

	ret = of_property_read_u32_index(np, "reg", 0, &idx);
	if (ret)
		return ret;

	/* Use the first reg entry to work out the port index */
	port->idx = idx >> 11;
	port->pcie = pcie;
	port->np = np;

	raw_spin_lock_init(&port->lock);

	/* Prefer a named "portN" resource, fall back to positional lookup */
	snprintf(name, sizeof(name), "port%d", port->idx);
	res = platform_get_resource_byname(platform, IORESOURCE_MEM, name);
	if (!res)
		res = platform_get_resource(platform, IORESOURCE_MEM, port->idx + 2);

	port->base = devm_ioremap_resource(&platform->dev, res);
	if (IS_ERR(port->base))
		return PTR_ERR(port->base);

	/* PHY registers: named "phyN" resource, or the default core offset */
	snprintf(name, sizeof(name), "phy%d", port->idx);
	res = platform_get_resource_byname(platform, IORESOURCE_MEM, name);
	if (res)
		port->phy = devm_ioremap_resource(&platform->dev, res);
	else
		port->phy = pcie->base + CORE_PHY_DEFAULT_BASE(port->idx);

	rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);

	/* Assert PERST# before setting up the clock */
	gpiod_set_value_cansleep(reset, 1);

	ret = apple_pcie_setup_refclk(pcie, port);
	if (ret < 0)
		return ret;

	/* The minimal Tperst-clk value is 100us (PCIe CEM r5.0, 2.9.2) */
	usleep_range(100, 200);

	/* Deassert PERST# */
	rmw_set(PORT_PERST_OFF, port->base + pcie->hw->port_perst);
	gpiod_set_value_cansleep(reset, 0);

	/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
	msleep(100);

	ret = readl_relaxed_poll_timeout(port->base + PORT_STATUS, stat,
					 stat & PORT_STATUS_READY, 100, 250000);
	if (ret < 0) {
		dev_err(pcie->dev, "port %pOF ready wait timeout\n", np);
		return ret;
	}

	/* Re-enable clock gating now that the port is up */
	if (pcie->hw->port_refclk)
		rmw_clear(PORT_REFCLK_CGDIS, port->base + pcie->hw->port_refclk);
	else
		rmw_set(PHY_LANE_CFG_REFCLKCGEN, port->phy + PHY_LANE_CFG);

	rmw_clear(PORT_APPCLK_CGDIS, port->base + PORT_APPCLK);

	ret = apple_pcie_port_setup_irq(port);
	if (ret)
		return ret;

	/* Reset all RID/SID mappings, and check for RAZ/WI registers */
	for (i = 0; i < pcie->hw->max_rid2sid; i++) {
		if (apple_pcie_rid2sid_write(port, i, 0xbad1d) != 0xbad1d)
			break;
		apple_pcie_rid2sid_write(port, i, 0);
	}

	dev_dbg(pcie->dev, "%pOF: %d RID/SID mapping entries\n", np, i);

	port->sid_map_sz = i;

	list_add_tail(&port->entry, &pcie->ports);
	init_completion(&pcie->event);

	/* In the success path, we keep a reference to np around */
	of_node_get(np);

	ret = apple_pcie_port_register_irqs(port);
	WARN_ON(ret);

	/* Kick off link training; the link-up IRQ completes pcie->event */
	writel_relaxed(PORT_LTSSMCTL_START, port->base + PORT_LTSSMCTL);

	if (!wait_for_completion_timeout(&pcie->event, HZ / 10))
		dev_warn(pcie->dev, "%pOF link didn't come up\n", np);

	return 0;
}
672 
/* Parent ops for the per-device PCI/MSI domains created by the MSI library */
static const struct msi_parent_ops apple_msi_parent_ops = {
	.supported_flags	= (MSI_GENERIC_FLAGS_MASK	|
				   MSI_FLAG_PCI_MSIX		|
				   MSI_FLAG_MULTI_PCI_MSI),
	.required_flags		= (MSI_FLAG_USE_DEF_DOM_OPS	|
				   MSI_FLAG_USE_DEF_CHIP_OPS	|
				   MSI_FLAG_PCI_MSI_MASK_PARENT),
	.chip_flags		= MSI_CHIP_FLAG_SET_EOI,
	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};
684 
/*
 * Parse the "msi-ranges" property to find the wired interrupt range
 * backing the MSIs, allocate the vector bitmap, and create the MSI
 * parent domain on top of the wired domain.
 *
 * Returns 0 on success or a negative errno.
 */
static int apple_msi_init(struct apple_pcie *pcie)
{
	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
	struct irq_domain_info info = {
		.fwnode		= fwnode,
		.ops		= &apple_msi_domain_ops,
		.size		= pcie->nvecs,
		.host_data	= pcie,
	};
	struct of_phandle_args args = {};
	int ret;

	ret = of_parse_phandle_with_args(to_of_node(fwnode), "msi-ranges",
					 "#interrupt-cells", 0, &args);
	if (ret)
		return ret;

	/* The cell after the interrupt specifier holds the vector count */
	ret = of_property_read_u32_index(to_of_node(fwnode), "msi-ranges",
					 args.args_count + 1, &pcie->nvecs);
	if (ret)
		return ret;

	/*
	 * NOTE(review): args.np holds a reference from
	 * of_parse_phandle_with_args() that is never dropped here, even on
	 * the error path above — presumably intentional since the fwspec
	 * keeps using the node for the device's lifetime; verify.
	 */
	of_phandle_args_to_fwspec(args.np, args.args, args.args_count,
				  &pcie->fwspec);

	pcie->bitmap = devm_bitmap_zalloc(pcie->dev, pcie->nvecs, GFP_KERNEL);
	if (!pcie->bitmap)
		return -ENOMEM;

	info.parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED);
	if (!info.parent) {
		dev_err(pcie->dev, "failed to find parent domain\n");
		return -ENXIO;
	}

	if (!msi_create_parent_irq_domain(&info, &apple_msi_parent_ops)) {
		dev_err(pcie->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}
	return 0;
}
726 
apple_pcie_register(struct apple_pcie * pcie)727 static void apple_pcie_register(struct apple_pcie *pcie)
728 {
729 	guard(mutex)(&pcie_list_lock);
730 
731 	list_add_tail(&pcie->entry, &pcie_list);
732 }
733 
apple_pcie_unregister(struct apple_pcie * pcie)734 static void apple_pcie_unregister(struct apple_pcie *pcie)
735 {
736 	guard(mutex)(&pcie_list_lock);
737 
738 	list_del(&pcie->entry);
739 }
740 
apple_pcie_lookup(struct device * dev)741 static struct apple_pcie *apple_pcie_lookup(struct device *dev)
742 {
743 	struct apple_pcie *pcie;
744 
745 	guard(mutex)(&pcie_list_lock);
746 
747 	list_for_each_entry(pcie, &pcie_list, entry) {
748 		if (pcie->dev == dev)
749 			return pcie;
750 	}
751 
752 	return NULL;
753 }
754 
apple_pcie_get_port(struct pci_dev * pdev)755 static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev)
756 {
757 	struct pci_config_window *cfg = pdev->sysdata;
758 	struct apple_pcie *pcie;
759 	struct pci_dev *port_pdev;
760 	struct apple_pcie_port *port;
761 
762 	pcie = apple_pcie_lookup(cfg->parent);
763 	if (WARN_ON(!pcie))
764 		return NULL;
765 
766 	/* Find the root port this device is on */
767 	port_pdev = pcie_find_root_port(pdev);
768 
769 	/* If finding the port itself, nothing to do */
770 	if (WARN_ON(!port_pdev) || pdev == port_pdev)
771 		return NULL;
772 
773 	list_for_each_entry(port, &pcie->ports, entry) {
774 		if (port->idx == PCI_SLOT(port_pdev->devfn))
775 			return port;
776 	}
777 
778 	return NULL;
779 }
780 
/*
 * Host-bridge enable_device hook: install a RID -> SID translation for
 * @pdev in its root port's RID2SID table, using the SID obtained from
 * the DT iommu-map.
 *
 * Returns 0 on success (or when the device needs no mapping), -ENOSPC
 * if the table is full, or the of_map_id() error.
 */
static int apple_pcie_enable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
{
	u32 sid, rid = pci_dev_id(pdev);
	struct apple_pcie_port *port;
	int idx, err;

	port = apple_pcie_get_port(pdev);
	if (!port)
		return 0;

	dev_dbg(&pdev->dev, "added to bus %s, index %d\n",
		pci_name(pdev->bus->self), port->idx);

	err = of_map_id(port->pcie->dev->of_node, rid, "iommu-map",
			"iommu-map-mask", NULL, &sid);
	if (err)
		return err;

	mutex_lock(&port->pcie->lock);

	idx = bitmap_find_free_region(port->sid_map, port->sid_map_sz, 0);
	if (idx >= 0) {
		apple_pcie_rid2sid_write(port, idx,
					 PORT_RID2SID_VALID |
					 (sid << PORT_RID2SID_SID_SHIFT) | rid);

		dev_dbg(&pdev->dev, "mapping RID%x to SID%x (index %d)\n",
			rid, sid, idx);
	}

	mutex_unlock(&port->pcie->lock);

	return idx >= 0 ? 0 : -ENOSPC;
}
815 
apple_pcie_disable_device(struct pci_host_bridge * bridge,struct pci_dev * pdev)816 static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
817 {
818 	struct apple_pcie_port *port;
819 	u32 rid = pci_dev_id(pdev);
820 	int idx;
821 
822 	port = apple_pcie_get_port(pdev);
823 	if (!port)
824 		return;
825 
826 	mutex_lock(&port->pcie->lock);
827 
828 	for_each_set_bit(idx, port->sid_map, port->sid_map_sz) {
829 		u32 val;
830 
831 		val = readl_relaxed(port_rid2sid_addr(port, idx));
832 		if ((val & 0xffff) == rid) {
833 			apple_pcie_rid2sid_write(port, idx, 0);
834 			bitmap_release_region(port->sid_map, idx, 0);
835 			dev_dbg(&pdev->dev, "Released %x (%d)\n", val, idx);
836 			break;
837 		}
838 	}
839 
840 	mutex_unlock(&port->pcie->lock);
841 }
842 
apple_pcie_init(struct pci_config_window * cfg)843 static int apple_pcie_init(struct pci_config_window *cfg)
844 {
845 	struct device *dev = cfg->parent;
846 	struct apple_pcie *pcie;
847 	int ret;
848 
849 	pcie = apple_pcie_lookup(dev);
850 	if (WARN_ON(!pcie))
851 		return -ENOENT;
852 
853 	for_each_available_child_of_node_scoped(dev->of_node, of_port) {
854 		ret = apple_pcie_setup_port(pcie, of_port);
855 		if (ret) {
856 			dev_err(dev, "Port %pOF setup fail: %d\n", of_port, ret);
857 			return ret;
858 		}
859 	}
860 
861 	return 0;
862 }
863 
/* ECAM ops: standard generic config accessors plus our init/device hooks */
static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
	.init		= apple_pcie_init,
	.enable_device	= apple_pcie_enable_device,
	.disable_device	= apple_pcie_disable_device,
	.pci_ops	= {
		.map_bus	= pci_ecam_map_bus,
		.read		= pci_generic_config_read,
		.write		= pci_generic_config_write,
	}
};
874 
/*
 * Probe: allocate the bridge state, map the core register block, set up
 * the MSI infrastructure, then hand over to the common ECAM host code
 * (which will call apple_pcie_init() and thus set up the ports).
 */
static int apple_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct apple_pcie *pcie;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pcie->dev = dev;
	pcie->hw = of_device_get_match_data(dev);
	if (!pcie->hw)
		return -ENODEV;
	/* Resource 0 is the ECAM window; resource 1 is the RC/core block */
	pcie->base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	mutex_init(&pcie->lock);
	INIT_LIST_HEAD(&pcie->ports);

	ret = apple_msi_init(pcie);
	if (ret)
		return ret;

	/* Must be registered before pci_host_common_init() triggers lookup */
	apple_pcie_register(pcie);

	ret = pci_host_common_init(pdev, &apple_pcie_cfg_ecam_ops);
	if (ret)
		apple_pcie_unregister(pcie);

	return ret;
}
908 
/* Match table: the more specific t6020 entry must precede the generic one */
static const struct of_device_id apple_pcie_of_match[] = {
	{ .compatible = "apple,t6020-pcie",	.data = &t602x_hw },
	{ .compatible = "apple,pcie",		.data = &t8103_hw },
	{ }
};
MODULE_DEVICE_TABLE(of, apple_pcie_of_match);
915 
/* Platform driver glue; unbinding is suppressed as teardown is not supported */
static struct platform_driver apple_pcie_driver = {
	.probe	= apple_pcie_probe,
	.driver	= {
		.name			= "pcie-apple",
		.of_match_table		= apple_pcie_of_match,
		.suppress_bind_attrs	= true,
	},
};
module_platform_driver(apple_pcie_driver);

MODULE_DESCRIPTION("Apple PCIe host bridge driver");
MODULE_LICENSE("GPL v2");
928