xref: /linux/drivers/pci/controller/dwc/pcie-designware-host.c (revision 0bd0a41a5120f78685a132834865b0a631b9026a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Synopsys DesignWare PCIe host controller driver
4  *
5  * Copyright (C) 2013 Samsung Electronics Co., Ltd.
6  *		https://www.samsung.com
7  *
8  * Author: Jingoo Han <jg1.han@samsung.com>
9  */
10 
11 #include <linux/iopoll.h>
12 #include <linux/irqchip/chained_irq.h>
13 #include <linux/irqchip/irq-msi-lib.h>
14 #include <linux/irqdomain.h>
15 #include <linux/msi.h>
16 #include <linux/of_address.h>
17 #include <linux/of_pci.h>
18 #include <linux/pci_regs.h>
19 #include <linux/platform_device.h>
20 
21 #include "../../pci.h"
22 #include "pcie-designware.h"
23 
24 static struct pci_ops dw_pcie_ops;
25 static struct pci_ops dw_child_pcie_ops;
26 
27 #define DW_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS		| \
28 				    MSI_FLAG_USE_DEF_CHIP_OPS		| \
29 				    MSI_FLAG_NO_AFFINITY		| \
30 				    MSI_FLAG_PCI_MSI_MASK_PARENT)
31 #define DW_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI		| \
32 				     MSI_FLAG_PCI_MSIX			| \
33 				     MSI_GENERIC_FLAGS_MASK)
34 
35 static const struct msi_parent_ops dw_pcie_msi_parent_ops = {
36 	.required_flags		= DW_PCIE_MSI_FLAGS_REQUIRED,
37 	.supported_flags	= DW_PCIE_MSI_FLAGS_SUPPORTED,
38 	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
39 	.chip_flags		= MSI_CHIP_FLAG_SET_ACK,
40 	.prefix			= "DW-",
41 	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
42 };
43 
44 /* MSI int handler */
45 irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
46 {
47 	int i, pos;
48 	unsigned long val;
49 	u32 status, num_ctrls;
50 	irqreturn_t ret = IRQ_NONE;
51 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
52 
53 	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
54 
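	/*
	 * Each MSI controller owns an ENABLE/MASK/STATUS register block
	 * (spaced MSI_REG_CTRL_BLOCK_SIZE apart) and serves up to
	 * MAX_MSI_IRQS_PER_CTRL vectors, so a set status bit 'pos' in
	 * controller 'i' maps to hwirq (i * MAX_MSI_IRQS_PER_CTRL) + pos.
	 */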
55 	for (i = 0; i < num_ctrls; i++) {
56 		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
57 					   (i * MSI_REG_CTRL_BLOCK_SIZE));
58 		if (!status)
59 			continue;
60 
61 		ret = IRQ_HANDLED;
62 		val = status;
63 		pos = 0;
64 		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
65 					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
66 			generic_handle_domain_irq(pp->irq_domain,
67 						  (i * MAX_MSI_IRQS_PER_CTRL) +
68 						  pos);
69 			pos++;
70 		}
71 	}
72 
73 	return ret;
74 }
75 
76 /* Chained MSI interrupt service routine */
77 static void dw_chained_msi_isr(struct irq_desc *desc)
78 {
79 	struct irq_chip *chip = irq_desc_get_chip(desc);
80 	struct dw_pcie_rp *pp;
81 
82 	chained_irq_enter(chip, desc);
83 
84 	pp = irq_desc_get_handler_data(desc);
85 	dw_handle_msi_irq(pp);
86 
87 	chained_irq_exit(chip, desc);
88 }
89 
90 static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
91 {
92 	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
93 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
94 	u64 msi_target;
95 
96 	msi_target = (u64)pp->msi_data;
97 
98 	msg->address_lo = lower_32_bits(msi_target);
99 	msg->address_hi = upper_32_bits(msi_target);
100 
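	/*
	 * The MSI message data is simply the hwirq number; the iMSI-RX logic
	 * decodes it back into a controller/vector status bit
	 * (see dw_handle_msi_irq()).
	 */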
101 	msg->data = d->hwirq;
102 
103 	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
104 		(int)d->hwirq, msg->address_hi, msg->address_lo);
105 }
106 
107 static void dw_pci_bottom_mask(struct irq_data *d)
108 {
109 	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
110 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
111 	unsigned int res, bit, ctrl;
112 	unsigned long flags;
113 
114 	raw_spin_lock_irqsave(&pp->lock, flags);
115 
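	/*
	 * Decompose the hwirq: 'ctrl' selects the per-controller register
	 * block at offset 'res', 'bit' is the vector within that block.
	 */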
116 	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
117 	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
118 	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
119 
120 	pp->irq_mask[ctrl] |= BIT(bit);
121 	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);
122 
123 	raw_spin_unlock_irqrestore(&pp->lock, flags);
124 }
125 
126 static void dw_pci_bottom_unmask(struct irq_data *d)
127 {
128 	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
129 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
130 	unsigned int res, bit, ctrl;
131 	unsigned long flags;
132 
133 	raw_spin_lock_irqsave(&pp->lock, flags);
134 
135 	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
136 	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
137 	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
138 
139 	pp->irq_mask[ctrl] &= ~BIT(bit);
140 	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);
141 
142 	raw_spin_unlock_irqrestore(&pp->lock, flags);
143 }
144 
145 static void dw_pci_bottom_ack(struct irq_data *d)
146 {
147 	struct dw_pcie_rp *pp  = irq_data_get_irq_chip_data(d);
148 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
149 	unsigned int res, bit, ctrl;
150 
151 	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
152 	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
153 	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
154 
155 	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
156 }
157 
158 static struct irq_chip dw_pci_msi_bottom_irq_chip = {
159 	.name = "DWPCI-MSI",
160 	.irq_ack = dw_pci_bottom_ack,
161 	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
162 	.irq_mask = dw_pci_bottom_mask,
163 	.irq_unmask = dw_pci_bottom_unmask,
164 };
165 
166 static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
167 				    unsigned int virq, unsigned int nr_irqs,
168 				    void *args)
169 {
170 	struct dw_pcie_rp *pp = domain->host_data;
171 	unsigned long flags;
172 	u32 i;
173 	int bit;
174 
175 	raw_spin_lock_irqsave(&pp->lock, flags);
176 
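	/*
	 * Multi-MSI needs a naturally aligned, power-of-two block of
	 * vectors, hence the order_base_2(nr_irqs) region allocation.
	 */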
177 	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
178 				      order_base_2(nr_irqs));
179 
180 	raw_spin_unlock_irqrestore(&pp->lock, flags);
181 
182 	if (bit < 0)
183 		return -ENOSPC;
184 
185 	for (i = 0; i < nr_irqs; i++)
186 		irq_domain_set_info(domain, virq + i, bit + i,
187 				    pp->msi_irq_chip,
188 				    pp, handle_edge_irq,
189 				    NULL, NULL);
190 
191 	return 0;
192 }
193 
194 static void dw_pcie_irq_domain_free(struct irq_domain *domain,
195 				    unsigned int virq, unsigned int nr_irqs)
196 {
197 	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
198 	struct dw_pcie_rp *pp = domain->host_data;
199 	unsigned long flags;
200 
201 	raw_spin_lock_irqsave(&pp->lock, flags);
202 
203 	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
204 			      order_base_2(nr_irqs));
205 
206 	raw_spin_unlock_irqrestore(&pp->lock, flags);
207 }
208 
209 static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
210 	.alloc	= dw_pcie_irq_domain_alloc,
211 	.free	= dw_pcie_irq_domain_free,
212 };
213 
214 int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
215 {
216 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
217 	struct irq_domain_info info = {
218 		.fwnode		= dev_fwnode(pci->dev),
219 		.ops		= &dw_pcie_msi_domain_ops,
220 		.size		= pp->num_vectors,
221 		.host_data	= pp,
222 	};
223 
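	/*
	 * Register as an MSI parent domain; per-device PCI/MSI domains are
	 * then built on top of it according to dw_pcie_msi_parent_ops.
	 */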
224 	pp->irq_domain = msi_create_parent_irq_domain(&info, &dw_pcie_msi_parent_ops);
225 	if (!pp->irq_domain) {
226 		dev_err(pci->dev, "Failed to create IRQ domain\n");
227 		return -ENOMEM;
228 	}
229 
230 	return 0;
231 }
232 
233 void dw_pcie_free_msi(struct dw_pcie_rp *pp)
234 {
235 	u32 ctrl;
236 
237 	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
238 		if (pp->msi_irq[ctrl] > 0)
239 			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
240 							 NULL, NULL);
241 	}
242 
243 	irq_domain_remove(pp->irq_domain);
244 }
245 EXPORT_SYMBOL_GPL(dw_pcie_free_msi);
246 
247 void dw_pcie_msi_init(struct dw_pcie_rp *pp)
248 {
249 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
250 	u64 msi_target = (u64)pp->msi_data;
251 	u32 ctrl, num_ctrls;
252 
253 	if (!pci_msi_enabled() || !pp->has_msi_ctrl)
254 		return;
255 
256 	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
257 
258 	/* Initialize IRQ Status array */
259 	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
260 		dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
261 				    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
262 				    pp->irq_mask[ctrl]);
263 		dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
264 				    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
265 				    ~0);
266 	}
267 
268 	/* Program the MSI target address (pp->msi_data) */
269 	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
270 	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
271 }
272 EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
273 
274 static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
275 {
276 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
277 	struct device *dev = pci->dev;
278 	struct platform_device *pdev = to_platform_device(dev);
279 	u32 ctrl, max_vectors;
280 	int irq;
281 
282 	/* Parse any "msiX" IRQs described in the devicetree */
283 	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
284 		char msi_name[] = "msiX";
285 
286 		msi_name[3] = '0' + ctrl;
287 		irq = platform_get_irq_byname_optional(pdev, msi_name);
288 		if (irq == -ENXIO)
289 			break;
290 		if (irq < 0)
291 			return dev_err_probe(dev, irq,
292 					     "Failed to parse MSI IRQ '%s'\n",
293 					     msi_name);
294 
295 		pp->msi_irq[ctrl] = irq;
296 	}
297 
298 	/* If no "msiX" IRQs were found, the caller should fall back to the "msi" IRQ */
299 	if (ctrl == 0)
300 		return -ENXIO;
301 
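	/* Each split "msiN" IRQ serves one controller, i.e. MAX_MSI_IRQS_PER_CTRL vectors */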
302 	max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
303 	if (pp->num_vectors > max_vectors) {
304 		dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
305 			 max_vectors);
306 		pp->num_vectors = max_vectors;
307 	}
308 	if (!pp->num_vectors)
309 		pp->num_vectors = max_vectors;
310 
311 	return 0;
312 }
313 
314 int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
315 {
316 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
317 	struct device *dev = pci->dev;
318 	struct platform_device *pdev = to_platform_device(dev);
319 	u64 *msi_vaddr = NULL;
320 	int ret;
321 	u32 ctrl, num_ctrls;
322 
323 	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
324 		pp->irq_mask[ctrl] = ~0;
325 
326 	if (!pp->msi_irq[0]) {
327 		ret = dw_pcie_parse_split_msi_irq(pp);
328 		if (ret < 0 && ret != -ENXIO)
329 			return ret;
330 	}
331 
332 	if (!pp->num_vectors)
333 		pp->num_vectors = MSI_DEF_NUM_VECTORS;
334 	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
335 
336 	if (!pp->msi_irq[0]) {
337 		pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
338 		if (pp->msi_irq[0] < 0) {
339 			pp->msi_irq[0] = platform_get_irq(pdev, 0);
340 			if (pp->msi_irq[0] < 0)
341 				return pp->msi_irq[0];
342 		}
343 	}
344 
345 	dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);
346 
347 	pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
348 
349 	ret = dw_pcie_allocate_domains(pp);
350 	if (ret)
351 		return ret;
352 
353 	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
354 		if (pp->msi_irq[ctrl] > 0)
355 			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
356 						    dw_chained_msi_isr, pp);
357 	}
358 
359 	/*
360 	 * Even though the iMSI-RX module supports 64-bit addresses, some
361 	 * peripheral PCIe devices may lack 64-bit message support. In
362 	 * order not to miss MSI TLPs from those devices, the MSI target
363 	 * address has to be within the lowest 4GB.
364 	 *
365 	 * Note: until a better alternative is found, the reservation is
366 	 * done by allocating from the artificially limited DMA-coherent
367 	 * memory.
368 	 */
369 	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
370 	if (!ret)
371 		msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
372 						GFP_KERNEL);
373 
374 	if (!msi_vaddr) {
375 		dev_warn(dev, "Failed to allocate 32-bit MSI address\n");
376 		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
377 		msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
378 						GFP_KERNEL);
379 		if (!msi_vaddr) {
380 			dev_err(dev, "Failed to allocate MSI address\n");
381 			dw_pcie_free_msi(pp);
382 			return -ENOMEM;
383 		}
384 	}
385 
386 	return 0;
387 }
388 EXPORT_SYMBOL_GPL(dw_pcie_msi_host_init);
389 
390 static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
391 {
392 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
393 	struct resource_entry *win;
394 	struct resource *res;
395 
396 	win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
397 	if (win) {
398 		res = devm_kzalloc(pci->dev, sizeof(*res), GFP_KERNEL);
399 		if (!res)
400 			return;
401 
402 		/*
403 		 * Allocate MSG TLP region of size 'region_align' at the end of
404 		 * the host bridge window.
405 		 */
406 		res->start = win->res->end - pci->region_align + 1;
407 		res->end = win->res->end;
408 		res->name = "msg";
409 		res->flags = win->res->flags | IORESOURCE_BUSY;
410 
411 		if (!devm_request_resource(pci->dev, win->res, res))
412 			pp->msg_res = res;
413 	}
414 }
415 
416 static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp)
417 {
418 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
419 	struct device *dev = pci->dev;
420 	struct platform_device *pdev = to_platform_device(dev);
421 	struct resource_entry *win;
422 	struct resource *res;
423 	int ret;
424 
425 	ret = dw_pcie_get_resources(pci);
426 	if (ret)
427 		return ret;
428 
429 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
430 	if (!res) {
431 		dev_err(dev, "Missing \"config\" reg space\n");
432 		return -ENODEV;
433 	}
434 
435 	pp->cfg0_size = resource_size(res);
436 	pp->cfg0_base = res->start;
437 
438 	pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
439 	if (IS_ERR(pp->va_cfg0_base))
440 		return PTR_ERR(pp->va_cfg0_base);
441 
442 	/* Get the I/O range from DT */
443 	win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_IO);
444 	if (win) {
445 		pp->io_size = resource_size(win->res);
446 		pp->io_bus_addr = win->res->start - win->offset;
447 		pp->io_base = pci_pio_to_address(win->res->start);
448 	}
449 
450 	/*
451 	 * visconti_pcie_cpu_addr_fixup() uses pp->io_base, so we have to
452 	 * call dw_pcie_parent_bus_offset() after setting pp->io_base.
453 	 */
454 	pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "config",
455 							   pp->cfg0_base);
456 	return 0;
457 }
458 
459 int dw_pcie_host_init(struct dw_pcie_rp *pp)
460 {
461 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
462 	struct device *dev = pci->dev;
463 	struct device_node *np = dev->of_node;
464 	struct pci_host_bridge *bridge;
465 	int ret;
466 
467 	raw_spin_lock_init(&pp->lock);
468 
469 	bridge = devm_pci_alloc_host_bridge(dev, 0);
470 	if (!bridge)
471 		return -ENOMEM;
472 
473 	pp->bridge = bridge;
474 
475 	ret = dw_pcie_host_get_resources(pp);
476 	if (ret)
477 		return ret;
478 
479 	/* Set default bus ops */
480 	bridge->ops = &dw_pcie_ops;
481 	bridge->child_ops = &dw_child_pcie_ops;
482 
483 	if (pp->ops->init) {
484 		ret = pp->ops->init(pp);
485 		if (ret)
486 			return ret;
487 	}
488 
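	/*
	 * Use the integrated MSI controller only if the glue driver does not
	 * provide its own msi_init and the DT does not route MSIs to an
	 * external controller via "msi-parent" or "msi-map".
	 */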
489 	if (pci_msi_enabled()) {
490 		pp->has_msi_ctrl = !(pp->ops->msi_init ||
491 				     of_property_present(np, "msi-parent") ||
492 				     of_property_present(np, "msi-map"));
493 
494 		/*
495 		 * For the has_msi_ctrl case, the default assignment is handled
496 		 * in dw_pcie_msi_host_init().
497 		 */
498 		if (!pp->has_msi_ctrl && !pp->num_vectors) {
499 			pp->num_vectors = MSI_DEF_NUM_VECTORS;
500 		} else if (pp->num_vectors > MAX_MSI_IRQS) {
501 			dev_err(dev, "Invalid number of vectors\n");
502 			ret = -EINVAL;
503 			goto err_deinit_host;
504 		}
505 
506 		if (pp->ops->msi_init) {
507 			ret = pp->ops->msi_init(pp);
508 			if (ret < 0)
509 				goto err_deinit_host;
510 		} else if (pp->has_msi_ctrl) {
511 			ret = dw_pcie_msi_host_init(pp);
512 			if (ret < 0)
513 				goto err_deinit_host;
514 		}
515 	}
516 
517 	dw_pcie_version_detect(pci);
518 
519 	dw_pcie_iatu_detect(pci);
520 
521 	if (pci->num_lanes < 1)
522 		pci->num_lanes = dw_pcie_link_get_max_link_width(pci);
523 
524 	ret = of_pci_get_equalization_presets(dev, &pp->presets, pci->num_lanes);
525 	if (ret)
526 		goto err_free_msi;
527 
528 	/*
529 	 * Allocate the resource for MSG TLP before programming the iATU
530 	 * outbound window in dw_pcie_setup_rc(). Since the allocation depends
531 	 * on the value of 'region_align', this has to be done after
532 	 * dw_pcie_iatu_detect().
533 	 *
534 	 * Glue drivers need to set 'use_atu_msg' before dw_pcie_host_init() to
535 	 * make use of the generic MSG TLP implementation.
536 	 */
537 	if (pp->use_atu_msg)
538 		dw_pcie_host_request_msg_tlp_res(pp);
539 
540 	ret = dw_pcie_edma_detect(pci);
541 	if (ret)
542 		goto err_free_msi;
543 
544 	ret = dw_pcie_setup_rc(pp);
545 	if (ret)
546 		goto err_remove_edma;
547 
548 	if (!dw_pcie_link_up(pci)) {
549 		ret = dw_pcie_start_link(pci);
550 		if (ret)
551 			goto err_remove_edma;
552 	}
553 
554 	/*
555 	 * Note: Skip the link up delay only when a Link Up IRQ is present.
556 	 * If there is no Link Up IRQ, we should not bypass the delay
557 	 * because that would require users to manually rescan for devices.
558 	 */
559 	if (!pp->use_linkup_irq)
560 		/* Ignore errors, the link may come up later */
561 		dw_pcie_wait_for_link(pci);
562 
563 	bridge->sysdata = pp;
564 
565 	ret = pci_host_probe(bridge);
566 	if (ret)
567 		goto err_stop_link;
568 
569 	if (pp->ops->post_init)
570 		pp->ops->post_init(pp);
571 
572 	dwc_pcie_debugfs_init(pci, DW_PCIE_RC_TYPE);
573 
574 	return 0;
575 
576 err_stop_link:
577 	dw_pcie_stop_link(pci);
578 
579 err_remove_edma:
580 	dw_pcie_edma_remove(pci);
581 
582 err_free_msi:
583 	if (pp->has_msi_ctrl)
584 		dw_pcie_free_msi(pp);
585 
586 err_deinit_host:
587 	if (pp->ops->deinit)
588 		pp->ops->deinit(pp);
589 
590 	return ret;
591 }
592 EXPORT_SYMBOL_GPL(dw_pcie_host_init);
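/*
 * Minimal usage sketch (a hypothetical glue driver, not part of this file):
 * a platform driver typically embeds struct dw_pcie, fills in the device and
 * a dw_pcie_host_ops instance (here "foo_pcie_host_ops"), and then hands the
 * embedded dw_pcie_rp to dw_pcie_host_init() from its probe routine:
 *
 *	static int foo_pcie_probe(struct platform_device *pdev)
 *	{
 *		struct dw_pcie *pci;
 *
 *		pci = devm_kzalloc(&pdev->dev, sizeof(*pci), GFP_KERNEL);
 *		if (!pci)
 *			return -ENOMEM;
 *
 *		pci->dev = &pdev->dev;
 *		pci->pp.ops = &foo_pcie_host_ops;
 *
 *		return dw_pcie_host_init(&pci->pp);
 *	}
 */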
593 
594 void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
595 {
596 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
597 
598 	dwc_pcie_debugfs_deinit(pci);
599 
600 	pci_stop_root_bus(pp->bridge->bus);
601 	pci_remove_root_bus(pp->bridge->bus);
602 
603 	dw_pcie_stop_link(pci);
604 
605 	dw_pcie_edma_remove(pci);
606 
607 	if (pp->has_msi_ctrl)
608 		dw_pcie_free_msi(pp);
609 
610 	if (pp->ops->deinit)
611 		pp->ops->deinit(pp);
612 }
613 EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
614 
615 static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
616 						unsigned int devfn, int where)
617 {
618 	struct dw_pcie_rp *pp = bus->sysdata;
619 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
620 	struct dw_pcie_ob_atu_cfg atu = { 0 };
621 	int type, ret;
622 	u32 busdev;
623 
624 	/*
625 	 * Checking whether the link is up here is a last line of defense
626 	 * against platforms that forward errors on the system bus as
627 	 * SError upon PCI configuration transactions issued when the link
628 	 * is down. This check is racy by definition and does not stop
629 	 * the system from triggering an SError if the link goes down
630 	 * after this check is performed.
631 	 */
632 	if (!dw_pcie_link_up(pci))
633 		return NULL;
634 
635 	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
636 		 PCIE_ATU_FUNC(PCI_FUNC(devfn));
637 
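	/*
	 * Use Type 0 config TLPs for the bus directly below the Root Port
	 * and Type 1 for buses further downstream.
	 */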
638 	if (pci_is_root_bus(bus->parent))
639 		type = PCIE_ATU_TYPE_CFG0;
640 	else
641 		type = PCIE_ATU_TYPE_CFG1;
642 
643 	atu.type = type;
644 	atu.parent_bus_addr = pp->cfg0_base - pci->parent_bus_offset;
645 	atu.pci_addr = busdev;
646 	atu.size = pp->cfg0_size;
647 
648 	ret = dw_pcie_prog_outbound_atu(pci, &atu);
649 	if (ret)
650 		return NULL;
651 
652 	return pp->va_cfg0_base + where;
653 }
654 
655 static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
656 				 int where, int size, u32 *val)
657 {
658 	struct dw_pcie_rp *pp = bus->sysdata;
659 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
660 	struct dw_pcie_ob_atu_cfg atu = { 0 };
661 	int ret;
662 
663 	ret = pci_generic_config_read(bus, devfn, where, size, val);
664 	if (ret != PCIBIOS_SUCCESSFUL)
665 		return ret;
666 
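	/*
	 * If the CFG window is shared with I/O, restore the I/O translation
	 * that dw_pcie_other_conf_map_bus() replaced for this access.
	 */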
667 	if (pp->cfg0_io_shared) {
668 		atu.type = PCIE_ATU_TYPE_IO;
669 		atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
670 		atu.pci_addr = pp->io_bus_addr;
671 		atu.size = pp->io_size;
672 
673 		ret = dw_pcie_prog_outbound_atu(pci, &atu);
674 		if (ret)
675 			return PCIBIOS_SET_FAILED;
676 	}
677 
678 	return PCIBIOS_SUCCESSFUL;
679 }
680 
681 static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
682 				 int where, int size, u32 val)
683 {
684 	struct dw_pcie_rp *pp = bus->sysdata;
685 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
686 	struct dw_pcie_ob_atu_cfg atu = { 0 };
687 	int ret;
688 
689 	ret = pci_generic_config_write(bus, devfn, where, size, val);
690 	if (ret != PCIBIOS_SUCCESSFUL)
691 		return ret;
692 
693 	if (pp->cfg0_io_shared) {
694 		atu.type = PCIE_ATU_TYPE_IO;
695 		atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
696 		atu.pci_addr = pp->io_bus_addr;
697 		atu.size = pp->io_size;
698 
699 		ret = dw_pcie_prog_outbound_atu(pci, &atu);
700 		if (ret)
701 			return PCIBIOS_SET_FAILED;
702 	}
703 
704 	return PCIBIOS_SUCCESSFUL;
705 }
706 
707 static struct pci_ops dw_child_pcie_ops = {
708 	.map_bus = dw_pcie_other_conf_map_bus,
709 	.read = dw_pcie_rd_other_conf,
710 	.write = dw_pcie_wr_other_conf,
711 };
712 
713 void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
714 {
715 	struct dw_pcie_rp *pp = bus->sysdata;
716 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
717 
718 	if (PCI_SLOT(devfn) > 0)
719 		return NULL;
720 
721 	return pci->dbi_base + where;
722 }
723 EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);
724 
725 static struct pci_ops dw_pcie_ops = {
726 	.map_bus = dw_pcie_own_conf_map_bus,
727 	.read = pci_generic_config_read,
728 	.write = pci_generic_config_write,
729 };
730 
731 static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
732 {
733 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
734 	struct dw_pcie_ob_atu_cfg atu = { 0 };
735 	struct resource_entry *entry;
736 	int i, ret;
737 
738 	/* Note the very first outbound ATU is used for CFG IOs */
739 	if (!pci->num_ob_windows) {
740 		dev_err(pci->dev, "No outbound iATU found\n");
741 		return -EINVAL;
742 	}
743 
744 	/*
745 	 * Ensure all out/inbound windows are disabled before proceeding with
746 	 * the MEM/IO (dma-)ranges setups.
747 	 */
748 	for (i = 0; i < pci->num_ob_windows; i++)
749 		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);
750 
751 	for (i = 0; i < pci->num_ib_windows; i++)
752 		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i);
753 
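	/*
	 * Outbound window 0 stays reserved for CFG accesses, so MEM ranges
	 * are programmed starting from index 1 (note the pre-increment).
	 */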
754 	i = 0;
755 	resource_list_for_each_entry(entry, &pp->bridge->windows) {
756 		if (resource_type(entry->res) != IORESOURCE_MEM)
757 			continue;
758 
759 		if (pci->num_ob_windows <= ++i)
760 			break;
761 
762 		atu.index = i;
763 		atu.type = PCIE_ATU_TYPE_MEM;
764 		atu.parent_bus_addr = entry->res->start - pci->parent_bus_offset;
765 		atu.pci_addr = entry->res->start - entry->offset;
766 
767 		/* Adjust iATU size if MSG TLP region was allocated before */
768 		if (pp->msg_res && pp->msg_res->parent == entry->res)
769 			atu.size = resource_size(entry->res) -
770 					resource_size(pp->msg_res);
771 		else
772 			atu.size = resource_size(entry->res);
773 
774 		ret = dw_pcie_prog_outbound_atu(pci, &atu);
775 		if (ret) {
776 			dev_err(pci->dev, "Failed to set MEM range %pr\n",
777 				entry->res);
778 			return ret;
779 		}
780 	}
781 
782 	if (pp->io_size) {
783 		if (pci->num_ob_windows > ++i) {
784 			atu.index = i;
785 			atu.type = PCIE_ATU_TYPE_IO;
786 			atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
787 			atu.pci_addr = pp->io_bus_addr;
788 			atu.size = pp->io_size;
789 
790 			ret = dw_pcie_prog_outbound_atu(pci, &atu);
791 			if (ret) {
792 				dev_err(pci->dev, "Failed to set IO range %pr\n",
793 					entry->res);
794 				return ret;
795 			}
796 		} else {
797 			pp->cfg0_io_shared = true;
798 		}
799 	}
800 
801 	if (pci->num_ob_windows <= i)
802 		dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
803 			 pci->num_ob_windows);
804 
805 	pp->msg_atu_index = i;
806 
807 	i = 0;
808 	resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
809 		if (resource_type(entry->res) != IORESOURCE_MEM)
810 			continue;
811 
812 		if (pci->num_ib_windows <= i)
813 			break;
814 
815 		ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM,
816 					       entry->res->start,
817 					       entry->res->start - entry->offset,
818 					       resource_size(entry->res));
819 		if (ret) {
820 			dev_err(pci->dev, "Failed to set DMA range %pr\n",
821 				entry->res);
822 			return ret;
823 		}
824 	}
825 
826 	if (pci->num_ib_windows <= i)
827 		dev_warn(pci->dev, "Dma-ranges exceed inbound iATU size (%u)\n",
828 			 pci->num_ib_windows);
829 
830 	return 0;
831 }
832 
833 static void dw_pcie_program_presets(struct dw_pcie_rp *pp, enum pci_bus_speed speed)
834 {
835 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
836 	u8 lane_eq_offset, lane_reg_size, cap_id;
837 	u8 *presets;
838 	u32 cap;
839 	int i;
840 
841 	if (speed == PCIE_SPEED_8_0GT) {
842 		presets = (u8 *)pp->presets.eq_presets_8gts;
843 		lane_eq_offset = PCI_SECPCI_LE_CTRL;
844 		cap_id = PCI_EXT_CAP_ID_SECPCI;
845 		/* For the 8.0 GT/s data rate, each lane equalization control register is 16 bits wide */
846 		lane_reg_size = 0x2;
847 	} else if (speed == PCIE_SPEED_16_0GT) {
848 		presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_16GTS - 1];
849 		lane_eq_offset = PCI_PL_16GT_LE_CTRL;
850 		cap_id = PCI_EXT_CAP_ID_PL_16GT;
851 		lane_reg_size = 0x1;
852 	} else if (speed == PCIE_SPEED_32_0GT) {
853 		presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_32GTS - 1];
854 		lane_eq_offset = PCI_PL_32GT_LE_CTRL;
855 		cap_id = PCI_EXT_CAP_ID_PL_32GT;
856 		lane_reg_size = 0x1;
857 	} else if (speed == PCIE_SPEED_64_0GT) {
858 		presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_64GTS - 1];
859 		lane_eq_offset = PCI_PL_64GT_LE_CTRL;
860 		cap_id = PCI_EXT_CAP_ID_PL_64GT;
861 		lane_reg_size = 0x1;
862 	} else {
863 		return;
864 	}
865 
866 	if (presets[0] == PCI_EQ_RESV)
867 		return;
868 
869 	cap = dw_pcie_find_ext_capability(pci, cap_id);
870 	if (!cap)
871 		return;
872 
873 	/*
874 	 * Write preset values to the registers byte-by-byte for the given
875 	 * number of lanes and register size.
876 	 */
877 	for (i = 0; i < pci->num_lanes * lane_reg_size; i++)
878 		dw_pcie_writeb_dbi(pci, cap + lane_eq_offset + i, presets[i]);
879 }
880 
881 static void dw_pcie_config_presets(struct dw_pcie_rp *pp)
882 {
883 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
884 	enum pci_bus_speed speed = pcie_link_speed[pci->max_link_speed];
885 
886 	/*
887 	 * Lane equalization settings need to be applied for all data rates the
888 	 * controller supports and for all supported lanes.
889 	 */
890 
891 	if (speed >= PCIE_SPEED_8_0GT)
892 		dw_pcie_program_presets(pp, PCIE_SPEED_8_0GT);
893 
894 	if (speed >= PCIE_SPEED_16_0GT)
895 		dw_pcie_program_presets(pp, PCIE_SPEED_16_0GT);
896 
897 	if (speed >= PCIE_SPEED_32_0GT)
898 		dw_pcie_program_presets(pp, PCIE_SPEED_32_0GT);
899 
900 	if (speed >= PCIE_SPEED_64_0GT)
901 		dw_pcie_program_presets(pp, PCIE_SPEED_64_0GT);
902 }
903 
904 int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
905 {
906 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
907 	u32 val;
908 	int ret;
909 
910 	/*
911 	 * Enable write access to the DBI read-only registers so the configuration
912 	 * can be updated. Write permission is disabled again at the end of this function.
913 	 */
914 	dw_pcie_dbi_ro_wr_en(pci);
915 
916 	dw_pcie_setup(pci);
917 
918 	dw_pcie_msi_init(pp);
919 
920 	/* Setup RC BARs */
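	/*
	 * 0x4 marks BAR0 as a 64-bit memory BAR (PCI_BASE_ADDRESS_MEM_TYPE_64);
	 * BAR1 then provides the (zero) upper 32 address bits.
	 */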
921 	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
922 	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
923 
924 	/* Setup interrupt pins */
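	/* Set the Interrupt Pin field (byte 1 of this register) to 1, i.e. INTA */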
925 	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
926 	val &= 0xffff00ff;
927 	val |= 0x00000100;
928 	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
929 
930 	/* Setup bus numbers */
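	/* Primary bus = 0, secondary = 1, subordinate = 0xff; the latency timer byte is preserved */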
931 	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
932 	val &= 0xff000000;
933 	val |= 0x00ff0100;
934 	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);
935 
936 	/* Setup command register */
937 	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
938 	val &= 0xffff0000;
939 	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
940 		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
941 	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
942 
943 	dw_pcie_config_presets(pp);
944 	/*
945 	 * If the platform provides its own child bus config accesses, it means
946 	 * the platform uses its own address translation component rather than
947 	 * ATU, so we should not program the ATU here.
948 	 */
949 	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
950 		ret = dw_pcie_iatu_setup(pp);
951 		if (ret)
952 			return ret;
953 	}
954 
955 	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
956 
957 	/* Program correct class for RC */
958 	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
959 
960 	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
961 	val |= PORT_LOGIC_SPEED_CHANGE;
962 	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
963 
964 	dw_pcie_dbi_ro_wr_dis(pci);
965 
966 	return 0;
967 }
968 EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
969 
970 static int dw_pcie_pme_turn_off(struct dw_pcie *pci)
971 {
972 	struct dw_pcie_ob_atu_cfg atu = { 0 };
973 	void __iomem *mem;
974 	int ret;
975 
976 	if (pci->num_ob_windows <= pci->pp.msg_atu_index)
977 		return -ENOSPC;
978 
979 	if (!pci->pp.msg_res)
980 		return -ENOSPC;
981 
982 	atu.code = PCIE_MSG_CODE_PME_TURN_OFF;
983 	atu.routing = PCIE_MSG_TYPE_R_BC;
984 	atu.type = PCIE_ATU_TYPE_MSG;
985 	atu.size = resource_size(pci->pp.msg_res);
986 	atu.index = pci->pp.msg_atu_index;
987 
988 	atu.parent_bus_addr = pci->pp.msg_res->start - pci->parent_bus_offset;
989 
990 	ret = dw_pcie_prog_outbound_atu(pci, &atu);
991 	if (ret)
992 		return ret;
993 
994 	mem = ioremap(pci->pp.msg_res->start, pci->region_align);
995 	if (!mem)
996 		return -ENOMEM;
997 
998 	/* The dummy write through the MSG-type iATU window is sent out as a PME_Turn_Off Msg TLP */
999 	writel(0, mem);
1000 
1001 	iounmap(mem);
1002 
1003 	return 0;
1004 }
1005 
1006 int dw_pcie_suspend_noirq(struct dw_pcie *pci)
1007 {
1008 	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
1009 	u32 val;
1010 	int ret;
1011 
1012 	/*
1013 	 * If L1SS is supported, then do not put the link into L2 as some
1014 	 * devices such as NVMe expect low resume latency.
1015 	 */
1016 	if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
1017 		return 0;
1018 
1019 	if (pci->pp.ops->pme_turn_off) {
1020 		pci->pp.ops->pme_turn_off(&pci->pp);
1021 	} else {
1022 		ret = dw_pcie_pme_turn_off(pci);
1023 		if (ret)
1024 			return ret;
1025 	}
1026 
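	/*
	 * Wait for the LTSSM to settle in L2 idle, or to have dropped back
	 * to a Detect/Poll state (i.e. the link is already down).
	 */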
1027 	ret = read_poll_timeout(dw_pcie_get_ltssm, val,
1028 				val == DW_PCIE_LTSSM_L2_IDLE ||
1029 				val <= DW_PCIE_LTSSM_DETECT_WAIT,
1030 				PCIE_PME_TO_L2_TIMEOUT_US/10,
1031 				PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
1032 	if (ret) {
1033 		/* Only log message when LTSSM isn't in DETECT or POLL */
1034 		dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
1035 		return ret;
1036 	}
1037 
1038 	/*
1039 	 * Per PCIe r6.0, sec 5.3.3.2.1, software should wait at least
1040 	 * 100ns after L2/L3 Ready before turning off refclock and
1041 	 * main power. This is harmless when no endpoint is connected.
1042 	 */
1043 	udelay(1);
1044 
1045 	dw_pcie_stop_link(pci);
1046 	if (pci->pp.ops->deinit)
1047 		pci->pp.ops->deinit(&pci->pp);
1048 
1049 	pci->suspended = true;
1050 
1051 	return ret;
1052 }
1053 EXPORT_SYMBOL_GPL(dw_pcie_suspend_noirq);
1054 
1055 int dw_pcie_resume_noirq(struct dw_pcie *pci)
1056 {
1057 	int ret;
1058 
1059 	if (!pci->suspended)
1060 		return 0;
1061 
1062 	pci->suspended = false;
1063 
1064 	if (pci->pp.ops->init) {
1065 		ret = pci->pp.ops->init(&pci->pp);
1066 		if (ret) {
1067 			dev_err(pci->dev, "Host init failed: %d\n", ret);
1068 			return ret;
1069 		}
1070 	}
1071 
1072 	dw_pcie_setup_rc(&pci->pp);
1073 
1074 	ret = dw_pcie_start_link(pci);
1075 	if (ret)
1076 		return ret;
1077 
1078 	ret = dw_pcie_wait_for_link(pci);
1079 	if (ret)
1080 		return ret;
1081 
1082 	return ret;
1083 }
1084 EXPORT_SYMBOL_GPL(dw_pcie_resume_noirq);
1085