// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_child_pcie_ops;

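/*
 * MSI handling is modelled as a two-level IRQ domain hierarchy: the
 * "PCI-MSI" chip below fronts the PCI/MSI domain and forwards
 * ack/mask/unmask to its parent, while the bottom "DWPCI-MSI" chip
 * further down programs the controller's internal iMSI-RX registers
 * through the DBI space.
 */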
static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
		  MSI_FLAG_MULTI_PCI_MSI,
	.chip	= &dw_pcie_msi_irq_chip,
};

/* MSI interrupt handler: dispatch each pending vector to the MSI IRQ domain */
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
	int i, pos;
	unsigned long val;
	u32 status, num_ctrls;
	irqreturn_t ret = IRQ_NONE;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
					   (i * MSI_REG_CTRL_BLOCK_SIZE));
		if (!status)
			continue;

		ret = IRQ_HANDLED;
		val = status;
		pos = 0;
		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			generic_handle_domain_irq(pp->irq_domain,
						  (i * MAX_MSI_IRQS_PER_CTRL) +
						  pos);
			pos++;
		}
	}

	return ret;
}

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct dw_pcie_rp *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}

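/*
 * Compose the MSI message for a vector: every vector shares the single
 * DMA-mapped doorbell address in pp->msi_data and is distinguished purely
 * by the message data, which is the hwirq number.
 */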
static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}

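/*
 * The bottom-chip mask/unmask callbacks update the cached per-controller
 * mask in pp->irq_mask[] and mirror it to the corresponding
 * PCIE_MSI_INTR0_MASK register bank under pp->lock, since
 * MAX_MSI_IRQS_PER_CTRL vectors share each 32-bit register.
 */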
static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};

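/*
 * Multi-MSI allocations must be contiguous and power-of-two aligned in the
 * hwirq space, hence the order_base_2() region allocation from the
 * msi_irq_in_use bitmap below.
 */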
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct dw_pcie_rp *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct dw_pcie_rp *pp = domain->host_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};

int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
					       &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}

static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
	u32 ctrl;

	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
							 NULL, NULL);
	}

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);
}

static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target = (u64)pp->msi_data;

	if (!pci_msi_enabled() || !pp->has_msi_ctrl)
		return;

	/* Program the msi_data */
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}

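/*
 * Some platforms wire each MSI controller to its own IRQ line ("msi0",
 * "msi1", ...) instead of a single shared "msi" line. Parse those split
 * IRQs here; if none are found, the caller falls back to the "msi" IRQ.
 */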
static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u32 ctrl, max_vectors;
	int irq;

	/* Parse any "msiX" IRQs described in the devicetree */
	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
		char msi_name[] = "msiX";

		msi_name[3] = '0' + ctrl;
		irq = platform_get_irq_byname_optional(pdev, msi_name);
		if (irq == -ENXIO)
			break;
		if (irq < 0)
			return dev_err_probe(dev, irq,
					     "Failed to parse MSI IRQ '%s'\n",
					     msi_name);

		pp->msi_irq[ctrl] = irq;
	}

	/* If no "msiX" IRQs, caller should fall back to the "msi" IRQ */
	if (ctrl == 0)
		return -ENXIO;

	max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
	if (pp->num_vectors > max_vectors) {
		dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
			 max_vectors);
		pp->num_vectors = max_vectors;
	}
	if (!pp->num_vectors)
		pp->num_vectors = max_vectors;

	return 0;
}

static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u64 *msi_vaddr = NULL;
	int ret;
	u32 ctrl, num_ctrls;

	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
		pp->irq_mask[ctrl] = ~0;

	if (!pp->msi_irq[0]) {
		ret = dw_pcie_parse_split_msi_irq(pp);
		if (ret < 0 && ret != -ENXIO)
			return ret;
	}

	if (!pp->num_vectors)
		pp->num_vectors = MSI_DEF_NUM_VECTORS;
	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	if (!pp->msi_irq[0]) {
		pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
		if (pp->msi_irq[0] < 0) {
			pp->msi_irq[0] = platform_get_irq(pdev, 0);
			if (pp->msi_irq[0] < 0)
				return pp->msi_irq[0];
		}
	}

	dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);

	pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

	ret = dw_pcie_allocate_domains(pp);
	if (ret)
		return ret;

	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
						    dw_chained_msi_isr, pp);
	}

	/*
	 * Even though the iMSI-RX module supports 64-bit addresses, some
	 * peripheral PCIe devices may lack 64-bit message support. In
	 * order not to miss MSI TLPs from those devices, the MSI target
	 * address has to be within the lowest 4GB.
	 *
	 * Note: until a better alternative is found, the reservation is
	 * done by allocating from DMA-coherent memory with an artificially
	 * limited mask.
	 */
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (!ret)
		msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
						GFP_KERNEL);

	if (!msi_vaddr) {
		dev_warn(dev, "Failed to allocate 32-bit MSI address\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
		msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
						GFP_KERNEL);
		if (!msi_vaddr) {
			dev_err(dev, "Failed to allocate MSI address\n");
			dw_pcie_free_msi(pp);
			return -ENOMEM;
		}
	}

	return 0;
}

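/*
 * Reserve the last 'region_align' bytes of the first host bridge MEM window
 * as a "msg" resource, so dw_pcie_pme_turn_off() can later target it with an
 * outbound iATU MSG window to generate Msg TLPs.
 */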
static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct resource_entry *win;
	struct resource *res;

	win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
	if (win) {
		res = devm_kzalloc(pci->dev, sizeof(*res), GFP_KERNEL);
		if (!res)
			return;

		/*
		 * Allocate MSG TLP region of size 'region_align' at the end of
		 * the host bridge window.
		 */
		res->start = win->res->end - pci->region_align + 1;
		res->end = win->res->end;
		res->name = "msg";
		res->flags = win->res->flags | IORESOURCE_BUSY;

		if (!devm_request_resource(pci->dev, win->res, res))
			pp->msg_res = res;
	}
}

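/*
 * dw_pcie_host_init() is the entry point for DWC Root Complex glue drivers.
 * As a rough sketch (the names below are hypothetical, not from this file),
 * a glue driver's probe would do something like:
 *
 *	pci->dev = &pdev->dev;
 *	pci->ops = &my_dw_pcie_ops;	// hypothetical per-SoC dw_pcie ops
 *	pci->pp.ops = &my_host_ops;	// hypothetical dw_pcie_host_ops
 *	return dw_pcie_host_init(&pci->pp);
 *
 * pp->ops->init()/deinit() and the other callbacks referenced here are the
 * hooks such a driver fills in.
 */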
int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win;
	struct pci_host_bridge *bridge;
	struct resource *res;
	int ret;

	raw_spin_lock_init(&pp->lock);

	ret = dw_pcie_get_resources(pci);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (res) {
		pp->cfg0_size = resource_size(res);
		pp->cfg0_base = res->start;

		pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
		if (IS_ERR(pp->va_cfg0_base))
			return PTR_ERR(pp->va_cfg0_base);
	} else {
		dev_err(dev, "Missing *config* reg space\n");
		return -ENODEV;
	}

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	pp->bridge = bridge;

	/* Get the I/O range from DT */
	win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
	if (win) {
		pp->io_size = resource_size(win->res);
		pp->io_bus_addr = win->res->start - win->offset;
		pp->io_base = pci_pio_to_address(win->res->start);
	}

	/* Set default bus ops */
	bridge->ops = &dw_pcie_ops;
	bridge->child_ops = &dw_child_pcie_ops;

	if (pp->ops->init) {
		ret = pp->ops->init(pp);
		if (ret)
			return ret;
	}

	if (pci_msi_enabled()) {
		pp->has_msi_ctrl = !(pp->ops->msi_init ||
				     of_property_read_bool(np, "msi-parent") ||
				     of_property_read_bool(np, "msi-map"));

		/*
		 * For the has_msi_ctrl case the default assignment is handled
		 * in dw_pcie_msi_host_init().
		 */
		if (!pp->has_msi_ctrl && !pp->num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else if (pp->num_vectors > MAX_MSI_IRQS) {
			dev_err(dev, "Invalid number of vectors\n");
			ret = -EINVAL;
			goto err_deinit_host;
		}

		if (pp->ops->msi_init) {
			ret = pp->ops->msi_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		} else if (pp->has_msi_ctrl) {
			ret = dw_pcie_msi_host_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		}
	}

	dw_pcie_version_detect(pci);

	dw_pcie_iatu_detect(pci);

	/*
	 * Allocate the resource for MSG TLP before programming the iATU
	 * outbound window in dw_pcie_setup_rc(). Since the allocation depends
	 * on the value of 'region_align', this has to be done after
	 * dw_pcie_iatu_detect().
	 *
	 * Glue drivers need to set 'use_atu_msg' before dw_pcie_host_init() to
	 * make use of the generic MSG TLP implementation.
	 */
	if (pp->use_atu_msg)
		dw_pcie_host_request_msg_tlp_res(pp);

	ret = dw_pcie_edma_detect(pci);
	if (ret)
		goto err_free_msi;

	ret = dw_pcie_setup_rc(pp);
	if (ret)
		goto err_remove_edma;

	if (!dw_pcie_link_up(pci)) {
		ret = dw_pcie_start_link(pci);
		if (ret)
			goto err_remove_edma;
	}

	/* Ignore errors, the link may come up later */
	dw_pcie_wait_for_link(pci);

	bridge->sysdata = pp;

	ret = pci_host_probe(bridge);
	if (ret)
		goto err_stop_link;

	if (pp->ops->post_init)
		pp->ops->post_init(pp);

	return 0;

err_stop_link:
	dw_pcie_stop_link(pci);

err_remove_edma:
	dw_pcie_edma_remove(pci);

err_free_msi:
	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

err_deinit_host:
	if (pp->ops->deinit)
		pp->ops->deinit(pp);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);

void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	pci_stop_root_bus(pp->bridge->bus);
	pci_remove_root_bus(pp->bridge->bus);

	dw_pcie_stop_link(pci);

	dw_pcie_edma_remove(pci);

	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

	if (pp->ops->deinit)
		pp->ops->deinit(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);

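/*
 * Config accesses to devices below the root bus reuse the shared "config"
 * region: map_bus() retargets the outbound iATU window at the requested
 * BDF (CFG0 for the root port's immediate children, CFG1 beyond them)
 * before the generic accessor performs the actual read or write.
 */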
static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	int type, ret;
	u32 busdev;

	/*
	 * Checking whether the link is up here is a last line of defense
	 * against platforms that forward errors on the system bus as
	 * SError upon PCI configuration transactions issued when the link
	 * is down. This check is racy by definition and does not stop
	 * the system from triggering an SError if the link goes down
	 * after this check is performed.
	 */
	if (!dw_pcie_link_up(pci))
		return NULL;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (pci_is_root_bus(bus->parent))
		type = PCIE_ATU_TYPE_CFG0;
	else
		type = PCIE_ATU_TYPE_CFG1;

	atu.type = type;
	atu.cpu_addr = pp->cfg0_base;
	atu.pci_addr = busdev;
	atu.size = pp->cfg0_size;

	ret = dw_pcie_prog_outbound_atu(pci, &atu);
	if (ret)
		return NULL;

	return pp->va_cfg0_base + where;
}

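/*
 * When the IO range had to share the last outbound window with config
 * accesses (pp->cfg0_io_shared), the window is reprogrammed back to IO
 * after every child config access.
 */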
static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 *val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	int ret;

	ret = pci_generic_config_read(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	if (pp->cfg0_io_shared) {
		atu.type = PCIE_ATU_TYPE_IO;
		atu.cpu_addr = pp->io_base;
		atu.pci_addr = pp->io_bus_addr;
		atu.size = pp->io_size;

		ret = dw_pcie_prog_outbound_atu(pci, &atu);
		if (ret)
			return PCIBIOS_SET_FAILED;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	int ret;

	ret = pci_generic_config_write(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	if (pp->cfg0_io_shared) {
		atu.type = PCIE_ATU_TYPE_IO;
		atu.cpu_addr = pp->io_base;
		atu.pci_addr = pp->io_bus_addr;
		atu.size = pp->io_size;

		ret = dw_pcie_prog_outbound_atu(pci, &atu);
		if (ret)
			return PCIBIOS_SET_FAILED;
	}

	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops dw_child_pcie_ops = {
	.map_bus = dw_pcie_other_conf_map_bus,
	.read = dw_pcie_rd_other_conf,
	.write = dw_pcie_wr_other_conf,
};

void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (PCI_SLOT(devfn) > 0)
		return NULL;

	return pci->dbi_base + where;
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);

static struct pci_ops dw_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

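/*
 * Program outbound iATU windows for the bridge MEM/IO ranges and inbound
 * windows for any dma-ranges. Window 0 stays reserved for the config
 * accesses performed by dw_pcie_other_conf_map_bus().
 */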
static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	struct resource_entry *entry;
	int i, ret;

	/* Note the very first outbound ATU is used for CFG IOs */
	if (!pci->num_ob_windows) {
		dev_err(pci->dev, "No outbound iATU found\n");
		return -EINVAL;
	}

	/*
	 * Ensure all out/inbound windows are disabled before proceeding with
	 * the MEM/IO (dma-)ranges setups.
	 */
	for (i = 0; i < pci->num_ob_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);

	for (i = 0; i < pci->num_ib_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i);

	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->windows) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ob_windows <= ++i)
			break;

		atu.index = i;
		atu.type = PCIE_ATU_TYPE_MEM;
		atu.cpu_addr = entry->res->start;
		atu.pci_addr = entry->res->start - entry->offset;

		/* Adjust iATU size if MSG TLP region was allocated before */
		if (pp->msg_res && pp->msg_res->parent == entry->res)
			atu.size = resource_size(entry->res) -
					resource_size(pp->msg_res);
		else
			atu.size = resource_size(entry->res);

		ret = dw_pcie_prog_outbound_atu(pci, &atu);
		if (ret) {
			dev_err(pci->dev, "Failed to set MEM range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pp->io_size) {
		if (pci->num_ob_windows > ++i) {
			atu.index = i;
			atu.type = PCIE_ATU_TYPE_IO;
			atu.cpu_addr = pp->io_base;
			atu.pci_addr = pp->io_bus_addr;
			atu.size = pp->io_size;

			ret = dw_pcie_prog_outbound_atu(pci, &atu);
			if (ret) {
				dev_err(pci->dev, "Failed to set IO range %pr\n",
					entry->res);
				return ret;
			}
		} else {
			pp->cfg0_io_shared = true;
		}
	}

	if (pci->num_ob_windows <= i)
		dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
			 pci->num_ob_windows);

	pp->msg_atu_index = i;

	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ib_windows <= i)
			break;

		ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM,
					       entry->res->start,
					       entry->res->start - entry->offset,
					       resource_size(entry->res));
		if (ret) {
			dev_err(pci->dev, "Failed to set DMA range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pci->num_ib_windows <= i)
		dev_warn(pci->dev, "Dma-ranges exceed inbound iATU size (%u)\n",
			 pci->num_ib_windows);

	return 0;
}

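/*
 * dw_pcie_setup_rc() programs the Root Complex: the MSI controller state,
 * RC BARs, bus numbers, command register and (unless the platform supplies
 * its own child bus ops) the iATU windows. It is also invoked from
 * dw_pcie_resume_noirq(), so everything here must be safe to re-run.
 */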
int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u32 val, ctrl, num_ctrls;
	int ret;

	/*
	 * Enable DBI read-only registers for writing/updating configuration.
	 * Write permission gets disabled towards the end of this function.
	 */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_setup(pci);

	if (pp->has_msi_ctrl) {
		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

		/* Initialize IRQ Status array */
		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    pp->irq_mask[ctrl]);
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    ~0);
		}
	}

	dw_pcie_msi_init(pp);

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides its own child bus config accesses, it means
	 * the platform uses its own address translation component rather than
	 * ATU, so we should not program the ATU here.
	 */
	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
		ret = dw_pcie_iatu_setup(pp);
		if (ret)
			return ret;
	}

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Program correct class for RC */
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);

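/*
 * Broadcast a PME_Turn_Off message by pointing an iATU MSG window at the
 * reserved "msg" region: a dummy write into that region is then converted
 * into the Msg TLP on the link.
 */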
static int dw_pcie_pme_turn_off(struct dw_pcie *pci)
{
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	void __iomem *mem;
	int ret;

	if (pci->num_ob_windows <= pci->pp.msg_atu_index)
		return -ENOSPC;

	if (!pci->pp.msg_res)
		return -ENOSPC;

	atu.code = PCIE_MSG_CODE_PME_TURN_OFF;
	atu.routing = PCIE_MSG_TYPE_R_BC;
	atu.type = PCIE_ATU_TYPE_MSG;
	atu.size = resource_size(pci->pp.msg_res);
	atu.index = pci->pp.msg_atu_index;

	atu.cpu_addr = pci->pp.msg_res->start;

	ret = dw_pcie_prog_outbound_atu(pci, &atu);
	if (ret)
		return ret;

	mem = ioremap(atu.cpu_addr, pci->region_align);
	if (!mem)
		return -ENOMEM;

	/* A dummy write is converted to a Msg TLP */
	writel(0, mem);

	iounmap(mem);

	return 0;
}

int dw_pcie_suspend_noirq(struct dw_pcie *pci)
{
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int ret = 0;

	/*
	 * If L1SS is supported, then do not put the link into L2 as some
	 * devices such as NVMe expect low resume latency.
	 */
	if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
		return 0;

	if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT)
		return 0;

	if (pci->pp.ops->pme_turn_off)
		pci->pp.ops->pme_turn_off(&pci->pp);
	else
		ret = dw_pcie_pme_turn_off(pci);

	if (ret)
		return ret;

	ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE,
				PCIE_PME_TO_L2_TIMEOUT_US / 10,
				PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
	if (ret) {
		dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
		return ret;
	}

	if (pci->pp.ops->deinit)
		pci->pp.ops->deinit(&pci->pp);

	pci->suspended = true;

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_suspend_noirq);

int dw_pcie_resume_noirq(struct dw_pcie *pci)
{
	int ret;

	if (!pci->suspended)
		return 0;

	pci->suspended = false;

	if (pci->pp.ops->init) {
		ret = pci->pp.ops->init(&pci->pp);
		if (ret) {
			dev_err(pci->dev, "Host init failed: %d\n", ret);
			return ret;
		}
	}

	dw_pcie_setup_rc(&pci->pp);

	ret = dw_pcie_start_link(pci);
	if (ret)
		return ret;

	ret = dw_pcie_wait_for_link(pci);
	if (ret)
		return ret;

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_resume_noirq);