// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/align.h>
#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_pcie_ecam_ops;
static struct pci_ops dw_child_pcie_ops;

#define DW_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS		| \
				    MSI_FLAG_USE_DEF_CHIP_OPS		| \
				    MSI_FLAG_NO_AFFINITY		| \
				    MSI_FLAG_PCI_MSI_MASK_PARENT)
#define DW_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI		| \
				     MSI_FLAG_PCI_MSIX			| \
				     MSI_GENERIC_FLAGS_MASK)

#define IS_256MB_ALIGNED(x) IS_ALIGNED(x, SZ_256M)

static const struct msi_parent_ops dw_pcie_msi_parent_ops = {
	.required_flags		= DW_PCIE_MSI_FLAGS_REQUIRED,
	.supported_flags	= DW_PCIE_MSI_FLAGS_SUPPORTED,
	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
	.chip_flags		= MSI_CHIP_FLAG_SET_ACK,
	.prefix			= "DW-",
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
	int i, pos;
	unsigned long val;
	u32 status, num_ctrls;
	irqreturn_t ret = IRQ_NONE;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
					   (i * MSI_REG_CTRL_BLOCK_SIZE));
		if (!status)
			continue;

		ret = IRQ_HANDLED;
		val = status;
		pos = 0;
		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			generic_handle_domain_irq(pp->irq_domain,
						  (i * MAX_MSI_IRQS_PER_CTRL) +
						  pos);
			pos++;
		}
	}

	return ret;
}

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct dw_pcie_rp *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}

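/*
 * Compose the MSI message for a vector: the address is the DMA address
 * of the MSI target reserved in dw_pcie_msi_host_init() (pp->msi_data)
 * and the payload is simply the hwirq number.
 */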
static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}

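/*
 * The irq_chip callbacks below operate on the per-controller
 * PCIE_MSI_INTR0_* register banks. Each controller serves
 * MAX_MSI_IRQS_PER_CTRL vectors, so a hwirq is split into a controller
 * index ('ctrl') and a bit position within that controller's registers.
 */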
static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};

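/*
 * Allocate a size-aligned, power-of-two block of vectors from the
 * bitmap, as required for Multi-MSI. Only the bitmap manipulation needs
 * to be protected by pp->lock.
 */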
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct dw_pcie_rp *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct dw_pcie_rp *pp = domain->host_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};

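/**
 * dw_pcie_allocate_domains - Create the MSI parent IRQ domain
 * @pp: Host bridge Root Port
 *
 * Create an MSI parent domain sized to pp->num_vectors on top of
 * dw_pcie_msi_domain_ops. Per-device MSI domains are then instantiated
 * on demand by the generic MSI layer.
 *
 * Return: 0 on success, -ENOMEM otherwise.
 */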
int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct irq_domain_info info = {
		.fwnode		= dev_fwnode(pci->dev),
		.ops		= &dw_pcie_msi_domain_ops,
		.size		= pp->num_vectors,
		.host_data	= pp,
	};

	pp->irq_domain = msi_create_parent_irq_domain(&info, &dw_pcie_msi_parent_ops);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_allocate_domains);

void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
	u32 ctrl;

	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
							 NULL, NULL);
	}

	irq_domain_remove(pp->irq_domain);
}
EXPORT_SYMBOL_GPL(dw_pcie_free_msi);

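/*
 * Program the iMSI-RX registers: restore the per-controller mask and
 * enable bits and (re)write the MSI target address. Called from
 * dw_pcie_setup_rc(), i.e. both at probe time and when the Root Complex
 * is re-initialized on resume.
 */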
void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target = (u64)pp->msi_data;
	u32 ctrl, num_ctrls;

	if (!pci_msi_enabled() || !pp->has_msi_ctrl)
		return;

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	/* Initialize IRQ Status array */
	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
		dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
				    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
				    pp->irq_mask[ctrl]);
		dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
				    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
				    ~0);
	}

	/* Program the msi_data */
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
EXPORT_SYMBOL_GPL(dw_pcie_msi_init);

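/*
 * Some platforms route each MSI controller to its own parent interrupt,
 * described in the devicetree as "msi0", "msi1", etc. Parse those here;
 * a single shared "msi" IRQ is handled by the caller as a fallback.
 */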
static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u32 ctrl, max_vectors;
	int irq;

	/* Parse any "msiX" IRQs described in the devicetree */
	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
		char msi_name[] = "msiX";

		msi_name[3] = '0' + ctrl;
		irq = platform_get_irq_byname_optional(pdev, msi_name);
		if (irq == -ENXIO)
			break;
		if (irq < 0)
			return dev_err_probe(dev, irq,
					     "Failed to parse MSI IRQ '%s'\n",
					     msi_name);

		pp->msi_irq[ctrl] = irq;
	}

	/* If there are no "msiX" IRQs, the caller should fall back to the "msi" IRQ */
	if (ctrl == 0)
		return -ENXIO;

	max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
	if (pp->num_vectors > max_vectors) {
		dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
			 max_vectors);
		pp->num_vectors = max_vectors;
	}
	if (!pp->num_vectors)
		pp->num_vectors = max_vectors;

	return 0;
}

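/**
 * dw_pcie_msi_host_init - Initialize the internal MSI controller (iMSI-RX)
 * @pp: Host bridge Root Port
 *
 * Set up the MSI IRQ domain, install the chained handler(s) and reserve
 * a DMA address to serve as the MSI target. The address is only used as
 * a match target for inbound MSI writes, which the controller turns
 * into interrupts; nothing ever reads the backing memory.
 *
 * Return: 0 on success, negative errno otherwise.
 */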
int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u64 *msi_vaddr = NULL;
	int ret;
	u32 ctrl, num_ctrls;

	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
		pp->irq_mask[ctrl] = ~0;

	if (!pp->msi_irq[0]) {
		ret = dw_pcie_parse_split_msi_irq(pp);
		if (ret < 0 && ret != -ENXIO)
			return ret;
	}

	if (!pp->num_vectors)
		pp->num_vectors = MSI_DEF_NUM_VECTORS;
	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	if (!pp->msi_irq[0]) {
		pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
		if (pp->msi_irq[0] < 0) {
			pp->msi_irq[0] = platform_get_irq(pdev, 0);
			if (pp->msi_irq[0] < 0)
				return pp->msi_irq[0];
		}
	}

	dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);

	pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

	ret = dw_pcie_allocate_domains(pp);
	if (ret)
		return ret;

	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
							 dw_chained_msi_isr, pp);
	}

	/*
	 * Even though the iMSI-RX Module supports 64-bit addresses, some
	 * peripheral PCIe devices may lack 64-bit message support. In
	 * order not to miss MSI TLPs from those devices, the MSI target
	 * address has to be within the lowest 4GB.
	 *
	 * Note: until a better alternative is found, the reservation is
	 * done by allocating from the artificially limited DMA-coherent
	 * memory.
	 */
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (!ret)
		msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
						GFP_KERNEL);

	if (!msi_vaddr) {
		dev_warn(dev, "Failed to allocate 32-bit MSI address\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
		msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
						GFP_KERNEL);
		if (!msi_vaddr) {
			dev_err(dev, "Failed to allocate MSI address\n");
			dw_pcie_free_msi(pp);
			return -ENOMEM;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_msi_host_init);

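/*
 * Reserve the tail of the first memory window for sending Msg TLPs
 * (e.g. PME_Turn_Off) through the iATU. The reserved range is later
 * subtracted from the corresponding MEM window in dw_pcie_iatu_setup().
 */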
static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct resource_entry *win;
	struct resource *res;

	win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
	if (win) {
		res = devm_kzalloc(pci->dev, sizeof(*res), GFP_KERNEL);
		if (!res)
			return;

		/*
		 * Allocate MSG TLP region of size 'region_align' at the end of
		 * the host bridge window.
		 */
		res->start = win->res->end - pci->region_align + 1;
		res->end = win->res->end;
		res->name = "msg";
		res->flags = win->res->flags | IORESOURCE_BUSY;

		if (!devm_request_resource(pci->dev, win->res, res))
			pp->msg_res = res;
	}
}

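/*
 * In ECAM mode the config space is not remapped per access. Instead,
 * two static iATU windows in CFG shift mode translate the ECAM-encoded
 * bus/device/function address bits directly: one CFG0 window for the
 * bus immediately below the Root Port and one CFG1 window for all buses
 * below that.
 */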
static int dw_pcie_config_ecam_iatu(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = {0};
	resource_size_t bus_range_max;
	struct resource_entry *bus;
	int ret;

	bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);

	/*
	 * The root bus under the host bridge doesn't require any iATU
	 * configuration, since the DBI region is used to access the root bus
	 * config space. The immediate bus under the root bus needs a type 0
	 * iATU configuration and the remaining buses need a type 1 iATU
	 * configuration.
	 */
	atu.index = 0;
	atu.type = PCIE_ATU_TYPE_CFG0;
	atu.parent_bus_addr = pp->cfg0_base + SZ_1M;
	/* 1MiB is to cover 1 (bus) * 32 (devices) * 8 (functions) */
	atu.size = SZ_1M;
	atu.ctrl2 = PCIE_ATU_CFG_SHIFT_MODE_ENABLE;
	ret = dw_pcie_prog_outbound_atu(pci, &atu);
	if (ret)
		return ret;

	bus_range_max = resource_size(bus->res);

	if (bus_range_max < 2)
		return 0;

	/* Configure remaining buses in type 1 iATU configuration */
	atu.index = 1;
	atu.type = PCIE_ATU_TYPE_CFG1;
	atu.parent_bus_addr = pp->cfg0_base + SZ_2M;
	atu.size = (SZ_1M * bus_range_max) - SZ_2M;
	atu.ctrl2 = PCIE_ATU_CFG_SHIFT_MODE_ENABLE;

	return dw_pcie_prog_outbound_atu(pci, &atu);
}

static int dw_pcie_create_ecam_window(struct dw_pcie_rp *pp, struct resource *res)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct resource_entry *bus;

	bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
	if (!bus)
		return -ENODEV;

	pp->cfg = pci_ecam_create(dev, res, bus->res, &pci_generic_ecam_ops);
	if (IS_ERR(pp->cfg))
		return PTR_ERR(pp->cfg);

	return 0;
}

static bool dw_pcie_ecam_enabled(struct dw_pcie_rp *pp, struct resource *config_res)
{
	struct resource *bus_range;
	u64 nr_buses;

	/* Vendor glue drivers may implement their own ECAM mechanism */
	if (pp->native_ecam)
		return false;

	/*
	 * PCIe spec r6.0, sec 7.2.2 mandates the base address used for ECAM to
	 * be aligned on a 2^(n+20) byte boundary, where n is the number of bits
	 * used for representing 'bus' in BDF. Since the DWC cores always use 8
	 * bits for representing 'bus', the base address has to be aligned to a
	 * 2^28 byte boundary, which is 256 MiB.
	 */
	if (!IS_256MB_ALIGNED(config_res->start))
		return false;

	bus_range = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
	if (!bus_range)
		return false;

	nr_buses = resource_size(config_res) >> PCIE_ECAM_BUS_SHIFT;

	return nr_buses >= resource_size(bus_range);
}

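/*
 * Map the "config" region either as a real ECAM window, when the
 * alignment and size requirements are met, or as a plain CFG space that
 * is retargeted through the iATU on every config access.
 */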
static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (!res) {
		dev_err(dev, "Missing \"config\" reg space\n");
		return -ENODEV;
	}

	pp->cfg0_size = resource_size(res);
	pp->cfg0_base = res->start;

	pp->ecam_enabled = dw_pcie_ecam_enabled(pp, res);
	if (pp->ecam_enabled) {
		ret = dw_pcie_create_ecam_window(pp, res);
		if (ret)
			return ret;

		pp->bridge->ops = &dw_pcie_ecam_ops;
		pp->bridge->sysdata = pp->cfg;
		pp->cfg->priv = pp;
	} else {
		pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
		if (IS_ERR(pp->va_cfg0_base))
			return PTR_ERR(pp->va_cfg0_base);

		/* Set default bus ops */
		pp->bridge->ops = &dw_pcie_ops;
		pp->bridge->child_ops = &dw_child_pcie_ops;
		pp->bridge->sysdata = pp;
	}

	ret = dw_pcie_get_resources(pci);
	if (ret) {
		if (pp->cfg)
			pci_ecam_free(pp->cfg);
		return ret;
	}

	/* Get the I/O range from DT */
	win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_IO);
	if (win) {
		pp->io_size = resource_size(win->res);
		pp->io_bus_addr = win->res->start - win->offset;
		pp->io_base = pci_pio_to_address(win->res->start);
	}

	/*
	 * visconti_pcie_cpu_addr_fixup() uses pp->io_base, so we have to
	 * call dw_pcie_parent_bus_offset() after setting pp->io_base.
	 */
	pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "config",
							   pp->cfg0_base);
	return 0;
}

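/**
 * dw_pcie_host_init - Initialize the Root Port and enumerate the bus
 * @pp: Host bridge Root Port
 *
 * Bring-up sequence shared by all DWC-based host drivers: acquire
 * resources, set up MSI, detect the iATU/eDMA capabilities, program the
 * Root Complex, start the link and finally probe the host bridge.
 *
 * Return: 0 on success, negative errno otherwise.
 */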
int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct pci_host_bridge *bridge;
	int ret;

	raw_spin_lock_init(&pp->lock);

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	pp->bridge = bridge;

	ret = dw_pcie_host_get_resources(pp);
	if (ret)
		return ret;

	if (pp->ops->init) {
		ret = pp->ops->init(pp);
		if (ret)
			goto err_free_ecam;
	}

	if (pci_msi_enabled()) {
		pp->has_msi_ctrl = !(pp->ops->msi_init ||
				     of_property_present(np, "msi-parent") ||
				     of_property_present(np, "msi-map"));

		/*
		 * For the has_msi_ctrl case the default assignment is handled
		 * in dw_pcie_msi_host_init().
		 */
		if (!pp->has_msi_ctrl && !pp->num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else if (pp->num_vectors > MAX_MSI_IRQS) {
			dev_err(dev, "Invalid number of vectors\n");
			ret = -EINVAL;
			goto err_deinit_host;
		}

		if (pp->ops->msi_init) {
			ret = pp->ops->msi_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		} else if (pp->has_msi_ctrl) {
			ret = dw_pcie_msi_host_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		}
	}

	dw_pcie_version_detect(pci);

	dw_pcie_iatu_detect(pci);

	if (pci->num_lanes < 1)
		pci->num_lanes = dw_pcie_link_get_max_link_width(pci);

	ret = of_pci_get_equalization_presets(dev, &pp->presets, pci->num_lanes);
	if (ret)
		goto err_free_msi;

	if (pp->ecam_enabled) {
		ret = dw_pcie_config_ecam_iatu(pp);
		if (ret) {
			dev_err(dev, "Failed to configure iATU in ECAM mode\n");
			goto err_free_msi;
		}
	}

	/*
	 * Allocate the resource for MSG TLP before programming the iATU
	 * outbound window in dw_pcie_setup_rc(). Since the allocation depends
	 * on the value of 'region_align', this has to be done after
	 * dw_pcie_iatu_detect().
	 *
	 * Glue drivers need to set 'use_atu_msg' before dw_pcie_host_init() to
	 * make use of the generic MSG TLP implementation.
	 */
	if (pp->use_atu_msg)
		dw_pcie_host_request_msg_tlp_res(pp);

	ret = dw_pcie_edma_detect(pci);
	if (ret)
		goto err_free_msi;

	ret = dw_pcie_setup_rc(pp);
	if (ret)
		goto err_remove_edma;

	if (!dw_pcie_link_up(pci)) {
		ret = dw_pcie_start_link(pci);
		if (ret)
			goto err_remove_edma;
	}

	/*
	 * Note: Skip the link up delay only when a Link Up IRQ is present.
	 * If there is no Link Up IRQ, we should not bypass the delay
	 * because that would require users to manually rescan for devices.
	 */
	if (!pp->use_linkup_irq)
		/* Ignore errors, the link may come up later */
		dw_pcie_wait_for_link(pci);

	ret = pci_host_probe(bridge);
	if (ret)
		goto err_stop_link;

	if (pp->ops->post_init)
		pp->ops->post_init(pp);

	dwc_pcie_debugfs_init(pci, DW_PCIE_RC_TYPE);

	return 0;

err_stop_link:
	dw_pcie_stop_link(pci);

err_remove_edma:
	dw_pcie_edma_remove(pci);

err_free_msi:
	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

err_deinit_host:
	if (pp->ops->deinit)
		pp->ops->deinit(pp);

err_free_ecam:
	if (pp->cfg)
		pci_ecam_free(pp->cfg);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);

void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dwc_pcie_debugfs_deinit(pci);

	pci_stop_root_bus(pp->bridge->bus);
	pci_remove_root_bus(pp->bridge->bus);

	dw_pcie_stop_link(pci);

	dw_pcie_edma_remove(pci);

	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

	if (pp->ops->deinit)
		pp->ops->deinit(pp);

	if (pp->cfg)
		pci_ecam_free(pp->cfg);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);

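/*
 * Config accessors for child buses in non-ECAM mode. The single CFG
 * iATU window is retargeted at the addressed BDF on every map_bus()
 * call, which is also why it may have to be restored to the I/O
 * translation after each access (see pp->cfg0_io_shared).
 */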
static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	int type, ret;
	u32 busdev;

	/*
	 * Checking whether the link is up here is a last line of defense
	 * against platforms that forward errors on the system bus as
	 * SError upon PCI configuration transactions issued when the link
	 * is down. This check is racy by definition and does not stop
	 * the system from triggering an SError if the link goes down
	 * after this check is performed.
	 */
	if (!dw_pcie_link_up(pci))
		return NULL;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (pci_is_root_bus(bus->parent))
		type = PCIE_ATU_TYPE_CFG0;
	else
		type = PCIE_ATU_TYPE_CFG1;

	atu.type = type;
	atu.parent_bus_addr = pp->cfg0_base - pci->parent_bus_offset;
	atu.pci_addr = busdev;
	atu.size = pp->cfg0_size;

	ret = dw_pcie_prog_outbound_atu(pci, &atu);
	if (ret)
		return NULL;

	return pp->va_cfg0_base + where;
}

static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 *val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	int ret;

	ret = pci_generic_config_read(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	if (pp->cfg0_io_shared) {
		atu.type = PCIE_ATU_TYPE_IO;
		atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
		atu.pci_addr = pp->io_bus_addr;
		atu.size = pp->io_size;

		ret = dw_pcie_prog_outbound_atu(pci, &atu);
		if (ret)
			return PCIBIOS_SET_FAILED;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	int ret;

	ret = pci_generic_config_write(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	if (pp->cfg0_io_shared) {
		atu.type = PCIE_ATU_TYPE_IO;
		atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
		atu.pci_addr = pp->io_bus_addr;
		atu.size = pp->io_size;

		ret = dw_pcie_prog_outbound_atu(pci, &atu);
		if (ret)
			return PCIBIOS_SET_FAILED;
	}

	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops dw_child_pcie_ops = {
	.map_bus = dw_pcie_other_conf_map_bus,
	.read = dw_pcie_rd_other_conf,
	.write = dw_pcie_wr_other_conf,
};

void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (PCI_SLOT(devfn) > 0)
		return NULL;

	return pci->dbi_base + where;
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);

static void __iomem *dw_pcie_ecam_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
	struct pci_config_window *cfg = bus->sysdata;
	struct dw_pcie_rp *pp = cfg->priv;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int busn = bus->number;

	if (busn > 0)
		return pci_ecam_map_bus(bus, devfn, where);

	if (PCI_SLOT(devfn) > 0)
		return NULL;

	return pci->dbi_base + where;
}

static int dw_pcie_op_assert_perst(struct pci_bus *bus, bool assert)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	return dw_pcie_assert_perst(pci, assert);
}

static struct pci_ops dw_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
	.assert_perst = dw_pcie_op_assert_perst,
};

static struct pci_ops dw_pcie_ecam_ops = {
	.map_bus = dw_pcie_ecam_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

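/*
 * Program one outbound iATU window per bridge MEM/IO aperture and one
 * inbound window per dma-range. Index 0 stays reserved for config
 * accesses, so the usable outbound windows start at index 1.
 */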
static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	struct resource_entry *entry;
	int i, ret;

	/* Note: the very first outbound iATU window is used for CFG accesses */
	if (!pci->num_ob_windows) {
		dev_err(pci->dev, "No outbound iATU found\n");
		return -EINVAL;
	}

	/*
	 * Ensure all out/inbound windows are disabled before proceeding with
	 * the MEM/IO (dma-)ranges setups.
	 */
	for (i = 0; i < pci->num_ob_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);

	for (i = 0; i < pci->num_ib_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i);

	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->windows) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ob_windows <= ++i)
			break;

		atu.index = i;
		atu.type = PCIE_ATU_TYPE_MEM;
		atu.parent_bus_addr = entry->res->start - pci->parent_bus_offset;
		atu.pci_addr = entry->res->start - entry->offset;

		/* Adjust iATU size if MSG TLP region was allocated before */
		if (pp->msg_res && pp->msg_res->parent == entry->res)
			atu.size = resource_size(entry->res) -
					resource_size(pp->msg_res);
		else
			atu.size = resource_size(entry->res);

		ret = dw_pcie_prog_outbound_atu(pci, &atu);
		if (ret) {
			dev_err(pci->dev, "Failed to set MEM range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pp->io_size) {
		if (pci->num_ob_windows > ++i) {
			atu.index = i;
			atu.type = PCIE_ATU_TYPE_IO;
			atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
			atu.pci_addr = pp->io_bus_addr;
			atu.size = pp->io_size;

			ret = dw_pcie_prog_outbound_atu(pci, &atu);
			if (ret) {
				dev_err(pci->dev, "Failed to set IO range %pr\n",
					entry->res);
				return ret;
			}
		} else {
			pp->cfg0_io_shared = true;
		}
	}

	if (pci->num_ob_windows <= i)
		dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
			 pci->num_ob_windows);

	pp->msg_atu_index = i;

	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ib_windows <= i)
			break;

		ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM,
					       entry->res->start,
					       entry->res->start - entry->offset,
					       resource_size(entry->res));
		if (ret) {
			dev_err(pci->dev, "Failed to set DMA range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pci->num_ib_windows <= i)
		dev_warn(pci->dev, "Dma-ranges exceed inbound iATU size (%u)\n",
			 pci->num_ib_windows);

	return 0;
}

static void dw_pcie_program_presets(struct dw_pcie_rp *pp, enum pci_bus_speed speed)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u8 lane_eq_offset, lane_reg_size, cap_id;
	u8 *presets;
	u32 cap;
	int i;

	if (speed == PCIE_SPEED_8_0GT) {
		presets = (u8 *)pp->presets.eq_presets_8gts;
		lane_eq_offset = PCI_SECPCI_LE_CTRL;
		cap_id = PCI_EXT_CAP_ID_SECPCI;
		/* For a data rate of 8 GT/s, each lane equalization control is 16 bits wide */
		lane_reg_size = 0x2;
	} else if (speed == PCIE_SPEED_16_0GT) {
		presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_16GTS - 1];
		lane_eq_offset = PCI_PL_16GT_LE_CTRL;
		cap_id = PCI_EXT_CAP_ID_PL_16GT;
		lane_reg_size = 0x1;
	} else if (speed == PCIE_SPEED_32_0GT) {
		presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_32GTS - 1];
		lane_eq_offset = PCI_PL_32GT_LE_CTRL;
		cap_id = PCI_EXT_CAP_ID_PL_32GT;
		lane_reg_size = 0x1;
	} else if (speed == PCIE_SPEED_64_0GT) {
		presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_64GTS - 1];
		lane_eq_offset = PCI_PL_64GT_LE_CTRL;
		cap_id = PCI_EXT_CAP_ID_PL_64GT;
		lane_reg_size = 0x1;
	} else {
		return;
	}

	if (presets[0] == PCI_EQ_RESV)
		return;

	cap = dw_pcie_find_ext_capability(pci, cap_id);
	if (!cap)
		return;

	/*
	 * Write preset values to the registers byte-by-byte for the given
	 * number of lanes and register size.
	 */
	for (i = 0; i < pci->num_lanes * lane_reg_size; i++)
		dw_pcie_writeb_dbi(pci, cap + lane_eq_offset + i, presets[i]);
}

static void dw_pcie_config_presets(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	enum pci_bus_speed speed = pcie_link_speed[pci->max_link_speed];

	/*
	 * Lane equalization settings need to be applied for all data rates the
	 * controller supports and for all supported lanes.
	 */

	if (speed >= PCIE_SPEED_8_0GT)
		dw_pcie_program_presets(pp, PCIE_SPEED_8_0GT);

	if (speed >= PCIE_SPEED_16_0GT)
		dw_pcie_program_presets(pp, PCIE_SPEED_16_0GT);

	if (speed >= PCIE_SPEED_32_0GT)
		dw_pcie_program_presets(pp, PCIE_SPEED_32_0GT);

	if (speed >= PCIE_SPEED_64_0GT)
		dw_pcie_program_presets(pp, PCIE_SPEED_64_0GT);
}

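/**
 * dw_pcie_setup_rc - Configure the Root Complex registers
 * @pp: Host bridge Root Port
 *
 * Program the RC BARs, bus numbers, command register, class code and,
 * unless the platform provides its own child bus accessors, the iATU
 * windows. DBI read-only registers are writable for the duration of
 * the setup. Also called on resume to re-program a controller that
 * lost its state.
 *
 * Return: 0 on success, negative errno otherwise.
 */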
int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u32 val;
	int ret;

	/*
	 * Enable DBI read-only registers for writing/updating configuration.
	 * Write permission gets disabled towards the end of this function.
	 */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_setup(pci);

	dw_pcie_msi_init(pp);

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	dw_pcie_hide_unsupported_l1ss(pci);

	dw_pcie_config_presets(pp);
	/*
	 * If the platform provides its own child bus config accesses, it means
	 * the platform uses its own address translation component rather than
	 * ATU, so we should not program the ATU here.
	 */
	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
		ret = dw_pcie_iatu_setup(pp);
		if (ret)
			return ret;
	}

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Program correct class for RC */
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);

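/*
 * Send the PME_Turn_Off broadcast message by temporarily turning the
 * reserved MSG window into a PCIE_ATU_TYPE_MSG region: the dummy MMIO
 * write below then goes out on the link as a Msg TLP instead of a MemWr.
 */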
static int dw_pcie_pme_turn_off(struct dw_pcie *pci)
{
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	void __iomem *mem;
	int ret;

	if (pci->num_ob_windows <= pci->pp.msg_atu_index)
		return -ENOSPC;

	if (!pci->pp.msg_res)
		return -ENOSPC;

	atu.code = PCIE_MSG_CODE_PME_TURN_OFF;
	atu.routing = PCIE_MSG_TYPE_R_BC;
	atu.type = PCIE_ATU_TYPE_MSG;
	atu.size = resource_size(pci->pp.msg_res);
	atu.index = pci->pp.msg_atu_index;

	atu.parent_bus_addr = pci->pp.msg_res->start - pci->parent_bus_offset;

	ret = dw_pcie_prog_outbound_atu(pci, &atu);
	if (ret)
		return ret;

	mem = ioremap(pci->pp.msg_res->start, pci->region_align);
	if (!mem)
		return -ENOMEM;

	/* A dummy write is converted to a Msg TLP */
	writel(0, mem);

	iounmap(mem);

	return 0;
}

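/**
 * dw_pcie_suspend_noirq - Transition the link to L2 before suspend
 * @pci: DWC PCIe controller
 *
 * L2 entry is skipped entirely when ASPM L1 is enabled, since L1
 * substates already provide low-power idle with a much shorter resume
 * latency. Otherwise send PME_Turn_Off, wait for the LTSSM to settle in
 * L2 (or back in Detect when nothing is connected) and stop the link.
 *
 * Return: 0 on success, negative errno otherwise.
 */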
int dw_pcie_suspend_noirq(struct dw_pcie *pci)
{
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int ret;

	/*
	 * If L1SS is supported, then do not put the link into L2 as some
	 * devices such as NVMe expect low resume latency.
	 */
	if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
		return 0;

	if (pci->pp.ops->pme_turn_off) {
		pci->pp.ops->pme_turn_off(&pci->pp);
	} else {
		ret = dw_pcie_pme_turn_off(pci);
		if (ret)
			return ret;
	}

	ret = read_poll_timeout(dw_pcie_get_ltssm, val,
				val == DW_PCIE_LTSSM_L2_IDLE ||
				val <= DW_PCIE_LTSSM_DETECT_WAIT,
				PCIE_PME_TO_L2_TIMEOUT_US/10,
				PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
	if (ret) {
		/* Only log a message when LTSSM isn't in DETECT or POLL */
		dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
		return ret;
	}

	/*
	 * Per PCIe r6.0, sec 5.3.3.2.1, software should wait at least
	 * 100ns after L2/L3 Ready before turning off refclock and
	 * main power. This is harmless when no endpoint is connected.
	 */
	udelay(1);

	dw_pcie_stop_link(pci);
	if (pci->pp.ops->deinit)
		pci->pp.ops->deinit(&pci->pp);

	pci->suspended = true;

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_suspend_noirq);

int dw_pcie_resume_noirq(struct dw_pcie *pci)
{
	int ret;

	if (!pci->suspended)
		return 0;

	pci->suspended = false;

	if (pci->pp.ops->init) {
		ret = pci->pp.ops->init(&pci->pp);
		if (ret) {
			dev_err(pci->dev, "Host init failed: %d\n", ret);
			return ret;
		}
	}

	dw_pcie_setup_rc(&pci->pp);

	ret = dw_pcie_start_link(pci);
	if (ret)
		return ret;

	ret = dw_pcie_wait_for_link(pci);
	if (ret)
		return ret;

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_resume_noirq);