xref: /linux/drivers/pci/controller/dwc/pcie-designware-host.c (revision 6f7e6393d1ce636bb7ec77a7fe7b77458fddf701)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Synopsys DesignWare PCIe host controller driver
4  *
5  * Copyright (C) 2013 Samsung Electronics Co., Ltd.
6  *		https://www.samsung.com
7  *
8  * Author: Jingoo Han <jg1.han@samsung.com>
9  */
10 
11 #include <linux/align.h>
12 #include <linux/iopoll.h>
13 #include <linux/irqchip/chained_irq.h>
14 #include <linux/irqchip/irq-msi-lib.h>
15 #include <linux/irqdomain.h>
16 #include <linux/msi.h>
17 #include <linux/of_address.h>
18 #include <linux/of_pci.h>
19 #include <linux/pci_regs.h>
20 #include <linux/platform_device.h>
21 
22 #include "../../pci.h"
23 #include "pcie-designware.h"
24 
25 static struct pci_ops dw_pcie_ops;
26 static struct pci_ops dw_pcie_ecam_ops;
27 static struct pci_ops dw_child_pcie_ops;
28 
#ifdef CONFIG_SMP
/* No-op ->irq_ack; on SMP the actual ack is issued via ->irq_pre_redirect */
static void dw_irq_noop(struct irq_data *d) { }
#endif
32 
/*
 * Per-device MSI domain init callback. Runs the generic msi-lib setup,
 * then installs the ack flow this driver needs: on SMP the ack is moved
 * to ->irq_pre_redirect (and ->irq_ack becomes a no-op), while on UP the
 * ack is simply forwarded to the parent chip.
 */
static bool dw_pcie_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
				      struct irq_domain *real_parent, struct msi_domain_info *info)
{
	if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
		return false;

#ifdef CONFIG_SMP
	info->chip->irq_ack = dw_irq_noop;
	info->chip->irq_pre_redirect = irq_chip_pre_redirect_parent;
#else
	info->chip->irq_ack = irq_chip_ack_parent;
#endif
	return true;
}
47 
/* Flags the DWC MSI parent domain must always provide */
#define DW_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS		| \
				    MSI_FLAG_USE_DEF_CHIP_OPS		| \
				    MSI_FLAG_PCI_MSI_MASK_PARENT)
/* Optional MSI features the DWC MSI controller supports */
#define DW_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI		| \
				     MSI_FLAG_PCI_MSIX			| \
				     MSI_GENERIC_FLAGS_MASK)

/* ECAM base must be 256 MiB aligned: 2^(8 bus bits + 20) (see dw_pcie_ecam_enabled()) */
#define IS_256MB_ALIGNED(x) IS_ALIGNED(x, SZ_256M)
56 
/* Parent-domain glue exposed to the generic PCI/MSI layer */
static const struct msi_parent_ops dw_pcie_msi_parent_ops = {
	.required_flags		= DW_PCIE_MSI_FLAGS_REQUIRED,
	.supported_flags	= DW_PCIE_MSI_FLAGS_SUPPORTED,
	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
	.prefix			= "DW-",
	.init_dev_msi_info	= dw_pcie_init_dev_msi_info,
};
64 
65 /* MSI int handler */
66 void dw_handle_msi_irq(struct dw_pcie_rp *pp)
67 {
68 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
69 	unsigned int i, num_ctrls;
70 
71 	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
72 
73 	for (i = 0; i < num_ctrls; i++) {
74 		unsigned int reg_off = i * MSI_REG_CTRL_BLOCK_SIZE;
75 		unsigned int irq_off = i * MAX_MSI_IRQS_PER_CTRL;
76 		unsigned long status, pos;
77 
78 		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS + reg_off);
79 		if (!status)
80 			continue;
81 
82 		for_each_set_bit(pos, &status, MAX_MSI_IRQS_PER_CTRL)
83 			generic_handle_demux_domain_irq(pp->irq_domain, irq_off + pos);
84 	}
85 }
86 
/* Chained ISR: bracket the MSI demux with the parent chip's enter/exit */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct dw_pcie_rp *pp = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);
	dw_handle_msi_irq(pp);
	chained_irq_exit(chip, desc);
}
100 
101 static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
102 {
103 	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
104 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
105 	u64 msi_target = (u64)pp->msi_data;
106 
107 	msg->address_lo = lower_32_bits(msi_target);
108 	msg->address_hi = upper_32_bits(msi_target);
109 	msg->data = d->hwirq;
110 
111 	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
112 		(int)d->hwirq, msg->address_hi, msg->address_lo);
113 }
114 
115 static void dw_pci_bottom_mask(struct irq_data *d)
116 {
117 	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
118 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
119 	unsigned int res, bit, ctrl;
120 
121 	guard(raw_spinlock)(&pp->lock);
122 	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
123 	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
124 	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
125 
126 	pp->irq_mask[ctrl] |= BIT(bit);
127 	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);
128 }
129 
130 static void dw_pci_bottom_unmask(struct irq_data *d)
131 {
132 	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
133 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
134 	unsigned int res, bit, ctrl;
135 
136 	guard(raw_spinlock)(&pp->lock);
137 	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
138 	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
139 	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
140 
141 	pp->irq_mask[ctrl] &= ~BIT(bit);
142 	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);
143 }
144 
145 static void dw_pci_bottom_ack(struct irq_data *d)
146 {
147 	struct dw_pcie_rp *pp  = irq_data_get_irq_chip_data(d);
148 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
149 	unsigned int res, bit, ctrl;
150 
151 	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
152 	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
153 	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
154 
155 	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
156 }
157 
/*
 * Bottom-level chip for the DWC-internal MSI controller. On SMP the
 * status-register ack happens in ->irq_pre_redirect and ->irq_ack is a
 * no-op; on UP ->irq_ack performs the status-register write directly.
 */
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name			= "DWPCI-MSI",
	.irq_compose_msi_msg	= dw_pci_setup_msi_msg,
	.irq_mask		= dw_pci_bottom_mask,
	.irq_unmask		= dw_pci_bottom_unmask,
#ifdef CONFIG_SMP
	.irq_ack		= dw_irq_noop,
	.irq_pre_redirect	= dw_pci_bottom_ack,
	.irq_set_affinity	= irq_chip_redirect_set_affinity,
#else
	.irq_ack		= dw_pci_bottom_ack,
#endif
};
171 
172 static int dw_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
173 				    unsigned int nr_irqs, void *args)
174 {
175 	struct dw_pcie_rp *pp = domain->host_data;
176 	int bit;
177 
178 	scoped_guard (raw_spinlock_irq, &pp->lock) {
179 		bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
180 					      order_base_2(nr_irqs));
181 	}
182 
183 	if (bit < 0)
184 		return -ENOSPC;
185 
186 	for (unsigned int i = 0; i < nr_irqs; i++) {
187 		irq_domain_set_info(domain, virq + i, bit + i, pp->msi_irq_chip,
188 				    pp, handle_edge_irq, NULL, NULL);
189 	}
190 	return 0;
191 }
192 
193 static void dw_pcie_irq_domain_free(struct irq_domain *domain, unsigned int virq,
194 				    unsigned int nr_irqs)
195 {
196 	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
197 	struct dw_pcie_rp *pp = domain->host_data;
198 
199 	guard(raw_spinlock_irq)(&pp->lock);
200 	bitmap_release_region(pp->msi_irq_in_use, d->hwirq, order_base_2(nr_irqs));
201 }
202 
/* Hwirq alloc/free hooks for the DWC MSI IRQ domain */
static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};
207 
208 int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
209 {
210 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
211 	struct irq_domain_info info = {
212 		.fwnode		= dev_fwnode(pci->dev),
213 		.ops		= &dw_pcie_msi_domain_ops,
214 		.size		= pp->num_vectors,
215 		.host_data	= pp,
216 	};
217 
218 	pp->irq_domain = msi_create_parent_irq_domain(&info, &dw_pcie_msi_parent_ops);
219 	if (!pp->irq_domain) {
220 		dev_err(pci->dev, "Failed to create IRQ domain\n");
221 		return -ENOMEM;
222 	}
223 
224 	return 0;
225 }
226 EXPORT_SYMBOL_GPL(dw_pcie_allocate_domains);
227 
228 void dw_pcie_free_msi(struct dw_pcie_rp *pp)
229 {
230 	u32 ctrl;
231 
232 	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
233 		if (pp->msi_irq[ctrl] > 0)
234 			irq_set_chained_handler_and_data(pp->msi_irq[ctrl], NULL, NULL);
235 	}
236 
237 	irq_domain_remove(pp->irq_domain);
238 }
239 EXPORT_SYMBOL_GPL(dw_pcie_free_msi);
240 
241 void dw_pcie_msi_init(struct dw_pcie_rp *pp)
242 {
243 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
244 	u64 msi_target = (u64)pp->msi_data;
245 	u32 ctrl, num_ctrls;
246 
247 	if (!pci_msi_enabled() || !pp->has_msi_ctrl)
248 		return;
249 
250 	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
251 
252 	/* Initialize IRQ Status array */
253 	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
254 		dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
255 				    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
256 				    pp->irq_mask[ctrl]);
257 		dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
258 				    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
259 				    ~0);
260 	}
261 
262 	/* Program the msi_data */
263 	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
264 	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
265 }
266 EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
267 
268 static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
269 {
270 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
271 	struct device *dev = pci->dev;
272 	struct platform_device *pdev = to_platform_device(dev);
273 	u32 ctrl, max_vectors;
274 	int irq;
275 
276 	/* Parse any "msiX" IRQs described in the devicetree */
277 	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
278 		char msi_name[] = "msiX";
279 
280 		msi_name[3] = '0' + ctrl;
281 		irq = platform_get_irq_byname_optional(pdev, msi_name);
282 		if (irq == -ENXIO)
283 			break;
284 		if (irq < 0)
285 			return dev_err_probe(dev, irq,
286 					     "Failed to parse MSI IRQ '%s'\n",
287 					     msi_name);
288 
289 		pp->msi_irq[ctrl] = irq;
290 	}
291 
292 	/* If no "msiX" IRQs, caller should fallback to "msi" IRQ */
293 	if (ctrl == 0)
294 		return -ENXIO;
295 
296 	max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
297 	if (pp->num_vectors > max_vectors) {
298 		dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
299 			 max_vectors);
300 		pp->num_vectors = max_vectors;
301 	}
302 	if (!pp->num_vectors)
303 		pp->num_vectors = max_vectors;
304 
305 	return 0;
306 }
307 
/*
 * dw_pcie_msi_host_init() - Set up the DWC-internal MSI controller
 * @pp: Host bridge Root Port descriptor
 *
 * Parses the "msiX"/"msi" IRQs from the devicetree, creates the MSI IRQ
 * domain, installs the chained ISRs and allocates a DMA-coherent buffer
 * whose bus address serves as the MSI target, preferring one below 4 GiB.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u64 *msi_vaddr = NULL;
	int ret;
	u32 ctrl, num_ctrls;

	/* Start with every vector masked; unmasking is driven per-IRQ */
	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
		pp->irq_mask[ctrl] = ~0;

	/* Prefer the split per-controller "msiX" IRQs when described in DT */
	if (!pp->msi_irq[0]) {
		ret = dw_pcie_parse_split_msi_irq(pp);
		if (ret < 0 && ret != -ENXIO)
			return ret;
	}

	if (!pp->num_vectors)
		pp->num_vectors = MSI_DEF_NUM_VECTORS;
	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	/* Fall back to the single "msi" IRQ, then to the first IRQ */
	if (!pp->msi_irq[0]) {
		pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
		if (pp->msi_irq[0] < 0) {
			pp->msi_irq[0] = platform_get_irq(pdev, 0);
			if (pp->msi_irq[0] < 0)
				return pp->msi_irq[0];
		}
	}

	dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);

	pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

	ret = dw_pcie_allocate_domains(pp);
	if (ret)
		return ret;

	/* Hook each parsed IRQ up to the chained MSI demux handler */
	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
						    dw_chained_msi_isr, pp);
	}

	/*
	 * Even though the iMSI-RX Module supports 64-bit addresses some
	 * peripheral PCIe devices may lack 64-bit message support. In
	 * order not to miss MSI TLPs from those devices the MSI target
	 * address has to be within the lowest 4GB.
	 *
	 * Note until there is a better alternative found the reservation is
	 * done by allocating from the artificially limited DMA-coherent
	 * memory.
	 */
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (!ret)
		msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
						GFP_KERNEL);

	/* Retry with a 64-bit coherent mask before giving up entirely */
	if (!msi_vaddr) {
		dev_warn(dev, "Failed to allocate 32-bit MSI address\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
		msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
						GFP_KERNEL);
		if (!msi_vaddr) {
			dev_err(dev, "Failed to allocate MSI address\n");
			dw_pcie_free_msi(pp);
			return -ENOMEM;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_msi_host_init);
383 
384 static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
385 {
386 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
387 	struct resource_entry *win;
388 	struct resource *res;
389 
390 	win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
391 	if (win) {
392 		res = devm_kzalloc(pci->dev, sizeof(*res), GFP_KERNEL);
393 		if (!res)
394 			return;
395 
396 		/*
397 		 * Allocate MSG TLP region of size 'region_align' at the end of
398 		 * the host bridge window.
399 		 */
400 		res->start = win->res->end - pci->region_align + 1;
401 		res->end = win->res->end;
402 		res->name = "msg";
403 		res->flags = win->res->flags | IORESOURCE_BUSY;
404 
405 		if (!devm_request_resource(pci->dev, win->res, res))
406 			pp->msg_res = res;
407 	}
408 }
409 
/*
 * Program the outbound iATU for ECAM mode: the immediate child bus gets
 * a CFG0 window, all deeper buses share a CFG1 window; both use address
 * shift mode so the BDF is taken from the ECAM offset.
 */
static int dw_pcie_config_ecam_iatu(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = {0};
	resource_size_t bus_range_max;
	struct resource_entry *bus;
	int ret;

	/*
	 * NOTE(review): 'bus' is assumed non-NULL here — ECAM setup in
	 * dw_pcie_create_ecam_window() already verified the bus window
	 * exists. Confirm if this is ever called on another path.
	 */
	bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);

	/*
	 * Root bus under the host bridge doesn't require any iATU configuration
	 * as DBI region will be used to access root bus config space.
	 * Immediate bus under Root Bus, needs type 0 iATU configuration and
	 * remaining buses need type 1 iATU configuration.
	 */
	atu.index = 0;
	atu.type = PCIE_ATU_TYPE_CFG0;
	atu.parent_bus_addr = pp->cfg0_base + SZ_1M;
	/* 1MiB is to cover 1 (bus) * 32 (devices) * 8 (functions) */
	atu.size = SZ_1M;
	atu.ctrl2 = PCIE_ATU_CFG_SHIFT_MODE_ENABLE;
	ret = dw_pcie_prog_outbound_atu(pci, &atu);
	if (ret)
		return ret;

	bus_range_max = resource_size(bus->res);

	/* Only the root bus and one child bus: no CFG1 window needed */
	if (bus_range_max < 2)
		return 0;

	/* Configure remaining buses in type 1 iATU configuration */
	atu.index = 1;
	atu.type = PCIE_ATU_TYPE_CFG1;
	atu.parent_bus_addr = pp->cfg0_base + SZ_2M;
	atu.size = (SZ_1M * bus_range_max) - SZ_2M;
	atu.ctrl2 = PCIE_ATU_CFG_SHIFT_MODE_ENABLE;

	return dw_pcie_prog_outbound_atu(pci, &atu);
}
450 
451 static int dw_pcie_create_ecam_window(struct dw_pcie_rp *pp, struct resource *res)
452 {
453 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
454 	struct device *dev = pci->dev;
455 	struct resource_entry *bus;
456 
457 	bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
458 	if (!bus)
459 		return -ENODEV;
460 
461 	pp->cfg = pci_ecam_create(dev, res, bus->res, &pci_generic_ecam_ops);
462 	if (IS_ERR(pp->cfg))
463 		return PTR_ERR(pp->cfg);
464 
465 	return 0;
466 }
467 
468 static bool dw_pcie_ecam_enabled(struct dw_pcie_rp *pp, struct resource *config_res)
469 {
470 	struct resource *bus_range;
471 	u64 nr_buses;
472 
473 	/* Vendor glue drivers may implement their own ECAM mechanism */
474 	if (pp->native_ecam)
475 		return false;
476 
477 	/*
478 	 * PCIe spec r6.0, sec 7.2.2 mandates the base address used for ECAM to
479 	 * be aligned on a 2^(n+20) byte boundary, where n is the number of bits
480 	 * used for representing 'bus' in BDF. Since the DWC cores always use 8
481 	 * bits for representing 'bus', the base address has to be aligned to
482 	 * 2^28 byte boundary, which is 256 MiB.
483 	 */
484 	if (!IS_256MB_ALIGNED(config_res->start))
485 		return false;
486 
487 	bus_range = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
488 	if (!bus_range)
489 		return false;
490 
491 	nr_buses = resource_size(config_res) >> PCIE_ECAM_BUS_SHIFT;
492 
493 	return nr_buses >= resource_size(bus_range);
494 }
495 
/*
 * dw_pcie_host_get_resources() - Claim the Root Port memory resources
 * @pp: Host bridge Root Port descriptor
 *
 * Looks up the "config" region, selects between ECAM and iATU-based
 * config space accessors, acquires the common DWC resources and caches
 * the optional I/O window parameters.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (!res) {
		dev_err(dev, "Missing \"config\" reg space\n");
		return -ENODEV;
	}

	pp->cfg0_size = resource_size(res);
	pp->cfg0_base = res->start;

	pp->ecam_enabled = dw_pcie_ecam_enabled(pp, res);
	if (pp->ecam_enabled) {
		/* ECAM mode: root bus via DBI, child buses via the ECAM window */
		ret = dw_pcie_create_ecam_window(pp, res);
		if (ret)
			return ret;

		pp->bridge->ops = &dw_pcie_ecam_ops;
		pp->bridge->sysdata = pp->cfg;
		pp->cfg->priv = pp;
	} else {
		/* iATU mode: child config accesses go through the remapped window */
		pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
		if (IS_ERR(pp->va_cfg0_base))
			return PTR_ERR(pp->va_cfg0_base);

		/* Set default bus ops */
		pp->bridge->ops = &dw_pcie_ops;
		pp->bridge->child_ops = &dw_child_pcie_ops;
		pp->bridge->sysdata = pp;
	}

	ret = dw_pcie_get_resources(pci);
	if (ret) {
		if (pp->cfg)
			pci_ecam_free(pp->cfg);
		return ret;
	}

	/* Get the I/O range from DT */
	win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_IO);
	if (win) {
		pp->io_size = resource_size(win->res);
		pp->io_bus_addr = win->res->start - win->offset;
		pp->io_base = pci_pio_to_address(win->res->start);
	}

	/*
	 * visconti_pcie_cpu_addr_fixup() uses pp->io_base, so we have to
	 * call dw_pcie_parent_bus_offset() after setting pp->io_base.
	 */
	pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "config",
							   pp->cfg0_base);
	return 0;
}
557 
/*
 * dw_pcie_host_init() - Initialize the Root Port
 * @pp: Host bridge Root Port descriptor
 *
 * Allocates the host bridge, claims resources, sets up the internal MSI
 * controller when applicable, programs the RC registers and iATU windows,
 * starts the link and enumerates the bus.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct pci_host_bridge *bridge;
	int ret;

	raw_spin_lock_init(&pp->lock);

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	pp->bridge = bridge;

	ret = dw_pcie_host_get_resources(pp);
	if (ret)
		return ret;

	/* Platform glue init hook, if provided */
	if (pp->ops->init) {
		ret = pp->ops->init(pp);
		if (ret)
			goto err_free_ecam;
	}

	if (pci_msi_enabled()) {
		/*
		 * Use the DWC-internal MSI controller only when the glue
		 * driver has no msi_init hook and the DT doesn't route MSIs
		 * to an external controller.
		 */
		pp->has_msi_ctrl = !(pp->ops->msi_init ||
				     of_property_present(np, "msi-parent") ||
				     of_property_present(np, "msi-map"));

		/*
		 * For the has_msi_ctrl case the default assignment is handled
		 * in the dw_pcie_msi_host_init().
		 */
		if (!pp->has_msi_ctrl && !pp->num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else if (pp->num_vectors > MAX_MSI_IRQS) {
			dev_err(dev, "Invalid number of vectors\n");
			ret = -EINVAL;
			goto err_deinit_host;
		}

		if (pp->ops->msi_init) {
			ret = pp->ops->msi_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		} else if (pp->has_msi_ctrl) {
			ret = dw_pcie_msi_host_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		}
	}

	dw_pcie_version_detect(pci);

	dw_pcie_iatu_detect(pci);

	/* Derive num_lanes from the hardware when the glue didn't set it */
	if (pci->num_lanes < 1)
		pci->num_lanes = dw_pcie_link_get_max_link_width(pci);

	ret = of_pci_get_equalization_presets(dev, &pp->presets, pci->num_lanes);
	if (ret)
		goto err_free_msi;

	if (pp->ecam_enabled) {
		ret = dw_pcie_config_ecam_iatu(pp);
		if (ret) {
			dev_err(dev, "Failed to configure iATU in ECAM mode\n");
			goto err_free_msi;
		}
	}

	/*
	 * Allocate the resource for MSG TLP before programming the iATU
	 * outbound window in dw_pcie_setup_rc(). Since the allocation depends
	 * on the value of 'region_align', this has to be done after
	 * dw_pcie_iatu_detect().
	 *
	 * Glue drivers need to set 'use_atu_msg' before dw_pcie_host_init() to
	 * make use of the generic MSG TLP implementation.
	 */
	if (pp->use_atu_msg)
		dw_pcie_host_request_msg_tlp_res(pp);

	ret = dw_pcie_edma_detect(pci);
	if (ret)
		goto err_free_msi;

	ret = dw_pcie_setup_rc(pp);
	if (ret)
		goto err_remove_edma;

	if (!dw_pcie_link_up(pci)) {
		ret = dw_pcie_start_link(pci);
		if (ret)
			goto err_remove_edma;
	}

	/*
	 * Note: Skip the link up delay only when a Link Up IRQ is present.
	 * If there is no Link Up IRQ, we should not bypass the delay
	 * because that would require users to manually rescan for devices.
	 */
	if (!pp->use_linkup_irq)
		/* Ignore errors, the link may come up later */
		dw_pcie_wait_for_link(pci);

	ret = pci_host_probe(bridge);
	if (ret)
		goto err_stop_link;

	if (pp->ops->post_init)
		pp->ops->post_init(pp);

	dwc_pcie_debugfs_init(pci, DW_PCIE_RC_TYPE);

	return 0;

/* Error unwinding mirrors the setup order above */
err_stop_link:
	dw_pcie_stop_link(pci);

err_remove_edma:
	dw_pcie_edma_remove(pci);

err_free_msi:
	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

err_deinit_host:
	if (pp->ops->deinit)
		pp->ops->deinit(pp);

err_free_ecam:
	if (pp->cfg)
		pci_ecam_free(pp->cfg);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);
698 
/*
 * dw_pcie_host_deinit() - Tear down the Root Port
 * @pp: Host bridge Root Port descriptor
 *
 * Reverses dw_pcie_host_init(): removes the root bus, stops the link,
 * releases eDMA, MSI and ECAM resources and runs the glue deinit hook.
 * The teardown order mirrors the init order in reverse.
 */
void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dwc_pcie_debugfs_deinit(pci);

	pci_stop_root_bus(pp->bridge->bus);
	pci_remove_root_bus(pp->bridge->bus);

	dw_pcie_stop_link(pci);

	dw_pcie_edma_remove(pci);

	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

	if (pp->ops->deinit)
		pp->ops->deinit(pp);

	if (pp->cfg)
		pci_ecam_free(pp->cfg);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
722 
/*
 * Map a config access to a downstream device: (re)program outbound iATU
 * index 0 with a CFG0 (immediate child bus) or CFG1 (deeper bus)
 * translation towards the target BDF, then return the CPU virtual
 * address inside the shared "config" window. Returns NULL on failure.
 */
static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	int type, ret;
	u32 busdev;

	/*
	 * Checking whether the link is up here is a last line of defense
	 * against platforms that forward errors on the system bus as
	 * SError upon PCI configuration transactions issued when the link
	 * is down. This check is racy by definition and does not stop
	 * the system from triggering an SError if the link goes down
	 * after this check is performed.
	 */
	if (!dw_pcie_link_up(pci))
		return NULL;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	/* CFG0 for the bus directly below the Root Port, CFG1 otherwise */
	if (pci_is_root_bus(bus->parent))
		type = PCIE_ATU_TYPE_CFG0;
	else
		type = PCIE_ATU_TYPE_CFG1;

	atu.type = type;
	atu.parent_bus_addr = pp->cfg0_base - pci->parent_bus_offset;
	atu.pci_addr = busdev;
	atu.size = pp->cfg0_size;

	ret = dw_pcie_prog_outbound_atu(pci, &atu);
	if (ret)
		return NULL;

	return pp->va_cfg0_base + where;
}
762 
763 static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
764 				 int where, int size, u32 *val)
765 {
766 	struct dw_pcie_rp *pp = bus->sysdata;
767 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
768 	struct dw_pcie_ob_atu_cfg atu = { 0 };
769 	int ret;
770 
771 	ret = pci_generic_config_read(bus, devfn, where, size, val);
772 	if (ret != PCIBIOS_SUCCESSFUL)
773 		return ret;
774 
775 	if (pp->cfg0_io_shared) {
776 		atu.type = PCIE_ATU_TYPE_IO;
777 		atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
778 		atu.pci_addr = pp->io_bus_addr;
779 		atu.size = pp->io_size;
780 
781 		ret = dw_pcie_prog_outbound_atu(pci, &atu);
782 		if (ret)
783 			return PCIBIOS_SET_FAILED;
784 	}
785 
786 	return PCIBIOS_SUCCESSFUL;
787 }
788 
789 static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
790 				 int where, int size, u32 val)
791 {
792 	struct dw_pcie_rp *pp = bus->sysdata;
793 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
794 	struct dw_pcie_ob_atu_cfg atu = { 0 };
795 	int ret;
796 
797 	ret = pci_generic_config_write(bus, devfn, where, size, val);
798 	if (ret != PCIBIOS_SUCCESSFUL)
799 		return ret;
800 
801 	if (pp->cfg0_io_shared) {
802 		atu.type = PCIE_ATU_TYPE_IO;
803 		atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
804 		atu.pci_addr = pp->io_bus_addr;
805 		atu.size = pp->io_size;
806 
807 		ret = dw_pcie_prog_outbound_atu(pci, &atu);
808 		if (ret)
809 			return PCIBIOS_SET_FAILED;
810 	}
811 
812 	return PCIBIOS_SUCCESSFUL;
813 }
814 
/* Config accessors for buses below the root bus (iATU-translated) */
static struct pci_ops dw_child_pcie_ops = {
	.map_bus = dw_pcie_other_conf_map_bus,
	.read = dw_pcie_rd_other_conf,
	.write = dw_pcie_wr_other_conf,
};
820 
821 void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
822 {
823 	struct dw_pcie_rp *pp = bus->sysdata;
824 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
825 
826 	if (PCI_SLOT(devfn) > 0)
827 		return NULL;
828 
829 	return pci->dbi_base + where;
830 }
831 EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);
832 
833 static void __iomem *dw_pcie_ecam_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
834 {
835 	struct pci_config_window *cfg = bus->sysdata;
836 	struct dw_pcie_rp *pp = cfg->priv;
837 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
838 	unsigned int busn = bus->number;
839 
840 	if (busn > 0)
841 		return pci_ecam_map_bus(bus, devfn, where);
842 
843 	if (PCI_SLOT(devfn) > 0)
844 		return NULL;
845 
846 	return pci->dbi_base + where;
847 }
848 
849 static int dw_pcie_op_assert_perst(struct pci_bus *bus, bool assert)
850 {
851 	struct dw_pcie_rp *pp = bus->sysdata;
852 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
853 
854 	return dw_pcie_assert_perst(pci, assert);
855 }
856 
/* Config accessors for the root bus (DBI-backed) */
static struct pci_ops dw_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
	.assert_perst = dw_pcie_op_assert_perst,
};
863 
/* Config accessors used when the "config" region qualifies for ECAM */
static struct pci_ops dw_pcie_ecam_ops = {
	.map_bus = dw_pcie_ecam_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};
869 
/*
 * dw_pcie_iatu_setup() - Program outbound/inbound iATU windows
 * @pp: Host bridge Root Port descriptor
 *
 * Disables all windows, maps each bridge MEM window (and the I/O window,
 * if any) through the outbound iATU and each dma-range through the
 * inbound iATU. Outbound index 0 stays reserved for CFG accesses.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	struct resource_entry *entry;
	int i, ret;

	/* Note the very first outbound ATU is used for CFG IOs */
	if (!pci->num_ob_windows) {
		dev_err(pci->dev, "No outbound iATU found\n");
		return -EINVAL;
	}

	/*
	 * Ensure all out/inbound windows are disabled before proceeding with
	 * the MEM/IO (dma-)ranges setups.
	 */
	for (i = 0; i < pci->num_ob_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);

	for (i = 0; i < pci->num_ib_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i);

	/* Outbound MEM windows start at index 1 (index 0 is for CFG) */
	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->windows) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ob_windows <= ++i)
			break;

		atu.index = i;
		atu.type = PCIE_ATU_TYPE_MEM;
		atu.parent_bus_addr = entry->res->start - pci->parent_bus_offset;
		atu.pci_addr = entry->res->start - entry->offset;

		/* Adjust iATU size if MSG TLP region was allocated before */
		if (pp->msg_res && pp->msg_res->parent == entry->res)
			atu.size = resource_size(entry->res) -
					resource_size(pp->msg_res);
		else
			atu.size = resource_size(entry->res);

		ret = dw_pcie_prog_outbound_atu(pci, &atu);
		if (ret) {
			dev_err(pci->dev, "Failed to set MEM range %pr\n",
				entry->res);
			return ret;
		}
	}

	/* Map the I/O window if a dedicated window remains, else share CFG's */
	if (pp->io_size) {
		if (pci->num_ob_windows > ++i) {
			atu.index = i;
			atu.type = PCIE_ATU_TYPE_IO;
			atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
			atu.pci_addr = pp->io_bus_addr;
			atu.size = pp->io_size;

			ret = dw_pcie_prog_outbound_atu(pci, &atu);
			if (ret) {
				dev_err(pci->dev, "Failed to set IO range %pr\n",
					entry->res);
				return ret;
			}
		} else {
			pp->cfg0_io_shared = true;
		}
	}

	if (pci->num_ob_windows <= i)
		dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
			 pci->num_ob_windows);

	pp->msg_atu_index = i;

	/* Inbound windows for dma-ranges start from index 0 */
	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ib_windows <= i)
			break;

		ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM,
					       entry->res->start,
					       entry->res->start - entry->offset,
					       resource_size(entry->res));
		if (ret) {
			dev_err(pci->dev, "Failed to set DMA range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pci->num_ib_windows <= i)
		dev_warn(pci->dev, "Dma-ranges exceed inbound iATU size (%u)\n",
			 pci->num_ib_windows);

	return 0;
}
971 
972 static void dw_pcie_program_presets(struct dw_pcie_rp *pp, enum pci_bus_speed speed)
973 {
974 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
975 	u8 lane_eq_offset, lane_reg_size, cap_id;
976 	u8 *presets;
977 	u32 cap;
978 	int i;
979 
980 	if (speed == PCIE_SPEED_8_0GT) {
981 		presets = (u8 *)pp->presets.eq_presets_8gts;
982 		lane_eq_offset =  PCI_SECPCI_LE_CTRL;
983 		cap_id = PCI_EXT_CAP_ID_SECPCI;
984 		/* For data rate of 8 GT/S each lane equalization control is 16bits wide*/
985 		lane_reg_size = 0x2;
986 	} else if (speed == PCIE_SPEED_16_0GT) {
987 		presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_16GTS - 1];
988 		lane_eq_offset = PCI_PL_16GT_LE_CTRL;
989 		cap_id = PCI_EXT_CAP_ID_PL_16GT;
990 		lane_reg_size = 0x1;
991 	} else if (speed == PCIE_SPEED_32_0GT) {
992 		presets =  pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_32GTS - 1];
993 		lane_eq_offset = PCI_PL_32GT_LE_CTRL;
994 		cap_id = PCI_EXT_CAP_ID_PL_32GT;
995 		lane_reg_size = 0x1;
996 	} else if (speed == PCIE_SPEED_64_0GT) {
997 		presets =  pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_64GTS - 1];
998 		lane_eq_offset = PCI_PL_64GT_LE_CTRL;
999 		cap_id = PCI_EXT_CAP_ID_PL_64GT;
1000 		lane_reg_size = 0x1;
1001 	} else {
1002 		return;
1003 	}
1004 
1005 	if (presets[0] == PCI_EQ_RESV)
1006 		return;
1007 
1008 	cap = dw_pcie_find_ext_capability(pci, cap_id);
1009 	if (!cap)
1010 		return;
1011 
1012 	/*
1013 	 * Write preset values to the registers byte-by-byte for the given
1014 	 * number of lanes and register size.
1015 	 */
1016 	for (i = 0; i < pci->num_lanes * lane_reg_size; i++)
1017 		dw_pcie_writeb_dbi(pci, cap + lane_eq_offset + i, presets[i]);
1018 }
1019 
1020 static void dw_pcie_config_presets(struct dw_pcie_rp *pp)
1021 {
1022 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
1023 	enum pci_bus_speed speed = pcie_link_speed[pci->max_link_speed];
1024 
1025 	/*
1026 	 * Lane equalization settings need to be applied for all data rates the
1027 	 * controller supports and for all supported lanes.
1028 	 */
1029 
1030 	if (speed >= PCIE_SPEED_8_0GT)
1031 		dw_pcie_program_presets(pp, PCIE_SPEED_8_0GT);
1032 
1033 	if (speed >= PCIE_SPEED_16_0GT)
1034 		dw_pcie_program_presets(pp, PCIE_SPEED_16_0GT);
1035 
1036 	if (speed >= PCIE_SPEED_32_0GT)
1037 		dw_pcie_program_presets(pp, PCIE_SPEED_32_0GT);
1038 
1039 	if (speed >= PCIE_SPEED_64_0GT)
1040 		dw_pcie_program_presets(pp, PCIE_SPEED_64_0GT);
1041 }
1042 
/*
 * dw_pcie_setup_rc() - Program the Root Complex registers
 * @pp: Host bridge Root Port descriptor
 *
 * Performs the standard RC bring-up: core setup, internal MSI controller
 * init, RC BARs, interrupt pin, bus numbers, command register,
 * equalization presets and (unless the platform provides its own child
 * bus ops) the iATU windows. DBI read-only registers are unlocked for
 * the duration of this function.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u32 val;
	int ret;

	/*
	 * Enable DBI read-only registers for writing/updating configuration.
	 * Write permission gets disabled towards the end of this function.
	 */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_setup(pci);

	dw_pcie_msi_init(pp);

	/* Setup RC BARs (0x4 = PCI_BASE_ADDRESS_MEM_TYPE_64) */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins: Interrupt Pin register = 1 (INTA) */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Setup bus numbers: primary 0, secondary 1, subordinate 0xff */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	dw_pcie_hide_unsupported_l1ss(pci);

	dw_pcie_config_presets(pp);
	/*
	 * If the platform provides its own child bus config accesses, it means
	 * the platform uses its own address translation component rather than
	 * ATU, so we should not program the ATU here.
	 */
	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
		ret = dw_pcie_iatu_setup(pp);
		if (ret)
			return ret;
	}

	/* Clear the RC BAR0 */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Program correct class for RC */
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	/* Request a directed link speed change (PORT_LOGIC_SPEED_CHANGE) */
	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
1110 
1111 static int dw_pcie_pme_turn_off(struct dw_pcie *pci)
1112 {
1113 	struct dw_pcie_ob_atu_cfg atu = { 0 };
1114 	void __iomem *mem;
1115 	int ret;
1116 
1117 	if (pci->num_ob_windows <= pci->pp.msg_atu_index)
1118 		return -ENOSPC;
1119 
1120 	if (!pci->pp.msg_res)
1121 		return -ENOSPC;
1122 
1123 	atu.code = PCIE_MSG_CODE_PME_TURN_OFF;
1124 	atu.routing = PCIE_MSG_TYPE_R_BC;
1125 	atu.type = PCIE_ATU_TYPE_MSG;
1126 	atu.size = resource_size(pci->pp.msg_res);
1127 	atu.index = pci->pp.msg_atu_index;
1128 
1129 	atu.parent_bus_addr = pci->pp.msg_res->start - pci->parent_bus_offset;
1130 
1131 	ret = dw_pcie_prog_outbound_atu(pci, &atu);
1132 	if (ret)
1133 		return ret;
1134 
1135 	mem = ioremap(pci->pp.msg_res->start, pci->region_align);
1136 	if (!mem)
1137 		return -ENOMEM;
1138 
1139 	/* A dummy write is converted to a Msg TLP */
1140 	writel(0, mem);
1141 
1142 	iounmap(mem);
1143 
1144 	return 0;
1145 }
1146 
/**
 * dw_pcie_suspend_noirq - Put the PCIe link into L2 for system suspend
 * @pci: Pointer to the DWC PCIe device
 *
 * Skipped entirely when ASPM L1 is enabled on the link. Otherwise sends
 * PME_Turn_Off, waits for the LTSSM to report L2 entry (or link down),
 * stops the link and runs the platform deinit hook.
 *
 * Return: 0 on success or when nothing was done, negative errno on failure.
 */
int dw_pcie_suspend_noirq(struct dw_pcie *pci)
{
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int ret;

	/*
	 * If L1SS is supported, then do not put the link into L2 as some
	 * devices such as NVMe expect low resume latency.
	 * (Detected here via the ASPM L1 enable bit in Link Control.)
	 */
	if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
		return 0;

	/* Prefer the platform's PME_Turn_Off hook over the generic Msg TLP */
	if (pci->pp.ops->pme_turn_off) {
		pci->pp.ops->pme_turn_off(&pci->pp);
	} else {
		ret = dw_pcie_pme_turn_off(pci);
		if (ret)
			return ret;
	}

	/*
	 * Poll until the LTSSM reaches L2 idle, or drops back to
	 * DETECT/POLL (i.e. no endpoint responded — also a success case).
	 */
	ret = read_poll_timeout(dw_pcie_get_ltssm, val,
				val == DW_PCIE_LTSSM_L2_IDLE ||
				val <= DW_PCIE_LTSSM_DETECT_WAIT,
				PCIE_PME_TO_L2_TIMEOUT_US/10,
				PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
	if (ret) {
		/* Only log message when LTSSM isn't in DETECT or POLL */
		dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
		return ret;
	}

	/*
	 * Per PCIe r6.0, sec 5.3.3.2.1, software should wait at least
	 * 100ns after L2/L3 Ready before turning off refclock and
	 * main power. This is harmless when no endpoint is connected.
	 */
	udelay(1);

	dw_pcie_stop_link(pci);
	if (pci->pp.ops->deinit)
		pci->pp.ops->deinit(&pci->pp);

	/* Remembered so dw_pcie_resume_noirq() knows it must restore state */
	pci->suspended = true;

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_suspend_noirq);
1195 
1196 int dw_pcie_resume_noirq(struct dw_pcie *pci)
1197 {
1198 	int ret;
1199 
1200 	if (!pci->suspended)
1201 		return 0;
1202 
1203 	pci->suspended = false;
1204 
1205 	if (pci->pp.ops->init) {
1206 		ret = pci->pp.ops->init(&pci->pp);
1207 		if (ret) {
1208 			dev_err(pci->dev, "Host init failed: %d\n", ret);
1209 			return ret;
1210 		}
1211 	}
1212 
1213 	dw_pcie_setup_rc(&pci->pp);
1214 
1215 	ret = dw_pcie_start_link(pci);
1216 	if (ret)
1217 		return ret;
1218 
1219 	ret = dw_pcie_wait_for_link(pci);
1220 	if (ret)
1221 		return ret;
1222 
1223 	return ret;
1224 }
1225 EXPORT_SYMBOL_GPL(dw_pcie_resume_noirq);
1226