// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe host controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>

#include "pcie-cadence.h"

#define LINK_RETRAIN_TIMEOUT HZ

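/*
 * Maximum inbound window size each root-port BAR can decode, indexed by
 * enum cdns_pcie_rp_bar; RP_NO_BAR matches the whole 63-bit address space.
 */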
static u64 bar_max_size[] = {
	[RP_BAR0] = _ULL(128 * SZ_2G),
	[RP_BAR1] = SZ_2G,
	[RP_NO_BAR] = _BITULL(63),
};

static u8 bar_aperture_mask[] = {
	[RP_BAR0] = 0x1F,
	[RP_BAR1] = 0xF,
};

void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
			       int where)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
	struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
	struct cdns_pcie *pcie = &rc->pcie;
	unsigned int busn = bus->number;
	u32 addr0, desc0;

	if (pci_is_root_bus(bus)) {
		/*
		 * Only the root port (devfn == 0) is connected to this bus.
		 * All other PCI devices are behind some bridge hence on another
		 * bus.
		 */
		if (devfn)
			return NULL;

		return pcie->reg_base + (where & 0xfff);
	}
	/* Check that the link is up */
	if (!(cdns_pcie_readl(pcie, CDNS_PCIE_LM_BASE) & 0x1))
		return NULL;
	/* Clear AXI link-down status */
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_LINKDOWN, 0x0);

	/* Update Output registers for AXI region 0. */
	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0);

	/* Configuration Type 0 or Type 1 access. */
	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
		CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
	/*
	 * The bus number was already set once for all in desc1 by
	 * cdns_pcie_host_init_address_translation().
	 */
	if (busn == bridge->busnr + 1)
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
	else
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(0), desc0);

	return rc->cfg_base + (where & 0xfff);
}
EXPORT_SYMBOL_GPL(cdns_pci_map_bus);

static struct pci_ops cdns_pcie_host_ops = {
	.map_bus	= cdns_pci_map_bus,
	.read		= pci_generic_config_read,
	.write		= pci_generic_config_write,
};

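/*
 * Poll the Link Training bit in the root port's Link Status register until
 * training completes, giving up after LINK_RETRAIN_TIMEOUT.
 */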
static int cdns_pcie_host_training_complete(struct cdns_pcie *pcie)
{
	u32 pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
	unsigned long end_jiffies;
	u16 lnk_stat;

	/* Wait for link training to complete. Exit after timeout. */
	end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
	do {
		lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
		if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
			break;
		usleep_range(0, 1000);
	} while (time_before(jiffies, end_jiffies));

	if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
		return 0;

	return -ETIMEDOUT;
}

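/* Poll cdns_pcie_link_up() until the link comes up or the retries expire. */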
static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int retries;

	/* Check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (cdns_pcie_link_up(pcie)) {
			dev_info(dev, "Link up\n");
			return 0;
		}
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	return -ETIMEDOUT;
}

static int cdns_pcie_retrain(struct cdns_pcie *pcie)
{
	u32 lnk_cap_sls, pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
	u16 lnk_stat, lnk_ctl;
	int ret = 0;

	/*
	 * Set the Retrain Link bit if the current link speed is 2.5 GT/s,
	 * but the PCIe root port supports a speed greater than 2.5 GT/s.
	 */

	lnk_cap_sls = cdns_pcie_readl(pcie, (CDNS_PCIE_RP_BASE + pcie_cap_off +
					     PCI_EXP_LNKCAP));
	if ((lnk_cap_sls & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
		return ret;

	lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
	if ((lnk_stat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
		lnk_ctl = cdns_pcie_rp_readw(pcie,
					     pcie_cap_off + PCI_EXP_LNKCTL);
		lnk_ctl |= PCI_EXP_LNKCTL_RL;
		cdns_pcie_rp_writew(pcie, pcie_cap_off + PCI_EXP_LNKCTL,
				    lnk_ctl);

		ret = cdns_pcie_host_training_complete(pcie);
		if (ret)
			return ret;

		ret = cdns_pcie_host_wait_for_link(pcie);
	}
	return ret;
}

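/* Clear the PTM Responder Enable bit in the link-management PTM control register. */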
static void cdns_pcie_host_disable_ptm_response(struct cdns_pcie *pcie)
{
	u32 val;

	val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL);
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val & ~CDNS_PCIE_LM_TPM_CTRL_PTMRSEN);
}

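/* Set the PTM Responder Enable bit so the root port answers PTM requests. */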
static void cdns_pcie_host_enable_ptm_response(struct cdns_pcie *pcie)
{
	u32 val;

	val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL);
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val | CDNS_PCIE_LM_TPM_CTRL_PTMRSEN);
}

static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	int ret;

	ret = cdns_pcie_host_wait_for_link(pcie);

	/*
	 * Retrain the link to work around a Gen2 training defect
	 * if the quirk flag is set.
	 */
	if (!ret && rc->quirk_retrain_flag)
		ret = cdns_pcie_retrain(pcie);

	return ret;
}

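/*
 * Undo cdns_pcie_host_init_root_port(): restore the class code and the
 * vendor/device IDs to their all-ones values and clear the RC BAR
 * configuration programmed at init time.
 */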
static void cdns_pcie_host_deinit_root_port(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	u32 value, ctrl;

	cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, 0xffff);
	cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0xff);
	cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0xff);
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, 0xffffffff);
	cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, 0xffff);
	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
	value = ~(CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
		  CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
		  CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
		  CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
		  CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
		  CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS);
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
}

static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	u32 value, ctrl;
	u32 id;

	/*
	 * Set the root complex BAR configuration register:
	 * - disable both BAR0 and BAR1.
	 * - enable Prefetchable Memory Base and Limit registers in type 1
	 *   config space (64 bits).
	 * - enable IO Base and Limit registers in type 1 config
	 *   space (32 bits).
	 */
	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
	value = CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
		CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
		CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
		CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
		CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
		CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS;
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);

	/* Set root port configuration space */
	if (rc->vendor_id != 0xffff) {
		id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) |
			CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id);
		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	if (rc->device_id != 0xffff)
		cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);

	cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0);
	cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0);
	cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	return 0;
}

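/*
 * Claim the given root-port BAR and program its inbound address translation
 * so that a window of @size is mapped at @cpu_addr for inbound (DMA)
 * accesses.
 */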
static int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc,
					enum cdns_pcie_rp_bar bar,
					u64 cpu_addr, u64 size,
					unsigned long flags)
{
	struct cdns_pcie *pcie = &rc->pcie;
	u32 addr0, addr1, aperture, value;

	if (!rc->avail_ib_bar[bar])
		return -EBUSY;

	rc->avail_ib_bar[bar] = false;

	aperture = ilog2(size);
	addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(aperture) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), addr1);

	if (bar == RP_NO_BAR)
		return 0;

	value = cdns_pcie_readl(pcie, CDNS_PCIE_LM_RC_BAR_CFG);
	value &= ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
		   LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2));
	if (size + cpu_addr >= SZ_4G) {
		if (!(flags & IORESOURCE_PREFETCH))
			value |= LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar);
		value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar);
	} else {
		if (!(flags & IORESOURCE_PREFETCH))
			value |= LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar);
		value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar);
	}

	value |= LM_RC_BAR_CFG_APERTURE(bar, aperture);
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);

	return 0;
}

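/* Best fit: the smallest still-available BAR that can hold @size. */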
static enum cdns_pcie_rp_bar
cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc, u64 size)
{
	enum cdns_pcie_rp_bar bar, sel_bar;

	sel_bar = RP_BAR_UNDEFINED;
	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
		if (!rc->avail_ib_bar[bar])
			continue;

		if (size <= bar_max_size[bar]) {
			if (sel_bar == RP_BAR_UNDEFINED) {
				sel_bar = bar;
				continue;
			}

			if (bar_max_size[bar] < bar_max_size[sel_bar])
				sel_bar = bar;
		}
	}

	return sel_bar;
}

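/*
 * Largest still-available BAR that does not exceed @size; used when a range
 * is too big for any single BAR and must be split.
 */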
static enum cdns_pcie_rp_bar
cdns_pcie_host_find_max_bar(struct cdns_pcie_rc *rc, u64 size)
{
	enum cdns_pcie_rp_bar bar, sel_bar;

	sel_bar = RP_BAR_UNDEFINED;
	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
		if (!rc->avail_ib_bar[bar])
			continue;

		if (size >= bar_max_size[bar]) {
			if (sel_bar == RP_BAR_UNDEFINED) {
				sel_bar = bar;
				continue;
			}

			if (bar_max_size[bar] > bar_max_size[sel_bar])
				sel_bar = bar;
		}
	}

	return sel_bar;
}

static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
				     struct resource_entry *entry)
{
	u64 cpu_addr, pci_addr, size, winsize;
	struct cdns_pcie *pcie = &rc->pcie;
	struct device *dev = pcie->dev;
	enum cdns_pcie_rp_bar bar;
	unsigned long flags;
	int ret;

	cpu_addr = entry->res->start;
	pci_addr = entry->res->start - entry->offset;
	flags = entry->res->flags;
	size = resource_size(entry->res);

	if (entry->offset) {
		dev_err(dev, "PCI addr: %llx must be equal to CPU addr: %llx\n",
			pci_addr, cpu_addr);
		return -EINVAL;
	}

	while (size > 0) {
		/*
		 * Try to find the smallest BAR whose size is greater than
		 * or equal to the remaining resource_entry size. This fails
		 * when every available BAR is smaller than the remaining
		 * size. If such a BAR is found, configure the inbound ATU
		 * for it and return.
		 */
		bar = cdns_pcie_host_find_min_bar(rc, size);
		if (bar != RP_BAR_UNDEFINED) {
			ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr,
							   size, flags);
			if (ret)
				dev_err(dev, "IB BAR: %d config failed\n", bar);
			return ret;
		}

		/*
		 * Reaching here means the remaining resource_entry size
		 * cannot fit in a single BAR, so find the largest BAR whose
		 * size is less than or equal to the remaining size and map
		 * part of the entry into it; the remainder is handled in the
		 * next loop iteration. If no such BAR is found, there is no
		 * way to fit this resource_entry, so error out.
		 */
		bar = cdns_pcie_host_find_max_bar(rc, size);
		if (bar == RP_BAR_UNDEFINED) {
			dev_err(dev, "No free BAR to map cpu_addr %llx\n",
				cpu_addr);
			return -EINVAL;
		}

		winsize = bar_max_size[bar];
		ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, winsize,
						   flags);
		if (ret) {
			dev_err(dev, "IB BAR: %d config failed\n", bar);
			return ret;
		}

		size -= winsize;
		cpu_addr += winsize;
	}

	return 0;
}

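/* list_sort() comparator: order dma-ranges entries by decreasing size. */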
static int cdns_pcie_host_dma_ranges_cmp(void *priv, const struct list_head *a,
					 const struct list_head *b)
{
	struct resource_entry *entry1, *entry2;

	entry1 = container_of(a, struct resource_entry, node);
	entry2 = container_of(b, struct resource_entry, node);

	return resource_size(entry2->res) - resource_size(entry1->res);
}

static void cdns_pcie_host_unmap_dma_ranges(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	enum cdns_pcie_rp_bar bar;
	u32 value;

	/* Reset inbound configuration for all BARs which were being used */
	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
		if (rc->avail_ib_bar[bar])
			continue;

		cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), 0);
		cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), 0);

		if (bar == RP_NO_BAR)
			continue;

		value = ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
			  LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
			  LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
			  LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
			  LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2));
		cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
	}
}

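/*
 * Map the bridge's dma-ranges through the inbound BARs, largest entry first.
 * Without dma-ranges, open a single RP_NO_BAR window (2^32 by default,
 * overridable via the "cdns,no-bar-match-nbits" DT property).
 */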
static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;
	struct pci_host_bridge *bridge;
	struct resource_entry *entry;
	u32 no_bar_nbits = 32;
	int err;

	bridge = pci_host_bridge_from_priv(rc);
	if (!bridge)
		return -ENOMEM;

	if (list_empty(&bridge->dma_ranges)) {
		of_property_read_u32(np, "cdns,no-bar-match-nbits",
				     &no_bar_nbits);
		err = cdns_pcie_host_bar_ib_config(rc, RP_NO_BAR, 0x0,
						   (u64)1 << no_bar_nbits, 0);
		if (err)
			dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR);
		return err;
	}

	list_sort(NULL, &bridge->dma_ranges, cdns_pcie_host_dma_ranges_cmp);

	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
		err = cdns_pcie_host_bar_config(rc, entry);
		if (err) {
			dev_err(dev, "Failed to configure IB using dma-ranges\n");
			return err;
		}
	}

	return 0;
}

static void cdns_pcie_host_deinit_address_translation(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
	struct resource_entry *entry;
	int r;

	cdns_pcie_host_unmap_dma_ranges(rc);

	/*
	 * Reset outbound region 0 which was reserved for configuration space
	 * accesses.
	 */
	cdns_pcie_reset_outbound_region(pcie, 0);

	/* Reset rest of the outbound regions */
	r = 1;
	resource_list_for_each_entry(entry, &bridge->windows) {
		cdns_pcie_reset_outbound_region(pcie, r);
		r++;
	}
}

static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
	struct resource *cfg_res = rc->cfg_res;
	struct resource_entry *entry;
	u64 cpu_addr = cfg_res->start;
	u32 addr0, addr1, desc1;
	int r, busnr = 0;

	entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
	if (entry)
		busnr = entry->res->start;

	/*
	 * Reserve region 0 for PCI configuration space accesses:
	 * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by
	 * cdns_pci_map_bus(); the other region registers are set here, once
	 * for all accesses.
	 */
	addr1 = 0; /* Should be programmed to zero. */
	desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1);

	if (pcie->ops->cpu_addr_fixup)
		cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);

	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(0), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(0), addr1);

	r = 1;
	resource_list_for_each_entry(entry, &bridge->windows) {
		struct resource *res = entry->res;
		u64 pci_addr = res->start - entry->offset;

		if (resource_type(res) == IORESOURCE_IO)
			cdns_pcie_set_outbound_region(pcie, busnr, 0, r,
						      true,
						      pci_pio_to_address(res->start),
						      pci_addr,
						      resource_size(res));
		else
			cdns_pcie_set_outbound_region(pcie, busnr, 0, r,
						      false,
						      res->start,
						      pci_addr,
						      resource_size(res));

		r++;
	}

	return cdns_pcie_host_map_dma_ranges(rc);
}

static void cdns_pcie_host_deinit(struct cdns_pcie_rc *rc)
{
	cdns_pcie_host_deinit_address_translation(rc);
	cdns_pcie_host_deinit_root_port(rc);
}

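/* Program the root port registers and the inbound/outbound translation. */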
int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
{
	int err;

	err = cdns_pcie_host_init_root_port(rc);
	if (err)
		return err;

	return cdns_pcie_host_init_address_translation(rc);
}
EXPORT_SYMBOL_GPL(cdns_pcie_host_init);

static void cdns_pcie_host_link_disable(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;

	cdns_pcie_stop_link(pcie);
	cdns_pcie_host_disable_ptm_response(pcie);
}

int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	struct device *dev = rc->pcie.dev;
	int ret;

	if (rc->quirk_detect_quiet_flag)
		cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);

	cdns_pcie_host_enable_ptm_response(pcie);

	ret = cdns_pcie_start_link(pcie);
	if (ret) {
		dev_err(dev, "Failed to start link\n");
		return ret;
	}

	ret = cdns_pcie_host_start_link(rc);
	if (ret)
		dev_dbg(dev, "PCIe link never came up\n");

	return 0;
}
EXPORT_SYMBOL_GPL(cdns_pcie_host_link_setup);

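/* Stop and remove the root bus, then undo host init and link setup. */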
void cdns_pcie_host_disable(struct cdns_pcie_rc *rc)
{
	struct pci_host_bridge *bridge;

	bridge = pci_host_bridge_from_priv(rc);
	pci_stop_root_bus(bridge->bus);
	pci_remove_root_bus(bridge->bus);

	cdns_pcie_host_deinit(rc);
	cdns_pcie_host_link_disable(rc);
}
EXPORT_SYMBOL_GPL(cdns_pcie_host_disable);

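/*
 * Main probe-time entry point: map the "reg" and "cfg" resources, bring up
 * the link, initialize the root port and register the host bridge.
 */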
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
	struct device *dev = rc->pcie.dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;
	struct pci_host_bridge *bridge;
	enum cdns_pcie_rp_bar bar;
	struct cdns_pcie *pcie;
	struct resource *res;
	int ret;

	bridge = pci_host_bridge_from_priv(rc);
	if (!bridge)
		return -ENOMEM;

	pcie = &rc->pcie;
	pcie->is_rc = true;

	rc->vendor_id = 0xffff;
	of_property_read_u32(np, "vendor-id", &rc->vendor_id);

	rc->device_id = 0xffff;
	of_property_read_u32(np, "device-id", &rc->device_id);

	pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	rc->cfg_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(rc->cfg_base))
		return PTR_ERR(rc->cfg_base);
	rc->cfg_res = res;

	ret = cdns_pcie_host_link_setup(rc);
	if (ret)
		return ret;

	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
		rc->avail_ib_bar[bar] = true;

	ret = cdns_pcie_host_init(rc);
	if (ret)
		return ret;

	if (!bridge->ops)
		bridge->ops = &cdns_pcie_host_ops;

	return pci_host_probe(bridge);
}
EXPORT_SYMBOL_GPL(cdns_pcie_host_setup);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence PCIe host controller driver");
MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");