Lines matching "no-bar-match-nbits"
1 // SPDX-License-Identifier: GPL-2.0
4 // Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
13 #include "pcie-cadence.h"
33 struct cdns_pcie *pcie = &rc->pcie; in cdns_pci_map_bus()
34 unsigned int busn = bus->number; in cdns_pci_map_bus()
46 return pcie->reg_base + (where & 0xfff); in cdns_pci_map_bus()
51 /* Clear AXI link-down status */ in cdns_pci_map_bus()
67 if (busn == bridge->busnr + 1) in cdns_pci_map_bus()
73 return rc->cfg_base + (where & 0xfff); in cdns_pci_map_bus()
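Only the root-bus fast path and the shared return of the ECAM-like window are matched above. In between, cdns_pci_map_bus() reprograms outbound AXI region 0 for every downstream access: it encodes the target bus and devfn, then selects a Type 0 configuration request for the bus immediately below the root port (bridge->busnr + 1) and Type 1 for anything deeper. A sketch of those elided lines, with register and macro names assumed from pcie-cadence.h:

        /* Retarget outbound AXI region 0 at this bus/devfn. */
        addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
                CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
                CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busn);
        cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0);

        /* Type 0 config cycles for the secondary bus, Type 1 beyond it. */
        desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
                CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
        if (busn == bridge->busnr + 1)
                desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
        else
                desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;
        cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(0), desc0);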
100 return -ETIMEDOUT; in cdns_pcie_host_training_complete()
105 struct device *dev = pcie->dev; in cdns_pcie_host_wait_for_link()
117 return -ETIMEDOUT; in cdns_pcie_host_wait_for_link()
163 struct cdns_pcie *pcie = &rc->pcie; in cdns_pcie_host_start_link()
172 if (!ret && rc->quirk_retrain_flag) in cdns_pcie_host_start_link()
180 struct cdns_pcie *pcie = &rc->pcie; in cdns_pcie_host_init_root_port()
185 * Set the root complex BAR configuration register: in cdns_pcie_host_init_root_port()
186 * - disable both BAR0 and BAR1. in cdns_pcie_host_init_root_port()
187 * - enable Prefetchable Memory Base and Limit registers in type 1 in cdns_pcie_host_init_root_port()
189 * - enable IO Base and Limit registers in type 1 config in cdns_pcie_host_init_root_port()
202 if (rc->vendor_id != 0xffff) { in cdns_pcie_host_init_root_port()
203 id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) | in cdns_pcie_host_init_root_port()
204 CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id); in cdns_pcie_host_init_root_port()
208 if (rc->device_id != 0xffff) in cdns_pcie_host_init_root_port()
209 cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id); in cdns_pcie_host_init_root_port()
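The comment matched at lines 185-189 describes a register write that immediately follows but is not itself matched. A sketch of that write and of the DT ID override path, assuming the CDNS_PCIE_LM_* names from pcie-cadence.h:

        /* BAR0/BAR1 disabled; 64-bit prefetchable memory and 32-bit I/O
         * base/limit registers enabled in the type 1 config header. */
        value = CDNS_PCIE_LM_RC_BAR_CFG_BAR0_DISABLED |
                CDNS_PCIE_LM_RC_BAR_CFG_BAR1_DISABLED |
                CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
                CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
                CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
                CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS;
        cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);

        /* The optional "vendor-id" DT property feeds both the vendor and
         * subsystem vendor fields of the local management ID register
         * (composed at lines 203-204 above). */
        cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);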
219 enum cdns_pcie_rp_bar bar, in cdns_pcie_host_bar_ib_config() argument
223 struct cdns_pcie *pcie = &rc->pcie; in cdns_pcie_host_bar_ib_config()
226 if (!rc->avail_ib_bar[bar]) in cdns_pcie_host_bar_ib_config()
227 return -EBUSY; in cdns_pcie_host_bar_ib_config()
229 rc->avail_ib_bar[bar] = false; in cdns_pcie_host_bar_ib_config()
235 cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), addr0); in cdns_pcie_host_bar_ib_config()
236 cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), addr1); in cdns_pcie_host_bar_ib_config()
238 if (bar == RP_NO_BAR) in cdns_pcie_host_bar_ib_config()
242 value &= ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) | in cdns_pcie_host_bar_ib_config()
243 LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) | in cdns_pcie_host_bar_ib_config()
244 LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) | in cdns_pcie_host_bar_ib_config()
245 LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) | in cdns_pcie_host_bar_ib_config()
246 LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2)); in cdns_pcie_host_bar_ib_config()
249 value |= LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar); in cdns_pcie_host_bar_ib_config()
250 value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar); in cdns_pcie_host_bar_ib_config()
253 value |= LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar); in cdns_pcie_host_bar_ib_config()
254 value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar); in cdns_pcie_host_bar_ib_config()
257 value |= LM_RC_BAR_CFG_APERTURE(bar, aperture); in cdns_pcie_host_bar_ib_config()
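Between the availability check and the ADDR0/ADDR1 writes, the inbound window is encoded as an aperture (log2 of the window size) plus the CPU address split across the two registers. A sketch of those elided lines, with the bit layout assumed from pcie-cadence.h:

        aperture = ilog2(size);
        addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(aperture) |
                (lower_32_bits(cpu_addr) & GENMASK(31, 8));
        addr1 = upper_32_bits(cpu_addr);

For RP_NO_BAR (the catch-all window) the function returns right after the address writes, as line 238 shows; the LM_RC_BAR_CFG update below only applies to real BARs, picking 32-bit or 64-bit and prefetchable or non-prefetchable control bits from the resource flags.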
266 enum cdns_pcie_rp_bar bar, sel_bar; in cdns_pcie_host_find_min_bar() local
269 for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) { in cdns_pcie_host_find_min_bar()
270 if (!rc->avail_ib_bar[bar]) in cdns_pcie_host_find_min_bar()
273 if (size <= bar_max_size[bar]) { in cdns_pcie_host_find_min_bar()
275 sel_bar = bar; in cdns_pcie_host_find_min_bar()
279 if (bar_max_size[bar] < bar_max_size[sel_bar]) in cdns_pcie_host_find_min_bar()
280 sel_bar = bar; in cdns_pcie_host_find_min_bar()
290 enum cdns_pcie_rp_bar bar, sel_bar; in cdns_pcie_host_find_max_bar() local
293 for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) { in cdns_pcie_host_find_max_bar()
294 if (!rc->avail_ib_bar[bar]) in cdns_pcie_host_find_max_bar()
297 if (size >= bar_max_size[bar]) { in cdns_pcie_host_find_max_bar()
299 sel_bar = bar; in cdns_pcie_host_find_max_bar()
303 if (bar_max_size[bar] > bar_max_size[sel_bar]) in cdns_pcie_host_find_max_bar()
304 sel_bar = bar; in cdns_pcie_host_find_max_bar()
315 struct cdns_pcie *pcie = &rc->pcie; in cdns_pcie_host_bar_config()
316 struct device *dev = pcie->dev; in cdns_pcie_host_bar_config()
317 enum cdns_pcie_rp_bar bar; in cdns_pcie_host_bar_config() local
321 cpu_addr = entry->res->start; in cdns_pcie_host_bar_config()
322 pci_addr = entry->res->start - entry->offset; in cdns_pcie_host_bar_config()
323 flags = entry->res->flags; in cdns_pcie_host_bar_config()
324 size = resource_size(entry->res); in cdns_pcie_host_bar_config()
326 if (entry->offset) { in cdns_pcie_host_bar_config()
329 return -EINVAL; in cdns_pcie_host_bar_config()
334 * Try to find a minimum BAR whose size is greater than in cdns_pcie_host_bar_config()
338 * If a minimum BAR is found, IB ATU will be configured and in cdns_pcie_host_bar_config()
341 bar = cdns_pcie_host_find_min_bar(rc, size); in cdns_pcie_host_bar_config()
342 if (bar != RP_BAR_UNDEFINED) { in cdns_pcie_host_bar_config()
343 ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, in cdns_pcie_host_bar_config()
346 dev_err(dev, "IB BAR: %d config failed\n", bar); in cdns_pcie_host_bar_config()
352 * resource_entry size cannot be fitted in a single BAR. So we in cdns_pcie_host_bar_config()
353 * find a maximum BAR whose size is less than or equal to the in cdns_pcie_host_bar_config()
356 * BAR. The remaining size would be fitted during the next in cdns_pcie_host_bar_config()
358 * If a maximum BAR is not found, there is no way we can fit in cdns_pcie_host_bar_config()
361 bar = cdns_pcie_host_find_max_bar(rc, size); in cdns_pcie_host_bar_config()
362 if (bar == RP_BAR_UNDEFINED) { in cdns_pcie_host_bar_config()
363 dev_err(dev, "No free BAR to map cpu_addr %llx\n", in cdns_pcie_host_bar_config()
365 return -EINVAL; in cdns_pcie_host_bar_config()
368 winsize = bar_max_size[bar]; in cdns_pcie_host_bar_config()
369 ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, winsize, in cdns_pcie_host_bar_config()
372 dev_err(dev, "IB BAR: %d config failed\n", bar); in cdns_pcie_host_bar_config()
376 size -= winsize; in cdns_pcie_host_bar_config()
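Lines 326-376 sit inside a while (size > 0) loop: each pass either finishes via the best-fit path (cdns_pcie_host_find_min_bar) or carves off the largest window that still fits (cdns_pcie_host_find_max_bar) and retries with what is left. The loop tail that the search did not match presumably also advances the CPU address, roughly:

                size -= winsize;        /* matched at line 376 */
                cpu_addr += winsize;    /* continue with the uncovered tail */
        }                               /* while (size > 0) */

        return 0;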
391 return resource_size(entry2->res) - resource_size(entry1->res); in cdns_pcie_host_dma_ranges_cmp()
396 struct cdns_pcie *pcie = &rc->pcie; in cdns_pcie_host_map_dma_ranges()
397 struct device *dev = pcie->dev; in cdns_pcie_host_map_dma_ranges()
398 struct device_node *np = dev->of_node; in cdns_pcie_host_map_dma_ranges()
406 return -ENOMEM; in cdns_pcie_host_map_dma_ranges()
408 if (list_empty(&bridge->dma_ranges)) { in cdns_pcie_host_map_dma_ranges()
409 of_property_read_u32(np, "cdns,no-bar-match-nbits", in cdns_pcie_host_map_dma_ranges()
414 dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR); in cdns_pcie_host_map_dma_ranges()
418 list_sort(NULL, &bridge->dma_ranges, cdns_pcie_host_dma_ranges_cmp); in cdns_pcie_host_map_dma_ranges()
420 resource_list_for_each_entry(entry, &bridge->dma_ranges) { in cdns_pcie_host_map_dma_ranges()
423 dev_err(dev, "Fail to configure IB using dma-ranges\n"); in cdns_pcie_host_map_dma_ranges()
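This function is the only consumer of the searched property. When the device tree provides no dma-ranges, the driver opens a single catch-all inbound window (RP_NO_BAR) at PCI address 0 whose size is 2^no_bar_nbits bytes; no_bar_nbits defaults to 32 and can be overridden through "cdns,no-bar-match-nbits". A sketch of the surrounding lines (variable names assumed):

        u32 no_bar_nbits = 32;  /* default when the property is absent */
        int err;

        if (list_empty(&bridge->dma_ranges)) {
                of_property_read_u32(np, "cdns,no-bar-match-nbits",
                                     &no_bar_nbits);
                err = cdns_pcie_host_bar_ib_config(rc, RP_NO_BAR, 0x0,
                                                   (u64)1 << no_bar_nbits, 0);
                if (err)
                        dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR);
                return err;
        }

A board that wants more than 4 GB of inbound address space without listing dma-ranges could, for example, set cdns,no-bar-match-nbits = <48> in its DT node (illustrative value). When dma-ranges is present, the property is ignored: the ranges are sorted largest-first (lines 391 and 418) and mapped through the root port BARs instead.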
433 struct cdns_pcie *pcie = &rc->pcie; in cdns_pcie_host_init_address_translation()
435 struct resource *cfg_res = rc->cfg_res; in cdns_pcie_host_init_address_translation()
437 u64 cpu_addr = cfg_res->start; in cdns_pcie_host_init_address_translation()
441 entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); in cdns_pcie_host_init_address_translation()
443 busnr = entry->res->start; in cdns_pcie_host_init_address_translation()
455 if (pcie->ops->cpu_addr_fixup) in cdns_pcie_host_init_address_translation()
456 cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr); in cdns_pcie_host_init_address_translation()
465 resource_list_for_each_entry(entry, &bridge->windows) { in cdns_pcie_host_init_address_translation()
466 struct resource *res = entry->res; in cdns_pcie_host_init_address_translation()
467 u64 pci_addr = res->start - entry->offset; in cdns_pcie_host_init_address_translation()
472 pci_pio_to_address(res->start), in cdns_pcie_host_init_address_translation()
478 res->start, in cdns_pcie_host_init_address_translation()
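The two branches at lines 472 and 478 are calls into the outbound address translation helper: each bridge window gets its own OB region, with I/O windows translated through pci_pio_to_address() first. A sketch of the loop body, with the cdns_pcie_set_outbound_region() signature assumed from pcie-cadence.c:

                if (resource_type(res) == IORESOURCE_IO)
                        cdns_pcie_set_outbound_region(pcie, busnr, 0, r, true,
                                                      pci_pio_to_address(res->start),
                                                      pci_addr,
                                                      resource_size(res));
                else
                        cdns_pcie_set_outbound_region(pcie, busnr, 0, r, false,
                                                      res->start,
                                                      pci_addr,
                                                      resource_size(res));
                r++;

Region 0 is set up earlier in the function for configuration accesses (the cpu_addr taken from cfg_res and optionally run through ops->cpu_addr_fixup at lines 455-456), so the MEM/IO windows use subsequent region indices.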
501 struct cdns_pcie *pcie = &rc->pcie; in cdns_pcie_host_link_setup()
502 struct device *dev = rc->pcie.dev; in cdns_pcie_host_link_setup()
505 if (rc->quirk_detect_quiet_flag) in cdns_pcie_host_link_setup()
506 cdns_pcie_detect_quiet_min_delay_set(&rc->pcie); in cdns_pcie_host_link_setup()
525 struct device *dev = rc->pcie.dev; in cdns_pcie_host_setup()
527 struct device_node *np = dev->of_node; in cdns_pcie_host_setup()
529 enum cdns_pcie_rp_bar bar; in cdns_pcie_host_setup() local
536 return -ENOMEM; in cdns_pcie_host_setup()
538 pcie = &rc->pcie; in cdns_pcie_host_setup()
539 pcie->is_rc = true; in cdns_pcie_host_setup()
541 rc->vendor_id = 0xffff; in cdns_pcie_host_setup()
542 of_property_read_u32(np, "vendor-id", &rc->vendor_id); in cdns_pcie_host_setup()
544 rc->device_id = 0xffff; in cdns_pcie_host_setup()
545 of_property_read_u32(np, "device-id", &rc->device_id); in cdns_pcie_host_setup()
547 pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg"); in cdns_pcie_host_setup()
548 if (IS_ERR(pcie->reg_base)) { in cdns_pcie_host_setup()
550 return PTR_ERR(pcie->reg_base); in cdns_pcie_host_setup()
554 rc->cfg_base = devm_pci_remap_cfg_resource(dev, res); in cdns_pcie_host_setup()
555 if (IS_ERR(rc->cfg_base)) in cdns_pcie_host_setup()
556 return PTR_ERR(rc->cfg_base); in cdns_pcie_host_setup()
557 rc->cfg_res = res; in cdns_pcie_host_setup()
563 for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) in cdns_pcie_host_setup()
564 rc->avail_ib_bar[bar] = true; in cdns_pcie_host_setup()
570 if (!bridge->ops) in cdns_pcie_host_setup()
571 bridge->ops = &cdns_pcie_host_ops; in cdns_pcie_host_setup()
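After the bridge ops are installed, the function presumably finishes (in the version this listing comes from) by handing the bridge to the PCI core, which then enumerates the bus through cdns_pci_map_bus() above. Roughly:

        return pci_host_probe(bridge);

By this point all DT input (vendor-id, device-id, cdns,no-bar-match-nbits, dma-ranges) has been consumed, so a glue driver only needs to fill in struct cdns_pcie_rc and call cdns_pcie_host_setup().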