// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Xilinx Versal CPM DMA Bridge
 *
 * (C) Copyright 2019 - 2020, Xilinx, Inc.
 */

#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>

#include "../pci.h"
#include "pcie-xilinx-common.h"

/* Register definitions */
#define XILINX_CPM_PCIE_REG_IDR		0x00000E10
#define XILINX_CPM_PCIE_REG_IMR		0x00000E14
#define XILINX_CPM_PCIE_REG_PSCR	0x00000E1C
#define XILINX_CPM_PCIE_REG_RPSC	0x00000E20
#define XILINX_CPM_PCIE_REG_RPEFR	0x00000E2C
#define XILINX_CPM_PCIE_REG_IDRN	0x00000E38
#define XILINX_CPM_PCIE_REG_IDRN_MASK	0x00000E3C
#define XILINX_CPM_PCIE_MISC_IR_STATUS	0x00000340
#define XILINX_CPM_PCIE_MISC_IR_ENABLE	0x00000348
#define XILINX_CPM_PCIE0_MISC_IR_LOCAL	BIT(1)
#define XILINX_CPM_PCIE1_MISC_IR_LOCAL	BIT(2)

#define XILINX_CPM_PCIE0_IR_STATUS	0x000002A0
#define XILINX_CPM_PCIE1_IR_STATUS	0x000002B4
#define XILINX_CPM_PCIE0_IR_ENABLE	0x000002A8
#define XILINX_CPM_PCIE1_IR_ENABLE	0x000002BC
#define XILINX_CPM_PCIE_IR_LOCAL	BIT(0)

#define IMR(x) BIT(XILINX_PCIE_INTR_ ##x)

#define XILINX_CPM_PCIE_IMR_ALL_MASK			\
	(						\
		IMR(LINK_DOWN)		|		\
		IMR(HOT_RESET)		|		\
		IMR(CFG_PCIE_TIMEOUT)	|		\
		IMR(CFG_TIMEOUT)	|		\
		IMR(CORRECTABLE)	|		\
		IMR(NONFATAL)		|		\
		IMR(FATAL)		|		\
		IMR(CFG_ERR_POISON)	|		\
		IMR(PME_TO_ACK_RCVD)	|		\
		IMR(INTX)		|		\
		IMR(PM_PME_RCVD)	|		\
		IMR(SLV_UNSUPP)		|		\
		IMR(SLV_UNEXP)		|		\
		IMR(SLV_COMPL)		|		\
		IMR(SLV_ERRP)		|		\
		IMR(SLV_CMPABT)		|		\
		IMR(SLV_ILLBUR)		|		\
		IMR(MST_DECERR)		|		\
		IMR(MST_SLVERR)		|		\
		IMR(SLV_PCIE_TIMEOUT)			\
	)

#define XILINX_CPM_PCIE_IDR_ALL_MASK	0xFFFFFFFF
#define XILINX_CPM_PCIE_IDRN_MASK	GENMASK(19, 16)
#define XILINX_CPM_PCIE_IDRN_SHIFT	16

/* Root Port Error FIFO Read Register definitions */
#define XILINX_CPM_PCIE_RPEFR_ERR_VALID	BIT(18)
#define XILINX_CPM_PCIE_RPEFR_REQ_ID	GENMASK(15, 0)
#define XILINX_CPM_PCIE_RPEFR_ALL_MASK	0xFFFFFFFF

/* Root Port Status/Control Register definitions */
#define XILINX_CPM_PCIE_REG_RPSC_BEN	BIT(0)

/* PHY Status/Control Register definitions */
#define XILINX_CPM_PCIE_REG_PSCR_LNKUP	BIT(11)

enum xilinx_cpm_version {
	CPM,
	CPM5,
	CPM5_HOST1,
	CPM5NC_HOST,
};

/**
 * struct xilinx_cpm_variant - CPM variant information
 * @version: CPM version
 * @ir_status: Offset for the error interrupt status register
 * @ir_enable: Offset for the CPM5 local error interrupt enable register
 * @ir_misc_value: A bitmask for the miscellaneous interrupt status
 */
struct xilinx_cpm_variant {
	enum xilinx_cpm_version version;
	u32 ir_status;
	u32 ir_enable;
	u32 ir_misc_value;
};

/**
 * struct xilinx_cpm_pcie - PCIe port information
 * @dev: Device pointer
 * @reg_base: Bridge Register Base
 * @cpm_base: CPM System Level Control and Status Register (SLCR) Base
 * @intx_domain: Legacy IRQ domain pointer
 * @cpm_domain: CPM IRQ domain pointer
 * @cfg: Holds mappings of config space window
 * @intx_irq: Legacy interrupt number
 * @irq: Error interrupt number
 * @lock: Lock protecting shared register access
 * @variant: Pointer to CPM variant information
 */
struct xilinx_cpm_pcie {
	struct device			*dev;
	void __iomem			*reg_base;
	void __iomem			*cpm_base;
	struct irq_domain		*intx_domain;
	struct irq_domain		*cpm_domain;
	struct pci_config_window	*cfg;
	int				intx_irq;
	int				irq;
	raw_spinlock_t			lock;
	const struct xilinx_cpm_variant	*variant;
};

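/* Bridge register accessors; all offsets are relative to reg_base */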
static u32 pcie_read(struct xilinx_cpm_pcie *port, u32 reg)
{
	return readl_relaxed(port->reg_base + reg);
}

static void pcie_write(struct xilinx_cpm_pcie *port,
		       u32 val, u32 reg)
{
	writel_relaxed(val, port->reg_base + reg);
}

static bool cpm_pcie_link_up(struct xilinx_cpm_pcie *port)
{
	return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) &
		XILINX_CPM_PCIE_REG_PSCR_LNKUP);
}

static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie *port)
{
	unsigned long val = pcie_read(port, XILINX_CPM_PCIE_REG_RPEFR);

	if (val & XILINX_CPM_PCIE_RPEFR_ERR_VALID) {
		dev_dbg(port->dev, "Requester ID %lu\n",
			val & XILINX_CPM_PCIE_RPEFR_REQ_ID);
		pcie_write(port, XILINX_CPM_PCIE_RPEFR_ALL_MASK,
			   XILINX_CPM_PCIE_REG_RPEFR);
	}
}

static void xilinx_cpm_mask_leg_irq(struct irq_data *data)
{
	struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask;
	u32 val;

	mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT);
	raw_spin_lock_irqsave(&port->lock, flags);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK);
	pcie_write(port, (val & (~mask)), XILINX_CPM_PCIE_REG_IDRN_MASK);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void xilinx_cpm_unmask_leg_irq(struct irq_data *data)
{
	struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask;
	u32 val;

	mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT);
	raw_spin_lock_irqsave(&port->lock, flags);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK);
	pcie_write(port, (val | mask), XILINX_CPM_PCIE_REG_IDRN_MASK);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip xilinx_cpm_leg_irq_chip = {
	.name		= "INTx",
	.irq_mask	= xilinx_cpm_mask_leg_irq,
	.irq_unmask	= xilinx_cpm_unmask_leg_irq,
};

/**
 * xilinx_cpm_pcie_intx_map - Set the handler for the INTx IRQ and mark it valid
 * @domain: IRQ domain
 * @irq: Virtual IRQ number
 * @hwirq: HW interrupt number
 *
 * Return: Always returns 0.
 */
static int xilinx_cpm_pcie_intx_map(struct irq_domain *domain,
				    unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_cpm_leg_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

/* INTx IRQ Domain operations */
static const struct irq_domain_ops intx_domain_ops = {
	.map = xilinx_cpm_pcie_intx_map,
};

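/*
 * Chained handler for the INTx event: each INTx line asserted in IDRN is
 * forwarded to the corresponding interrupt in intx_domain.
 */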
static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc)
{
	struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long val;
	int i;

	chained_irq_enter(chip, desc);

	val = FIELD_GET(XILINX_CPM_PCIE_IDRN_MASK,
			pcie_read(port, XILINX_CPM_PCIE_REG_IDRN));

	for_each_set_bit(i, &val, PCI_NUM_INTX)
		generic_handle_domain_irq(port->intx_domain, i);

	chained_irq_exit(chip, desc);
}

static void xilinx_cpm_mask_event_irq(struct irq_data *d)
{
	struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(d);
	u32 val;

	raw_spin_lock(&port->lock);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
	val &= ~BIT(d->hwirq);
	pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR);
	raw_spin_unlock(&port->lock);
}

static void xilinx_cpm_unmask_event_irq(struct irq_data *d)
{
	struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(d);
	u32 val;

	raw_spin_lock(&port->lock);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
	val |= BIT(d->hwirq);
	pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR);
	raw_spin_unlock(&port->lock);
}

static struct irq_chip xilinx_cpm_event_irq_chip = {
	.name		= "RC-Event",
	.irq_mask	= xilinx_cpm_mask_event_irq,
	.irq_unmask	= xilinx_cpm_unmask_event_irq,
};

static int xilinx_cpm_pcie_event_map(struct irq_domain *domain,
				     unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_cpm_event_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);
	return 0;
}

static const struct irq_domain_ops event_domain_ops = {
	.map = xilinx_cpm_pcie_event_map,
};

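/*
 * Chained handler for the core event interrupt: pending events (IDR) that
 * are currently enabled (IMR) are dispatched through cpm_domain, then
 * acknowledged by writing them back to IDR. Variants with a local error
 * interrupt register (ir_status) have that status cleared here as well.
 */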
static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
{
	struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	const struct xilinx_cpm_variant *variant = port->variant;
	unsigned long val;
	int i;

	chained_irq_enter(chip, desc);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IDR);
	val &= pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
	for_each_set_bit(i, &val, 32)
		generic_handle_domain_irq(port->cpm_domain, i);
	pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);

	if (variant->ir_status) {
		val = readl_relaxed(port->cpm_base + variant->ir_status);
		if (val)
			writel_relaxed(val, port->cpm_base +
					    variant->ir_status);
	}

	/*
	 * XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to
	 * CPM SLCR block.
	 */
	val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);
	if (val)
		writel_relaxed(val,
			       port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);

	chained_irq_exit(chip, desc);
}

#define _IC(x, s) \
	[XILINX_PCIE_INTR_ ## x] = { __stringify(x), s }

static const struct {
	const char	*sym;
	const char	*str;
} intr_cause[32] = {
	_IC(LINK_DOWN,		"Link Down"),
	_IC(HOT_RESET,		"Hot reset"),
	_IC(CFG_TIMEOUT,	"ECAM access timeout"),
	_IC(CORRECTABLE,	"Correctable error message"),
	_IC(NONFATAL,		"Non fatal error message"),
	_IC(FATAL,		"Fatal error message"),
	_IC(SLV_UNSUPP,		"Slave unsupported request"),
	_IC(SLV_UNEXP,		"Slave unexpected completion"),
	_IC(SLV_COMPL,		"Slave completion timeout"),
	_IC(SLV_ERRP,		"Slave Error Poison"),
	_IC(SLV_CMPABT,		"Slave Completer Abort"),
	_IC(SLV_ILLBUR,		"Slave Illegal Burst"),
	_IC(MST_DECERR,		"Master decode error"),
	_IC(MST_SLVERR,		"Master slave error"),
	_IC(CFG_PCIE_TIMEOUT,	"PCIe ECAM access timeout"),
	_IC(CFG_ERR_POISON,	"ECAM poisoned completion received"),
	_IC(PME_TO_ACK_RCVD,	"PME_TO_ACK message received"),
	_IC(PM_PME_RCVD,	"PM_PME message received"),
	_IC(SLV_PCIE_TIMEOUT,	"PCIe completion timeout received"),
};

static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *dev_id)
{
	struct xilinx_cpm_pcie *port = dev_id;
	struct device *dev = port->dev;
	struct irq_data *d;

	d = irq_domain_get_irq_data(port->cpm_domain, irq);

	switch (d->hwirq) {
	case XILINX_PCIE_INTR_CORRECTABLE:
	case XILINX_PCIE_INTR_NONFATAL:
	case XILINX_PCIE_INTR_FATAL:
		cpm_pcie_clear_err_interrupts(port);
		fallthrough;

	default:
		if (intr_cause[d->hwirq].str)
			dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
		else
			dev_warn(dev, "Unknown IRQ %ld\n", d->hwirq);
	}

	return IRQ_HANDLED;
}

static void xilinx_cpm_free_irq_domains(struct xilinx_cpm_pcie *port)
{
	if (port->intx_domain) {
		irq_domain_remove(port->intx_domain);
		port->intx_domain = NULL;
	}

	if (port->cpm_domain) {
		irq_domain_remove(port->cpm_domain);
		port->cpm_domain = NULL;
	}
}

/**
 * xilinx_cpm_pcie_init_irq_domain - Initialize IRQ domain
 * @port: PCIe port information
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;

	/* Setup INTx */
	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -EINVAL;
	}

	port->cpm_domain = irq_domain_add_linear(pcie_intc_node, 32,
						 &event_domain_ops,
						 port);
	if (!port->cpm_domain)
		goto out;

	irq_domain_update_bus_token(port->cpm_domain, DOMAIN_BUS_NEXUS);

	port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						  &intx_domain_ops,
						  port);
	if (!port->intx_domain)
		goto out;

	irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);

	of_node_put(pcie_intc_node);
	raw_spin_lock_init(&port->lock);

	return 0;
out:
	xilinx_cpm_free_irq_domains(port);
	of_node_put(pcie_intc_node);
	dev_err(dev, "Failed to allocate IRQ domains\n");

	return -ENOMEM;
}

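/*
 * The controller exposes a single platform IRQ for all events. It is
 * demultiplexed through cpm_domain; the INTX event is in turn chained
 * into intx_domain for the four legacy PCI interrupts.
 */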
static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int i, irq;

	port->irq = platform_get_irq(pdev, 0);
	if (port->irq < 0)
		return port->irq;

	for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
		int err;

		if (!intr_cause[i].str)
			continue;

		irq = irq_create_mapping(port->cpm_domain, i);
		if (!irq) {
			dev_err(dev, "Failed to map interrupt\n");
			return -ENXIO;
		}

		err = devm_request_irq(dev, irq, xilinx_cpm_pcie_intr_handler,
				       0, intr_cause[i].sym, port);
		if (err) {
			dev_err(dev, "Failed to request IRQ %d\n", irq);
			return err;
		}
	}

	port->intx_irq = irq_create_mapping(port->cpm_domain,
					    XILINX_PCIE_INTR_INTX);
	if (!port->intx_irq) {
		dev_err(dev, "Failed to map INTx interrupt\n");
		return -ENXIO;
	}

	/* Plug the INTx chained handler */
	irq_set_chained_handler_and_data(port->intx_irq,
					 xilinx_cpm_pcie_intx_flow, port);

	/* Plug the main event chained handler */
	irq_set_chained_handler_and_data(port->irq,
					 xilinx_cpm_pcie_event_flow, port);

	return 0;
}

/**
 * xilinx_cpm_pcie_init_port - Initialize hardware
 * @port: PCIe port information
 */
static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
{
	const struct xilinx_cpm_variant *variant = port->variant;

	if (variant->version == CPM5NC_HOST)
		return;

	if (cpm_pcie_link_up(port))
		dev_info(port->dev, "PCIe Link is UP\n");
	else
		dev_info(port->dev, "PCIe Link is DOWN\n");

	/* Disable all interrupts */
	pcie_write(port, ~XILINX_CPM_PCIE_IDR_ALL_MASK,
		   XILINX_CPM_PCIE_REG_IMR);

	/* Clear pending interrupts */
	pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_IDR) &
		   XILINX_CPM_PCIE_IMR_ALL_MASK,
		   XILINX_CPM_PCIE_REG_IDR);

	/*
	 * XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to
	 * CPM SLCR block.
	 */
	writel(variant->ir_misc_value,
	       port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);

	if (variant->ir_enable)
		writel(XILINX_CPM_PCIE_IR_LOCAL,
		       port->cpm_base + variant->ir_enable);

	/* Set Bridge enable bit */
	pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
		   XILINX_CPM_PCIE_REG_RPSC_BEN,
		   XILINX_CPM_PCIE_REG_RPSC);
}

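/*
 * Resources are looked up by name: "cpm_slcr" for the SLCR block, "cfg"
 * for the ECAM window and, on CPM5-based variants, "cpm_csr" for the
 * bridge registers (other variants find them inside the ECAM window).
 */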
/**
 * xilinx_cpm_pcie_parse_dt - Parse the device tree
 * @port: PCIe port information
 * @bus_range: Bus resource
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port,
				    struct resource *bus_range)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	port->cpm_base = devm_platform_ioremap_resource_byname(pdev,
								"cpm_slcr");
	if (IS_ERR(port->cpm_base))
		return PTR_ERR(port->cpm_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	if (!res)
		return -ENXIO;

	port->cfg = pci_ecam_create(dev, res, bus_range,
				    &pci_generic_ecam_ops);
	if (IS_ERR(port->cfg))
		return PTR_ERR(port->cfg);

	if (port->variant->version == CPM5 ||
	    port->variant->version == CPM5_HOST1) {
		port->reg_base = devm_platform_ioremap_resource_byname(pdev,
								       "cpm_csr");
		if (IS_ERR(port->reg_base))
			return PTR_ERR(port->reg_base);
	} else {
		port->reg_base = port->cfg->win;
	}

	return 0;
}

static void xilinx_cpm_free_interrupts(struct xilinx_cpm_pcie *port)
{
	irq_set_chained_handler_and_data(port->intx_irq, NULL, NULL);
	irq_set_chained_handler_and_data(port->irq, NULL, NULL);
}

/**
 * xilinx_cpm_pcie_probe - Probe function
 * @pdev: Platform device pointer
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
{
	struct xilinx_cpm_pcie *port;
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *bridge;
	struct resource_entry *bus;
	int err;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
	if (!bridge)
		return -ENODEV;

	port = pci_host_bridge_priv(bridge);

	port->dev = dev;

	port->variant = of_device_get_match_data(dev);

	if (port->variant->version != CPM5NC_HOST) {
		err = xilinx_cpm_pcie_init_irq_domain(port);
		if (err)
			return err;
	}

	bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
	if (!bus) {
		err = -ENODEV;
		goto err_free_irq_domains;
	}

	err = xilinx_cpm_pcie_parse_dt(port, bus->res);
	if (err) {
		dev_err(dev, "Parsing DT failed\n");
		goto err_free_irq_domains;
	}

	xilinx_cpm_pcie_init_port(port);

	if (port->variant->version != CPM5NC_HOST) {
		err = xilinx_cpm_setup_irq(port);
		if (err) {
			dev_err(dev, "Failed to set up interrupts\n");
			goto err_setup_irq;
		}
	}

	bridge->sysdata = port->cfg;
	bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops;

	err = pci_host_probe(bridge);
	if (err < 0)
		goto err_host_bridge;

	return 0;

err_host_bridge:
	if (port->variant->version != CPM5NC_HOST)
		xilinx_cpm_free_interrupts(port);
err_setup_irq:
	pci_ecam_free(port->cfg);
err_free_irq_domains:
	if (port->variant->version != CPM5NC_HOST)
		xilinx_cpm_free_irq_domains(port);
	return err;
}

static const struct xilinx_cpm_variant cpm_host = {
	.version = CPM,
	.ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL,
};

static const struct xilinx_cpm_variant cpm5_host = {
	.version = CPM5,
	.ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL,
	.ir_status = XILINX_CPM_PCIE0_IR_STATUS,
	.ir_enable = XILINX_CPM_PCIE0_IR_ENABLE,
};

static const struct xilinx_cpm_variant cpm5_host1 = {
	.version = CPM5_HOST1,
	.ir_misc_value = XILINX_CPM_PCIE1_MISC_IR_LOCAL,
	.ir_status = XILINX_CPM_PCIE1_IR_STATUS,
	.ir_enable = XILINX_CPM_PCIE1_IR_ENABLE,
};

static const struct xilinx_cpm_variant cpm5n_host = {
	.version = CPM5NC_HOST,
};

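/*
 * Illustrative device tree fragment only (placeholder addresses; see the
 * xlnx,versal-cpm binding for the authoritative schema). Note the
 * interrupt controller child node, which xilinx_cpm_pcie_init_irq_domain()
 * requires:
 *
 *	pcie@fca10000 {
 *		compatible = "xlnx,versal-cpm-host-1.00";
 *		reg = <0 0xfca10000 0 0x1000>, <0 0xe0000000 0 0x10000000>;
 *		reg-names = "cpm_slcr", "cfg";
 *		...
 *		pcie_intc_0: interrupt-controller {
 *			#interrupt-cells = <1>;
 *			interrupt-controller;
 *		};
 *	};
 */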
static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
	{
		.compatible = "xlnx,versal-cpm-host-1.00",
		.data = &cpm_host,
	},
	{
		.compatible = "xlnx,versal-cpm5-host",
		.data = &cpm5_host,
	},
	{
		.compatible = "xlnx,versal-cpm5-host1",
		.data = &cpm5_host1,
	},
	{
		.compatible = "xlnx,versal-cpm5nc-host",
		.data = &cpm5n_host,
	},
	{}
};

static struct platform_driver xilinx_cpm_pcie_driver = {
	.driver = {
		.name = "xilinx-cpm-pcie",
		.of_match_table = xilinx_cpm_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = xilinx_cpm_pcie_probe,
};

builtin_platform_driver(xilinx_cpm_pcie_driver);