// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Xilinx Versal CPM DMA Bridge
 *
 * (C) Copyright 2019 - 2020, Xilinx, Inc.
 */

#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>

#include "../pci.h"
#include "pcie-xilinx-common.h"

/* Register definitions */
#define XILINX_CPM_PCIE_REG_IDR		0x00000E10
#define XILINX_CPM_PCIE_REG_IMR		0x00000E14
#define XILINX_CPM_PCIE_REG_PSCR	0x00000E1C
#define XILINX_CPM_PCIE_REG_RPSC	0x00000E20
#define XILINX_CPM_PCIE_REG_RPEFR	0x00000E2C
#define XILINX_CPM_PCIE_REG_IDRN	0x00000E38
#define XILINX_CPM_PCIE_REG_IDRN_MASK	0x00000E3C
#define XILINX_CPM_PCIE_MISC_IR_STATUS	0x00000340
#define XILINX_CPM_PCIE_MISC_IR_ENABLE	0x00000348
#define XILINX_CPM_PCIE0_MISC_IR_LOCAL	BIT(1)
#define XILINX_CPM_PCIE1_MISC_IR_LOCAL	BIT(2)

#define XILINX_CPM_PCIE0_IR_STATUS	0x000002A0
#define XILINX_CPM_PCIE1_IR_STATUS	0x000002B4
#define XILINX_CPM_PCIE0_IR_ENABLE	0x000002A8
#define XILINX_CPM_PCIE1_IR_ENABLE	0x000002BC
#define XILINX_CPM_PCIE_IR_LOCAL	BIT(0)

#define IMR(x) BIT(XILINX_PCIE_INTR_ ##x)

#define XILINX_CPM_PCIE_IMR_ALL_MASK	\
	(				\
		IMR(LINK_DOWN) |	\
		IMR(HOT_RESET) |	\
		IMR(CFG_PCIE_TIMEOUT) |	\
		IMR(CFG_TIMEOUT) |	\
		IMR(CORRECTABLE) |	\
		IMR(NONFATAL) |		\
		IMR(FATAL) |		\
		IMR(CFG_ERR_POISON) |	\
		IMR(PME_TO_ACK_RCVD) |	\
		IMR(INTX) |		\
		IMR(PM_PME_RCVD) |	\
		IMR(SLV_UNSUPP) |	\
		IMR(SLV_UNEXP) |	\
		IMR(SLV_COMPL) |	\
		IMR(SLV_ERRP) |		\
		IMR(SLV_CMPABT) |	\
		IMR(SLV_ILLBUR) |	\
		IMR(MST_DECERR) |	\
		IMR(MST_SLVERR) |	\
		IMR(SLV_PCIE_TIMEOUT)	\
	)

#define XILINX_CPM_PCIE_IDR_ALL_MASK	0xFFFFFFFF
#define XILINX_CPM_PCIE_IDRN_MASK	GENMASK(19, 16)
#define XILINX_CPM_PCIE_IDRN_SHIFT	16
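
/*
 * The four INTx lines are reported through bits [19:16] of IDRN (and
 * are gated by the same bit positions in IDRN_MASK), which is why the
 * INTx mask/unmask and demultiplex paths below combine IDRN_SHIFT with
 * the per-line hwirq number.
 */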

/* Root Port Error FIFO Read Register definitions */
#define XILINX_CPM_PCIE_RPEFR_ERR_VALID	BIT(18)
#define XILINX_CPM_PCIE_RPEFR_REQ_ID	GENMASK(15, 0)
#define XILINX_CPM_PCIE_RPEFR_ALL_MASK	0xFFFFFFFF

/* Root Port Status/Control Register definitions */
#define XILINX_CPM_PCIE_REG_RPSC_BEN	BIT(0)

/* Phy Status/Control Register definitions */
#define XILINX_CPM_PCIE_REG_PSCR_LNKUP	BIT(11)

enum xilinx_cpm_version {
	CPM,
	CPM5,
	CPM5_HOST1,
};

/**
 * struct xilinx_cpm_variant - CPM variant information
 * @version: CPM version
 * @ir_status: Offset for the error interrupt status register
 * @ir_enable: Offset for the CPM5 local error interrupt enable register
 * @ir_misc_value: A bitmask for the miscellaneous interrupt status
 */
struct xilinx_cpm_variant {
	enum xilinx_cpm_version version;
	u32 ir_status;
	u32 ir_enable;
	u32 ir_misc_value;
};

/**
 * struct xilinx_cpm_pcie - PCIe port information
 * @dev: Device pointer
 * @reg_base: Bridge Register Base
 * @cpm_base: CPM System Level Control and Status Register (SLCR) Base
 * @intx_domain: Legacy IRQ domain pointer
 * @cpm_domain: CPM IRQ domain pointer
 * @cfg: Holds mappings of config space window
 * @intx_irq: Legacy interrupt number
 * @irq: Error interrupt number
 * @lock: Lock protecting shared register access
 * @variant: CPM version check pointer
 */
struct xilinx_cpm_pcie {
	struct device *dev;
	void __iomem *reg_base;
	void __iomem *cpm_base;
	struct irq_domain *intx_domain;
	struct irq_domain *cpm_domain;
	struct pci_config_window *cfg;
	int intx_irq;
	int irq;
	raw_spinlock_t lock;
	const struct xilinx_cpm_variant *variant;
};

static u32 pcie_read(struct xilinx_cpm_pcie *port, u32 reg)
{
	return readl_relaxed(port->reg_base + reg);
}

static void pcie_write(struct xilinx_cpm_pcie *port,
		       u32 val, u32 reg)
{
	writel_relaxed(val, port->reg_base + reg);
}

static bool cpm_pcie_link_up(struct xilinx_cpm_pcie *port)
{
	return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) &
		XILINX_CPM_PCIE_REG_PSCR_LNKUP);
}

static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie *port)
{
	unsigned long val = pcie_read(port, XILINX_CPM_PCIE_REG_RPEFR);

	if (val & XILINX_CPM_PCIE_RPEFR_ERR_VALID) {
		dev_dbg(port->dev, "Requester ID %lu\n",
			val & XILINX_CPM_PCIE_RPEFR_REQ_ID);
		pcie_write(port, XILINX_CPM_PCIE_RPEFR_ALL_MASK,
			   XILINX_CPM_PCIE_REG_RPEFR);
	}
}

static void xilinx_cpm_mask_leg_irq(struct irq_data *data)
{
	struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask;
	u32 val;

	mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT);
	raw_spin_lock_irqsave(&port->lock, flags);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK);
	pcie_write(port, (val & (~mask)), XILINX_CPM_PCIE_REG_IDRN_MASK);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void xilinx_cpm_unmask_leg_irq(struct irq_data *data)
{
	struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask;
	u32 val;

	mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT);
	raw_spin_lock_irqsave(&port->lock, flags);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK);
	pcie_write(port, (val | mask), XILINX_CPM_PCIE_REG_IDRN_MASK);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip xilinx_cpm_leg_irq_chip = {
	.name = "INTx",
	.irq_mask = xilinx_cpm_mask_leg_irq,
	.irq_unmask = xilinx_cpm_unmask_leg_irq,
};
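
/*
 * Note: both irq_chips in this driver perform read-modify-write
 * updates on a shared mask register (IDRN_MASK above, IMR for the
 * event chip below), so the callbacks serialize against each other
 * with port->lock.
 */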

/**
 * xilinx_cpm_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
 * @domain: IRQ domain
 * @irq: Virtual IRQ number
 * @hwirq: HW interrupt number
 *
 * Return: Always returns 0.
 */
static int xilinx_cpm_pcie_intx_map(struct irq_domain *domain,
				    unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_cpm_leg_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

/* INTx IRQ Domain operations */
static const struct irq_domain_ops intx_domain_ops = {
	.map = xilinx_cpm_pcie_intx_map,
};

static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc)
{
	struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long val;
	int i;

	chained_irq_enter(chip, desc);

	val = FIELD_GET(XILINX_CPM_PCIE_IDRN_MASK,
			pcie_read(port, XILINX_CPM_PCIE_REG_IDRN));

	for_each_set_bit(i, &val, PCI_NUM_INTX)
		generic_handle_domain_irq(port->intx_domain, i);

	chained_irq_exit(chip, desc);
}

static void xilinx_cpm_mask_event_irq(struct irq_data *d)
{
	struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(d);
	u32 val;

	raw_spin_lock(&port->lock);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
	val &= ~BIT(d->hwirq);
	pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR);
	raw_spin_unlock(&port->lock);
}

static void xilinx_cpm_unmask_event_irq(struct irq_data *d)
{
	struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(d);
	u32 val;

	raw_spin_lock(&port->lock);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
	val |= BIT(d->hwirq);
	pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR);
	raw_spin_unlock(&port->lock);
}

static struct irq_chip xilinx_cpm_event_irq_chip = {
	.name = "RC-Event",
	.irq_mask = xilinx_cpm_mask_event_irq,
	.irq_unmask = xilinx_cpm_unmask_event_irq,
};

static int xilinx_cpm_pcie_event_map(struct irq_domain *domain,
				     unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_cpm_event_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);
	return 0;
}

static const struct irq_domain_ops event_domain_ops = {
	.map = xilinx_cpm_pcie_event_map,
};
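
/*
 * Chained handler for the main CPM interrupt: every cause bit that is
 * both pending (IDR) and enabled (IMR) is demultiplexed into the
 * matching virtual IRQ of cpm_domain, then acknowledged by writing it
 * back to IDR. Variant-specific local error status and the SLCR
 * miscellaneous status are cleared the same write-one-to-clear way.
 */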

static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
{
	struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	const struct xilinx_cpm_variant *variant = port->variant;
	unsigned long val;
	int i;

	chained_irq_enter(chip, desc);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IDR);
	val &= pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
	for_each_set_bit(i, &val, 32)
		generic_handle_domain_irq(port->cpm_domain, i);
	pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);

	if (variant->ir_status) {
		val = readl_relaxed(port->cpm_base + variant->ir_status);
		if (val)
			writel_relaxed(val, port->cpm_base +
					    variant->ir_status);
	}

	/*
	 * XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to
	 * CPM SLCR block.
	 */
	val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);
	if (val)
		writel_relaxed(val,
			       port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);

	chained_irq_exit(chip, desc);
}

#define _IC(x, s) \
	[XILINX_PCIE_INTR_ ## x] = { __stringify(x), s }

static const struct {
	const char *sym;
	const char *str;
} intr_cause[32] = {
	_IC(LINK_DOWN,		"Link Down"),
	_IC(HOT_RESET,		"Hot reset"),
	_IC(CFG_TIMEOUT,	"ECAM access timeout"),
	_IC(CORRECTABLE,	"Correctable error message"),
	_IC(NONFATAL,		"Non fatal error message"),
	_IC(FATAL,		"Fatal error message"),
	_IC(SLV_UNSUPP,		"Slave unsupported request"),
	_IC(SLV_UNEXP,		"Slave unexpected completion"),
	_IC(SLV_COMPL,		"Slave completion timeout"),
	_IC(SLV_ERRP,		"Slave Error Poison"),
	_IC(SLV_CMPABT,		"Slave Completer Abort"),
	_IC(SLV_ILLBUR,		"Slave Illegal Burst"),
	_IC(MST_DECERR,		"Master decode error"),
	_IC(MST_SLVERR,		"Master slave error"),
	_IC(CFG_PCIE_TIMEOUT,	"PCIe ECAM access timeout"),
	_IC(CFG_ERR_POISON,	"ECAM poisoned completion received"),
	_IC(PME_TO_ACK_RCVD,	"PME_TO_ACK message received"),
	_IC(PM_PME_RCVD,	"PM_PME message received"),
	_IC(SLV_PCIE_TIMEOUT,	"PCIe completion timeout received"),
};

static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *dev_id)
{
	struct xilinx_cpm_pcie *port = dev_id;
	struct device *dev = port->dev;
	struct irq_data *d;

	d = irq_domain_get_irq_data(port->cpm_domain, irq);

	switch (d->hwirq) {
	case XILINX_PCIE_INTR_CORRECTABLE:
	case XILINX_PCIE_INTR_NONFATAL:
	case XILINX_PCIE_INTR_FATAL:
		cpm_pcie_clear_err_interrupts(port);
		fallthrough;

	default:
		if (intr_cause[d->hwirq].str)
			dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
		else
			dev_warn(dev, "Unknown IRQ %ld\n", d->hwirq);
	}

	return IRQ_HANDLED;
}

static void xilinx_cpm_free_irq_domains(struct xilinx_cpm_pcie *port)
{
	if (port->intx_domain) {
		irq_domain_remove(port->intx_domain);
		port->intx_domain = NULL;
	}

	if (port->cpm_domain) {
		irq_domain_remove(port->cpm_domain);
		port->cpm_domain = NULL;
	}
}

/**
 * xilinx_cpm_pcie_init_irq_domain - Initialize IRQ domain
 * @port: PCIe port information
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;

	/* Setup INTx */
	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -EINVAL;
	}

	port->cpm_domain = irq_domain_add_linear(pcie_intc_node, 32,
						 &event_domain_ops,
						 port);
	if (!port->cpm_domain)
		goto out;

	irq_domain_update_bus_token(port->cpm_domain, DOMAIN_BUS_NEXUS);

	port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						  &intx_domain_ops,
						  port);
	if (!port->intx_domain)
		goto out;

	irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);

	of_node_put(pcie_intc_node);
	raw_spin_lock_init(&port->lock);

	return 0;
out:
	xilinx_cpm_free_irq_domains(port);
	of_node_put(pcie_intc_node);
	dev_err(dev, "Failed to allocate IRQ domains\n");

	return -ENOMEM;
}
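
/**
 * xilinx_cpm_setup_irq - Request event interrupts and plug chained handlers
 * @port: PCIe port information
 *
 * Return: '0' on success and error value on failure
 */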
static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int i, irq;

	port->irq = platform_get_irq(pdev, 0);
	if (port->irq < 0)
		return port->irq;

	for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
		int err;

		if (!intr_cause[i].str)
			continue;

		irq = irq_create_mapping(port->cpm_domain, i);
		if (!irq) {
			dev_err(dev, "Failed to map interrupt\n");
			return -ENXIO;
		}

		err = devm_request_irq(dev, irq, xilinx_cpm_pcie_intr_handler,
				       0, intr_cause[i].sym, port);
		if (err) {
			dev_err(dev, "Failed to request IRQ %d\n", irq);
			return err;
		}
	}

	port->intx_irq = irq_create_mapping(port->cpm_domain,
					    XILINX_PCIE_INTR_INTX);
	if (!port->intx_irq) {
		dev_err(dev, "Failed to map INTx interrupt\n");
		return -ENXIO;
	}

	/* Plug the INTx chained handler */
	irq_set_chained_handler_and_data(port->intx_irq,
					 xilinx_cpm_pcie_intx_flow, port);

	/* Plug the main event chained handler */
	irq_set_chained_handler_and_data(port->irq,
					 xilinx_cpm_pcie_event_flow, port);

	return 0;
}
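
/*
 * All event bits start out masked in IMR (see
 * xilinx_cpm_pcie_init_port() below); an individual event becomes
 * visible once its virtual IRQ is started, i.e. when devm_request_irq()
 * in xilinx_cpm_setup_irq() above reaches xilinx_cpm_unmask_event_irq()
 * and sets the corresponding IMR bit.
 */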

/**
 * xilinx_cpm_pcie_init_port - Initialize hardware
 * @port: PCIe port information
 */
static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
{
	const struct xilinx_cpm_variant *variant = port->variant;

	if (cpm_pcie_link_up(port))
		dev_info(port->dev, "PCIe Link is UP\n");
	else
		dev_info(port->dev, "PCIe Link is DOWN\n");

	/* Disable all interrupts */
	pcie_write(port, ~XILINX_CPM_PCIE_IDR_ALL_MASK,
		   XILINX_CPM_PCIE_REG_IMR);

	/* Clear pending interrupts */
	pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_IDR) &
		   XILINX_CPM_PCIE_IMR_ALL_MASK,
		   XILINX_CPM_PCIE_REG_IDR);

	/*
	 * XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to
	 * CPM SLCR block.
	 */
	writel(variant->ir_misc_value,
	       port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);

	if (variant->ir_enable)
		writel(XILINX_CPM_PCIE_IR_LOCAL,
		       port->cpm_base + variant->ir_enable);

	/* Set Bridge enable bit */
	pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
		   XILINX_CPM_PCIE_REG_RPSC_BEN,
		   XILINX_CPM_PCIE_REG_RPSC);
}
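
/*
 * The resources parsed below are looked up by reg-names. An
 * illustrative (not normative) fragment; see
 * Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml for the
 * actual binding:
 *
 *	reg-names = "cfg", "cpm_slcr";
 *
 * CPM5 hosts additionally provide a "cpm_csr" region for the bridge
 * registers; for CPM they are accessed through the ECAM window itself.
 */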

/**
 * xilinx_cpm_pcie_parse_dt - Parse Device tree
 * @port: PCIe port information
 * @bus_range: Bus resource
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port,
				    struct resource *bus_range)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	port->cpm_base = devm_platform_ioremap_resource_byname(pdev,
								"cpm_slcr");
	if (IS_ERR(port->cpm_base))
		return PTR_ERR(port->cpm_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	if (!res)
		return -ENXIO;

	port->cfg = pci_ecam_create(dev, res, bus_range,
				    &pci_generic_ecam_ops);
	if (IS_ERR(port->cfg))
		return PTR_ERR(port->cfg);

	if (port->variant->version == CPM5) {
		port->reg_base = devm_platform_ioremap_resource_byname(pdev,
								       "cpm_csr");
		if (IS_ERR(port->reg_base))
			return PTR_ERR(port->reg_base);
	} else {
		port->reg_base = port->cfg->win;
	}

	return 0;
}

static void xilinx_cpm_free_interrupts(struct xilinx_cpm_pcie *port)
{
	irq_set_chained_handler_and_data(port->intx_irq, NULL, NULL);
	irq_set_chained_handler_and_data(port->irq, NULL, NULL);
}

/**
 * xilinx_cpm_pcie_probe - Probe function
 * @pdev: Platform device pointer
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
{
	struct xilinx_cpm_pcie *port;
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *bridge;
	struct resource_entry *bus;
	int err;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
	if (!bridge)
		return -ENODEV;

	port = pci_host_bridge_priv(bridge);

	port->dev = dev;

	err = xilinx_cpm_pcie_init_irq_domain(port);
	if (err)
		return err;

	bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
	if (!bus) {
		err = -ENODEV;
		goto err_parse_dt;
	}

	port->variant = of_device_get_match_data(dev);

	err = xilinx_cpm_pcie_parse_dt(port, bus->res);
	if (err) {
		dev_err(dev, "Parsing DT failed\n");
		goto err_parse_dt;
	}

	xilinx_cpm_pcie_init_port(port);

	err = xilinx_cpm_setup_irq(port);
	if (err) {
		dev_err(dev, "Failed to set up interrupts\n");
		goto err_setup_irq;
	}

	bridge->sysdata = port->cfg;
	bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops;

	err = pci_host_probe(bridge);
	if (err < 0)
		goto err_host_bridge;

	return 0;

err_host_bridge:
	xilinx_cpm_free_interrupts(port);
err_setup_irq:
	pci_ecam_free(port->cfg);
err_parse_dt:
	xilinx_cpm_free_irq_domains(port);
	return err;
}

static const struct xilinx_cpm_variant cpm_host = {
	.version = CPM,
	.ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL,
};

static const struct xilinx_cpm_variant cpm5_host = {
	.version = CPM5,
	.ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL,
	.ir_status = XILINX_CPM_PCIE0_IR_STATUS,
	.ir_enable = XILINX_CPM_PCIE0_IR_ENABLE,
};

static const struct xilinx_cpm_variant cpm5_host1 = {
	.version = CPM5_HOST1,
	.ir_misc_value = XILINX_CPM_PCIE1_MISC_IR_LOCAL,
	.ir_status = XILINX_CPM_PCIE1_IR_STATUS,
	.ir_enable = XILINX_CPM_PCIE1_IR_ENABLE,
};

static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
	{
		.compatible = "xlnx,versal-cpm-host-1.00",
		.data = &cpm_host,
	},
	{
		.compatible = "xlnx,versal-cpm5-host",
		.data = &cpm5_host,
	},
	{
		.compatible = "xlnx,versal-cpm5-host1",
		.data = &cpm5_host1,
	},
	{}
};

static struct platform_driver xilinx_cpm_pcie_driver = {
	.driver = {
		.name = "xilinx-cpm-pcie",
		.of_match_table = xilinx_cpm_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = xilinx_cpm_pcie_probe,
};

builtin_platform_driver(xilinx_cpm_pcie_driver);