// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Xilinx Versal CPM DMA Bridge
 *
 * (C) Copyright 2019 - 2020, Xilinx, Inc.
 */

#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>

#include "../pci.h"
#include "pcie-xilinx-common.h"

/* Register definitions */
#define XILINX_CPM_PCIE_REG_IDR		0x00000E10
#define XILINX_CPM_PCIE_REG_IMR		0x00000E14
#define XILINX_CPM_PCIE_REG_PSCR	0x00000E1C
#define XILINX_CPM_PCIE_REG_RPSC	0x00000E20
#define XILINX_CPM_PCIE_REG_RPEFR	0x00000E2C
#define XILINX_CPM_PCIE_REG_IDRN	0x00000E38
#define XILINX_CPM_PCIE_REG_IDRN_MASK	0x00000E3C
#define XILINX_CPM_PCIE_MISC_IR_STATUS	0x00000340
#define XILINX_CPM_PCIE_MISC_IR_ENABLE	0x00000348
#define XILINX_CPM_PCIE_MISC_IR_LOCAL	BIT(1)

#define XILINX_CPM_PCIE_IR_STATUS	0x000002A0
#define XILINX_CPM_PCIE_IR_ENABLE	0x000002A8
#define XILINX_CPM_PCIE_IR_LOCAL	BIT(0)

#define IMR(x) BIT(XILINX_PCIE_INTR_ ##x)

#define XILINX_CPM_PCIE_IMR_ALL_MASK		\
	(					\
		IMR(LINK_DOWN)		|	\
		IMR(HOT_RESET)		|	\
		IMR(CFG_PCIE_TIMEOUT)	|	\
		IMR(CFG_TIMEOUT)	|	\
		IMR(CORRECTABLE)	|	\
		IMR(NONFATAL)		|	\
		IMR(FATAL)		|	\
		IMR(CFG_ERR_POISON)	|	\
		IMR(PME_TO_ACK_RCVD)	|	\
		IMR(INTX)		|	\
		IMR(PM_PME_RCVD)	|	\
		IMR(SLV_UNSUPP)		|	\
		IMR(SLV_UNEXP)		|	\
		IMR(SLV_COMPL)		|	\
		IMR(SLV_ERRP)		|	\
		IMR(SLV_CMPABT)		|	\
		IMR(SLV_ILLBUR)		|	\
		IMR(MST_DECERR)		|	\
		IMR(MST_SLVERR)		|	\
		IMR(SLV_PCIE_TIMEOUT)		\
	)

#define XILINX_CPM_PCIE_IDR_ALL_MASK	0xFFFFFFFF
#define XILINX_CPM_PCIE_IDRN_MASK	GENMASK(19, 16)
#define XILINX_CPM_PCIE_IDRN_SHIFT	16

/* Root Port Error FIFO Read Register definitions */
#define XILINX_CPM_PCIE_RPEFR_ERR_VALID	BIT(18)
#define XILINX_CPM_PCIE_RPEFR_REQ_ID	GENMASK(15, 0)
#define XILINX_CPM_PCIE_RPEFR_ALL_MASK	0xFFFFFFFF

/* Root Port Status/Control Register definitions */
#define XILINX_CPM_PCIE_REG_RPSC_BEN	BIT(0)

/* PHY Status/Control Register definitions */
#define XILINX_CPM_PCIE_REG_PSCR_LNKUP	BIT(11)

enum xilinx_cpm_version {
	CPM,
	CPM5,
};

/**
 * struct xilinx_cpm_variant - CPM variant information
 * @version: CPM version
 */
struct xilinx_cpm_variant {
	enum xilinx_cpm_version version;
};

/**
 * struct xilinx_cpm_pcie - PCIe port information
 * @dev: Device pointer
 * @reg_base: Bridge register base
 * @cpm_base: CPM System Level Control and Status Register (SLCR) base
 * @intx_domain: Legacy IRQ domain pointer
 * @cpm_domain: CPM IRQ domain pointer
 * @cfg: Holds mappings of config space window
 * @intx_irq: Legacy interrupt number
 * @irq: Error interrupt number
 * @lock: Lock protecting shared register access
 * @variant: CPM version check pointer
 */
struct xilinx_cpm_pcie {
	struct device			*dev;
	void __iomem			*reg_base;
	void __iomem			*cpm_base;
	struct irq_domain		*intx_domain;
	struct irq_domain		*cpm_domain;
	struct pci_config_window	*cfg;
	int				intx_irq;
	int				irq;
	raw_spinlock_t			lock;
	const struct xilinx_cpm_variant	*variant;
};

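/*
 * Bridge register accessors. Note that reg_base points into the ECAM window
 * itself on CPM, but into the separate "cpm_csr" region on CPM5 (see
 * xilinx_cpm_pcie_parse_dt()). The relaxed MMIO accessors are used
 * throughout; they skip the DMA-ordering barriers of readl()/writel().
 */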
static u32 pcie_read(struct xilinx_cpm_pcie *port, u32 reg)
{
	return readl_relaxed(port->reg_base + reg);
}

static void pcie_write(struct xilinx_cpm_pcie *port,
		       u32 val, u32 reg)
{
	writel_relaxed(val, port->reg_base + reg);
}

static bool cpm_pcie_link_up(struct xilinx_cpm_pcie *port)
{
	return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) &
		XILINX_CPM_PCIE_REG_PSCR_LNKUP);
}

static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie *port)
{
	unsigned long val = pcie_read(port, XILINX_CPM_PCIE_REG_RPEFR);

	if (val & XILINX_CPM_PCIE_RPEFR_ERR_VALID) {
		dev_dbg(port->dev, "Requester ID %lu\n",
			val & XILINX_CPM_PCIE_RPEFR_REQ_ID);
		pcie_write(port, XILINX_CPM_PCIE_RPEFR_ALL_MASK,
			   XILINX_CPM_PCIE_REG_RPEFR);
	}
}

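/*
 * The four INTA..INTD enable bits live in IDRN_MASK[19:16] (hence
 * XILINX_CPM_PCIE_IDRN_SHIFT), so masking or unmasking a single INTx line
 * is a read-modify-write of that register, serialized by port->lock.
 */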
static void xilinx_cpm_mask_leg_irq(struct irq_data *data)
{
	struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask;
	u32 val;

	mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT);
	raw_spin_lock_irqsave(&port->lock, flags);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK);
	pcie_write(port, (val & (~mask)), XILINX_CPM_PCIE_REG_IDRN_MASK);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void xilinx_cpm_unmask_leg_irq(struct irq_data *data)
{
	struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask;
	u32 val;

	mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT);
	raw_spin_lock_irqsave(&port->lock, flags);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK);
	pcie_write(port, (val | mask), XILINX_CPM_PCIE_REG_IDRN_MASK);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip xilinx_cpm_leg_irq_chip = {
	.name		= "INTx",
	.irq_mask	= xilinx_cpm_mask_leg_irq,
	.irq_unmask	= xilinx_cpm_unmask_leg_irq,
};

/**
 * xilinx_cpm_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
 * @domain: IRQ domain
 * @irq: Virtual IRQ number
 * @hwirq: HW interrupt number
 *
 * Return: Always returns 0.
 */
static int xilinx_cpm_pcie_intx_map(struct irq_domain *domain,
				    unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_cpm_leg_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

/* INTx IRQ Domain operations */
static const struct irq_domain_ops intx_domain_ops = {
	.map = xilinx_cpm_pcie_intx_map,
};

static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc)
{
	struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long val;
	int i;

	chained_irq_enter(chip, desc);

	val = FIELD_GET(XILINX_CPM_PCIE_IDRN_MASK,
			pcie_read(port, XILINX_CPM_PCIE_REG_IDRN));

	for_each_set_bit(i, &val, PCI_NUM_INTX)
		generic_handle_domain_irq(port->intx_domain, i);

	chained_irq_exit(chip, desc);
}

static void xilinx_cpm_mask_event_irq(struct irq_data *d)
{
	struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(d);
	u32 val;

	raw_spin_lock(&port->lock);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
	val &= ~BIT(d->hwirq);
	pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR);
	raw_spin_unlock(&port->lock);
}

static void xilinx_cpm_unmask_event_irq(struct irq_data *d)
{
	struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(d);
	u32 val;

	raw_spin_lock(&port->lock);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
	val |= BIT(d->hwirq);
	pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR);
	raw_spin_unlock(&port->lock);
}

static struct irq_chip xilinx_cpm_event_irq_chip = {
	.name		= "RC-Event",
	.irq_mask	= xilinx_cpm_mask_event_irq,
	.irq_unmask	= xilinx_cpm_unmask_event_irq,
};

static int xilinx_cpm_pcie_event_map(struct irq_domain *domain,
				     unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_cpm_event_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);
	return 0;
}

static const struct irq_domain_ops event_domain_ops = {
	.map = xilinx_cpm_pcie_event_map,
};

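/*
 * Chained handler for the main CPM interrupt: demultiplex the latched event
 * bits (IDR) that are currently enabled (IMR) into the cpm_domain, then ack
 * them by writing the handled bits back to IDR (write-one-to-clear).
 */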
static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
{
	struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long val;
	int i;

	chained_irq_enter(chip, desc);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IDR);
	val &= pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
	for_each_set_bit(i, &val, 32)
		generic_handle_domain_irq(port->cpm_domain, i);
	pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);

	if (port->variant->version == CPM5) {
		val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_IR_STATUS);
		if (val)
			writel_relaxed(val, port->cpm_base +
					    XILINX_CPM_PCIE_IR_STATUS);
	}

	/*
	 * XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to
	 * CPM SLCR block.
	 */
	val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);
	if (val)
		writel_relaxed(val,
			       port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);

	chained_irq_exit(chip, desc);
}

#define _IC(x, s) \
	[XILINX_PCIE_INTR_ ## x] = { __stringify(x), s }

static const struct {
	const char	*sym;
	const char	*str;
} intr_cause[32] = {
	_IC(LINK_DOWN,		"Link Down"),
	_IC(HOT_RESET,		"Hot reset"),
	_IC(CFG_TIMEOUT,	"ECAM access timeout"),
	_IC(CORRECTABLE,	"Correctable error message"),
	_IC(NONFATAL,		"Non fatal error message"),
	_IC(FATAL,		"Fatal error message"),
	_IC(SLV_UNSUPP,		"Slave unsupported request"),
	_IC(SLV_UNEXP,		"Slave unexpected completion"),
	_IC(SLV_COMPL,		"Slave completion timeout"),
	_IC(SLV_ERRP,		"Slave Error Poison"),
	_IC(SLV_CMPABT,		"Slave Completer Abort"),
	_IC(SLV_ILLBUR,		"Slave Illegal Burst"),
	_IC(MST_DECERR,		"Master decode error"),
	_IC(MST_SLVERR,		"Master slave error"),
	_IC(CFG_PCIE_TIMEOUT,	"PCIe ECAM access timeout"),
	_IC(CFG_ERR_POISON,	"ECAM poisoned completion received"),
	_IC(PME_TO_ACK_RCVD,	"PME_TO_ACK message received"),
	_IC(PM_PME_RCVD,	"PM_PME message received"),
	_IC(SLV_PCIE_TIMEOUT,	"PCIe completion timeout received"),
};

static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *dev_id)
{
	struct xilinx_cpm_pcie *port = dev_id;
	struct device *dev = port->dev;
	struct irq_data *d;

	d = irq_domain_get_irq_data(port->cpm_domain, irq);

	switch (d->hwirq) {
	case XILINX_PCIE_INTR_CORRECTABLE:
	case XILINX_PCIE_INTR_NONFATAL:
	case XILINX_PCIE_INTR_FATAL:
		cpm_pcie_clear_err_interrupts(port);
		fallthrough;

	default:
		if (intr_cause[d->hwirq].str)
			dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
		else
			dev_warn(dev, "Unknown IRQ %ld\n", d->hwirq);
	}

	return IRQ_HANDLED;
}

static void xilinx_cpm_free_irq_domains(struct xilinx_cpm_pcie *port)
{
	if (port->intx_domain) {
		irq_domain_remove(port->intx_domain);
		port->intx_domain = NULL;
	}

	if (port->cpm_domain) {
		irq_domain_remove(port->cpm_domain);
		port->cpm_domain = NULL;
	}
}

/**
 * xilinx_cpm_pcie_init_irq_domain - Initialize IRQ domain
 * @port: PCIe port information
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;

	/* Setup INTx */
	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -EINVAL;
	}

	port->cpm_domain = irq_domain_add_linear(pcie_intc_node, 32,
						 &event_domain_ops,
						 port);
	if (!port->cpm_domain)
		goto out;

	irq_domain_update_bus_token(port->cpm_domain, DOMAIN_BUS_NEXUS);

	port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						  &intx_domain_ops,
						  port);
	if (!port->intx_domain)
		goto out;

	irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);

	of_node_put(pcie_intc_node);
	raw_spin_lock_init(&port->lock);

	return 0;
out:
	xilinx_cpm_free_irq_domains(port);
	of_node_put(pcie_intc_node);
	dev_err(dev, "Failed to allocate IRQ domains\n");

	return -ENOMEM;
}

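/*
 * Map and request a dedicated IRQ for every named event in intr_cause[],
 * all served by xilinx_cpm_pcie_intr_handler(); the INTX event instead
 * gets a chained flow handler that fans out into the intx_domain.
 */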
static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int i, irq;

	port->irq = platform_get_irq(pdev, 0);
	if (port->irq < 0)
		return port->irq;

	for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
		int err;

		if (!intr_cause[i].str)
			continue;

		irq = irq_create_mapping(port->cpm_domain, i);
		if (!irq) {
			dev_err(dev, "Failed to map interrupt\n");
			return -ENXIO;
		}

		err = devm_request_irq(dev, irq, xilinx_cpm_pcie_intr_handler,
				       0, intr_cause[i].sym, port);
		if (err) {
			dev_err(dev, "Failed to request IRQ %d\n", irq);
			return err;
		}
	}

	port->intx_irq = irq_create_mapping(port->cpm_domain,
					    XILINX_PCIE_INTR_INTX);
	if (!port->intx_irq) {
		dev_err(dev, "Failed to map INTx interrupt\n");
		return -ENXIO;
	}

	/* Plug the INTx chained handler */
	irq_set_chained_handler_and_data(port->intx_irq,
					 xilinx_cpm_pcie_intx_flow, port);

	/* Plug the main event chained handler */
	irq_set_chained_handler_and_data(port->irq,
					 xilinx_cpm_pcie_event_flow, port);

	return 0;
}

/**
 * xilinx_cpm_pcie_init_port - Initialize hardware
 * @port: PCIe port information
 */
static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
{
	if (cpm_pcie_link_up(port))
		dev_info(port->dev, "PCIe Link is UP\n");
	else
		dev_info(port->dev, "PCIe Link is DOWN\n");

	/* Disable all interrupts */
	pcie_write(port, ~XILINX_CPM_PCIE_IDR_ALL_MASK,
		   XILINX_CPM_PCIE_REG_IMR);

	/* Clear pending interrupts */
	pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_IDR) &
		   XILINX_CPM_PCIE_IMR_ALL_MASK,
		   XILINX_CPM_PCIE_REG_IDR);

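	/*
	 * All event interrupts stay masked here; each event's IMR bit is
	 * unmasked by the IRQ core (via xilinx_cpm_unmask_event_irq()) when
	 * its IRQ is requested in xilinx_cpm_setup_irq().
	 */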
	/*
	 * XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to
	 * CPM SLCR block.
	 */
	writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,
	       port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);

	if (port->variant->version == CPM5) {
		writel(XILINX_CPM_PCIE_IR_LOCAL,
		       port->cpm_base + XILINX_CPM_PCIE_IR_ENABLE);
	}

	/* Set the Bridge Enable bit */
	pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
		   XILINX_CPM_PCIE_REG_RPSC_BEN,
		   XILINX_CPM_PCIE_REG_RPSC);
}

/**
 * xilinx_cpm_pcie_parse_dt - Parse Device tree
 * @port: PCIe port information
 * @bus_range: Bus resource
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port,
				    struct resource *bus_range)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	port->cpm_base = devm_platform_ioremap_resource_byname(pdev,
								"cpm_slcr");
	if (IS_ERR(port->cpm_base))
		return PTR_ERR(port->cpm_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	if (!res)
		return -ENXIO;

	port->cfg = pci_ecam_create(dev, res, bus_range,
				    &pci_generic_ecam_ops);
	if (IS_ERR(port->cfg))
		return PTR_ERR(port->cfg);

	if (port->variant->version == CPM5) {
		port->reg_base = devm_platform_ioremap_resource_byname(pdev,
								       "cpm_csr");
		if (IS_ERR(port->reg_base))
			return PTR_ERR(port->reg_base);
	} else {
		port->reg_base = port->cfg->win;
	}

	return 0;
}

static void xilinx_cpm_free_interrupts(struct xilinx_cpm_pcie *port)
{
	irq_set_chained_handler_and_data(port->intx_irq, NULL, NULL);
	irq_set_chained_handler_and_data(port->irq, NULL, NULL);
}

/**
 * xilinx_cpm_pcie_probe - Probe function
 * @pdev: Platform device pointer
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
{
	struct xilinx_cpm_pcie *port;
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *bridge;
	struct resource_entry *bus;
	int err;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
	if (!bridge)
		return -ENODEV;

	port = pci_host_bridge_priv(bridge);

	port->dev = dev;

	err = xilinx_cpm_pcie_init_irq_domain(port);
	if (err)
		return err;

	bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
	if (!bus) {
		/* Don't leak the IRQ domains allocated just above */
		err = -ENODEV;
		goto err_parse_dt;
	}

	port->variant = of_device_get_match_data(dev);

	err = xilinx_cpm_pcie_parse_dt(port, bus->res);
	if (err) {
		dev_err(dev, "Parsing DT failed\n");
		goto err_parse_dt;
	}

	xilinx_cpm_pcie_init_port(port);

	err = xilinx_cpm_setup_irq(port);
	if (err) {
		dev_err(dev, "Failed to set up interrupts\n");
		goto err_setup_irq;
	}

	bridge->sysdata = port->cfg;
	bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops;

	err = pci_host_probe(bridge);
	if (err < 0)
		goto err_host_bridge;

	return 0;

err_host_bridge:
	xilinx_cpm_free_interrupts(port);
err_setup_irq:
	pci_ecam_free(port->cfg);
err_parse_dt:
	xilinx_cpm_free_irq_domains(port);
	return err;
}

static const struct xilinx_cpm_variant cpm_host = {
	.version = CPM,
};

static const struct xilinx_cpm_variant cpm5_host = {
	.version = CPM5,
};

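/*
 * The match data selects the variant descriptor; probe retrieves it with
 * of_device_get_match_data() and uses ->version to pick the CPM5-only
 * register region and interrupt clears.
 */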
"xlnx,versal-cpm-host-1.00", 621 .data = &cpm_host, 622 }, 623 { 624 .compatible = "xlnx,versal-cpm5-host", 625 .data = &cpm5_host, 626 }, 627 {} 628 }; 629 630 static struct platform_driver xilinx_cpm_pcie_driver = { 631 .driver = { 632 .name = "xilinx-cpm-pcie", 633 .of_match_table = xilinx_cpm_pcie_of_match, 634 .suppress_bind_attrs = true, 635 }, 636 .probe = xilinx_cpm_pcie_probe, 637 }; 638 639 builtin_platform_driver(xilinx_cpm_pcie_driver); 640