// SPDX-License-Identifier: GPL-2.0-only
/*
 * PCIe host controller driver for Xilinx XDMA PCIe Bridge
 *
 * Copyright (C) 2023 Xilinx, Inc. All rights reserved.
 */
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci-ecam.h>

#include "../pci.h"
#include "pcie-xilinx-common.h"

/* Register definitions */
#define XILINX_PCIE_DMA_REG_IDR			0x00000138
#define XILINX_PCIE_DMA_REG_IMR			0x0000013c
#define XILINX_PCIE_DMA_REG_PSCR		0x00000144
#define XILINX_PCIE_DMA_REG_RPSC		0x00000148
#define XILINX_PCIE_DMA_REG_MSIBASE1		0x0000014c
#define XILINX_PCIE_DMA_REG_MSIBASE2		0x00000150
#define XILINX_PCIE_DMA_REG_RPEFR		0x00000154
#define XILINX_PCIE_DMA_REG_IDRN		0x00000160
#define XILINX_PCIE_DMA_REG_IDRN_MASK		0x00000164
#define XILINX_PCIE_DMA_REG_MSI_LOW		0x00000170
#define XILINX_PCIE_DMA_REG_MSI_HI		0x00000174
#define XILINX_PCIE_DMA_REG_MSI_LOW_MASK	0x00000178
#define XILINX_PCIE_DMA_REG_MSI_HI_MASK		0x0000017c

#define IMR(x) BIT(XILINX_PCIE_INTR_ ##x)

#define XILINX_PCIE_INTR_IMR_ALL_MASK	\
	(				\
		IMR(LINK_DOWN)		|	\
		IMR(HOT_RESET)		|	\
		IMR(CFG_TIMEOUT)	|	\
		IMR(CORRECTABLE)	|	\
		IMR(NONFATAL)		|	\
		IMR(FATAL)		|	\
		IMR(INTX)		|	\
		IMR(MSI)		|	\
		IMR(SLV_UNSUPP)		|	\
		IMR(SLV_UNEXP)		|	\
		IMR(SLV_COMPL)		|	\
		IMR(SLV_ERRP)		|	\
		IMR(SLV_CMPABT)		|	\
		IMR(SLV_ILLBUR)		|	\
		IMR(MST_DECERR)		|	\
		IMR(MST_SLVERR)			\
	)

#define XILINX_PCIE_DMA_IMR_ALL_MASK	0x0ff30fe9
#define XILINX_PCIE_DMA_IDR_ALL_MASK	0xffffffff
#define XILINX_PCIE_DMA_IDRN_MASK	GENMASK(19, 16)

/* Root Port Error Register definitions */
#define XILINX_PCIE_DMA_RPEFR_ERR_VALID	BIT(18)
#define XILINX_PCIE_DMA_RPEFR_REQ_ID	GENMASK(15, 0)
#define XILINX_PCIE_DMA_RPEFR_ALL_MASK	0xffffffff

/* Root Port Interrupt Register definitions */
#define XILINX_PCIE_DMA_IDRN_SHIFT	16

/* Root Port Status/control Register definitions */
#define XILINX_PCIE_DMA_REG_RPSC_BEN	BIT(0)

/* Phy Status/Control Register definitions */
#define XILINX_PCIE_DMA_REG_PSCR_LNKUP	BIT(11)
#define QDMA_BRIDGE_BASE_OFF		0xcd8

/* Number of MSI IRQs */
#define XILINX_NUM_MSI_IRQS	64

enum xilinx_pl_dma_version {
	XDMA,
	QDMA,
};

/**
 * struct xilinx_pl_dma_variant - PL DMA PCIe variant information
 * @version: DMA version
 */
struct xilinx_pl_dma_variant {
	enum xilinx_pl_dma_version version;
};

/**
 * struct xilinx_msi - MSI information
 * @bitmap: MSI vector bitmap
 * @dev_domain: MSI IRQ domain pointer
 * @lock: Mutex protecting the MSI bitmap
 * @irq_msi0: MSI0 interrupt number
 * @irq_msi1: MSI1 interrupt number
 */
struct xilinx_msi {
	unsigned long *bitmap;
	struct irq_domain *dev_domain;
	struct mutex lock;		/* Protect bitmap variable */
	int irq_msi0;
	int irq_msi1;
};

/**
 * struct pl_dma_pcie - PCIe port information
 * @dev: Device pointer
 * @reg_base: IO Mapped Register Base
 * @cfg_base: IO Mapped Configuration Base
 * @irq: Interrupt number
 * @cfg: Holds mappings of config space window
 * @phys_reg_base: Physical address of reg base
 * @intx_domain: Legacy IRQ domain pointer
 * @pldma_domain: PL DMA IRQ domain pointer
 * @resources: Bus Resources
 * @msi: MSI information
 * @intx_irq: INTx error interrupt number
 * @lock: Lock protecting shared register access
 * @variant: PL DMA PCIe variant information
 */
struct pl_dma_pcie {
	struct device *dev;
	void __iomem *reg_base;
	void __iomem *cfg_base;
	int irq;
	struct pci_config_window *cfg;
	phys_addr_t phys_reg_base;
	struct irq_domain *intx_domain;
	struct irq_domain *pldma_domain;
	struct list_head resources;
	struct xilinx_msi msi;
	int intx_irq;
	raw_spinlock_t lock;
	const struct xilinx_pl_dma_variant *variant;
};

static inline u32 pcie_read(struct pl_dma_pcie *port, u32 reg)
{
	if (port->variant->version == QDMA)
		return readl(port->reg_base + reg + QDMA_BRIDGE_BASE_OFF);

	return readl(port->reg_base + reg);
}

static inline void pcie_write(struct pl_dma_pcie *port, u32 val, u32 reg)
{
	if (port->variant->version == QDMA)
		writel(val, port->reg_base + reg + QDMA_BRIDGE_BASE_OFF);
	else
		writel(val, port->reg_base + reg);
}

static inline bool xilinx_pl_dma_pcie_link_up(struct pl_dma_pcie *port)
{
	return (pcie_read(port, XILINX_PCIE_DMA_REG_PSCR) &
		XILINX_PCIE_DMA_REG_PSCR_LNKUP) ? true : false;
}

static void xilinx_pl_dma_pcie_clear_err_interrupts(struct pl_dma_pcie *port)
{
	unsigned long val = pcie_read(port, XILINX_PCIE_DMA_REG_RPEFR);

	if (val & XILINX_PCIE_DMA_RPEFR_ERR_VALID) {
		dev_dbg(port->dev, "Requester ID %lu\n",
			val & XILINX_PCIE_DMA_RPEFR_REQ_ID);
		pcie_write(port, XILINX_PCIE_DMA_RPEFR_ALL_MASK,
			   XILINX_PCIE_DMA_REG_RPEFR);
	}
}

static bool xilinx_pl_dma_pcie_valid_device(struct pci_bus *bus,
					    unsigned int devfn)
{
	struct pl_dma_pcie *port = bus->sysdata;

	if (!pci_is_root_bus(bus)) {
		/*
		 * Checking whether the link is up is the last line of
		 * defense, and this check is inherently racy by definition.
		 * Sending a PIO request to a downstream device when the link is
		 * down causes an unrecoverable error, and a reset of the entire
		 * PCIe controller will be needed. We can reduce the likelihood
		 * of that unrecoverable error by checking whether the link is
		 * up, but we can't completely prevent it because the link may
		 * go down between the link-up check and the PIO request.
		 */
		if (!xilinx_pl_dma_pcie_link_up(port))
			return false;
	} else if (devfn > 0)
		/* Only one device down on each root port */
		return false;

	return true;
}

static void __iomem *xilinx_pl_dma_pcie_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	struct pl_dma_pcie *port = bus->sysdata;

	if (!xilinx_pl_dma_pcie_valid_device(bus, devfn))
		return NULL;

	if (port->variant->version == QDMA)
		return port->cfg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);

	return port->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
}

/* PCIe operations */
static struct pci_ecam_ops xilinx_pl_dma_pcie_ops = {
	.pci_ops = {
		.map_bus = xilinx_pl_dma_pcie_map_bus,
		.read	= pci_generic_config_read,
		.write	= pci_generic_config_write,
	}
};

static void xilinx_pl_dma_pcie_enable_msi(struct pl_dma_pcie *port)
{
	phys_addr_t msi_addr = port->phys_reg_base;

	pcie_write(port, upper_32_bits(msi_addr), XILINX_PCIE_DMA_REG_MSIBASE1);
	pcie_write(port, lower_32_bits(msi_addr), XILINX_PCIE_DMA_REG_MSIBASE2);
}

static void xilinx_mask_intx_irq(struct irq_data *data)
{
	struct pl_dma_pcie *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask, val;

	mask = BIT(data->hwirq + XILINX_PCIE_DMA_IDRN_SHIFT);
	raw_spin_lock_irqsave(&port->lock, flags);
	val = pcie_read(port, XILINX_PCIE_DMA_REG_IDRN_MASK);
	pcie_write(port, (val & (~mask)), XILINX_PCIE_DMA_REG_IDRN_MASK);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void xilinx_unmask_intx_irq(struct irq_data *data)
{
	struct pl_dma_pcie *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask, val;

	mask = BIT(data->hwirq + XILINX_PCIE_DMA_IDRN_SHIFT);
	raw_spin_lock_irqsave(&port->lock, flags);
	val = pcie_read(port, XILINX_PCIE_DMA_REG_IDRN_MASK);
	pcie_write(port, (val | mask), XILINX_PCIE_DMA_REG_IDRN_MASK);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip xilinx_leg_irq_chip = {
	.name		= "pl_dma:INTx",
	.irq_mask	= xilinx_mask_intx_irq,
	.irq_unmask	= xilinx_unmask_intx_irq,
};

static int xilinx_pl_dma_pcie_intx_map(struct irq_domain *domain,
				       unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_leg_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

/* INTx IRQ Domain operations */
static const struct irq_domain_ops intx_domain_ops = {
	.map = xilinx_pl_dma_pcie_intx_map,
};

static irqreturn_t xilinx_pl_dma_pcie_msi_handler_high(int irq, void *args)
{
	struct xilinx_msi *msi;
	unsigned long status;
	u32 bit, virq;
	struct pl_dma_pcie *port = args;

	msi = &port->msi;

	while ((status = pcie_read(port, XILINX_PCIE_DMA_REG_MSI_HI)) != 0) {
		for_each_set_bit(bit, &status, 32) {
			pcie_write(port, 1 << bit, XILINX_PCIE_DMA_REG_MSI_HI);
			bit = bit + 32;
			virq = irq_find_mapping(msi->dev_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t xilinx_pl_dma_pcie_msi_handler_low(int irq, void *args)
{
	struct pl_dma_pcie *port = args;
	struct xilinx_msi *msi;
	unsigned long status;
	u32 bit, virq;

	msi = &port->msi;

	while ((status =
		pcie_read(port, XILINX_PCIE_DMA_REG_MSI_LOW)) != 0) {
		for_each_set_bit(bit, &status, 32) {
			pcie_write(port, 1 << bit, XILINX_PCIE_DMA_REG_MSI_LOW);
			virq = irq_find_mapping(msi->dev_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t xilinx_pl_dma_pcie_event_flow(int irq, void *args)
{
	struct pl_dma_pcie *port = args;
	unsigned long val;
	int i;

	val = pcie_read(port, XILINX_PCIE_DMA_REG_IDR);
	val &= pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
	for_each_set_bit(i, &val, 32)
		generic_handle_domain_irq(port->pldma_domain, i);

	pcie_write(port, val, XILINX_PCIE_DMA_REG_IDR);

	return IRQ_HANDLED;
}

#define _IC(x, s) \
	[XILINX_PCIE_INTR_ ## x] = { __stringify(x), s }

static const struct {
	const char	*sym;
	const char	*str;
} intr_cause[32] = {
	_IC(LINK_DOWN,		"Link Down"),
	_IC(HOT_RESET,		"Hot reset"),
	_IC(CFG_TIMEOUT,	"ECAM access timeout"),
	_IC(CORRECTABLE,	"Correctable error message"),
	_IC(NONFATAL,		"Non fatal error message"),
	_IC(FATAL,		"Fatal error message"),
	_IC(SLV_UNSUPP,		"Slave unsupported request"),
	_IC(SLV_UNEXP,		"Slave unexpected completion"),
	_IC(SLV_COMPL,		"Slave completion timeout"),
	_IC(SLV_ERRP,		"Slave Error Poison"),
	_IC(SLV_CMPABT,		"Slave Completer Abort"),
	_IC(SLV_ILLBUR,		"Slave Illegal Burst"),
	_IC(MST_DECERR,		"Master decode error"),
	_IC(MST_SLVERR,		"Master slave error"),
};

static irqreturn_t xilinx_pl_dma_pcie_intr_handler(int irq, void *dev_id)
{
	struct pl_dma_pcie *port = (struct pl_dma_pcie *)dev_id;
	struct device *dev = port->dev;
	struct irq_data *d;

	d = irq_domain_get_irq_data(port->pldma_domain, irq);
	switch (d->hwirq) {
	case XILINX_PCIE_INTR_CORRECTABLE:
	case XILINX_PCIE_INTR_NONFATAL:
	case XILINX_PCIE_INTR_FATAL:
		xilinx_pl_dma_pcie_clear_err_interrupts(port);
		fallthrough;

	default:
		if (intr_cause[d->hwirq].str)
			dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
		else
			dev_warn(dev, "Unknown IRQ %ld\n", d->hwirq);
	}

	return IRQ_HANDLED;
}

#define XILINX_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS	| \
				   MSI_FLAG_USE_DEF_CHIP_OPS	| \
				   MSI_FLAG_NO_AFFINITY)

#define XILINX_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK	| \
				    MSI_FLAG_MULTI_PCI_MSI)

static const struct msi_parent_ops xilinx_msi_parent_ops = {
	.required_flags		= XILINX_MSI_FLAGS_REQUIRED,
	.supported_flags	= XILINX_MSI_FLAGS_SUPPORTED,
	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
	.prefix			= "pl_dma-",
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct pl_dma_pcie *pcie = irq_data_get_irq_chip_data(data);
	phys_addr_t msi_addr = pcie->phys_reg_base;

	msg->address_lo = lower_32_bits(msi_addr);
	msg->address_hi = upper_32_bits(msi_addr);
	msg->data = data->hwirq;
}

static struct irq_chip xilinx_irq_chip = {
	.name = "pl_dma:MSI",
	.irq_compose_msi_msg = xilinx_compose_msi_msg,
};

static int xilinx_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs, void *args)
{
	struct pl_dma_pcie *pcie = domain->host_data;
	struct xilinx_msi *msi = &pcie->msi;
	int bit, i;

	mutex_lock(&msi->lock);
	bit = bitmap_find_free_region(msi->bitmap, XILINX_NUM_MSI_IRQS,
				      get_count_order(nr_irqs));
	if (bit < 0) {
		mutex_unlock(&msi->lock);
		return -ENOSPC;
	}

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, bit + i, &xilinx_irq_chip,
				    domain->host_data, handle_simple_irq,
				    NULL, NULL);
	}
	mutex_unlock(&msi->lock);

	return 0;
}

static void xilinx_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
	struct pl_dma_pcie *pcie = irq_data_get_irq_chip_data(data);
	struct xilinx_msi *msi = &pcie->msi;

	mutex_lock(&msi->lock);
	bitmap_release_region(msi->bitmap, data->hwirq,
			      get_count_order(nr_irqs));
	mutex_unlock(&msi->lock);
}

static const struct irq_domain_ops dev_msi_domain_ops = {
	.alloc	= xilinx_irq_domain_alloc,
	.free	= xilinx_irq_domain_free,
};

static void xilinx_pl_dma_pcie_free_irq_domains(struct pl_dma_pcie *port)
{
	struct xilinx_msi *msi = &port->msi;

	if (port->intx_domain) {
		irq_domain_remove(port->intx_domain);
		port->intx_domain = NULL;
	}

	if (msi->dev_domain) {
		irq_domain_remove(msi->dev_domain);
		msi->dev_domain = NULL;
	}
}

static int xilinx_pl_dma_pcie_init_msi_irq_domain(struct pl_dma_pcie *port)
{
	struct device *dev = port->dev;
	struct xilinx_msi *msi = &port->msi;
	int size = BITS_TO_LONGS(XILINX_NUM_MSI_IRQS) * sizeof(long);
	struct irq_domain_info info = {
		.fwnode		= dev_fwnode(port->dev),
		.ops		= &dev_msi_domain_ops,
		.host_data	= port,
		.size		= XILINX_NUM_MSI_IRQS,
	};

	msi->dev_domain = msi_create_parent_irq_domain(&info, &xilinx_msi_parent_ops);
	if (!msi->dev_domain)
		goto out;

	mutex_init(&msi->lock);
	msi->bitmap = kzalloc(size, GFP_KERNEL);
	if (!msi->bitmap)
		goto out;

	raw_spin_lock_init(&port->lock);
	xilinx_pl_dma_pcie_enable_msi(port);

	return 0;

out:
	xilinx_pl_dma_pcie_free_irq_domains(port);
	dev_err(dev, "Failed to allocate MSI IRQ domains\n");

	return -ENOMEM;
}

/*
 * INTx error interrupts are Xilinx controller-specific interrupts, used to
 * notify the user about errors such as config timeouts, slave unsupported
 * requests, and fatal and non-fatal errors.
 */
static irqreturn_t xilinx_pl_dma_pcie_intx_flow(int irq, void *args)
{
	unsigned long val;
	int i;
	struct pl_dma_pcie *port = args;

	val = FIELD_GET(XILINX_PCIE_DMA_IDRN_MASK,
			pcie_read(port, XILINX_PCIE_DMA_REG_IDRN));

	for_each_set_bit(i, &val, PCI_NUM_INTX)
		generic_handle_domain_irq(port->intx_domain, i);

	return IRQ_HANDLED;
}

static void xilinx_pl_dma_pcie_mask_event_irq(struct irq_data *d)
{
	struct pl_dma_pcie *port = irq_data_get_irq_chip_data(d);
	u32 val;

	raw_spin_lock(&port->lock);
	val = pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
	val &= ~BIT(d->hwirq);
	pcie_write(port, val, XILINX_PCIE_DMA_REG_IMR);
	raw_spin_unlock(&port->lock);
}

static void xilinx_pl_dma_pcie_unmask_event_irq(struct irq_data *d)
{
	struct pl_dma_pcie *port = irq_data_get_irq_chip_data(d);
	u32 val;

	raw_spin_lock(&port->lock);
	val = pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
	val |= BIT(d->hwirq);
	pcie_write(port, val, XILINX_PCIE_DMA_REG_IMR);
	raw_spin_unlock(&port->lock);
}

static struct irq_chip xilinx_pl_dma_pcie_event_irq_chip = {
	.name		= "pl_dma:RC-Event",
	.irq_mask	= xilinx_pl_dma_pcie_mask_event_irq,
	.irq_unmask	= xilinx_pl_dma_pcie_unmask_event_irq,
};

static int xilinx_pl_dma_pcie_event_map(struct irq_domain *domain,
					unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_pl_dma_pcie_event_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

static const struct irq_domain_ops event_domain_ops = {
	.map = xilinx_pl_dma_pcie_event_map,
};

/**
 * xilinx_pl_dma_pcie_init_irq_domain - Initialize IRQ domain
 * @port: PCIe port information
 *
 * Return: '0' on success and error value on failure.
 */
static int xilinx_pl_dma_pcie_init_irq_domain(struct pl_dma_pcie *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;
	int ret;

	/* Setup INTx */
	pcie_intc_node = of_get_child_by_name(node, "interrupt-controller");
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -EINVAL;
	}

	port->pldma_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
						      &event_domain_ops, port);
	if (!port->pldma_domain)
		return -ENOMEM;

	irq_domain_update_bus_token(port->pldma_domain, DOMAIN_BUS_NEXUS);

	port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
						     &intx_domain_ops, port);
	if (!port->intx_domain) {
		dev_err(dev, "Failed to get an INTx IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);

	ret = xilinx_pl_dma_pcie_init_msi_irq_domain(port);
	if (ret != 0) {
		irq_domain_remove(port->intx_domain);
		return -ENOMEM;
	}

	of_node_put(pcie_intc_node);
	raw_spin_lock_init(&port->lock);

	return 0;
}

static int xilinx_pl_dma_pcie_setup_irq(struct pl_dma_pcie *port)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int i, irq, err;

	port->irq = platform_get_irq(pdev, 0);
	if (port->irq < 0)
		return port->irq;

	for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
		int err;

		if (!intr_cause[i].str)
			continue;

		irq = irq_create_mapping(port->pldma_domain, i);
		if (!irq) {
			dev_err(dev, "Failed to map interrupt\n");
			return -ENXIO;
		}

		err = devm_request_irq(dev, irq,
				       xilinx_pl_dma_pcie_intr_handler,
				       IRQF_SHARED | IRQF_NO_THREAD,
				       intr_cause[i].sym, port);
		if (err) {
			dev_err(dev, "Failed to request IRQ %d\n", irq);
			return err;
		}
	}

	port->intx_irq = irq_create_mapping(port->pldma_domain,
					    XILINX_PCIE_INTR_INTX);
	if (!port->intx_irq) {
		dev_err(dev, "Failed to map INTx interrupt\n");
		return -ENXIO;
	}

	err = devm_request_irq(dev, port->intx_irq, xilinx_pl_dma_pcie_intx_flow,
			       IRQF_SHARED | IRQF_NO_THREAD, NULL, port);
	if (err) {
		dev_err(dev, "Failed to request INTx IRQ %d\n", port->intx_irq);
		return err;
	}

	err = devm_request_irq(dev, port->irq, xilinx_pl_dma_pcie_event_flow,
			       IRQF_SHARED | IRQF_NO_THREAD, NULL, port);
	if (err) {
		dev_err(dev, "Failed to request event IRQ %d\n", port->irq);
		return err;
	}

	return 0;
}

static void xilinx_pl_dma_pcie_init_port(struct pl_dma_pcie *port)
{
	if (xilinx_pl_dma_pcie_link_up(port))
		dev_info(port->dev, "PCIe Link is UP\n");
	else
		dev_info(port->dev, "PCIe Link is DOWN\n");

	/* Disable all interrupts */
	pcie_write(port, ~XILINX_PCIE_DMA_IDR_ALL_MASK,
		   XILINX_PCIE_DMA_REG_IMR);

	/* Clear pending interrupts */
	pcie_write(port, pcie_read(port, XILINX_PCIE_DMA_REG_IDR) &
		   XILINX_PCIE_DMA_IMR_ALL_MASK,
		   XILINX_PCIE_DMA_REG_IDR);

	/* Needed for MSI DECODE MODE */
	pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK,
		   XILINX_PCIE_DMA_REG_MSI_LOW_MASK);
	pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK,
		   XILINX_PCIE_DMA_REG_MSI_HI_MASK);

	/* Set the Bridge enable bit */
	pcie_write(port, pcie_read(port, XILINX_PCIE_DMA_REG_RPSC) |
		   XILINX_PCIE_DMA_REG_RPSC_BEN,
		   XILINX_PCIE_DMA_REG_RPSC);
}

static int xilinx_request_msi_irq(struct pl_dma_pcie *port)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int ret;

	port->msi.irq_msi0 = platform_get_irq_byname(pdev, "msi0");
	if (port->msi.irq_msi0 <= 0)
		return port->msi.irq_msi0;

	ret = devm_request_irq(dev, port->msi.irq_msi0, xilinx_pl_dma_pcie_msi_handler_low,
			       IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl",
			       port);
	if (ret) {
		dev_err(dev, "Failed to register interrupt\n");
		return ret;
	}

	port->msi.irq_msi1 = platform_get_irq_byname(pdev, "msi1");
	if (port->msi.irq_msi1 <= 0)
		return port->msi.irq_msi1;

	ret = devm_request_irq(dev, port->msi.irq_msi1, xilinx_pl_dma_pcie_msi_handler_high,
			       IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl",
			       port);
	if (ret) {
		dev_err(dev, "Failed to register interrupt\n");
		return ret;
	}

	return 0;
}

static int xilinx_pl_dma_pcie_parse_dt(struct pl_dma_pcie *port,
				       struct resource *bus_range)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;
	int err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "Missing \"reg\" property\n");
		return -ENXIO;
	}
	port->phys_reg_base = res->start;

	port->cfg = pci_ecam_create(dev, res, bus_range, &xilinx_pl_dma_pcie_ops);
	if (IS_ERR(port->cfg))
		return PTR_ERR(port->cfg);

	port->reg_base = port->cfg->win;

	if (port->variant->version == QDMA) {
		port->cfg_base = port->cfg->win;
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
		port->reg_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(port->reg_base))
			return PTR_ERR(port->reg_base);
		port->phys_reg_base = res->start;
	}

	err = xilinx_request_msi_irq(port);
	if (err) {
		pci_ecam_free(port->cfg);
		return err;
	}

	return 0;
}

static int xilinx_pl_dma_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pl_dma_pcie *port;
	struct pci_host_bridge *bridge;
	struct resource_entry *bus;
	int err;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
	if (!bridge)
		return -ENODEV;

	port = pci_host_bridge_priv(bridge);

	port->dev = dev;

	bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
	if (!bus)
		return -ENODEV;

	port->variant = of_device_get_match_data(dev);

	err = xilinx_pl_dma_pcie_parse_dt(port, bus->res);
	if (err) {
		dev_err(dev, "Parsing DT failed\n");
		return err;
	}

	xilinx_pl_dma_pcie_init_port(port);

	err = xilinx_pl_dma_pcie_init_irq_domain(port);
	if (err)
		goto err_irq_domain;

	err = xilinx_pl_dma_pcie_setup_irq(port);
	if (err)
		goto err_host_bridge;

	bridge->sysdata = port;
	bridge->ops = &xilinx_pl_dma_pcie_ops.pci_ops;

	err = pci_host_probe(bridge);
	if (err < 0)
		goto err_host_bridge;

	return 0;

err_host_bridge:
	xilinx_pl_dma_pcie_free_irq_domains(port);

err_irq_domain:
	pci_ecam_free(port->cfg);
	return err;
}

static const struct xilinx_pl_dma_variant xdma_host = {
	.version = XDMA,
};

static const struct xilinx_pl_dma_variant qdma_host = {
	.version = QDMA,
};

static const struct of_device_id
xilinx_pl_dma_pcie_of_match[] = {
	{
		.compatible = "xlnx,xdma-host-3.00",
		.data = &xdma_host,
	},
	{
		.compatible = "xlnx,qdma-host-3.00",
		.data = &qdma_host,
	},
	{}
};

static struct platform_driver xilinx_pl_dma_pcie_driver = {
	.driver = {
		.name = "xilinx-xdma-pcie",
		.of_match_table = xilinx_pl_dma_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = xilinx_pl_dma_pcie_probe,
};

builtin_platform_driver(xilinx_pl_dma_pcie_driver);