// SPDX-License-Identifier: GPL-2.0+
/*
 * Rockchip AXI PCIe endpoint controller driver
 *
 * Copyright (c) 2018 Rockchip, Inc.
 *
 * Author: Shawn Lin <shawn.lin@rock-chips.com>
 *         Simon Xue <xxm@rock-chips.com>
 */

#include <linux/configfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/pci-epf.h>
#include <linux/sizes.h>
#include <linux/workqueue.h>

#include "pcie-rockchip.h"

/**
 * struct rockchip_pcie_ep - private data for PCIe endpoint controller driver
 * @rockchip: Rockchip PCIe controller
 * @epc: PCI EPC device
 * @max_regions: maximum number of regions supported by hardware
 * @ob_region_map: bitmask of mapped outbound regions
 * @ob_addr: base addresses in the AXI bus where the outbound regions start
 * @irq_phys_addr: base address on the AXI bus where the MSI/INTX IRQ
 *                 dedicated outbound region is mapped.
 * @irq_cpu_addr: base address in the CPU space where a write access triggers
 *                the sending of a memory write (MSI) / normal message (INTX
 *                IRQ) TLP through the PCIe bus.
 * @irq_pci_addr: used to save the current mapping of the MSI/INTX IRQ
 *                dedicated outbound region.
 * @irq_pci_fn: the latest PCI function that has updated the mapping of
 *              the MSI/INTX IRQ dedicated outbound region.
 * @irq_pending: bitmask of asserted INTX IRQs.
 * @perst_irq: IRQ used for the PERST# signal.
 * @perst_asserted: True if the PERST# signal was asserted.
 * @link_up: True if the PCI link is up.
 * @link_training: Work item to execute PCI link training.
 */
struct rockchip_pcie_ep {
	struct rockchip_pcie rockchip;
	struct pci_epc *epc;
	u32 max_regions;
	unsigned long ob_region_map;
	phys_addr_t *ob_addr;
	phys_addr_t irq_phys_addr;
	void __iomem *irq_cpu_addr;
	u64 irq_pci_addr;
	u8 irq_pci_fn;
	u8 irq_pending;
	int perst_irq;
	bool perst_asserted;
	bool link_up;
	struct delayed_work link_training;
};

static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
					  u32 region)
{
	rockchip_pcie_write(rockchip, 0,
			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(region));
	rockchip_pcie_write(rockchip, 0,
			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(region));
	rockchip_pcie_write(rockchip, 0,
			    ROCKCHIP_PCIE_AT_OB_REGION_DESC0(region));
	rockchip_pcie_write(rockchip, 0,
			    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region));
}
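
/*
 * Compute how many low-order address bits an outbound region must pass
 * through untranslated so that [pci_addr, pci_addr + size) fits in a single
 * region. Illustrative example (not taken from the TRM): for
 * pci_addr = 0x10008000 and size = SZ_4K,
 * pci_addr ^ (pci_addr + size - 1) = 0xfff, so fls64() yields 12 pass bits,
 * which is then clamped to the range supported by the address translation
 * unit.
 */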
static int rockchip_pcie_ep_ob_atu_num_bits(struct rockchip_pcie *rockchip,
					    u64 pci_addr, size_t size)
{
	int num_pass_bits = fls64(pci_addr ^ (pci_addr + size - 1));

	return clamp(num_pass_bits,
		     ROCKCHIP_PCIE_AT_MIN_NUM_BITS,
		     ROCKCHIP_PCIE_AT_MAX_NUM_BITS);
}

static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
					 u32 r, u64 cpu_addr, u64 pci_addr,
					 size_t size)
{
	int num_pass_bits;
	u32 addr0, addr1, desc0;

	num_pass_bits = rockchip_pcie_ep_ob_atu_num_bits(rockchip,
							 pci_addr, size);

	addr0 = ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
		(lower_32_bits(pci_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
	addr1 = upper_32_bits(pci_addr);
	desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | AXI_WRAPPER_MEM_WRITE;

	/* PCI bus address region */
	rockchip_pcie_write(rockchip, addr0,
			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
	rockchip_pcie_write(rockchip, addr1,
			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
	rockchip_pcie_write(rockchip, desc0,
			    ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
	rockchip_pcie_write(rockchip, 0,
			    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
}

static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
					 struct pci_epf_header *hdr)
{
	u32 reg;
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;

	/* All functions share the same vendor ID with function 0 */
	if (fn == 0) {
		rockchip_pcie_write(rockchip,
				    hdr->vendorid | hdr->subsys_vendor_id << 16,
				    PCIE_CORE_CONFIG_VENDOR);
	}

	reg = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_DID_VID);
	reg = (reg & 0xFFFF) | (hdr->deviceid << 16);
	rockchip_pcie_write(rockchip, reg, PCIE_EP_CONFIG_DID_VID);

	rockchip_pcie_write(rockchip,
			    hdr->revid |
			    hdr->progif_code << 8 |
			    hdr->subclass_code << 16 |
			    hdr->baseclass_code << 24,
			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_REVISION_ID);
	rockchip_pcie_write(rockchip, hdr->cache_line_size,
			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
			    PCI_CACHE_LINE_SIZE);
	rockchip_pcie_write(rockchip, hdr->subsys_id << 16,
			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
			    PCI_SUBSYSTEM_VENDOR_ID);
	rockchip_pcie_write(rockchip, hdr->interrupt_pin << 8,
			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
			    PCI_INTERRUPT_LINE);

	return 0;
}
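
/*
 * The hardware encodes a BAR's size as 2^(aperture + 7), so the smallest
 * aperture value 0 corresponds to 128 bytes. Illustrative example: a 1 MiB
 * BAR request is first rounded up to a power of two (already 2^20 here) and
 * then programmed with aperture = 20 - 7 = 13.
 */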
static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				    struct pci_epf_bar *epf_bar)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	dma_addr_t bar_phys = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
	u64 sz;

	/* BAR size is 2^(aperture + 7) */
	sz = max_t(size_t, epf_bar->size, MIN_EP_APERTURE);

	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	sz = 1ULL << fls64(sz - 1);
	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS;
	} else {
		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
		bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);

		if (is_64bits && (bar & 1))
			return -EINVAL;

		if (is_64bits && is_prefetch)
			ctrl =
			    ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else if (is_prefetch)
			ctrl =
			    ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else if (is_64bits)
			ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS;
		else
			ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS;
	}

	if (bar < BAR_4) {
		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	addr0 = lower_32_bits(bar_phys);
	addr1 = upper_32_bits(bar_phys);

	cfg = rockchip_pcie_read(rockchip, reg);
	cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= (ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
		ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));

	rockchip_pcie_write(rockchip, cfg, reg);
	rockchip_pcie_write(rockchip, addr0,
			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
	rockchip_pcie_write(rockchip, addr1,
			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));

	return 0;
}

static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				       struct pci_epf_bar *epf_bar)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	u32 reg, cfg, b, ctrl;
	enum pci_barno bar = epf_bar->barno;

	if (bar < BAR_4) {
		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED;
	cfg = rockchip_pcie_read(rockchip, reg);
	cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);

	rockchip_pcie_write(rockchip, cfg, reg);
	rockchip_pcie_write(rockchip, 0x0,
			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
	rockchip_pcie_write(rockchip, 0x0,
			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
}
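
/*
 * Outbound address translation uses 1 MiB windows carved out of the
 * controller's memory resource. The window index is taken from bits [24:20]
 * of the CPU (AXI) address, giving at most 32 regions (hence the 0x1f mask
 * below). rockchip_pcie_ep_align_addr() tells the EPC core how to align and
 * split a mapping so that it never straddles one of these windows.
 */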
static inline u32 rockchip_ob_region(phys_addr_t addr)
{
	return (addr >> ilog2(SZ_1M)) & 0x1f;
}

static u64 rockchip_pcie_ep_align_addr(struct pci_epc *epc, u64 pci_addr,
				       size_t *pci_size, size_t *addr_offset)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	size_t size = *pci_size;
	u64 offset, mask;
	int num_bits;

	num_bits = rockchip_pcie_ep_ob_atu_num_bits(&ep->rockchip,
						    pci_addr, size);
	mask = (1ULL << num_bits) - 1;

	offset = pci_addr & mask;
	if (size + offset > SZ_1M)
		size = SZ_1M - offset;

	*pci_size = ALIGN(offset + size, ROCKCHIP_PCIE_AT_SIZE_ALIGN);
	*addr_offset = offset;

	return pci_addr & ~mask;
}

static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				     phys_addr_t addr, u64 pci_addr,
				     size_t size)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *pcie = &ep->rockchip;
	u32 r = rockchip_ob_region(addr);

	if (test_bit(r, &ep->ob_region_map))
		return -EBUSY;

	rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, addr, pci_addr, size);

	set_bit(r, &ep->ob_region_map);
	ep->ob_addr[r] = addr;

	return 0;
}

static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
					phys_addr_t addr)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	u32 r = rockchip_ob_region(addr);

	if (addr != ep->ob_addr[r] || !test_bit(r, &ep->ob_region_map))
		return;

	rockchip_pcie_clear_ep_ob_atu(rockchip, r);

	ep->ob_addr[r] = 0;
	clear_bit(r, &ep->ob_region_map);
}

static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
				    u8 nr_irqs)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	u8 mmc = order_base_2(nr_irqs);
	u32 flags;

	flags = rockchip_pcie_read(rockchip,
				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
	flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK;
	flags |=
	    (mmc << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
	    (PCI_MSI_FLAGS_64BIT << ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET);
	flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP;
	rockchip_pcie_write(rockchip, flags,
			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
			    ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
	return 0;
}

static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	u32 flags;

	flags = rockchip_pcie_read(rockchip,
				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
	if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
		return -EINVAL;

	return 1 << ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
		     ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
}

static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
					 u8 intx, bool do_assert)
{
	struct rockchip_pcie *rockchip = &ep->rockchip;

	intx &= 3;

	if (do_assert) {
		ep->irq_pending |= BIT(intx);
		rockchip_pcie_write(rockchip,
				    PCIE_CLIENT_INT_IN_ASSERT |
				    PCIE_CLIENT_INT_PEND_ST_PEND,
				    PCIE_CLIENT_LEGACY_INT_CTRL);
	} else {
		ep->irq_pending &= ~BIT(intx);
		rockchip_pcie_write(rockchip,
				    PCIE_CLIENT_INT_IN_DEASSERT |
				    PCIE_CLIENT_INT_PEND_ST_NORMAL,
				    PCIE_CLIENT_LEGACY_INT_CTRL);
	}
}

static int rockchip_pcie_ep_send_intx_irq(struct rockchip_pcie_ep *ep, u8 fn,
					  u8 intx)
{
	u16 cmd;

	cmd = rockchip_pcie_read(&ep->rockchip,
				 ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				 ROCKCHIP_PCIE_EP_CMD_STATUS);

	if (cmd & PCI_COMMAND_INTX_DISABLE)
		return -EINVAL;

	/*
	 * A delay is needed between asserting and deasserting INTx: the TRM
	 * vaguely states that the operation takes a few AHB bus clock cycles
	 * to complete, so wait a generous 1 ms here.
	 */
	rockchip_pcie_ep_assert_intx(ep, fn, intx, true);
	mdelay(1);
	rockchip_pcie_ep_assert_intx(ep, fn, intx, false);
	return 0;
}
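
/*
 * Raise an MSI by hand: read back the MSI address and data that the host
 * programmed into the function's MSI capability, map the MSI/INTX dedicated
 * outbound window onto that PCI address (only when the cached mapping is
 * stale), and then issue a CPU write of the message data through
 * irq_cpu_addr, which the controller turns into a Memory Write TLP.
 */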
static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
					 u8 interrupt_num)
{
	struct rockchip_pcie *rockchip = &ep->rockchip;
	u32 flags, mme, data, data_mask;
	size_t irq_pci_size, offset;
	u64 irq_pci_addr;
	u8 msi_count;
	u64 pci_addr;

	/* Check MSI enable bit */
	flags = rockchip_pcie_read(&ep->rockchip,
				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
	if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
		return -EINVAL;

	/* Get MSI numbers from MME */
	mme = ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
	       ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Set MSI private data */
	data_mask = msi_count - 1;
	data = rockchip_pcie_read(rockchip,
				  ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				  ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
				  PCI_MSI_DATA_64);
	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);

	/* Get MSI PCI address */
	pci_addr = rockchip_pcie_read(rockchip,
				      ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				      ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
				      PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= rockchip_pcie_read(rockchip,
				       ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				       ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
				       PCI_MSI_ADDRESS_LO);

	/* Set the outbound region if needed. */
	irq_pci_size = ~PCIE_ADDR_MASK + 1;
	irq_pci_addr = rockchip_pcie_ep_align_addr(ep->epc,
						   pci_addr & PCIE_ADDR_MASK,
						   &irq_pci_size, &offset);
	if (unlikely(ep->irq_pci_addr != irq_pci_addr ||
		     ep->irq_pci_fn != fn)) {
		rockchip_pcie_prog_ep_ob_atu(rockchip, fn,
					     rockchip_ob_region(ep->irq_phys_addr),
					     ep->irq_phys_addr,
					     irq_pci_addr, irq_pci_size);
		ep->irq_pci_addr = irq_pci_addr;
		ep->irq_pci_fn = fn;
	}

	writew(data, ep->irq_cpu_addr + offset + (pci_addr & ~PCIE_ADDR_MASK));
	return 0;
}

static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
				      unsigned int type, u16 interrupt_num)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);

	switch (type) {
	case PCI_IRQ_INTX:
		return rockchip_pcie_ep_send_intx_irq(ep, fn, 0);
	case PCI_IRQ_MSI:
		return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
	default:
		return -EINVAL;
	}
}
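
/*
 * Expose every bound function in the PHY function config register, then
 * enable configuration and link training. When a PERST# GPIO is available,
 * the PERST# IRQ is enabled here and link training is started from the
 * PERST# deassertion handler instead of being scheduled immediately.
 */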
static int rockchip_pcie_ep_start(struct pci_epc *epc)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	struct pci_epf *epf;
	u32 cfg;

	cfg = BIT(0);
	list_for_each_entry(epf, &epc->pci_epf, list)
		cfg |= BIT(epf->func_no);

	rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG);

	if (rockchip->perst_gpio)
		enable_irq(ep->perst_irq);

	/* Enable configuration and start link training */
	rockchip_pcie_write(rockchip,
			    PCIE_CLIENT_LINK_TRAIN_ENABLE |
			    PCIE_CLIENT_CONF_ENABLE,
			    PCIE_CLIENT_CONFIG);

	if (!rockchip->perst_gpio)
		schedule_delayed_work(&ep->link_training, 0);

	return 0;
}

static void rockchip_pcie_ep_stop(struct pci_epc *epc)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;

	if (rockchip->perst_gpio) {
		ep->perst_asserted = true;
		disable_irq(ep->perst_irq);
	}

	cancel_delayed_work_sync(&ep->link_training);

	/* Stop link training and disable configuration */
	rockchip_pcie_write(rockchip,
			    PCIE_CLIENT_CONF_DISABLE |
			    PCIE_CLIENT_LINK_TRAIN_DISABLE,
			    PCIE_CLIENT_CONFIG);
}

static void rockchip_pcie_ep_retrain_link(struct rockchip_pcie *rockchip)
{
	u32 status;

	status = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_LCS);
	status |= PCI_EXP_LNKCTL_RL;
	rockchip_pcie_write(rockchip, status, PCIE_EP_CONFIG_LCS);
}

static bool rockchip_pcie_ep_link_up(struct rockchip_pcie *rockchip)
{
	u32 val = rockchip_pcie_read(rockchip, PCIE_CLIENT_BASIC_STATUS1);

	return PCIE_LINK_UP(val);
}

static void rockchip_pcie_ep_link_training(struct work_struct *work)
{
	struct rockchip_pcie_ep *ep =
		container_of(work, struct rockchip_pcie_ep, link_training.work);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	struct device *dev = rockchip->dev;
	u32 val;
	int ret;

	/* Enable Gen1 training and wait for its completion */
	ret = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
				 val, PCIE_LINK_TRAINING_DONE(val), 50,
				 LINK_TRAIN_TIMEOUT);
	if (ret)
		goto again;

	/* Make sure that the link is up */
	ret = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
				 val, PCIE_LINK_UP(val), 50,
				 LINK_TRAIN_TIMEOUT);
	if (ret)
		goto again;

	/*
	 * Check the current speed: if gen2 speed was requested and we are not
	 * at gen2 speed yet, retrain again for gen2.
	 */
	val = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
	if (!PCIE_LINK_IS_GEN2(val) && rockchip->link_gen == 2) {
		/* Enable retrain for gen2 */
		rockchip_pcie_ep_retrain_link(rockchip);
		readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
				   val, PCIE_LINK_IS_GEN2(val), 50,
				   LINK_TRAIN_TIMEOUT);
	}

	/* Check again that the link is up */
	if (!rockchip_pcie_ep_link_up(rockchip))
		goto again;

	/*
	 * If PERST# was asserted while polling the link, do not notify
	 * the function.
	 */
	if (ep->perst_asserted)
		return;

	val = rockchip_pcie_read(rockchip, PCIE_CLIENT_BASIC_STATUS0);
	dev_info(dev,
		 "link up (negotiated speed: %sGT/s, width: x%lu)\n",
		 (val & PCIE_CLIENT_NEG_LINK_SPEED) ? "5" : "2.5",
		 ((val & PCIE_CLIENT_NEG_LINK_WIDTH_MASK) >>
		  PCIE_CLIENT_NEG_LINK_WIDTH_SHIFT) << 1);

	/* Notify the function */
	pci_epc_linkup(ep->epc);
	ep->link_up = true;

	return;

again:
	schedule_delayed_work(&ep->link_training, msecs_to_jiffies(5));
}

static void rockchip_pcie_ep_perst_assert(struct rockchip_pcie_ep *ep)
{
	struct rockchip_pcie *rockchip = &ep->rockchip;

	dev_dbg(rockchip->dev, "PERST# asserted, link down\n");

	if (ep->perst_asserted)
		return;

	ep->perst_asserted = true;

	cancel_delayed_work_sync(&ep->link_training);

	if (ep->link_up) {
		pci_epc_linkdown(ep->epc);
		ep->link_up = false;
	}
}

static void rockchip_pcie_ep_perst_deassert(struct rockchip_pcie_ep *ep)
{
	struct rockchip_pcie *rockchip = &ep->rockchip;

	dev_dbg(rockchip->dev, "PERST# de-asserted, starting link training\n");

	if (!ep->perst_asserted)
		return;

	ep->perst_asserted = false;

	/* Enable link re-training */
	rockchip_pcie_ep_retrain_link(rockchip);

	/* Start link training */
	schedule_delayed_work(&ep->link_training, 0);
}

static irqreturn_t rockchip_pcie_ep_perst_irq_thread(int irq, void *data)
{
	struct pci_epc *epc = data;
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	u32 perst = gpiod_get_value(rockchip->perst_gpio);

	if (perst)
		rockchip_pcie_ep_perst_assert(ep);
	else
		rockchip_pcie_ep_perst_deassert(ep);

	irq_set_irq_type(ep->perst_irq,
			 (perst ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW));

	return IRQ_HANDLED;
}

static int rockchip_pcie_ep_setup_irq(struct pci_epc *epc)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	struct device *dev = rockchip->dev;
	int ret;

	if (!rockchip->perst_gpio)
		return 0;

	/* PCIe reset interrupt */
	ep->perst_irq = gpiod_to_irq(rockchip->perst_gpio);
	if (ep->perst_irq < 0) {
		dev_err(dev,
			"failed to get IRQ for PERST# GPIO: %d\n",
			ep->perst_irq);

		return ep->perst_irq;
	}

	/*
	 * The perst_gpio is active low, so when it is inactive on start, it
	 * is high and will trigger the perst_irq handler. So treat this
	 * initial IRQ as a dummy one by faking the host asserting PERST#.
	 */
	ep->perst_asserted = true;
	irq_set_status_flags(ep->perst_irq, IRQ_NOAUTOEN);
	ret = devm_request_threaded_irq(dev, ep->perst_irq, NULL,
					rockchip_pcie_ep_perst_irq_thread,
					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
					"pcie-ep-perst", epc);
	if (ret) {
		dev_err(dev,
			"failed to request IRQ for PERST# GPIO: %d\n",
			ret);

		return ret;
	}

	return 0;
}

static const struct pci_epc_features rockchip_pcie_epc_features = {
	.linkup_notifier = true,
	.msi_capable = true,
	.msix_capable = false,
	.intx_capable = true,
	.align = ROCKCHIP_PCIE_AT_SIZE_ALIGN,
};

static const struct pci_epc_features*
rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	return &rockchip_pcie_epc_features;
}

static const struct pci_epc_ops rockchip_pcie_epc_ops = {
	.write_header = rockchip_pcie_ep_write_header,
	.set_bar = rockchip_pcie_ep_set_bar,
	.clear_bar = rockchip_pcie_ep_clear_bar,
	.align_addr = rockchip_pcie_ep_align_addr,
	.map_addr = rockchip_pcie_ep_map_addr,
	.unmap_addr = rockchip_pcie_ep_unmap_addr,
	.set_msi = rockchip_pcie_ep_set_msi,
	.get_msi = rockchip_pcie_ep_get_msi,
	.raise_irq = rockchip_pcie_ep_raise_irq,
	.start = rockchip_pcie_ep_start,
	.stop = rockchip_pcie_ep_stop,
	.get_features = rockchip_pcie_ep_get_features,
};

static int rockchip_pcie_ep_get_resources(struct rockchip_pcie *rockchip,
					  struct rockchip_pcie_ep *ep)
{
	struct device *dev = rockchip->dev;
	int err;

	err = rockchip_pcie_parse_dt(rockchip);
	if (err)
		return err;

	err = rockchip_pcie_get_phys(rockchip);
	if (err)
		return err;

	err = of_property_read_u32(dev->of_node,
				   "rockchip,max-outbound-regions",
				   &ep->max_regions);
	if (err < 0 || ep->max_regions > MAX_REGION_LIMIT)
		ep->max_regions = MAX_REGION_LIMIT;

	ep->ob_region_map = 0;

	err = of_property_read_u8(dev->of_node, "max-functions",
				  &ep->epc->max_functions);
	if (err < 0)
		ep->epc->max_functions = 1;

	return 0;
}

static const struct of_device_id rockchip_pcie_ep_of_match[] = {
	{ .compatible = "rockchip,rk3399-pcie-ep"},
	{},
};
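
/*
 * Carve the controller's memory resource into max_regions EPC memory windows
 * of 1 MiB each (one per outbound region), then permanently reserve one of
 * them as the dedicated window backing irq_cpu_addr, which is used to raise
 * MSI/INTX interrupts toward the host.
 */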
static int rockchip_pcie_ep_init_ob_mem(struct rockchip_pcie_ep *ep)
{
	struct rockchip_pcie *rockchip = &ep->rockchip;
	struct device *dev = rockchip->dev;
	struct pci_epc_mem_window *windows = NULL;
	int err, i;

	ep->ob_addr = devm_kcalloc(dev, ep->max_regions, sizeof(*ep->ob_addr),
				   GFP_KERNEL);

	if (!ep->ob_addr)
		return -ENOMEM;

	windows = devm_kcalloc(dev, ep->max_regions,
			       sizeof(struct pci_epc_mem_window), GFP_KERNEL);
	if (!windows)
		return -ENOMEM;

	for (i = 0; i < ep->max_regions; i++) {
		windows[i].phys_base = rockchip->mem_res->start + (SZ_1M * i);
		windows[i].size = SZ_1M;
		windows[i].page_size = SZ_1M;
	}
	err = pci_epc_multi_mem_init(ep->epc, windows, ep->max_regions);
	devm_kfree(dev, windows);

	if (err < 0) {
		dev_err(dev, "failed to initialize the memory space\n");
		return err;
	}

	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(ep->epc, &ep->irq_phys_addr,
						  SZ_1M);
	if (!ep->irq_cpu_addr) {
		dev_err(dev, "failed to reserve memory space for MSI\n");
		err = -ENOMEM;
		goto err_epc_mem_exit;
	}

	ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR;

	return 0;

err_epc_mem_exit:
	pci_epc_mem_exit(ep->epc);

	return err;
}

static void rockchip_pcie_ep_exit_ob_mem(struct rockchip_pcie_ep *ep)
{
	pci_epc_mem_exit(ep->epc);
}

static void rockchip_pcie_ep_hide_broken_msix_cap(struct rockchip_pcie *rockchip)
{
	u32 cfg_msi, cfg_msix_cp;

	/*
	 * MSI-X is not supported but the controller still advertises the MSI-X
	 * capability by default, which can lead to the Root Complex side
	 * allocating MSI-X vectors which cannot be used. Avoid this by skipping
	 * the MSI-X capability entry in the PCIe capabilities linked-list: get
	 * the next pointer from the MSI-X entry and set that in the MSI
	 * capability entry (which is the previous entry). This way the MSI-X
	 * entry is skipped (left out of the linked-list) and not advertised.
	 */
	cfg_msi = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE +
				     ROCKCHIP_PCIE_EP_MSI_CTRL_REG);

	cfg_msi &= ~ROCKCHIP_PCIE_EP_MSI_CP1_MASK;

	cfg_msix_cp = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE +
					 ROCKCHIP_PCIE_EP_MSIX_CAP_REG) &
		      ROCKCHIP_PCIE_EP_MSIX_CAP_CP_MASK;

	cfg_msi |= cfg_msix_cp;

	rockchip_pcie_write(rockchip, cfg_msi,
			    PCIE_EP_CONFIG_BASE + ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
}

static int rockchip_pcie_ep_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rockchip_pcie_ep *ep;
	struct rockchip_pcie *rockchip;
	struct pci_epc *epc;
	int err;

	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	rockchip = &ep->rockchip;
	rockchip->is_rc = false;
	rockchip->dev = dev;
	INIT_DELAYED_WORK(&ep->link_training, rockchip_pcie_ep_link_training);

	epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create EPC device\n");
		return PTR_ERR(epc);
	}

	ep->epc = epc;
	epc_set_drvdata(epc, ep);

	err = rockchip_pcie_ep_get_resources(rockchip, ep);
	if (err)
		return err;

	err = rockchip_pcie_ep_init_ob_mem(ep);
	if (err)
		return err;

	err = rockchip_pcie_enable_clocks(rockchip);
	if (err)
		goto err_exit_ob_mem;

	err = rockchip_pcie_init_port(rockchip);
	if (err)
		goto err_disable_clocks;

	rockchip_pcie_ep_hide_broken_msix_cap(rockchip);

	/* Only enable function 0 by default */
	rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);

	pci_epc_init_notify(epc);

	err = rockchip_pcie_ep_setup_irq(epc);
	if (err < 0)
		goto err_uninit_port;

	return 0;
err_uninit_port:
	rockchip_pcie_deinit_phys(rockchip);
err_disable_clocks:
	rockchip_pcie_disable_clocks(rockchip);
err_exit_ob_mem:
	rockchip_pcie_ep_exit_ob_mem(ep);
	return err;
}

static struct platform_driver rockchip_pcie_ep_driver = {
	.driver = {
		.name = "rockchip-pcie-ep",
		.of_match_table = rockchip_pcie_ep_of_match,
	},
	.probe = rockchip_pcie_ep_probe,
};

builtin_platform_driver(rockchip_pcie_ep_driver);