// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe endpoint controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

#include "pcie-cadence.h"
#include "../../pci.h"

#define CDNS_PCIE_EP_MIN_APERTURE		128	/* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE		0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY	0x3

static u8 cdns_pcie_get_fn_from_vfn(struct cdns_pcie *pcie, u8 fn, u8 vfn)
{
	u32 first_vf_offset, stride;
	u16 cap;

	if (vfn == 0)
		return fn;

	cap = cdns_pcie_find_ext_capability(pcie, PCI_EXT_CAP_ID_SRIOV);
	first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_OFFSET);
	stride = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_STRIDE);
	fn = fn + first_vf_offset + ((vfn - 1) * stride);

	return fn;
}

static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
				     struct pci_epf_header *hdr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 reg;
	u16 cap;

	cap = cdns_pcie_find_ext_capability(pcie, PCI_EXT_CAP_ID_SRIOV);
	if (vfn > 1) {
		dev_err(&epc->dev, "Only Virtual Function #1 has deviceID\n");
		return -EINVAL;
	} else if (vfn == 1) {
		reg = cap + PCI_SRIOV_VF_DID;
		cdns_pcie_ep_fn_writew(pcie, fn, reg, hdr->deviceid);
		return 0;
	}

	cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
			       hdr->subclass_code | hdr->baseclass_code << 8);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
			       hdr->cache_line_size);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);

	/*
	 * Vendor ID can only be modified from function 0, all other functions
	 * use the same vendor ID as function 0.
	 */
	if (fn == 0) {
		/* Update the vendor IDs. */
		u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
			 CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);

		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	return 0;
}

static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	dma_addr_t bar_phys = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
	u64 sz;

	/* BAR size is 2^(aperture + 7) */
	sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	sz = 1ULL << fls64(sz - 1);
	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
	} else {
		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
		bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);

		if (is_64bits && (bar & 1))
			return -EINVAL;

		if (is_64bits && is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else if (is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else if (is_64bits)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
		else
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
	}

	addr0 = lower_32_bits(bar_phys);
	addr1 = upper_32_bits(bar_phys);

	if (vfn == 1)
		reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
	else
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
	b = (bar < BAR_4) ? bar : bar - BAR_4;

	if (vfn == 0 || vfn == 1) {
		cfg = cdns_pcie_readl(pcie, reg);
		cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
			 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
		cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
			CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
		cdns_pcie_writel(pcie, reg, cfg);
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
			 addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
			 addr1);

	if (vfn > 0)
		epf = &epf->epf[vfn - 1];
	epf->epf_bar[bar] = epf_bar;

	return 0;
}

static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				   struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	enum pci_barno bar = epf_bar->barno;
	u32 reg, cfg, b, ctrl;

	if (vfn == 1)
		reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
	else
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
	b = (bar < BAR_4) ? bar : bar - BAR_4;

	if (vfn == 0 || vfn == 1) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
		cfg = cdns_pcie_readl(pcie, reg);
		cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
			 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
		cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
		cdns_pcie_writel(pcie, reg, cfg);
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);

	if (vfn > 0)
		epf = &epf->epf[vfn - 1];
	epf->epf_bar[bar] = NULL;
}

static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				 phys_addr_t addr, u64 pci_addr, size_t size)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
	if (r >= ep->max_regions - 1) {
		dev_err(&epc->dev, "no free outbound region\n");
		return -EINVAL;
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size);

	set_bit(r, &ep->ob_region_map);
	ep->ob_addr[r] = addr;

	return 0;
}

static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				    phys_addr_t addr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	for (r = 0; r < ep->max_regions - 1; r++)
		if (ep->ob_addr[r] == addr)
			break;

	if (r == ep->max_regions - 1)
		return;

	cdns_pcie_reset_outbound_region(pcie, r);

	ep->ob_addr[r] = 0;
	clear_bit(r, &ep->ob_region_map);
}

static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 nr_irqs)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u8 mmc = order_base_2(nr_irqs);
	u16 flags;
	u8 cap;

	cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/*
	 * Set the Multiple Message Capable bitfield into the Message Control
	 * register.
	 */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
	flags |= PCI_MSI_FLAGS_64BIT;
	flags &= ~PCI_MSI_FLAGS_MASKBIT;
	cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);

	return 0;
}

static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u16 flags, mme;
	u8 cap;

	/* This reads the MSI Message Control register, so look up the MSI capability. */
	cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Validate that the MSI feature is actually enabled. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/*
	 * Get the Multiple Message Enable bitfield from the Message Control
	 * register.
	 */
	mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);

	return 1 << mme;
}

static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 val, reg;
	u8 cap;

	cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX);
	func_no = cdns_pcie_get_fn_from_vfn(pcie, func_no, vfunc_no);

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, func_no, reg);
	if (!(val & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	val &= PCI_MSIX_FLAGS_QSIZE;

	return val + 1;
}

static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
				 u16 nr_irqs, enum pci_barno bir, u32 offset)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 val, reg;
	u8 cap;

	cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX);
	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
	val &= ~PCI_MSIX_FLAGS_QSIZE;
	val |= nr_irqs - 1; /* encoded as N-1 */
	cdns_pcie_ep_fn_writew(pcie, fn, reg, val);

	/* Set MSI-X BAR and offset */
	reg = cap + PCI_MSIX_TABLE;
	val = offset | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	/* Set PBA BAR and offset. BAR must match MSI-X BAR */
	reg = cap + PCI_MSIX_PBA;
	val = (offset + (nr_irqs * PCI_MSIX_ENTRY_SIZE)) | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	return 0;
}

static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
				     bool is_asserted)
{
	struct cdns_pcie *pcie = &ep->pcie;
	unsigned long flags;
	u32 offset;
	u16 status;
	u8 msg_code;

	intx &= 3;

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region_for_normal_msg(pcie, 0, fn, 0,
							     ep->irq_phys_addr);
		ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
		ep->irq_pci_fn = fn;
	}

	if (is_asserted) {
		ep->irq_pending |= BIT(intx);
		msg_code = PCIE_MSG_CODE_ASSERT_INTA + intx;
	} else {
		ep->irq_pending &= ~BIT(intx);
		msg_code = PCIE_MSG_CODE_DEASSERT_INTA + intx;
	}

	spin_lock_irqsave(&ep->lock, flags);
	status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
	if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
		status ^= PCI_STATUS_INTERRUPT;
		cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(PCIE_MSG_TYPE_R_LOCAL) |
		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code);
	writel(0, ep->irq_cpu_addr + offset);
}

static int cdns_pcie_ep_send_intx_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
				      u8 intx)
{
	u16 cmd;

	cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
	if (cmd & PCI_COMMAND_INTX_DISABLE)
		return -EINVAL;

	cdns_pcie_ep_assert_intx(ep, fn, intx, true);
	/*
	 * The mdelay() value was taken from dra7xx_pcie_raise_intx_irq()
	 */
	mdelay(1);
	cdns_pcie_ep_assert_intx(ep, fn, intx, false);
	return 0;
}

static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
				     u8 interrupt_num)
{
	struct cdns_pcie *pcie = &ep->pcie;
	u16 flags, mme, data, data_mask;
	u64 pci_addr, pci_addr_mask = 0xff;
	u8 msi_count, cap;

	cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs */
	mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);

	/* Get the PCI address where to write the data into. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      pci_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));

	return 0;
}

static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
				    phys_addr_t addr, u8 interrupt_num,
				    u32 entry_size, u32 *msi_data,
				    u32 *msi_addr_offset)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u64 pci_addr, pci_addr_mask = 0xff;
	u16 flags, mme, data, data_mask;
	u8 msi_count, cap;
	int ret;
	int i;

	cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs */
	mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = data & ~data_mask;

	/* Get the PCI address where to write the data into. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	for (i = 0; i < interrupt_num; i++) {
		ret = cdns_pcie_ep_map_addr(epc, fn, vfn, addr,
					    pci_addr & ~pci_addr_mask,
					    entry_size);
		if (ret)
			return ret;
		addr = addr + entry_size;
	}

	*msi_data = data;
	*msi_addr_offset = pci_addr & pci_addr_mask;

	return 0;
}

static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
				      u16 interrupt_num)
{
	u32 tbl_offset, msg_data, reg;
	struct cdns_pcie *pcie = &ep->pcie;
	struct pci_epf_msix_tbl *msix_tbl;
	struct cdns_pcie_epf *epf;
	u64 pci_addr_mask = 0xff;
	u64 msg_addr;
	u8 bir, cap;
	u16 flags;

	cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX);
	epf = &ep->epf[fn];
	if (vfn > 0)
		epf = &epf->epf[vfn - 1];

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI-X feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS);
	if (!(flags & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	reg = cap + PCI_MSIX_TABLE;
	tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg);
	bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset);
	tbl_offset &= PCI_MSIX_TABLE_OFFSET;

	msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
	msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
	msg_data = msix_tbl[(interrupt_num - 1)].msg_data;

	/* Set the outbound region if needed. */
	if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) ||
	    ep->irq_pci_fn != fn) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      msg_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (msg_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(msg_data, ep->irq_cpu_addr + (msg_addr & pci_addr_mask));

	return 0;
}

static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
				  unsigned int type, u16 interrupt_num)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct device *dev = pcie->dev;

	switch (type) {
	case PCI_IRQ_INTX:
		if (vfn > 0) {
			dev_err(dev, "Cannot raise INTX interrupts for VF\n");
			return -EINVAL;
		}
		return cdns_pcie_ep_send_intx_irq(ep, fn, vfn, 0);

	case PCI_IRQ_MSI:
		return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num);

	case PCI_IRQ_MSIX:
		return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num);

	default:
		break;
	}

	return -EINVAL;
}

static int cdns_pcie_ep_start(struct pci_epc *epc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct device *dev = pcie->dev;
	int max_epfs = sizeof(epc->function_num_map) * 8;
	int ret, epf, last_fn;
	u32 reg, value;
	u8 cap;

	cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_EXP);
	/*
	 * BIT(0) is hardwired to 1, hence function 0 is always enabled
	 * and can't be disabled anyway.
	 */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);

	/*
	 * Next function field in ARI_CAP_AND_CTR register for last function
	 * should be 0. Clear Next Function Number field for the last
	 * function used.
	 */
	last_fn = find_last_bit(&epc->function_num_map, BITS_PER_LONG);
	reg = CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(last_fn);
	value = cdns_pcie_readl(pcie, reg);
	value &= ~CDNS_PCIE_ARI_CAP_NFN_MASK;
	cdns_pcie_writel(pcie, reg, value);

	if (ep->quirk_disable_flr) {
		for (epf = 0; epf < max_epfs; epf++) {
			if (!(epc->function_num_map & BIT(epf)))
				continue;

			value = cdns_pcie_ep_fn_readl(pcie, epf,
						      cap + PCI_EXP_DEVCAP);
			value &= ~PCI_EXP_DEVCAP_FLR;
			cdns_pcie_ep_fn_writel(pcie, epf,
					       cap + PCI_EXP_DEVCAP, value);
		}
	}

	ret = cdns_pcie_start_link(pcie);
	if (ret) {
		dev_err(dev, "Failed to start link\n");
		return ret;
	}

	return 0;
}

static const struct pci_epc_features cdns_pcie_epc_vf_features = {
	.msi_capable = true,
	.msix_capable = true,
	.align = 65536,
};

static const struct pci_epc_features cdns_pcie_epc_features = {
	.msi_capable = true,
	.msix_capable = true,
	.align = 256,
};

static const struct pci_epc_features*
cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	if (!vfunc_no)
		return &cdns_pcie_epc_features;

	return &cdns_pcie_epc_vf_features;
}

static const struct pci_epc_ops cdns_pcie_epc_ops = {
	.write_header	= cdns_pcie_ep_write_header,
	.set_bar	= cdns_pcie_ep_set_bar,
	.clear_bar	= cdns_pcie_ep_clear_bar,
	.map_addr	= cdns_pcie_ep_map_addr,
	.unmap_addr	= cdns_pcie_ep_unmap_addr,
	.set_msi	= cdns_pcie_ep_set_msi,
	.get_msi	= cdns_pcie_ep_get_msi,
	.set_msix	= cdns_pcie_ep_set_msix,
	.get_msix	= cdns_pcie_ep_get_msix,
	.raise_irq	= cdns_pcie_ep_raise_irq,
	.map_msi_irq	= cdns_pcie_ep_map_msi_irq,
	.start		= cdns_pcie_ep_start,
	.get_features	= cdns_pcie_ep_get_features,
};

void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep)
{
	struct device *dev = ep->pcie.dev;
	struct pci_epc *epc = to_pci_epc(dev);

	pci_epc_deinit_notify(epc);
	pci_epc_mem_free_addr(epc, ep->irq_phys_addr, ep->irq_cpu_addr,
			      SZ_128K);
	pci_epc_mem_exit(epc);
}
EXPORT_SYMBOL_GPL(cdns_pcie_ep_disable);

int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
	struct device *dev = ep->pcie.dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;
	struct cdns_pcie *pcie = &ep->pcie;
	struct cdns_pcie_epf *epf;
	struct resource *res;
	struct pci_epc *epc;
	int ret;
	int i;

	pcie->is_rc = false;

	pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
	if (!res) {
		dev_err(dev, "missing \"mem\"\n");
		return -EINVAL;
	}
	pcie->mem_res = res;

	ep->max_regions = CDNS_PCIE_MAX_OB;
	of_property_read_u32(np, "cdns,max-outbound-regions", &ep->max_regions);

	ep->ob_addr = devm_kcalloc(dev,
				   ep->max_regions, sizeof(*ep->ob_addr),
				   GFP_KERNEL);
	if (!ep->ob_addr)
		return -ENOMEM;

	/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));

	epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		return PTR_ERR(epc);
	}

	epc_set_drvdata(epc, ep);

	if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
		epc->max_functions = 1;

	ep->epf = devm_kcalloc(dev, epc->max_functions, sizeof(*ep->epf),
			       GFP_KERNEL);
	if (!ep->epf)
		return -ENOMEM;

	epc->max_vfs = devm_kcalloc(dev, epc->max_functions,
				    sizeof(*epc->max_vfs), GFP_KERNEL);
	if (!epc->max_vfs)
		return -ENOMEM;

	ret = of_property_read_u8_array(np, "max-virtual-functions",
					epc->max_vfs, epc->max_functions);
	if (ret == 0) {
		for (i = 0; i < epc->max_functions; i++) {
			epf = &ep->epf[i];
			if (epc->max_vfs[i] == 0)
				continue;
			epf->epf = devm_kcalloc(dev, epc->max_vfs[i],
						sizeof(*ep->epf), GFP_KERNEL);
			if (!epf->epf)
				return -ENOMEM;
		}
	}

	ret = pci_epc_mem_init(epc, pcie->mem_res->start,
			       resource_size(pcie->mem_res), PAGE_SIZE);
	if (ret < 0) {
		dev_err(dev, "failed to initialize the memory space\n");
		return ret;
	}

	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
						  SZ_128K);
	if (!ep->irq_cpu_addr) {
		dev_err(dev, "failed to reserve memory space for MSI\n");
		ret = -ENOMEM;
		goto free_epc_mem;
	}
	ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
	/* Reserve region 0 for IRQs */
	set_bit(0, &ep->ob_region_map);

	if (ep->quirk_detect_quiet_flag)
		cdns_pcie_detect_quiet_min_delay_set(&ep->pcie);

	spin_lock_init(&ep->lock);

	pci_epc_init_notify(epc);

	return 0;

 free_epc_mem:
	pci_epc_mem_exit(epc);

	return ret;
}
EXPORT_SYMBOL_GPL(cdns_pcie_ep_setup);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence PCIe endpoint controller driver");
MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");