// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe endpoint controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

#include "pcie-cadence.h"
#include "../../pci.h"

#define CDNS_PCIE_EP_MIN_APERTURE		128	/* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE		0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY	0x3

static u8 cdns_pcie_get_fn_from_vfn(struct cdns_pcie *pcie, u8 fn, u8 vfn)
{
	u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
	u32 first_vf_offset, stride;

	if (vfn == 0)
		return fn;

	first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_OFFSET);
	stride = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_STRIDE);
	fn = fn + first_vf_offset + ((vfn - 1) * stride);

	return fn;
}

static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
				     struct pci_epf_header *hdr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
	struct cdns_pcie *pcie = &ep->pcie;
	u32 reg;

	if (vfn > 1) {
		dev_err(&epc->dev, "Only Virtual Function #1 has deviceID\n");
		return -EINVAL;
	} else if (vfn == 1) {
		reg = cap + PCI_SRIOV_VF_DID;
		cdns_pcie_ep_fn_writew(pcie, fn, reg, hdr->deviceid);
		return 0;
	}

	cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
			       hdr->subclass_code | hdr->baseclass_code << 8);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
			       hdr->cache_line_size);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);

	/*
	 * Vendor ID can only be modified from function 0, all other functions
	 * use the same vendor ID as function 0.
	 */
	if (fn == 0) {
		/* Update the vendor IDs. */
		u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
			 CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);

		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	return 0;
}

static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	dma_addr_t bar_phys = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
	u64 sz;

	/* BAR size is 2^(aperture + 7) */
	sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	sz = 1ULL << fls64(sz - 1);
	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
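	/*
	 * Worked example: a 1 MiB BAR request is already a power of two, so
	 * sz = SZ_1M and aperture = ilog2(SZ_1M) - 7 = 20 - 7 = 13.
	 */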

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
	} else {
		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
		bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);

		if (is_64bits && (bar & 1))
			return -EINVAL;

		if (is_64bits && is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else if (is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else if (is_64bits)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
		else
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
	}

	addr0 = lower_32_bits(bar_phys);
	addr1 = upper_32_bits(bar_phys);

	if (vfn == 1)
		reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
	else
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
	b = (bar < BAR_4) ? bar : bar - BAR_4;

	if (vfn == 0 || vfn == 1) {
		cfg = cdns_pcie_readl(pcie, reg);
		cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
			 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
		cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
			CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
		cdns_pcie_writel(pcie, reg, cfg);
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
			 addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
			 addr1);

	if (vfn > 0)
		epf = &epf->epf[vfn - 1];
	epf->epf_bar[bar] = epf_bar;

	return 0;
}

static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				   struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	enum pci_barno bar = epf_bar->barno;
	u32 reg, cfg, b, ctrl;

	if (vfn == 1)
		reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
	else
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
	b = (bar < BAR_4) ? bar : bar - BAR_4;

	if (vfn == 0 || vfn == 1) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
		cfg = cdns_pcie_readl(pcie, reg);
		cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
			 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
		cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
		cdns_pcie_writel(pcie, reg, cfg);
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);

	if (vfn > 0)
		epf = &epf->epf[vfn - 1];
	epf->epf_bar[bar] = NULL;
}

static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				 phys_addr_t addr, u64 pci_addr, size_t size)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
	if (r >= ep->max_regions - 1) {
		dev_err(&epc->dev, "no free outbound region\n");
		return -EINVAL;
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size);

	set_bit(r, &ep->ob_region_map);
	ep->ob_addr[r] = addr;

	return 0;
}

static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				    phys_addr_t addr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	for (r = 0; r < ep->max_regions - 1; r++)
		if (ep->ob_addr[r] == addr)
			break;

	if (r == ep->max_regions - 1)
		return;

	cdns_pcie_reset_outbound_region(pcie, r);

	ep->ob_addr[r] = 0;
	clear_bit(r, &ep->ob_region_map);
}

static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 nr_irqs)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u8 mmc = order_base_2(nr_irqs);
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/*
	 * Set the Multiple Message Capable bitfield in the Message Control
	 * register.
	 */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
	flags |= PCI_MSI_FLAGS_64BIT;
	flags &= ~PCI_MSI_FLAGS_MASKBIT;
	cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);

	return 0;
}

static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Validate that the MSI feature is actually enabled. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/*
	 * Get the Multiple Message Enable bitfield from the Message Control
	 * register.
	 */
	mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);

	return 1 << mme;
}

static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 val, reg;

	func_no = cdns_pcie_get_fn_from_vfn(pcie, func_no, vfunc_no);

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, func_no, reg);
	if (!(val & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	val &= PCI_MSIX_FLAGS_QSIZE;

	return val + 1;
}

static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
				 u16 nr_irqs, enum pci_barno bir, u32 offset)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 val, reg;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
	val &= ~PCI_MSIX_FLAGS_QSIZE;
	val |= nr_irqs - 1; /* encoded as N-1 */
	cdns_pcie_ep_fn_writew(pcie, fn, reg, val);

	/* Set MSI-X BAR and offset */
	reg = cap + PCI_MSIX_TABLE;
	val = offset | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	/* Set PBA BAR and offset. BAR must match MSI-X BAR */
	reg = cap + PCI_MSIX_PBA;
	val = (offset + (nr_irqs * PCI_MSIX_ENTRY_SIZE)) | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	return 0;
}

static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
				     bool is_asserted)
{
	struct cdns_pcie *pcie = &ep->pcie;
	unsigned long flags;
	u32 offset;
	u16 status;
	u8 msg_code;

	intx &= 3;

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region_for_normal_msg(pcie, 0, fn, 0,
							     ep->irq_phys_addr);
		ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
		ep->irq_pci_fn = fn;
	}

	if (is_asserted) {
		ep->irq_pending |= BIT(intx);
		msg_code = PCIE_MSG_CODE_ASSERT_INTA + intx;
	} else {
		ep->irq_pending &= ~BIT(intx);
		msg_code = PCIE_MSG_CODE_DEASSERT_INTA + intx;
	}

	spin_lock_irqsave(&ep->lock, flags);
	status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
	if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
		status ^= PCI_STATUS_INTERRUPT;
		cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code);
	writel(0, ep->irq_cpu_addr + offset);
}

static int cdns_pcie_ep_send_intx_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
				      u8 intx)
{
	u16 cmd;

	cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
	if (cmd & PCI_COMMAND_INTX_DISABLE)
		return -EINVAL;

	cdns_pcie_ep_assert_intx(ep, fn, intx, true);
	/*
	 * The mdelay() value was taken from dra7xx_pcie_raise_intx_irq()
	 */
	mdelay(1);
	cdns_pcie_ep_assert_intx(ep, fn, intx, false);
	return 0;
}

static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
				     u8 interrupt_num)
{
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	u64 pci_addr, pci_addr_mask = 0xff;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs */
	mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);

	/* Get the PCI address to which the data will be written. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      pci_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));

	return 0;
}

static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
				    phys_addr_t addr, u8 interrupt_num,
				    u32 entry_size, u32 *msi_data,
				    u32 *msi_addr_offset)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	struct cdns_pcie *pcie = &ep->pcie;
	u64 pci_addr, pci_addr_mask = 0xff;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	int ret;
	int i;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs */
	mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = data & ~data_mask;

	/* Get the PCI address to which the data will be written. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	for (i = 0; i < interrupt_num; i++) {
		ret = cdns_pcie_ep_map_addr(epc, fn, vfn, addr,
					    pci_addr & ~pci_addr_mask,
					    entry_size);
		if (ret)
			return ret;
		addr = addr + entry_size;
	}

	*msi_data = data;
	*msi_addr_offset = pci_addr & pci_addr_mask;

	return 0;
}

static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
				      u16 interrupt_num)
{
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 tbl_offset, msg_data, reg;
	struct cdns_pcie *pcie = &ep->pcie;
	struct pci_epf_msix_tbl *msix_tbl;
	struct cdns_pcie_epf *epf;
	u64 pci_addr_mask = 0xff;
	u64 msg_addr;
	u16 flags;
	u8 bir;

	epf = &ep->epf[fn];
	if (vfn > 0)
		epf = &epf->epf[vfn - 1];

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI-X feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS);
	if (!(flags & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	reg = cap + PCI_MSIX_TABLE;
	tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg);
	bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset);
	tbl_offset &= PCI_MSIX_TABLE_OFFSET;

	msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
	msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
	msg_data = msix_tbl[(interrupt_num - 1)].msg_data;

	/* Set the outbound region if needed. */
	if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) ||
	    ep->irq_pci_fn != fn) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      msg_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (msg_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(msg_data, ep->irq_cpu_addr + (msg_addr & pci_addr_mask));

	return 0;
}

static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
				  unsigned int type, u16 interrupt_num)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct device *dev = pcie->dev;

	switch (type) {
	case PCI_IRQ_INTX:
		if (vfn > 0) {
			dev_err(dev, "Cannot raise INTX interrupts for VF\n");
			return -EINVAL;
		}
		return cdns_pcie_ep_send_intx_irq(ep, fn, vfn, 0);

	case PCI_IRQ_MSI:
		return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num);

	case PCI_IRQ_MSIX:
		return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num);

	default:
		break;
	}

	return -EINVAL;
}

static int cdns_pcie_ep_start(struct pci_epc *epc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct device *dev = pcie->dev;
	int max_epfs = sizeof(epc->function_num_map) * 8;
	int ret, epf, last_fn;
	u32 reg, value;

	/*
	 * BIT(0) is hardwired to 1, hence function 0 is always enabled
	 * and can't be disabled anyway.
	 */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);

	/*
	 * The Next Function Number field in the ARI_CAP_AND_CTRL register of
	 * the last function should be 0, so clear it for the last function
	 * used.
	 */
	last_fn = find_last_bit(&epc->function_num_map, BITS_PER_LONG);
	reg = CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(last_fn);
	value = cdns_pcie_readl(pcie, reg);
	value &= ~CDNS_PCIE_ARI_CAP_NFN_MASK;
	cdns_pcie_writel(pcie, reg, value);

	if (ep->quirk_disable_flr) {
		for (epf = 0; epf < max_epfs; epf++) {
			if (!(epc->function_num_map & BIT(epf)))
				continue;

			value = cdns_pcie_ep_fn_readl(pcie, epf,
					CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
					PCI_EXP_DEVCAP);
			value &= ~PCI_EXP_DEVCAP_FLR;
			cdns_pcie_ep_fn_writel(pcie, epf,
					CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
					PCI_EXP_DEVCAP, value);
		}
	}

	ret = cdns_pcie_start_link(pcie);
	if (ret) {
		dev_err(dev, "Failed to start link\n");
		return ret;
	}

	return 0;
}

static const struct pci_epc_features cdns_pcie_epc_vf_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = true,
	.align = 65536,
};

static const struct pci_epc_features cdns_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = true,
	.align = 256,
};

static const struct pci_epc_features*
cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	if (!vfunc_no)
		return &cdns_pcie_epc_features;

	return &cdns_pcie_epc_vf_features;
}

static const struct pci_epc_ops cdns_pcie_epc_ops = {
	.write_header = cdns_pcie_ep_write_header,
	.set_bar = cdns_pcie_ep_set_bar,
	.clear_bar = cdns_pcie_ep_clear_bar,
	.map_addr = cdns_pcie_ep_map_addr,
	.unmap_addr = cdns_pcie_ep_unmap_addr,
	.set_msi = cdns_pcie_ep_set_msi,
	.get_msi = cdns_pcie_ep_get_msi,
	.set_msix = cdns_pcie_ep_set_msix,
	.get_msix = cdns_pcie_ep_get_msix,
	.raise_irq = cdns_pcie_ep_raise_irq,
	.map_msi_irq = cdns_pcie_ep_map_msi_irq,
	.start = cdns_pcie_ep_start,
	.get_features = cdns_pcie_ep_get_features,
};

void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep)
{
	struct device *dev = ep->pcie.dev;
	struct pci_epc *epc = to_pci_epc(dev);

	pci_epc_deinit_notify(epc);
	pci_epc_mem_free_addr(epc, ep->irq_phys_addr, ep->irq_cpu_addr,
			      SZ_128K);
	pci_epc_mem_exit(epc);
}
EXPORT_SYMBOL_GPL(cdns_pcie_ep_disable);

int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
	struct device *dev = ep->pcie.dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;
	struct cdns_pcie *pcie = &ep->pcie;
	struct cdns_pcie_epf *epf;
	struct resource *res;
	struct pci_epc *epc;
	int ret;
	int i;

	pcie->is_rc = false;

	pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
	if (!res) {
		dev_err(dev, "missing \"mem\"\n");
		return -EINVAL;
	}
	pcie->mem_res = res;

	ep->max_regions = CDNS_PCIE_MAX_OB;
	of_property_read_u32(np, "cdns,max-outbound-regions", &ep->max_regions);

	ep->ob_addr = devm_kcalloc(dev,
				   ep->max_regions, sizeof(*ep->ob_addr),
				   GFP_KERNEL);
	if (!ep->ob_addr)
		return -ENOMEM;

	/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));

	epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		return PTR_ERR(epc);
	}

	epc_set_drvdata(epc, ep);

	if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
		epc->max_functions = 1;

	ep->epf = devm_kcalloc(dev, epc->max_functions, sizeof(*ep->epf),
			       GFP_KERNEL);
	if (!ep->epf)
		return -ENOMEM;

	epc->max_vfs = devm_kcalloc(dev, epc->max_functions,
				    sizeof(*epc->max_vfs), GFP_KERNEL);
	if (!epc->max_vfs)
		return -ENOMEM;

	ret = of_property_read_u8_array(np, "max-virtual-functions",
					epc->max_vfs, epc->max_functions);
	if (ret == 0) {
		for (i = 0; i < epc->max_functions; i++) {
			epf = &ep->epf[i];
			if (epc->max_vfs[i] == 0)
				continue;
			epf->epf = devm_kcalloc(dev, epc->max_vfs[i],
						sizeof(*ep->epf), GFP_KERNEL);
			if (!epf->epf)
				return -ENOMEM;
		}
	}

	ret = pci_epc_mem_init(epc, pcie->mem_res->start,
			       resource_size(pcie->mem_res), PAGE_SIZE);
	if (ret < 0) {
		dev_err(dev, "failed to initialize the memory space\n");
		return ret;
	}

	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
						  SZ_128K);
	if (!ep->irq_cpu_addr) {
		dev_err(dev, "failed to reserve memory space for MSI\n");
		ret = -ENOMEM;
		goto free_epc_mem;
	}
	ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
	/* Reserve region 0 for IRQs */
	set_bit(0, &ep->ob_region_map);

	if (ep->quirk_detect_quiet_flag)
		cdns_pcie_detect_quiet_min_delay_set(&ep->pcie);

	spin_lock_init(&ep->lock);

	pci_epc_init_notify(epc);

	return 0;

free_epc_mem:
	pci_epc_mem_exit(epc);

	return ret;
}
EXPORT_SYMBOL_GPL(cdns_pcie_ep_setup);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence PCIe endpoint controller driver");
MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");
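
/*
 * Usage sketch (hypothetical glue driver; device-tree matching and error
 * handling are omitted): a platform driver embeds struct cdns_pcie_ep in its
 * private data, points ep->pcie.dev at its own device, and calls
 * cdns_pcie_ep_setup() from probe, e.g.:
 *
 *	static int my_cdns_ep_probe(struct platform_device *pdev)
 *	{
 *		struct cdns_pcie_ep *ep;
 *
 *		ep = devm_kzalloc(&pdev->dev, sizeof(*ep), GFP_KERNEL);
 *		if (!ep)
 *			return -ENOMEM;
 *
 *		ep->pcie.dev = &pdev->dev;
 *		return cdns_pcie_ep_setup(ep);
 *	}
 *
 * cdns_pcie_ep_setup() then looks up the "reg" and "mem" resources and the
 * optional "cdns,max-outbound-regions", "max-functions" and
 * "max-virtual-functions" properties through that device, so the glue driver
 * only has to provide the device and any quirk flags before calling it;
 * cdns_pcie_ep_disable() undoes the setup on remove.
 */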