// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe endpoint controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

#include "pcie-cadence.h"

#define CDNS_PCIE_EP_MIN_APERTURE		128	/* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE		0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY	0x3

static u8 cdns_pcie_get_fn_from_vfn(struct cdns_pcie *pcie, u8 fn, u8 vfn)
{
	u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
	u32 first_vf_offset, stride;

	if (vfn == 0)
		return fn;

	first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_OFFSET);
	stride = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_STRIDE);
	fn = fn + first_vf_offset + ((vfn - 1) * stride);

	return fn;
}
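
/*
 * Worked example (illustrative values only): if the PF's SR-IOV
 * capability reports First VF Offset = 4 and VF Stride = 1, then VF 2
 * of physical function 0 maps to absolute function 0 + 4 + (2 - 1) * 1 = 5.
 */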

static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
				     struct pci_epf_header *hdr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
	struct cdns_pcie *pcie = &ep->pcie;
	u32 reg;

	if (vfn > 1) {
		dev_err(&epc->dev, "Only Virtual Function #1 has deviceID\n");
		return -EINVAL;
	} else if (vfn == 1) {
		reg = cap + PCI_SRIOV_VF_DID;
		cdns_pcie_ep_fn_writew(pcie, fn, reg, hdr->deviceid);
		return 0;
	}

	cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
			       hdr->subclass_code | hdr->baseclass_code << 8);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
			       hdr->cache_line_size);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);

	/*
	 * Vendor ID can only be modified from function 0, all other functions
	 * use the same vendor ID as function 0.
	 */
	if (fn == 0) {
		/* Update the vendor IDs. */
		u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
			 CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);

		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	return 0;
}

static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	dma_addr_t bar_phys = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
	u64 sz;

	/* BAR size is 2^(aperture + 7) */
	sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	sz = 1ULL << fls64(sz - 1);
	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
	} else {
		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
		bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);

		if (is_64bits && (bar & 1))
			return -EINVAL;

		if (is_64bits && is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else if (is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else if (is_64bits)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
		else
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
	}

	addr0 = lower_32_bits(bar_phys);
	addr1 = upper_32_bits(bar_phys);

	if (vfn == 1)
		reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
	else
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
	b = (bar < BAR_4) ? bar : bar - BAR_4;

	if (vfn == 0 || vfn == 1) {
		cfg = cdns_pcie_readl(pcie, reg);
		cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
			 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
		cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
			CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
		cdns_pcie_writel(pcie, reg, cfg);
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
			 addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
			 addr1);

	if (vfn > 0)
		epf = &epf->epf[vfn - 1];
	epf->epf_bar[bar] = epf_bar;

	return 0;
}
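
/*
 * Worked example for the aperture encoding above (illustrative sizes):
 * a 1 MiB BAR is already a power of two, so sz = 2^20 and
 * aperture = ilog2(2^20) - 7 = 13; a 200 KiB request is first rounded
 * up to 256 KiB (2^18), giving aperture 11.
 */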

static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				   struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	enum pci_barno bar = epf_bar->barno;
	u32 reg, cfg, b, ctrl;

	if (vfn == 1)
		reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
	else
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
	b = (bar < BAR_4) ? bar : bar - BAR_4;

	if (vfn == 0 || vfn == 1) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
		cfg = cdns_pcie_readl(pcie, reg);
		cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
			 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
		cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
		cdns_pcie_writel(pcie, reg, cfg);
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);

	if (vfn > 0)
		epf = &epf->epf[vfn - 1];
	epf->epf_bar[bar] = NULL;
}

static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				 phys_addr_t addr, u64 pci_addr, size_t size)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
	if (r >= ep->max_regions - 1) {
		dev_err(&epc->dev, "no free outbound region\n");
		return -EINVAL;
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size);

	set_bit(r, &ep->ob_region_map);
	ep->ob_addr[r] = addr;

	return 0;
}

static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				    phys_addr_t addr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	for (r = 0; r < ep->max_regions - 1; r++)
		if (ep->ob_addr[r] == addr)
			break;

	if (r == ep->max_regions - 1)
		return;

	cdns_pcie_reset_outbound_region(pcie, r);

	ep->ob_addr[r] = 0;
	clear_bit(r, &ep->ob_region_map);
}

static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/*
	 * Set the Multiple Message Capable bitfield into the Message Control
	 * register.
	 */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
	flags |= PCI_MSI_FLAGS_64BIT;
	flags &= ~PCI_MSI_FLAGS_MASKBIT;
	cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);

	return 0;
}

static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Validate that the MSI feature is actually enabled. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/*
	 * Get the Multiple Message Enable bitfield from the Message Control
	 * register.
	 */
	mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);

	return mme;
}
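
/*
 * Both the MMC and MME bitfields are log2-encoded, as reflected by
 * "msi_count = 1 << mme" further down: the value returned by
 * cdns_pcie_ep_get_msi() means 2^mme vectors are enabled, e.g. mme = 3
 * corresponds to 8 MSI vectors.
 */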

static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 val, reg;

	func_no = cdns_pcie_get_fn_from_vfn(pcie, func_no, vfunc_no);

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, func_no, reg);
	if (!(val & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	val &= PCI_MSIX_FLAGS_QSIZE;

	return val;
}

static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
				 u16 interrupts, enum pci_barno bir,
				 u32 offset)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 val, reg;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
	val &= ~PCI_MSIX_FLAGS_QSIZE;
	val |= interrupts;
	cdns_pcie_ep_fn_writew(pcie, fn, reg, val);

	/* Set MSI-X BAR and offset */
	reg = cap + PCI_MSIX_TABLE;
	val = offset | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	/* Set PBA BAR and offset. BAR must match MSI-X BAR */
	reg = cap + PCI_MSIX_PBA;
	val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	return 0;
}
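
/*
 * Note on the Table Offset/BIR encoding used above: per the PCI spec,
 * the BAR Indicator Register (BIR) lives in bits [2:0] and the 8-byte
 * aligned table offset occupies the remaining bits, which is why a
 * single "offset | bir" value can be written in one 32-bit access.
 */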

static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
				     bool is_asserted)
{
	struct cdns_pcie *pcie = &ep->pcie;
	unsigned long flags;
	u32 offset;
	u16 status;
	u8 msg_code;

	intx &= 3;

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region_for_normal_msg(pcie, 0, fn, 0,
							     ep->irq_phys_addr);
		ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
		ep->irq_pci_fn = fn;
	}

	if (is_asserted) {
		ep->irq_pending |= BIT(intx);
		msg_code = MSG_CODE_ASSERT_INTA + intx;
	} else {
		ep->irq_pending &= ~BIT(intx);
		msg_code = MSG_CODE_DEASSERT_INTA + intx;
	}

	spin_lock_irqsave(&ep->lock, flags);
	status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
	if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
		status ^= PCI_STATUS_INTERRUPT;
		cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code);
	writel(0, ep->irq_cpu_addr + offset);
}

static int cdns_pcie_ep_send_intx_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
				      u8 intx)
{
	u16 cmd;

	cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
	if (cmd & PCI_COMMAND_INTX_DISABLE)
		return -EINVAL;

	cdns_pcie_ep_assert_intx(ep, fn, intx, true);
	/*
	 * The mdelay() value was taken from dra7xx_pcie_raise_intx_irq()
	 */
	mdelay(1);
	cdns_pcie_ep_assert_intx(ep, fn, intx, false);
	return 0;
}
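
/*
 * The XOR test in cdns_pcie_ep_assert_intx() above keeps the Interrupt
 * Status bit of the Status register in sync with the driver's view:
 * the bit is only flipped when "some INTx pending" (ep->irq_pending != 0)
 * and the latched PCI_STATUS_INTERRUPT state disagree, so asserting
 * INTB while INTA is already pending leaves the register untouched.
 */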

static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
				     u8 interrupt_num)
{
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	u64 pci_addr, pci_addr_mask = 0xff;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs */
	mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);

	/* Get the PCI address where to write the data into. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      pci_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));

	return 0;
}

static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
				    phys_addr_t addr, u8 interrupt_num,
				    u32 entry_size, u32 *msi_data,
				    u32 *msi_addr_offset)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	struct cdns_pcie *pcie = &ep->pcie;
	u64 pci_addr, pci_addr_mask = 0xff;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	int ret;
	int i;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs */
	mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = data & ~data_mask;

	/* Get the PCI address where to write the data into. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	for (i = 0; i < interrupt_num; i++) {
		ret = cdns_pcie_ep_map_addr(epc, fn, vfn, addr,
					    pci_addr & ~pci_addr_mask,
					    entry_size);
		if (ret)
			return ret;
		addr = addr + entry_size;
	}

	*msi_data = data;
	*msi_addr_offset = pci_addr & pci_addr_mask;

	return 0;
}
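
/*
 * Example of the MSI data composition above (illustrative values):
 * with four vectors enabled (mme = 2, so data_mask = 0x3) and a
 * host-programmed message data of 0x40, raising vector 3 writes
 * (0x40 & ~0x3) | (3 - 1) = 0x42 to the host's doorbell address.
 */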

static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
				      u16 interrupt_num)
{
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 tbl_offset, msg_data, reg;
	struct cdns_pcie *pcie = &ep->pcie;
	struct pci_epf_msix_tbl *msix_tbl;
	struct cdns_pcie_epf *epf;
	u64 pci_addr_mask = 0xff;
	u64 msg_addr;
	u16 flags;
	u8 bir;

	epf = &ep->epf[fn];
	if (vfn > 0)
		epf = &epf->epf[vfn - 1];

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI-X feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS);
	if (!(flags & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	reg = cap + PCI_MSIX_TABLE;
	tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg);
	bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset);
	tbl_offset &= PCI_MSIX_TABLE_OFFSET;

	msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
	msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
	msg_data = msix_tbl[(interrupt_num - 1)].msg_data;

	/* Set the outbound region if needed. */
	if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) ||
	    ep->irq_pci_fn != fn) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      msg_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (msg_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(msg_data, ep->irq_cpu_addr + (msg_addr & pci_addr_mask));

	return 0;
}

static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
				  unsigned int type, u16 interrupt_num)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct device *dev = pcie->dev;

	switch (type) {
	case PCI_IRQ_INTX:
		if (vfn > 0) {
			dev_err(dev, "Cannot raise INTX interrupts for VF\n");
			return -EINVAL;
		}
		return cdns_pcie_ep_send_intx_irq(ep, fn, vfn, 0);

	case PCI_IRQ_MSI:
		return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num);

	case PCI_IRQ_MSIX:
		return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num);

	default:
		break;
	}

	return -EINVAL;
}
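
/*
 * INTx is rejected for virtual functions above because the SR-IOV
 * specification does not allow VFs to implement INTx; only the
 * physical function may signal legacy interrupts.
 */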

static int cdns_pcie_ep_start(struct pci_epc *epc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct device *dev = pcie->dev;
	int max_epfs = sizeof(epc->function_num_map) * 8;
	int ret, epf, last_fn;
	u32 reg, value;

	/*
	 * BIT(0) is hardwired to 1, hence function 0 is always enabled
	 * and can't be disabled anyway.
	 */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);

	/*
	 * Next function field in ARI_CAP_AND_CTRL register for last function
	 * should be 0. Clear Next Function Number field for the last
	 * function used.
	 */
	last_fn = find_last_bit(&epc->function_num_map, BITS_PER_LONG);
	reg = CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(last_fn);
	value = cdns_pcie_readl(pcie, reg);
	value &= ~CDNS_PCIE_ARI_CAP_NFN_MASK;
	cdns_pcie_writel(pcie, reg, value);

	if (ep->quirk_disable_flr) {
		for (epf = 0; epf < max_epfs; epf++) {
			if (!(epc->function_num_map & BIT(epf)))
				continue;

			value = cdns_pcie_ep_fn_readl(pcie, epf,
					CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
					PCI_EXP_DEVCAP);
			value &= ~PCI_EXP_DEVCAP_FLR;
			cdns_pcie_ep_fn_writel(pcie, epf,
					CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
					PCI_EXP_DEVCAP, value);
		}
	}

	ret = cdns_pcie_start_link(pcie);
	if (ret) {
		dev_err(dev, "Failed to start link\n");
		return ret;
	}

	return 0;
}

static const struct pci_epc_features cdns_pcie_epc_vf_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = true,
	.align = 65536,
};

static const struct pci_epc_features cdns_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = true,
	.align = 256,
};

static const struct pci_epc_features*
cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	if (!vfunc_no)
		return &cdns_pcie_epc_features;

	return &cdns_pcie_epc_vf_features;
}

static const struct pci_epc_ops cdns_pcie_epc_ops = {
	.write_header	= cdns_pcie_ep_write_header,
	.set_bar	= cdns_pcie_ep_set_bar,
	.clear_bar	= cdns_pcie_ep_clear_bar,
	.map_addr	= cdns_pcie_ep_map_addr,
	.unmap_addr	= cdns_pcie_ep_unmap_addr,
	.set_msi	= cdns_pcie_ep_set_msi,
	.get_msi	= cdns_pcie_ep_get_msi,
	.set_msix	= cdns_pcie_ep_set_msix,
	.get_msix	= cdns_pcie_ep_get_msix,
	.raise_irq	= cdns_pcie_ep_raise_irq,
	.map_msi_irq	= cdns_pcie_ep_map_msi_irq,
	.start		= cdns_pcie_ep_start,
	.get_features	= cdns_pcie_ep_get_features,
};

int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
	struct device *dev = ep->pcie.dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;
	struct cdns_pcie *pcie = &ep->pcie;
	struct cdns_pcie_epf *epf;
	struct resource *res;
	struct pci_epc *epc;
	int ret;
	int i;

	pcie->is_rc = false;

	pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
	if (!res) {
		dev_err(dev, "missing \"mem\"\n");
		return -EINVAL;
	}
	pcie->mem_res = res;

	ep->max_regions = CDNS_PCIE_MAX_OB;
	of_property_read_u32(np, "cdns,max-outbound-regions", &ep->max_regions);

	ep->ob_addr = devm_kcalloc(dev,
				   ep->max_regions, sizeof(*ep->ob_addr),
				   GFP_KERNEL);
	if (!ep->ob_addr)
		return -ENOMEM;

	/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));

	epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		return PTR_ERR(epc);
	}

	epc_set_drvdata(epc, ep);

	if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
		epc->max_functions = 1;

	ep->epf = devm_kcalloc(dev, epc->max_functions, sizeof(*ep->epf),
			       GFP_KERNEL);
	if (!ep->epf)
		return -ENOMEM;

	epc->max_vfs = devm_kcalloc(dev, epc->max_functions,
				    sizeof(*epc->max_vfs), GFP_KERNEL);
	if (!epc->max_vfs)
		return -ENOMEM;

	ret = of_property_read_u8_array(np, "max-virtual-functions",
					epc->max_vfs, epc->max_functions);
	if (ret == 0) {
		for (i = 0; i < epc->max_functions; i++) {
			epf = &ep->epf[i];
			if (epc->max_vfs[i] == 0)
				continue;
			epf->epf = devm_kcalloc(dev, epc->max_vfs[i],
						sizeof(*ep->epf), GFP_KERNEL);
			if (!epf->epf)
				return -ENOMEM;
		}
	}

	ret = pci_epc_mem_init(epc, pcie->mem_res->start,
			       resource_size(pcie->mem_res), PAGE_SIZE);
	if (ret < 0) {
		dev_err(dev, "failed to initialize the memory space\n");
		return ret;
	}

	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
						  SZ_128K);
	if (!ep->irq_cpu_addr) {
		dev_err(dev, "failed to reserve memory space for MSI\n");
		ret = -ENOMEM;
		goto free_epc_mem;
	}
	ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
	/* Reserve region 0 for IRQs */
	set_bit(0, &ep->ob_region_map);

	if (ep->quirk_detect_quiet_flag)
		cdns_pcie_detect_quiet_min_delay_set(&ep->pcie);

	spin_lock_init(&ep->lock);

	pci_epc_init_notify(epc);

	return 0;

 free_epc_mem:
	pci_epc_mem_exit(epc);

	return ret;
}
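
/*
 * Usage sketch (hypothetical glue driver, not part of this file): a
 * platform wrapper embedding struct cdns_pcie_ep fills in pcie.dev and
 * any quirk flags, then calls cdns_pcie_ep_setup() from its probe,
 * along these lines:
 *
 *	static int my_cdns_plat_probe(struct platform_device *pdev)
 *	{
 *		struct cdns_pcie_ep *ep;
 *
 *		ep = devm_kzalloc(&pdev->dev, sizeof(*ep), GFP_KERNEL);
 *		if (!ep)
 *			return -ENOMEM;
 *
 *		ep->pcie.dev = &pdev->dev;
 *		return cdns_pcie_ep_setup(ep);
 *	}
 */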