// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe endpoint controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

#include "pcie-cadence.h"

#define CDNS_PCIE_EP_MIN_APERTURE		128	/* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE		0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY	0x3

static u8 cdns_pcie_get_fn_from_vfn(struct cdns_pcie *pcie, u8 fn, u8 vfn)
{
	u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
	u32 first_vf_offset, stride;

	if (vfn == 0)
		return fn;

	first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_OFFSET);
	stride = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_STRIDE);
	fn = fn + first_vf_offset + ((vfn - 1) * stride);

	return fn;
}

static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
				     struct pci_epf_header *hdr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
	struct cdns_pcie *pcie = &ep->pcie;
	u32 reg;

	if (vfn > 1) {
		dev_err(&epc->dev, "Only Virtual Function #1 has deviceID\n");
		return -EINVAL;
	} else if (vfn == 1) {
		reg = cap + PCI_SRIOV_VF_DID;
		cdns_pcie_ep_fn_writew(pcie, fn, reg, hdr->deviceid);
		return 0;
	}

	cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
			       hdr->subclass_code | hdr->baseclass_code << 8);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
			       hdr->cache_line_size);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);

	/*
	 * Vendor ID can only be modified from function 0, all other functions
	 * use the same vendor ID as function 0.
	 */
	if (fn == 0) {
		/* Update the vendor IDs. */
		u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
			 CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);

		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	return 0;
}

static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	dma_addr_t bar_phys = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
	u64 sz;

	/* BAR size is 2^(aperture + 7) */
	sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	sz = 1ULL << fls64(sz - 1);
	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
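	/*
	 * Worked example (illustrative): a 1 MiB BAR request rounds up to
	 * 2^20 bytes, so aperture = ilog2(SZ_1M) - 7 = 13.
	 */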

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
	} else {
		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
		bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);

		if (is_64bits && (bar & 1))
			return -EINVAL;

		if (is_64bits && is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else if (is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else if (is_64bits)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
		else
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
	}

	addr0 = lower_32_bits(bar_phys);
	addr1 = upper_32_bits(bar_phys);

	if (vfn == 1)
		reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
	else
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
	b = (bar < BAR_4) ? bar : bar - BAR_4;

	if (vfn == 0 || vfn == 1) {
		cfg = cdns_pcie_readl(pcie, reg);
		cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
			 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
		cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
			CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
		cdns_pcie_writel(pcie, reg, cfg);
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
			 addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
			 addr1);

	if (vfn > 0)
		epf = &epf->epf[vfn - 1];
	epf->epf_bar[bar] = epf_bar;

	return 0;
}

static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				   struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	enum pci_barno bar = epf_bar->barno;
	u32 reg, cfg, b, ctrl;

	if (vfn == 1)
		reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
	else
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
	b = (bar < BAR_4) ? bar : bar - BAR_4;
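	/*
	 * Note: the per-function BAR settings are packed into two registers,
	 * selected above by (bar < BAR_4), so 'b' is the field index within
	 * the chosen register (BAR0-BAR3 vs. BAR4-BAR5).
	 */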

	if (vfn == 0 || vfn == 1) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
		cfg = cdns_pcie_readl(pcie, reg);
		cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
			 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
		cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
		cdns_pcie_writel(pcie, reg, cfg);
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);

	if (vfn > 0)
		epf = &epf->epf[vfn - 1];
	epf->epf_bar[bar] = NULL;
}

static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				 phys_addr_t addr, u64 pci_addr, size_t size)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
	if (r >= ep->max_regions - 1) {
		dev_err(&epc->dev, "no free outbound region\n");
		return -EINVAL;
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size);

	set_bit(r, &ep->ob_region_map);
	ep->ob_addr[r] = addr;

	return 0;
}

static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				    phys_addr_t addr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	for (r = 0; r < ep->max_regions - 1; r++)
		if (ep->ob_addr[r] == addr)
			break;

	if (r == ep->max_regions - 1)
		return;

	cdns_pcie_reset_outbound_region(pcie, r);

	ep->ob_addr[r] = 0;
	clear_bit(r, &ep->ob_region_map);
}

static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/*
	 * Set the Multiple Message Capable bitfield in the Message Control
	 * register.
	 */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	flags &= ~PCI_MSI_FLAGS_QMASK;
	flags |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, mmc);
	flags |= PCI_MSI_FLAGS_64BIT;
	flags &= ~PCI_MSI_FLAGS_MASKBIT;
	cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);

	return 0;
}

static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Validate that the MSI feature is actually enabled. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/*
	 * Get the Multiple Message Enable bitfield from the Message Control
	 * register.
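	 * MME is the log2 of the number of vectors the host enabled; callers
	 * such as cdns_pcie_ep_send_msi_irq() derive the count as 1 << mme.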
	 */
	mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);

	return mme;
}

static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 val, reg;

	func_no = cdns_pcie_get_fn_from_vfn(pcie, func_no, vfunc_no);

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, func_no, reg);
	if (!(val & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	val &= PCI_MSIX_FLAGS_QSIZE;

	return val;
}

static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
				 u16 interrupts, enum pci_barno bir,
				 u32 offset)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 val, reg;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
	val &= ~PCI_MSIX_FLAGS_QSIZE;
	val |= interrupts;
	cdns_pcie_ep_fn_writew(pcie, fn, reg, val);

	/* Set the MSI-X table BAR and offset. */
	reg = cap + PCI_MSIX_TABLE;
	val = offset | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	/* Set the PBA BAR and offset. The BAR must match the MSI-X BAR. */
	reg = cap + PCI_MSIX_PBA;
	val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	return 0;
}

static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
				     bool is_asserted)
{
	struct cdns_pcie *pcie = &ep->pcie;
	unsigned long flags;
	u32 offset;
	u16 status;
	u8 msg_code;

	intx &= 3;

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
		     ep->irq_pci_fn != fn)) {
		/*
		 * First region was reserved for IRQ writes.
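		 * Retarget it so that the writel() below goes out on the link
		 * as an INTx assert/deassert message TLP rather than a memory
		 * write.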
		 */
		cdns_pcie_set_outbound_region_for_normal_msg(pcie, 0, fn, 0,
							     ep->irq_phys_addr);
		ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
		ep->irq_pci_fn = fn;
	}

	if (is_asserted) {
		ep->irq_pending |= BIT(intx);
		msg_code = MSG_CODE_ASSERT_INTA + intx;
	} else {
		ep->irq_pending &= ~BIT(intx);
		msg_code = MSG_CODE_DEASSERT_INTA + intx;
	}

	spin_lock_irqsave(&ep->lock, flags);
	status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
	if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
		status ^= PCI_STATUS_INTERRUPT;
		cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
		 CDNS_PCIE_MSG_NO_DATA;
	writel(0, ep->irq_cpu_addr + offset);
}

static int cdns_pcie_ep_send_intx_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
				      u8 intx)
{
	u16 cmd;

	cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
	if (cmd & PCI_COMMAND_INTX_DISABLE)
		return -EINVAL;

	cdns_pcie_ep_assert_intx(ep, fn, intx, true);
	/* The mdelay() value was taken from dra7xx_pcie_raise_intx_irq(). */
	mdelay(1);
	cdns_pcie_ep_assert_intx(ep, fn, intx, false);
	return 0;
}

static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
				     u8 interrupt_num)
{
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	u64 pci_addr, pci_addr_mask = 0xff;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs. */
	mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);

	/* Get the PCI address to write the data to. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
		     ep->irq_pci_fn != fn)) {
		/*
		 * First region was reserved for IRQ writes.
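		 * Remap it as a small window (pci_addr_mask + 1 = 256 bytes)
		 * over the host's MSI target address; the masked-off low bits
		 * select the exact doorbell offset below.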
		 */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      pci_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));

	return 0;
}

static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
				    phys_addr_t addr, u8 interrupt_num,
				    u32 entry_size, u32 *msi_data,
				    u32 *msi_addr_offset)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	struct cdns_pcie *pcie = &ep->pcie;
	u64 pci_addr, pci_addr_mask = 0xff;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	int ret;
	int i;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs. */
	mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = data & ~data_mask;

	/* Get the PCI address to write the data to. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	for (i = 0; i < interrupt_num; i++) {
		ret = cdns_pcie_ep_map_addr(epc, fn, vfn, addr,
					    pci_addr & ~pci_addr_mask,
					    entry_size);
		if (ret)
			return ret;
		addr = addr + entry_size;
	}

	*msi_data = data;
	*msi_addr_offset = pci_addr & pci_addr_mask;

	return 0;
}

static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
				      u16 interrupt_num)
{
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 tbl_offset, msg_data, reg;
	struct cdns_pcie *pcie = &ep->pcie;
	struct pci_epf_msix_tbl *msix_tbl;
	struct cdns_pcie_epf *epf;
	u64 pci_addr_mask = 0xff;
	u64 msg_addr;
	u16 flags;
	u8 bir;

	epf = &ep->epf[fn];
	if (vfn > 0)
		epf = &epf->epf[vfn - 1];

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI-X feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS);
	if (!(flags & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	reg = cap + PCI_MSIX_TABLE;
	tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg);
	bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset);
	tbl_offset &= PCI_MSIX_TABLE_OFFSET;

	msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
	msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
	msg_data = msix_tbl[(interrupt_num - 1)].msg_data;

	/* Set the outbound region if needed. */
	if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) ||
	    ep->irq_pci_fn != fn) {
		/*
		 * First region was reserved for IRQ writes.
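		 * As in the MSI case, remap it as a 256-byte window over the
		 * message address read from the MSI-X table entry.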
		 */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      msg_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (msg_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(msg_data, ep->irq_cpu_addr + (msg_addr & pci_addr_mask));

	return 0;
}

static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
				  unsigned int type, u16 interrupt_num)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct device *dev = pcie->dev;

	switch (type) {
	case PCI_IRQ_INTX:
		if (vfn > 0) {
			dev_err(dev, "Cannot raise INTX interrupts for VF\n");
			return -EINVAL;
		}
		return cdns_pcie_ep_send_intx_irq(ep, fn, vfn, 0);

	case PCI_IRQ_MSI:
		return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num);

	case PCI_IRQ_MSIX:
		return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num);

	default:
		break;
	}

	return -EINVAL;
}

static int cdns_pcie_ep_start(struct pci_epc *epc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct device *dev = pcie->dev;
	int max_epfs = sizeof(epc->function_num_map) * 8;
	int ret, epf, last_fn;
	u32 reg, value;

	/*
	 * BIT(0) is hardwired to 1, hence function 0 is always enabled
	 * and can't be disabled anyway.
	 */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);

	/*
	 * The Next Function Number field in the ARI_CAP_AND_CTRL register
	 * must be 0 for the last function, so clear it for the last function
	 * in use.
	 */
	last_fn = find_last_bit(&epc->function_num_map, BITS_PER_LONG);
	reg = CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(last_fn);
	value = cdns_pcie_readl(pcie, reg);
	value &= ~CDNS_PCIE_ARI_CAP_NFN_MASK;
	cdns_pcie_writel(pcie, reg, value);

	if (ep->quirk_disable_flr) {
		for (epf = 0; epf < max_epfs; epf++) {
			if (!(epc->function_num_map & BIT(epf)))
				continue;

			value = cdns_pcie_ep_fn_readl(pcie, epf,
					CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
					PCI_EXP_DEVCAP);
			value &= ~PCI_EXP_DEVCAP_FLR;
			cdns_pcie_ep_fn_writel(pcie, epf,
					CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
					PCI_EXP_DEVCAP, value);
		}
	}

	ret = cdns_pcie_start_link(pcie);
	if (ret) {
		dev_err(dev, "Failed to start link\n");
		return ret;
	}

	return 0;
}

static const struct pci_epc_features cdns_pcie_epc_vf_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = true,
	.align = 65536,
};

static const struct pci_epc_features cdns_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = true,
	.align = 256,
};

static const struct pci_epc_features*
cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	if (!vfunc_no)
		return &cdns_pcie_epc_features;

	return &cdns_pcie_epc_vf_features;
}

static const struct pci_epc_ops cdns_pcie_epc_ops = {
	.write_header = cdns_pcie_ep_write_header,
	.set_bar = cdns_pcie_ep_set_bar,
	.clear_bar = cdns_pcie_ep_clear_bar,
	.map_addr = cdns_pcie_ep_map_addr,
	.unmap_addr = cdns_pcie_ep_unmap_addr,
	.set_msi = cdns_pcie_ep_set_msi,
	.get_msi = cdns_pcie_ep_get_msi,
	.set_msix = cdns_pcie_ep_set_msix,
	.get_msix = cdns_pcie_ep_get_msix,
	.raise_irq = cdns_pcie_ep_raise_irq,
	.map_msi_irq = cdns_pcie_ep_map_msi_irq,
	.start = cdns_pcie_ep_start,
	.get_features = cdns_pcie_ep_get_features,
};

int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
	struct device *dev = ep->pcie.dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;
	struct cdns_pcie *pcie = &ep->pcie;
	struct cdns_pcie_epf *epf;
	struct resource *res;
	struct pci_epc *epc;
	int ret;
	int i;

	pcie->is_rc = false;

	pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
	if (!res) {
		dev_err(dev, "missing \"mem\"\n");
		return -EINVAL;
	}
	pcie->mem_res = res;

	ep->max_regions = CDNS_PCIE_MAX_OB;
	of_property_read_u32(np, "cdns,max-outbound-regions", &ep->max_regions);

	ep->ob_addr = devm_kcalloc(dev,
				   ep->max_regions, sizeof(*ep->ob_addr),
				   GFP_KERNEL);
	if (!ep->ob_addr)
		return -ENOMEM;

	/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));

	epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		return PTR_ERR(epc);
	}

	epc_set_drvdata(epc, ep);

	if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
		epc->max_functions = 1;

	ep->epf = devm_kcalloc(dev, epc->max_functions, sizeof(*ep->epf),
			       GFP_KERNEL);
	if (!ep->epf)
		return -ENOMEM;

	epc->max_vfs = devm_kcalloc(dev, epc->max_functions,
				    sizeof(*epc->max_vfs), GFP_KERNEL);
	if (!epc->max_vfs)
		return -ENOMEM;

	ret = of_property_read_u8_array(np, "max-virtual-functions",
					epc->max_vfs, epc->max_functions);
	if (ret == 0) {
		for (i = 0; i < epc->max_functions; i++) {
			epf = &ep->epf[i];
			if (epc->max_vfs[i] == 0)
				continue;
			epf->epf = devm_kcalloc(dev, epc->max_vfs[i],
						sizeof(*ep->epf), GFP_KERNEL);
			if (!epf->epf)
				return -ENOMEM;
		}
	}

	ret = pci_epc_mem_init(epc, pcie->mem_res->start,
			       resource_size(pcie->mem_res), PAGE_SIZE);
	if (ret < 0) {
		dev_err(dev, "failed to initialize the memory space\n");
		return ret;
	}

	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
						  SZ_128K);
	if (!ep->irq_cpu_addr) {
		dev_err(dev, "failed to reserve memory space for MSI\n");
		ret = -ENOMEM;
		goto free_epc_mem;
	}
	ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
	/* Reserve region 0 for IRQs. */
	set_bit(0, &ep->ob_region_map);

	if (ep->quirk_detect_quiet_flag)
		cdns_pcie_detect_quiet_min_delay_set(&ep->pcie);

	spin_lock_init(&ep->lock);

	pci_epc_init_notify(epc);

	return 0;

 free_epc_mem:
	pci_epc_mem_exit(epc);

	return ret;
}