// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe Endpoint controller driver
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/align.h>
#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

/**
 * dw_pcie_ep_get_func_from_ep - Get the struct dw_pcie_ep_func corresponding to
 *				 the endpoint function
 * @ep: DWC EP device
 * @func_no: Function number of the endpoint device
 *
 * Return: struct dw_pcie_ep_func if success, NULL otherwise.
 */
struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
{
	struct dw_pcie_ep_func *ep_func;

	list_for_each_entry(ep_func, &ep->func_list, list) {
		if (ep_func->func_no == func_no)
			return ep_func;
	}

	return NULL;
}

static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no,
				   enum pci_barno bar, int flags)
{
	struct dw_pcie_ep *ep = &pci->ep;
	u32 reg;

	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_ep_writel_dbi2(ep, func_no, reg, 0x0);
	dw_pcie_ep_writel_dbi(ep, func_no, reg, 0x0);
	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0x0);
		dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0x0);
	}
	dw_pcie_dbi_ro_wr_dis(pci);
}

/**
 * dw_pcie_ep_reset_bar - Reset endpoint BAR
 * @pci: DWC PCI device
 * @bar: BAR number of the endpoint
 */
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
	u8 func_no, funcs;

	funcs = pci->ep.epc->max_functions;

	for (func_no = 0; func_no < funcs; func_no++)
		__dw_pcie_ep_reset_bar(pci, func_no, bar, 0);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar);

static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no,
				     u8 cap_ptr, u8 cap)
{
	u8 cap_id, next_cap_ptr;
	u16 reg;

	if (!cap_ptr)
		return 0;

	reg = dw_pcie_ep_readw_dbi(ep, func_no, cap_ptr);
	cap_id = (reg & 0x00ff);

	if (cap_id > PCI_CAP_ID_MAX)
		return 0;

	if (cap_id == cap)
		return cap_ptr;

	next_cap_ptr = (reg & 0xff00) >> 8;
	return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
}

static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
{
	u8 next_cap_ptr;
	u16 reg;

	reg = dw_pcie_ep_readw_dbi(ep, func_no, PCI_CAPABILITY_LIST);
	next_cap_ptr = (reg & 0x00ff);

	return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
}
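/*
 * The two helpers above walk the standard PCI capability list: each
 * capability starts with one byte of capability ID followed by one byte
 * holding the offset of the next capability, so a single 16-bit read yields
 * both. An illustrative sketch (not actual config space contents):
 *
 *	0x34 (PCI_CAPABILITY_LIST) -> 0x50
 *	0x50: [ID = PCI_CAP_ID_MSI,  next = 0x70]
 *	0x70: [ID = PCI_CAP_ID_MSIX, next = 0x00]  <- end of list
 *
 * With this layout, dw_pcie_ep_find_capability(ep, 0, PCI_CAP_ID_MSIX)
 * would return 0x70.
 */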
static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				   struct pci_epf_header *hdr)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_VENDOR_ID, hdr->vendorid);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_DEVICE_ID, hdr->deviceid);
	dw_pcie_ep_writeb_dbi(ep, func_no, PCI_REVISION_ID, hdr->revid);
	dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CLASS_PROG, hdr->progif_code);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_CLASS_DEVICE,
			      hdr->subclass_code | hdr->baseclass_code << 8);
	dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CACHE_LINE_SIZE,
			      hdr->cache_line_size);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_VENDOR_ID,
			      hdr->subsys_vendor_id);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	dw_pcie_ep_writeb_dbi(ep, func_no, PCI_INTERRUPT_PIN,
			      hdr->interrupt_pin);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
				  dma_addr_t cpu_addr, enum pci_barno bar)
{
	int ret;
	u32 free_win;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	if (!ep->bar_to_atu[bar])
		free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
	else
		free_win = ep->bar_to_atu[bar] - 1;

	if (free_win >= pci->num_ib_windows) {
		dev_err(pci->dev, "No free inbound window\n");
		return -EINVAL;
	}

	ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
					  cpu_addr, bar);
	if (ret < 0) {
		dev_err(pci->dev, "Failed to program IB window\n");
		return ret;
	}

	/*
	 * Always increment free_win before assignment, since value 0 is used
	 * to identify an unallocated mapping.
	 */
	ep->bar_to_atu[bar] = free_win + 1;
	set_bit(free_win, ep->ib_window_map);

	return 0;
}

static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep,
				   struct dw_pcie_ob_atu_cfg *atu)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	u32 free_win;
	int ret;

	free_win = find_first_zero_bit(ep->ob_window_map, pci->num_ob_windows);
	if (free_win >= pci->num_ob_windows) {
		dev_err(pci->dev, "No free outbound window\n");
		return -EINVAL;
	}

	atu->index = free_win;
	ret = dw_pcie_prog_outbound_atu(pci, atu);
	if (ret)
		return ret;

	set_bit(free_win, ep->ob_window_map);
	ep->outbound_addr[free_win] = atu->cpu_addr;

	return 0;
}

static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				 struct pci_epf_bar *epf_bar)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar = epf_bar->barno;
	u32 atu_index = ep->bar_to_atu[bar] - 1;

	if (!ep->bar_to_atu[bar])
		return;

	__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);

	dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, atu_index);
	clear_bit(atu_index, ep->ib_window_map);
	ep->epf_bar[bar] = NULL;
	ep->bar_to_atu[bar] = 0;
}
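/*
 * Endpoint function (EPF) drivers do not call dw_pcie_ep_set_bar() below
 * directly; they go through the EPC core. A minimal sketch of how a BAR
 * request reaches this driver (identifiers other than the pci_epc_*() API
 * are illustrative):
 *
 *	struct pci_epf_bar *epf_bar = &epf->bar[BAR_0];
 *
 *	epf_bar->barno = BAR_0;
 *	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
 *
 * pci_epc_set_bar() validates the request and then invokes
 * epc->ops->set_bar(), i.e. dw_pcie_ep_set_bar() below.
 */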
static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			      struct pci_epf_bar *epf_bar)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar = epf_bar->barno;
	size_t size = epf_bar->size;
	int flags = epf_bar->flags;
	int ret, type;
	u32 reg;

	/*
	 * DWC does not allow BAR pairs to overlap, e.g. you cannot combine
	 * BARs 1 and 2 to form a 64-bit BAR.
	 */
	if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1))
		return -EINVAL;

	reg = PCI_BASE_ADDRESS_0 + (4 * bar);

	if (!(flags & PCI_BASE_ADDRESS_SPACE))
		type = PCIE_ATU_TYPE_MEM;
	else
		type = PCIE_ATU_TYPE_IO;

	ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);
	if (ret)
		return ret;

	if (ep->epf_bar[bar])
		return 0;

	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1));
	dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);

	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1));
		dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
	}

	ep->epf_bar[bar] = epf_bar;
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
			      u32 *atu_index)
{
	u32 index;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	for (index = 0; index < pci->num_ob_windows; index++) {
		if (ep->outbound_addr[index] != addr)
			continue;
		*atu_index = index;
		return 0;
	}

	return -EINVAL;
}

static u64 dw_pcie_ep_align_addr(struct pci_epc *epc, u64 pci_addr,
				 size_t *pci_size, size_t *offset)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	u64 mask = pci->region_align - 1;
	size_t ofst = pci_addr & mask;

	*pci_size = ALIGN(ofst + *pci_size, epc->mem->window.page_size);
	*offset = ofst;

	return pci_addr & ~mask;
}

static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				  phys_addr_t addr)
{
	int ret;
	u32 atu_index;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	ret = dw_pcie_find_index(ep, addr, &atu_index);
	if (ret < 0)
		return;

	ep->outbound_addr[atu_index] = 0;
	dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index);
	clear_bit(atu_index, ep->ob_window_map);
}

static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			       phys_addr_t addr, u64 pci_addr, size_t size)
{
	int ret;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ob_atu_cfg atu = { 0 };

	atu.func_no = func_no;
	atu.type = PCIE_ATU_TYPE_MEM;
	atu.cpu_addr = addr;
	atu.pci_addr = pci_addr;
	atu.size = size;
	ret = dw_pcie_ep_outbound_atu(ep, &atu);
	if (ret) {
		dev_err(pci->dev, "Failed to enable address\n");
		return ret;
	}

	return 0;
}
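/*
 * dw_pcie_ep_align_addr() and dw_pcie_ep_map_addr() are used together when
 * a caller must write to an arbitrary PCI address through an outbound iATU
 * window, which can only be programmed at region_align granularity. A
 * worked example, assuming region_align = SZ_4K and page_size = SZ_4K:
 *
 *	pci_addr = 0x40001234, pci_size = 4
 *	-> dw_pcie_ep_align_addr() returns 0x40001000,
 *	   *offset = 0x234, *pci_size = SZ_4K
 *
 * The caller then maps 0x40001000 and adds 0x234 to the mapped virtual
 * address before accessing it (see dw_pcie_ep_raise_msi_irq() below).
 */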
static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie_ep_func *ep_func;
	u32 val, reg;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msi_cap)
		return -EINVAL;

	reg = ep_func->msi_cap + PCI_MSI_FLAGS;
	val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	if (!(val & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	val = FIELD_GET(PCI_MSI_FLAGS_QSIZE, val);

	return val;
}

static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			      u8 interrupts)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func;
	u32 val, reg;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msi_cap)
		return -EINVAL;

	reg = ep_func->msi_cap + PCI_MSI_FLAGS;
	val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	val &= ~PCI_MSI_FLAGS_QMASK;
	val |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, interrupts);
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_ep_writew_dbi(ep, func_no, reg, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie_ep_func *ep_func;
	u32 val, reg;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msix_cap)
		return -EINVAL;

	reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
	val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	if (!(val & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	val &= PCI_MSIX_FLAGS_QSIZE;

	return val;
}

static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			       u16 interrupts, enum pci_barno bir, u32 offset)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func;
	u32 val, reg;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msix_cap)
		return -EINVAL;

	dw_pcie_dbi_ro_wr_en(pci);

	reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
	val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	val &= ~PCI_MSIX_FLAGS_QSIZE;
	val |= interrupts;
	/* Use the per-function accessor so functions other than 0 update
	 * their own MSI-X Message Control register, matching the read above.
	 */
	dw_pcie_ep_writew_dbi(ep, func_no, reg, val);

	reg = ep_func->msix_cap + PCI_MSIX_TABLE;
	val = offset | bir;
	dw_pcie_ep_writel_dbi(ep, func_no, reg, val);

	reg = ep_func->msix_cap + PCI_MSIX_PBA;
	val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
	dw_pcie_ep_writel_dbi(ep, func_no, reg, val);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				unsigned int type, u16 interrupt_num)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);

	if (!ep->ops->raise_irq)
		return -EINVAL;

	return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
}

static void dw_pcie_ep_stop(struct pci_epc *epc)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	dw_pcie_stop_link(pci);
}

static int dw_pcie_ep_start(struct pci_epc *epc)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	return dw_pcie_start_link(pci);
}

static const struct pci_epc_features*
dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);

	if (!ep->ops->get_features)
		return NULL;

	return ep->ops->get_features(ep);
}

static const struct pci_epc_ops epc_ops = {
	.write_header	= dw_pcie_ep_write_header,
	.set_bar	= dw_pcie_ep_set_bar,
	.clear_bar	= dw_pcie_ep_clear_bar,
	.align_addr	= dw_pcie_ep_align_addr,
	.map_addr	= dw_pcie_ep_map_addr,
	.unmap_addr	= dw_pcie_ep_unmap_addr,
	.set_msi	= dw_pcie_ep_set_msi,
	.get_msi	= dw_pcie_ep_get_msi,
	.set_msix	= dw_pcie_ep_set_msix,
	.get_msix	= dw_pcie_ep_get_msix,
	.raise_irq	= dw_pcie_ep_raise_irq,
	.start		= dw_pcie_ep_start,
	.stop		= dw_pcie_ep_stop,
	.get_features	= dw_pcie_ep_get_features,
};
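/*
 * IRQs are raised through the EPC core as well. A minimal sketch of how an
 * EPF driver would raise the second MSI vector of physical function 0 (the
 * surrounding context is illustrative):
 *
 *	ret = pci_epc_raise_irq(epc, 0, 0, PCI_IRQ_MSI, 2);
 *
 * pci_epc_raise_irq() calls epc->ops->raise_irq(), which lands in
 * dw_pcie_ep_raise_irq() above and from there in the controller glue
 * driver's raise_irq() callback, typically implemented with the
 * dw_pcie_ep_raise_{intx,msi,msix}_irq() helpers below.
 */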
/**
 * dw_pcie_ep_raise_intx_irq - Raise INTx IRQ to the host
 * @ep: DWC EP device
 * @func_no: Function number of the endpoint
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct device *dev = pci->dev;

	dev_err(dev, "EP cannot raise INTX IRQs\n");

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq);

/**
 * dw_pcie_ep_raise_msi_irq - Raise MSI IRQ to the host
 * @ep: DWC EP device
 * @func_no: Function number of the endpoint
 * @interrupt_num: Interrupt number to be raised
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
			     u8 interrupt_num)
{
	u32 msg_addr_lower, msg_addr_upper, reg;
	struct dw_pcie_ep_func *ep_func;
	struct pci_epc *epc = ep->epc;
	size_t map_size = sizeof(u32);
	size_t offset;
	u16 msg_ctrl, msg_data;
	bool has_upper;
	u64 msg_addr;
	int ret;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msi_cap)
		return -EINVAL;

	/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
	reg = ep_func->msi_cap + PCI_MSI_FLAGS;
	msg_ctrl = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
	reg = ep_func->msi_cap + PCI_MSI_ADDRESS_LO;
	msg_addr_lower = dw_pcie_ep_readl_dbi(ep, func_no, reg);
	if (has_upper) {
		reg = ep_func->msi_cap + PCI_MSI_ADDRESS_HI;
		msg_addr_upper = dw_pcie_ep_readl_dbi(ep, func_no, reg);
		reg = ep_func->msi_cap + PCI_MSI_DATA_64;
		msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	} else {
		msg_addr_upper = 0;
		reg = ep_func->msi_cap + PCI_MSI_DATA_32;
		msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	}
	msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower;

	msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset);
	ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
				  map_size);
	if (ret)
		return ret;

	writel(msg_data | (interrupt_num - 1), ep->msi_mem + offset);

	dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msi_irq);

/**
 * dw_pcie_ep_raise_msix_irq_doorbell - Raise MSI-X to the host using Doorbell
 *					method
 * @ep: DWC EP device
 * @func_no: Function number of the endpoint device
 * @interrupt_num: Interrupt number to be raised
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
				       u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func;
	u32 msg_data;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msix_cap)
		return -EINVAL;

	msg_data = (func_no << PCIE_MSIX_DOORBELL_PF_SHIFT) |
		   (interrupt_num - 1);

	dw_pcie_writel_dbi(pci, PCIE_MSIX_DOORBELL, msg_data);

	return 0;
}
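/*
 * Two MSI-X delivery methods are provided: the doorbell method above writes
 * the (function, vector) pair to the controller's PCIE_MSIX_DOORBELL
 * register and lets the hardware generate the message, while the generic
 * method below reads the address/data pair from the function's MSI-X table
 * in local memory and writes the message through an outbound window, the
 * same way dw_pcie_ep_raise_msi_irq() does. The doorbell method needs
 * controller support, so glue drivers choose between the two.
 */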
/**
 * dw_pcie_ep_raise_msix_irq - Raise MSI-X to the host
 * @ep: DWC EP device
 * @func_no: Function number of the endpoint device
 * @interrupt_num: Interrupt number to be raised
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
			      u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct pci_epf_msix_tbl *msix_tbl;
	struct dw_pcie_ep_func *ep_func;
	struct pci_epc *epc = ep->epc;
	size_t map_size = sizeof(u32);
	size_t offset;
	u32 reg, msg_data, vec_ctrl;
	u32 tbl_offset;
	u64 msg_addr;
	int ret;
	u8 bir;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msix_cap)
		return -EINVAL;

	reg = ep_func->msix_cap + PCI_MSIX_TABLE;
	tbl_offset = dw_pcie_ep_readl_dbi(ep, func_no, reg);
	bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset);
	tbl_offset &= PCI_MSIX_TABLE_OFFSET;

	msix_tbl = ep->epf_bar[bir]->addr + tbl_offset;
	msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
	msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
	vec_ctrl = msix_tbl[(interrupt_num - 1)].vector_ctrl;

	if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
		dev_dbg(pci->dev, "MSI-X entry ctrl set\n");
		return -EPERM;
	}

	msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset);
	ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
				  map_size);
	if (ret)
		return ret;

	writel(msg_data, ep->msi_mem + offset);

	dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);

	return 0;
}

/**
 * dw_pcie_ep_cleanup - Cleanup DWC EP resources after fundamental reset
 * @ep: DWC EP device
 *
 * Cleans up DWC EP specific resources like eDMA after a fundamental reset
 * such as PERST#. Note that this API is only applicable for drivers
 * supporting PERST# or any other method of fundamental reset.
 */
void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	dw_pcie_edma_remove(pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup);

/**
 * dw_pcie_ep_deinit - Deinitialize the endpoint device
 * @ep: DWC EP device
 *
 * Deinitialize the endpoint device. The EPC device is not destroyed since
 * that is taken care of by devres.
 */
void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
{
	struct pci_epc *epc = ep->epc;

	dw_pcie_ep_cleanup(ep);

	pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
			      epc->mem->window.page_size);

	pci_epc_mem_exit(epc);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_deinit);

static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
{
	u32 header;
	int pos = PCI_CFG_SPACE_SIZE;

	while (pos) {
		header = dw_pcie_readl_dbi(pci, pos);
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (!pos)
			break;
	}

	return 0;
}
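/*
 * Extended capabilities live in config space above 0x100 and use a 32-bit
 * header: bits [15:0] hold the capability ID, bits [19:16] the version and
 * bits [31:20] the offset of the next capability. An illustrative walk
 * (not actual config space contents):
 *
 *	0x100: header = 0x14810015 -> ID 0x15 (REBAR), next at 0x148
 *	0x148: header = 0x0001001f -> ID 0x1f (PTM),   next = 0 (end)
 *
 * With this layout, dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM)
 * would return 0x148.
 */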
static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
{
	unsigned int offset;
	unsigned int nbars;
	u32 reg, i;

	offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);

	dw_pcie_dbi_ro_wr_en(pci);

	if (offset) {
		reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
		nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
			PCI_REBAR_CTRL_NBAR_SHIFT;

		/*
		 * PCIe r6.0, sec 7.8.6.2 requires us to support at least one
		 * size in the range from 1 MB to 512 GB. Advertise support
		 * for 1 MB BAR size only.
		 */
		for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
			dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4));
	}

	dw_pcie_setup(pci);
	dw_pcie_dbi_ro_wr_dis(pci);
}
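/*
 * In the Resizable BAR capability, each supported size is advertised as one
 * bit in PCI_REBAR_CAP: bit n (for n >= 4) corresponds to a size of
 * 2^(n + 16) bytes, so the BIT(4) written above advertises exactly the
 * mandatory 1 MB size (2^20 bytes).
 */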
/**
 * dw_pcie_ep_init_registers - Initialize DWC EP specific registers
 * @ep: DWC EP device
 *
 * Initialize the registers (CSRs) specific to DWC EP. This API should be
 * called only when the endpoint receives an active refclk (either from host
 * or generated locally).
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func;
	struct device *dev = pci->dev;
	struct pci_epc *epc = ep->epc;
	u32 ptm_cap_base, reg;
	u8 hdr_type;
	u8 func_no;
	void *addr;
	int ret;

	hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
		   PCI_HEADER_TYPE_MASK;
	if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
		dev_err(pci->dev,
			"PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
			hdr_type);
		return -EIO;
	}

	dw_pcie_version_detect(pci);

	dw_pcie_iatu_detect(pci);

	ret = dw_pcie_edma_detect(pci);
	if (ret)
		return ret;

	/*
	 * Preset the error code so the allocation failure paths below do not
	 * return the 0 left in ret by a successful dw_pcie_edma_detect().
	 */
	ret = -ENOMEM;
	if (!ep->ib_window_map) {
		ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
						       GFP_KERNEL);
		if (!ep->ib_window_map)
			goto err_remove_edma;
	}

	if (!ep->ob_window_map) {
		ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
						       GFP_KERNEL);
		if (!ep->ob_window_map)
			goto err_remove_edma;
	}

	if (!ep->outbound_addr) {
		addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
				    GFP_KERNEL);
		if (!addr)
			goto err_remove_edma;
		ep->outbound_addr = addr;
	}

	for (func_no = 0; func_no < epc->max_functions; func_no++) {

		ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
		if (ep_func)
			continue;

		ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
		if (!ep_func)
			goto err_remove_edma;

		ep_func->func_no = func_no;
		ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
							      PCI_CAP_ID_MSI);
		ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
							       PCI_CAP_ID_MSIX);

		list_add_tail(&ep_func->list, &ep->func_list);
	}

	if (ep->ops->init)
		ep->ops->init(ep);

	ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);

	/*
	 * PTM responder capability can be disabled only after disabling
	 * PTM root capability.
	 */
	if (ptm_cap_base) {
		dw_pcie_dbi_ro_wr_en(pci);
		reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP);
		reg &= ~PCI_PTM_CAP_ROOT;
		dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg);

		reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP);
		reg &= ~(PCI_PTM_CAP_RES | PCI_PTM_GRANULARITY_MASK);
		dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg);
		dw_pcie_dbi_ro_wr_dis(pci);
	}

	dw_pcie_ep_init_non_sticky_registers(pci);

	return 0;

err_remove_edma:
	dw_pcie_edma_remove(pci);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init_registers);

/**
 * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event
 * @ep: DWC EP device
 */
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
	struct pci_epc *epc = ep->epc;

	pci_epc_linkup(epc);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);

/**
 * dw_pcie_ep_linkdown - Notify EPF drivers about Link Down event
 * @ep: DWC EP device
 *
 * Non-sticky registers are also initialized before sending the notification
 * to the EPF drivers. This is needed since the registers need to be
 * initialized before the link comes back again.
 */
void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct pci_epc *epc = ep->epc;

	/*
	 * Initialize the non-sticky DWC registers as they would've reset post
	 * Link Down. This is specifically needed for drivers not supporting
	 * PERST# as they have no way to reinitialize the registers before the
	 * link comes back again.
	 */
	dw_pcie_ep_init_non_sticky_registers(pci);

	pci_epc_linkdown(epc);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown);
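/*
 * Typical bring-up order in a DWC glue driver, sketched for illustration
 * only (error handling omitted, probe and IRQ context assumed):
 *
 *	ret = dw_pcie_ep_init(&pci->ep);		// at probe time
 *	...						// wait for stable refclk
 *	ret = dw_pcie_ep_init_registers(&pci->ep);
 *	pci_epc_init_notify(pci->ep.epc);
 *	...
 *	dw_pcie_ep_linkup(&pci->ep);			// from Link Up IRQ handler
 */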
/**
 * dw_pcie_ep_init - Initialize the endpoint device
 * @ep: DWC EP device
 *
 * Initialize the endpoint device. Allocate resources and create the EPC
 * device with the endpoint framework.
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
	int ret;
	struct resource *res;
	struct pci_epc *epc;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;

	INIT_LIST_HEAD(&ep->func_list);

	ret = dw_pcie_get_resources(pci);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	if (ep->ops->pre_init)
		ep->ops->pre_init(ep);

	epc = devm_pci_epc_create(dev, &epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "Failed to create epc device\n");
		return PTR_ERR(epc);
	}

	ep->epc = epc;
	epc_set_drvdata(epc, ep);

	ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
	if (ret < 0)
		epc->max_functions = 1;

	ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
			       ep->page_size);
	if (ret < 0) {
		dev_err(dev, "Failed to initialize address space\n");
		return ret;
	}

	ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
					     epc->mem->window.page_size);
	if (!ep->msi_mem) {
		ret = -ENOMEM;
		dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
		goto err_exit_epc_mem;
	}

	return 0;

err_exit_epc_mem:
	pci_epc_mem_exit(epc);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init);