// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe Endpoint controller driver
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/align.h>
#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

/**
 * dw_pcie_ep_get_func_from_ep - Get the struct dw_pcie_ep_func corresponding to
 *				 the endpoint function
 * @ep: DWC EP device
 * @func_no: Function number of the endpoint device
 *
 * Return: struct dw_pcie_ep_func if success, NULL otherwise.
 */
struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
{
	struct dw_pcie_ep_func *ep_func;

	list_for_each_entry(ep_func, &ep->func_list, list) {
		if (ep_func->func_no == func_no)
			return ep_func;
	}

	return NULL;
}

static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no,
				   enum pci_barno bar, int flags)
{
	struct dw_pcie_ep *ep = &pci->ep;
	u32 reg;

	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_ep_writel_dbi2(ep, func_no, reg, 0x0);
	dw_pcie_ep_writel_dbi(ep, func_no, reg, 0x0);
	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0x0);
		dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0x0);
	}
	dw_pcie_dbi_ro_wr_dis(pci);
}

/**
 * dw_pcie_ep_reset_bar - Reset endpoint BAR
 * @pci: DWC PCI device
 * @bar: BAR number of the endpoint
 */
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
	u8 func_no, funcs;

	funcs = pci->ep.epc->max_functions;

	for (func_no = 0; func_no < funcs; func_no++)
		__dw_pcie_ep_reset_bar(pci, func_no, bar, 0);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar);

static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no,
				     u8 cap_ptr, u8 cap)
{
	u8 cap_id, next_cap_ptr;
	u16 reg;

	if (!cap_ptr)
		return 0;

	reg = dw_pcie_ep_readw_dbi(ep, func_no, cap_ptr);
	cap_id = (reg & 0x00ff);

	if (cap_id > PCI_CAP_ID_MAX)
		return 0;

	if (cap_id == cap)
		return cap_ptr;

	next_cap_ptr = (reg & 0xff00) >> 8;
	return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
}

static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
{
	u8 next_cap_ptr;
	u16 reg;

	reg = dw_pcie_ep_readw_dbi(ep, func_no, PCI_CAPABILITY_LIST);
	next_cap_ptr = (reg & 0x00ff);

	return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
}

static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				   struct pci_epf_header *hdr)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_VENDOR_ID, hdr->vendorid);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_DEVICE_ID, hdr->deviceid);
	dw_pcie_ep_writeb_dbi(ep, func_no, PCI_REVISION_ID, hdr->revid);
	dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CLASS_PROG, hdr->progif_code);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_CLASS_DEVICE,
			      hdr->subclass_code | hdr->baseclass_code << 8);
	dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CACHE_LINE_SIZE,
			      hdr->cache_line_size);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_VENDOR_ID,
			      hdr->subsys_vendor_id);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	dw_pcie_ep_writeb_dbi(ep, func_no, PCI_INTERRUPT_PIN,
			      hdr->interrupt_pin);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
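/*
 * Example (illustrative, not part of this driver): an endpoint function
 * driver typically fills a struct pci_epf_header and hands it to the EPC
 * core, which dispatches to dw_pcie_ep_write_header() above:
 *
 *	static struct pci_epf_header hdr = {
 *		.vendorid	= PCI_ANY_ID,
 *		.deviceid	= PCI_ANY_ID,
 *		.baseclass_code	= PCI_CLASS_OTHERS,
 *		.interrupt_pin	= PCI_INTERRUPT_INTA,
 *	};
 *
 *	pci_epc_write_header(epc, func_no, vfunc_no, &hdr);
 */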
static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
				  dma_addr_t cpu_addr, enum pci_barno bar)
{
	int ret;
	u32 free_win;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	if (!ep->bar_to_atu[bar])
		free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
	else
		free_win = ep->bar_to_atu[bar] - 1;

	if (free_win >= pci->num_ib_windows) {
		dev_err(pci->dev, "No free inbound window\n");
		return -EINVAL;
	}

	ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
					  cpu_addr, bar);
	if (ret < 0) {
		dev_err(pci->dev, "Failed to program IB window\n");
		return ret;
	}

	/*
	 * Always increment free_win before assignment, since the value 0 is
	 * used to identify an unallocated mapping.
	 */
	ep->bar_to_atu[bar] = free_win + 1;
	set_bit(free_win, ep->ib_window_map);

	return 0;
}

static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep,
				   struct dw_pcie_ob_atu_cfg *atu)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	u32 free_win;
	int ret;

	free_win = find_first_zero_bit(ep->ob_window_map, pci->num_ob_windows);
	if (free_win >= pci->num_ob_windows) {
		dev_err(pci->dev, "No free outbound window\n");
		return -EINVAL;
	}

	atu->index = free_win;
	ret = dw_pcie_prog_outbound_atu(pci, atu);
	if (ret)
		return ret;

	set_bit(free_win, ep->ob_window_map);
	ep->outbound_addr[free_win] = atu->cpu_addr;

	return 0;
}

static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				 struct pci_epf_bar *epf_bar)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar = epf_bar->barno;
	u32 atu_index = ep->bar_to_atu[bar] - 1;

	if (!ep->bar_to_atu[bar])
		return;

	__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);

	dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, atu_index);
	clear_bit(atu_index, ep->ib_window_map);
	ep->epf_bar[bar] = NULL;
	ep->bar_to_atu[bar] = 0;
}

static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			      struct pci_epf_bar *epf_bar)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar = epf_bar->barno;
	size_t size = epf_bar->size;
	int flags = epf_bar->flags;
	int ret, type;
	u32 reg;

	/*
	 * DWC does not allow BAR pairs to overlap, e.g. you cannot combine
	 * BARs 1 and 2 to form a 64-bit BAR.
	 */
	if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1))
		return -EINVAL;

	reg = PCI_BASE_ADDRESS_0 + (4 * bar);

	if (!(flags & PCI_BASE_ADDRESS_SPACE))
		type = PCIE_ATU_TYPE_MEM;
	else
		type = PCIE_ATU_TYPE_IO;

	ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);
	if (ret)
		return ret;

	if (ep->epf_bar[bar])
		return 0;

	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1));
	dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);

	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1));
		dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
	}

	ep->epf_bar[bar] = epf_bar;
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
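/*
 * Note on the DBI2 writes in dw_pcie_ep_set_bar() above: on DWC, the BAR
 * register offset in the DBI2 (shadow) space holds the BAR mask, which
 * determines the BAR size advertised to the host. Writing size - 1 programs
 * that mask; e.g. (illustrative) a 1 MiB BAR0 writes 0x000fffff to offset
 * PCI_BASE_ADDRESS_0 in DBI2, while the same offset in the regular DBI
 * space carries the usual BAR flags.
 */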
static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
			      u32 *atu_index)
{
	u32 index;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	for (index = 0; index < pci->num_ob_windows; index++) {
		if (ep->outbound_addr[index] != addr)
			continue;
		*atu_index = index;
		return 0;
	}

	return -EINVAL;
}

static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				  phys_addr_t addr)
{
	int ret;
	u32 atu_index;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	ret = dw_pcie_find_index(ep, addr, &atu_index);
	if (ret < 0)
		return;

	dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index);
	clear_bit(atu_index, ep->ob_window_map);
}

static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			       phys_addr_t addr, u64 pci_addr, size_t size)
{
	int ret;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ob_atu_cfg atu = { 0 };

	atu.func_no = func_no;
	atu.type = PCIE_ATU_TYPE_MEM;
	atu.cpu_addr = addr;
	atu.pci_addr = pci_addr;
	atu.size = size;
	ret = dw_pcie_ep_outbound_atu(ep, &atu);
	if (ret) {
		dev_err(pci->dev, "Failed to enable address\n");
		return ret;
	}

	return 0;
}

static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie_ep_func *ep_func;
	u32 val, reg;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msi_cap)
		return -EINVAL;

	reg = ep_func->msi_cap + PCI_MSI_FLAGS;
	val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	if (!(val & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	val = FIELD_GET(PCI_MSI_FLAGS_QSIZE, val);

	return val;
}

static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			      u8 interrupts)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func;
	u32 val, reg;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msi_cap)
		return -EINVAL;

	reg = ep_func->msi_cap + PCI_MSI_FLAGS;
	val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	val &= ~PCI_MSI_FLAGS_QMASK;
	val |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, interrupts);
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_ep_writew_dbi(ep, func_no, reg, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
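/*
 * The Multiple Message Capable field programmed by dw_pcie_ep_set_msi()
 * above is log2-encoded, per the MSI capability layout: e.g. (illustrative)
 * interrupts = 3 advertises 2^3 = 8 vectors to the host. The count the host
 * actually grants comes back through the Multiple Message Enable field,
 * which dw_pcie_ep_get_msi() extracts with
 * FIELD_GET(PCI_MSI_FLAGS_QSIZE, ...).
 */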
static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie_ep_func *ep_func;
	u32 val, reg;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msix_cap)
		return -EINVAL;

	reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
	val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	if (!(val & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	val &= PCI_MSIX_FLAGS_QSIZE;

	return val;
}

static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			       u16 interrupts, enum pci_barno bir, u32 offset)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func;
	u32 val, reg;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msix_cap)
		return -EINVAL;

	dw_pcie_dbi_ro_wr_en(pci);

	reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
	val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	val &= ~PCI_MSIX_FLAGS_QSIZE;
	val |= interrupts;
	/* Use the function-aware write, matching the read above. */
	dw_pcie_ep_writew_dbi(ep, func_no, reg, val);

	reg = ep_func->msix_cap + PCI_MSIX_TABLE;
	val = offset | bir;
	dw_pcie_ep_writel_dbi(ep, func_no, reg, val);

	reg = ep_func->msix_cap + PCI_MSIX_PBA;
	val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
	dw_pcie_ep_writel_dbi(ep, func_no, reg, val);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				unsigned int type, u16 interrupt_num)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);

	if (!ep->ops->raise_irq)
		return -EINVAL;

	return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
}

static void dw_pcie_ep_stop(struct pci_epc *epc)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	dw_pcie_stop_link(pci);
}

static int dw_pcie_ep_start(struct pci_epc *epc)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	return dw_pcie_start_link(pci);
}

static const struct pci_epc_features*
dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);

	if (!ep->ops->get_features)
		return NULL;

	return ep->ops->get_features(ep);
}

static const struct pci_epc_ops epc_ops = {
	.write_header	= dw_pcie_ep_write_header,
	.set_bar	= dw_pcie_ep_set_bar,
	.clear_bar	= dw_pcie_ep_clear_bar,
	.map_addr	= dw_pcie_ep_map_addr,
	.unmap_addr	= dw_pcie_ep_unmap_addr,
	.set_msi	= dw_pcie_ep_set_msi,
	.get_msi	= dw_pcie_ep_get_msi,
	.set_msix	= dw_pcie_ep_set_msix,
	.get_msix	= dw_pcie_ep_get_msix,
	.raise_irq	= dw_pcie_ep_raise_irq,
	.start		= dw_pcie_ep_start,
	.stop		= dw_pcie_ep_stop,
	.get_features	= dw_pcie_ep_get_features,
};
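/*
 * Note: these callbacks are not called directly. Endpoint function drivers
 * go through the pci_epc_* API (e.g. pci_epc_set_bar(), pci_epc_map_addr(),
 * pci_epc_raise_irq()), and the EPC core dispatches each call to the
 * matching entry in this ops table.
 */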
/**
 * dw_pcie_ep_raise_intx_irq - Raise INTx IRQ to the host
 * @ep: DWC EP device
 * @func_no: Function number of the endpoint
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct device *dev = pci->dev;

	dev_err(dev, "EP cannot raise INTX IRQs\n");

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq);

/**
 * dw_pcie_ep_raise_msi_irq - Raise MSI IRQ to the host
 * @ep: DWC EP device
 * @func_no: Function number of the endpoint
 * @interrupt_num: Interrupt number to be raised
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
			     u8 interrupt_num)
{
	u32 msg_addr_lower, msg_addr_upper, reg;
	struct dw_pcie_ep_func *ep_func;
	struct pci_epc *epc = ep->epc;
	unsigned int aligned_offset;
	u16 msg_ctrl, msg_data;
	bool has_upper;
	u64 msg_addr;
	int ret;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msi_cap)
		return -EINVAL;

	/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
	reg = ep_func->msi_cap + PCI_MSI_FLAGS;
	msg_ctrl = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
	reg = ep_func->msi_cap + PCI_MSI_ADDRESS_LO;
	msg_addr_lower = dw_pcie_ep_readl_dbi(ep, func_no, reg);
	if (has_upper) {
		reg = ep_func->msi_cap + PCI_MSI_ADDRESS_HI;
		msg_addr_upper = dw_pcie_ep_readl_dbi(ep, func_no, reg);
		reg = ep_func->msi_cap + PCI_MSI_DATA_64;
		msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	} else {
		msg_addr_upper = 0;
		reg = ep_func->msi_cap + PCI_MSI_DATA_32;
		msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	}
	msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower;

	aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
	msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
	ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
				  epc->mem->window.page_size);
	if (ret)
		return ret;

	writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);

	dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msi_irq);

/**
 * dw_pcie_ep_raise_msix_irq_doorbell - Raise MSI-X to the host using Doorbell
 *					method
 * @ep: DWC EP device
 * @func_no: Function number of the endpoint device
 * @interrupt_num: Interrupt number to be raised
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
				       u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func;
	u32 msg_data;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msix_cap)
		return -EINVAL;

	msg_data = (func_no << PCIE_MSIX_DOORBELL_PF_SHIFT) |
		   (interrupt_num - 1);

	dw_pcie_writel_dbi(pci, PCIE_MSIX_DOORBELL, msg_data);

	return 0;
}
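/*
 * Doorbell layout sketch (assuming the PCIE_MSIX_DOORBELL definitions from
 * pcie-designware.h): the function number sits at
 * PCIE_MSIX_DOORBELL_PF_SHIFT and the low bits carry the zero-based vector,
 * so e.g. func_no = 1, interrupt_num = 3 writes (1 << shift) | 2. The
 * controller then generates the MSI-X message itself, without this driver
 * reading the MSI-X table.
 */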
/**
 * dw_pcie_ep_raise_msix_irq - Raise MSI-X to the host
 * @ep: DWC EP device
 * @func_no: Function number of the endpoint device
 * @interrupt_num: Interrupt number to be raised
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
			      u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct pci_epf_msix_tbl *msix_tbl;
	struct dw_pcie_ep_func *ep_func;
	struct pci_epc *epc = ep->epc;
	u32 reg, msg_data, vec_ctrl;
	unsigned int aligned_offset;
	u32 tbl_offset;
	u64 msg_addr;
	int ret;
	u8 bir;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msix_cap)
		return -EINVAL;

	reg = ep_func->msix_cap + PCI_MSIX_TABLE;
	tbl_offset = dw_pcie_ep_readl_dbi(ep, func_no, reg);
	bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset);
	tbl_offset &= PCI_MSIX_TABLE_OFFSET;

	msix_tbl = ep->epf_bar[bir]->addr + tbl_offset;
	msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
	msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
	vec_ctrl = msix_tbl[(interrupt_num - 1)].vector_ctrl;

	if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
		dev_dbg(pci->dev, "MSI-X entry ctrl set\n");
		return -EPERM;
	}

	aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
	msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
	ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
				  epc->mem->window.page_size);
	if (ret)
		return ret;

	writel(msg_data, ep->msi_mem + aligned_offset);

	dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);

	return 0;
}

/**
 * dw_pcie_ep_cleanup - Cleanup DWC EP resources after fundamental reset
 * @ep: DWC EP device
 *
 * Cleans up the DWC EP specific resources like eDMA etc. after a fundamental
 * reset such as PERST#. Note that this API is only applicable for drivers
 * supporting PERST# or other methods of fundamental reset.
 */
void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	dw_pcie_edma_remove(pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup);

/**
 * dw_pcie_ep_deinit - Deinitialize the endpoint device
 * @ep: DWC EP device
 *
 * Deinitialize the endpoint device. The EPC device is not destroyed here,
 * since that is taken care of by devres.
 */
void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
{
	struct pci_epc *epc = ep->epc;

	dw_pcie_ep_cleanup(ep);

	pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
			      epc->mem->window.page_size);

	pci_epc_mem_exit(epc);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_deinit);
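/*
 * Usage sketch: dw_pcie_ep_cleanup() is for drivers that observe a
 * fundamental reset and will run dw_pcie_ep_init_registers() again
 * afterwards, while dw_pcie_ep_deinit() is the full teardown counterpart of
 * dw_pcie_ep_init(), typically called from the platform driver's remove
 * path.
 */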
static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
{
	u32 header;
	int pos = PCI_CFG_SPACE_SIZE;

	while (pos) {
		header = dw_pcie_readl_dbi(pci, pos);
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (!pos)
			break;
	}

	return 0;
}

static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
{
	unsigned int offset;
	unsigned int nbars;
	u32 reg, i;

	offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);

	dw_pcie_dbi_ro_wr_en(pci);

	if (offset) {
		reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
		nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
			PCI_REBAR_CTRL_NBAR_SHIFT;

		/*
		 * PCIe r6.0, sec 7.8.6.2 requires us to support at least one
		 * size in the range from 1 MB to 512 GB. Advertise support
		 * for 1 MB BAR size only.
		 */
		for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
			dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
	}

	dw_pcie_setup(pci);
	dw_pcie_dbi_ro_wr_dis(pci);
}
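/*
 * "Non-sticky" here carries the PCIe meaning: registers that lose their
 * value on hot reset or link down, as opposed to sticky (ROS/RWS) ones that
 * survive it. That is why dw_pcie_ep_linkdown() below reprograms them
 * before notifying the EPF drivers.
 */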
/**
 * dw_pcie_ep_init_registers - Initialize DWC EP specific registers
 * @ep: DWC EP device
 *
 * Initialize the registers (CSRs) specific to DWC EP. This API should be
 * called only when the endpoint receives an active refclk (either from host
 * or generated locally).
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func;
	struct device *dev = pci->dev;
	struct pci_epc *epc = ep->epc;
	u32 ptm_cap_base, reg;
	u8 hdr_type;
	u8 func_no;
	void *addr;
	int ret;

	hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
		   PCI_HEADER_TYPE_MASK;
	if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
		dev_err(pci->dev,
			"PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
			hdr_type);
		return -EIO;
	}

	dw_pcie_version_detect(pci);

	dw_pcie_iatu_detect(pci);

	ret = dw_pcie_edma_detect(pci);
	if (ret)
		return ret;

	if (!ep->ib_window_map) {
		ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
						       GFP_KERNEL);
		if (!ep->ib_window_map) {
			ret = -ENOMEM;
			goto err_remove_edma;
		}
	}

	if (!ep->ob_window_map) {
		ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
						       GFP_KERNEL);
		if (!ep->ob_window_map) {
			ret = -ENOMEM;
			goto err_remove_edma;
		}
	}

	if (!ep->outbound_addr) {
		addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
				    GFP_KERNEL);
		if (!addr) {
			ret = -ENOMEM;
			goto err_remove_edma;
		}
		ep->outbound_addr = addr;
	}

	for (func_no = 0; func_no < epc->max_functions; func_no++) {
		ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
		if (ep_func)
			continue;

		ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
		if (!ep_func) {
			ret = -ENOMEM;
			goto err_remove_edma;
		}

		ep_func->func_no = func_no;
		ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
							      PCI_CAP_ID_MSI);
		ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
							       PCI_CAP_ID_MSIX);

		list_add_tail(&ep_func->list, &ep->func_list);
	}

	if (ep->ops->init)
		ep->ops->init(ep);

	ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);

	/*
	 * PTM responder capability can be disabled only after disabling
	 * PTM root capability.
	 */
	if (ptm_cap_base) {
		dw_pcie_dbi_ro_wr_en(pci);
		reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP);
		reg &= ~PCI_PTM_CAP_ROOT;
		dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg);

		reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP);
		reg &= ~(PCI_PTM_CAP_RES | PCI_PTM_GRANULARITY_MASK);
		dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg);
		dw_pcie_dbi_ro_wr_dis(pci);
	}

	dw_pcie_ep_init_non_sticky_registers(pci);

	return 0;

err_remove_edma:
	dw_pcie_edma_remove(pci);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init_registers);

/**
 * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event
 * @ep: DWC EP device
 */
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
	struct pci_epc *epc = ep->epc;

	pci_epc_linkup(epc);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);

/**
 * dw_pcie_ep_linkdown - Notify EPF drivers about Link Down event
 * @ep: DWC EP device
 *
 * Non-sticky registers are also initialized before sending the notification
 * to the EPF drivers. This is needed since the registers need to be
 * initialized before the link comes back again.
 */
void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct pci_epc *epc = ep->epc;

	/*
	 * Initialize the non-sticky DWC registers as they would've reset post
	 * Link Down. This is specifically needed for drivers not supporting
	 * PERST# as they have no way to reinitialize the registers before the
	 * link comes back again.
	 */
	dw_pcie_ep_init_non_sticky_registers(pci);

	pci_epc_linkdown(epc);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown);
/**
 * dw_pcie_ep_init - Initialize the endpoint device
 * @ep: DWC EP device
 *
 * Initialize the endpoint device. Allocate resources and create the EPC
 * device with the endpoint framework.
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
	int ret;
	struct resource *res;
	struct pci_epc *epc;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;

	INIT_LIST_HEAD(&ep->func_list);

	ret = dw_pcie_get_resources(pci);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	if (ep->ops->pre_init)
		ep->ops->pre_init(ep);

	epc = devm_pci_epc_create(dev, &epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "Failed to create epc device\n");
		return PTR_ERR(epc);
	}

	ep->epc = epc;
	epc_set_drvdata(epc, ep);

	ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
	if (ret < 0)
		epc->max_functions = 1;

	ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
			       ep->page_size);
	if (ret < 0) {
		dev_err(dev, "Failed to initialize address space\n");
		return ret;
	}

	ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
					     epc->mem->window.page_size);
	if (!ep->msi_mem) {
		ret = -ENOMEM;
		dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
		goto err_exit_epc_mem;
	}

	return 0;

err_exit_epc_mem:
	pci_epc_mem_exit(epc);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
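/*
 * Typical flow for a glue driver built on this file (a sketch, not a
 * definitive recipe): call dw_pcie_ep_init() from probe to allocate
 * resources and register the EPC device; then, once a stable refclk is
 * available (e.g. after PERST# deassertion), call
 * dw_pcie_ep_init_registers() followed by pci_epc_init_notify() so that
 * EPF drivers can bind and configure the function.
 */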