1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Synopsys DesignWare PCIe Endpoint controller driver 4 * 5 * Copyright (C) 2017 Texas Instruments 6 * Author: Kishon Vijay Abraham I <kishon@ti.com> 7 */ 8 9 #include <linux/align.h> 10 #include <linux/bitfield.h> 11 #include <linux/of.h> 12 #include <linux/platform_device.h> 13 14 #include "pcie-designware.h" 15 #include <linux/pci-epc.h> 16 #include <linux/pci-epf.h> 17 18 /** 19 * dw_pcie_ep_get_func_from_ep - Get the struct dw_pcie_ep_func corresponding to 20 * the endpoint function 21 * @ep: DWC EP device 22 * @func_no: Function number of the endpoint device 23 * 24 * Return: struct dw_pcie_ep_func if success, NULL otherwise. 25 */ 26 struct dw_pcie_ep_func * 27 dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no) 28 { 29 struct dw_pcie_ep_func *ep_func; 30 31 list_for_each_entry(ep_func, &ep->func_list, list) { 32 if (ep_func->func_no == func_no) 33 return ep_func; 34 } 35 36 return NULL; 37 } 38 39 static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no, 40 enum pci_barno bar, int flags) 41 { 42 struct dw_pcie_ep *ep = &pci->ep; 43 u32 reg; 44 45 reg = PCI_BASE_ADDRESS_0 + (4 * bar); 46 dw_pcie_dbi_ro_wr_en(pci); 47 dw_pcie_ep_writel_dbi2(ep, func_no, reg, 0x0); 48 dw_pcie_ep_writel_dbi(ep, func_no, reg, 0x0); 49 if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { 50 dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0x0); 51 dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0x0); 52 } 53 dw_pcie_dbi_ro_wr_dis(pci); 54 } 55 56 /** 57 * dw_pcie_ep_reset_bar - Reset endpoint BAR 58 * @pci: DWC PCI device 59 * @bar: BAR number of the endpoint 60 */ 61 void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar) 62 { 63 u8 func_no, funcs; 64 65 funcs = pci->ep.epc->max_functions; 66 67 for (func_no = 0; func_no < funcs; func_no++) 68 __dw_pcie_ep_reset_bar(pci, func_no, bar, 0); 69 } 70 EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar); 71 72 static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 
cap) 73 { 74 return PCI_FIND_NEXT_CAP(dw_pcie_ep_read_cfg, PCI_CAPABILITY_LIST, 75 cap, NULL, ep, func_no); 76 } 77 78 static u16 dw_pcie_ep_find_ext_capability(struct dw_pcie_ep *ep, 79 u8 func_no, u8 cap) 80 { 81 return PCI_FIND_NEXT_EXT_CAP(dw_pcie_ep_read_cfg, 0, 82 cap, NULL, ep, func_no); 83 } 84 85 static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 86 struct pci_epf_header *hdr) 87 { 88 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 89 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 90 91 dw_pcie_dbi_ro_wr_en(pci); 92 dw_pcie_ep_writew_dbi(ep, func_no, PCI_VENDOR_ID, hdr->vendorid); 93 dw_pcie_ep_writew_dbi(ep, func_no, PCI_DEVICE_ID, hdr->deviceid); 94 dw_pcie_ep_writeb_dbi(ep, func_no, PCI_REVISION_ID, hdr->revid); 95 dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CLASS_PROG, hdr->progif_code); 96 dw_pcie_ep_writew_dbi(ep, func_no, PCI_CLASS_DEVICE, 97 hdr->subclass_code | hdr->baseclass_code << 8); 98 dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CACHE_LINE_SIZE, 99 hdr->cache_line_size); 100 dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_VENDOR_ID, 101 hdr->subsys_vendor_id); 102 dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_ID, hdr->subsys_id); 103 dw_pcie_ep_writeb_dbi(ep, func_no, PCI_INTERRUPT_PIN, 104 hdr->interrupt_pin); 105 dw_pcie_dbi_ro_wr_dis(pci); 106 107 return 0; 108 } 109 110 /* BAR Match Mode inbound iATU mapping */ 111 static int dw_pcie_ep_ib_atu_bar(struct dw_pcie_ep *ep, u8 func_no, int type, 112 dma_addr_t parent_bus_addr, enum pci_barno bar, 113 size_t size) 114 { 115 int ret; 116 u32 free_win; 117 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 118 struct dw_pcie_ep_func *ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); 119 120 if (!ep_func) 121 return -EINVAL; 122 123 if (!ep_func->bar_to_atu[bar]) 124 free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows); 125 else 126 free_win = ep_func->bar_to_atu[bar] - 1; 127 128 if (free_win >= pci->num_ib_windows) { 129 dev_err(pci->dev, "No free inbound 
window\n"); 130 return -EINVAL; 131 } 132 133 ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type, 134 parent_bus_addr, bar, size); 135 if (ret < 0) { 136 dev_err(pci->dev, "Failed to program IB window\n"); 137 return ret; 138 } 139 140 /* 141 * Always increment free_win before assignment, since value 0 is used to identify 142 * unallocated mapping. 143 */ 144 ep_func->bar_to_atu[bar] = free_win + 1; 145 set_bit(free_win, ep->ib_window_map); 146 147 return 0; 148 } 149 150 static void dw_pcie_ep_clear_ib_maps(struct dw_pcie_ep *ep, u8 func_no, enum pci_barno bar) 151 { 152 struct dw_pcie_ep_func *ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); 153 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 154 struct device *dev = pci->dev; 155 unsigned int i, num; 156 u32 atu_index; 157 u32 *indexes; 158 159 if (!ep_func) 160 return; 161 162 /* Tear down the BAR Match Mode mapping, if any. */ 163 if (ep_func->bar_to_atu[bar]) { 164 atu_index = ep_func->bar_to_atu[bar] - 1; 165 dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, atu_index); 166 clear_bit(atu_index, ep->ib_window_map); 167 ep_func->bar_to_atu[bar] = 0; 168 } 169 170 /* Tear down all Address Match Mode mappings, if any. 
*/ 171 indexes = ep_func->ib_atu_indexes[bar]; 172 num = ep_func->num_ib_atu_indexes[bar]; 173 ep_func->ib_atu_indexes[bar] = NULL; 174 ep_func->num_ib_atu_indexes[bar] = 0; 175 if (!indexes) 176 return; 177 for (i = 0; i < num; i++) { 178 dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, indexes[i]); 179 clear_bit(indexes[i], ep->ib_window_map); 180 } 181 devm_kfree(dev, indexes); 182 } 183 184 static u64 dw_pcie_ep_read_bar_assigned(struct dw_pcie_ep *ep, u8 func_no, 185 enum pci_barno bar, int flags) 186 { 187 u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar); 188 u32 lo, hi; 189 u64 addr; 190 191 lo = dw_pcie_ep_readl_dbi(ep, func_no, reg); 192 193 if (flags & PCI_BASE_ADDRESS_SPACE) 194 return lo & PCI_BASE_ADDRESS_IO_MASK; 195 196 addr = lo & PCI_BASE_ADDRESS_MEM_MASK; 197 if (!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)) 198 return addr; 199 200 hi = dw_pcie_ep_readl_dbi(ep, func_no, reg + 4); 201 return addr | ((u64)hi << 32); 202 } 203 204 static int dw_pcie_ep_validate_submap(struct dw_pcie_ep *ep, 205 const struct pci_epf_bar_submap *submap, 206 unsigned int num_submap, size_t bar_size) 207 { 208 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 209 u32 align = pci->region_align; 210 size_t off = 0; 211 unsigned int i; 212 size_t size; 213 214 if (!align || !IS_ALIGNED(bar_size, align)) 215 return -EINVAL; 216 217 /* 218 * The submap array order defines the BAR layout (submap[0] starts 219 * at offset 0 and each entry immediately follows the previous 220 * one). Here, validate that it forms a strict, gapless 221 * decomposition of the BAR: 222 * - each entry has a non-zero size 223 * - sizes, implicit offsets and phys_addr are aligned to 224 * pci->region_align 225 * - each entry lies within the BAR range 226 * - the entries exactly cover the whole BAR 227 * 228 * Note: dw_pcie_prog_inbound_atu() also checks alignment for the 229 * PCI address and the target phys_addr, but validating up-front 230 * avoids partially programming iATU windows in vain. 
231 */ 232 for (i = 0; i < num_submap; i++) { 233 size = submap[i].size; 234 235 if (!size) 236 return -EINVAL; 237 238 if (!IS_ALIGNED(size, align) || !IS_ALIGNED(off, align)) 239 return -EINVAL; 240 241 if (!IS_ALIGNED(submap[i].phys_addr, align)) 242 return -EINVAL; 243 244 if (off > bar_size || size > bar_size - off) 245 return -EINVAL; 246 247 off += size; 248 } 249 if (off != bar_size) 250 return -EINVAL; 251 252 return 0; 253 } 254 255 /* Address Match Mode inbound iATU mapping */ 256 static int dw_pcie_ep_ib_atu_addr(struct dw_pcie_ep *ep, u8 func_no, int type, 257 const struct pci_epf_bar *epf_bar) 258 { 259 struct dw_pcie_ep_func *ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); 260 const struct pci_epf_bar_submap *submap = epf_bar->submap; 261 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 262 enum pci_barno bar = epf_bar->barno; 263 struct device *dev = pci->dev; 264 u64 pci_addr, parent_bus_addr; 265 u64 size, base, off = 0; 266 int free_win, ret; 267 unsigned int i; 268 u32 *indexes; 269 270 if (!ep_func || !epf_bar->num_submap || !submap || !epf_bar->size) 271 return -EINVAL; 272 273 ret = dw_pcie_ep_validate_submap(ep, submap, epf_bar->num_submap, 274 epf_bar->size); 275 if (ret) 276 return ret; 277 278 base = dw_pcie_ep_read_bar_assigned(ep, func_no, bar, epf_bar->flags); 279 if (!base) { 280 dev_err(dev, 281 "BAR%u not assigned, cannot set up sub-range mappings\n", 282 bar); 283 return -EINVAL; 284 } 285 286 indexes = devm_kcalloc(dev, epf_bar->num_submap, sizeof(*indexes), 287 GFP_KERNEL); 288 if (!indexes) 289 return -ENOMEM; 290 291 ep_func->ib_atu_indexes[bar] = indexes; 292 ep_func->num_ib_atu_indexes[bar] = 0; 293 294 for (i = 0; i < epf_bar->num_submap; i++) { 295 size = submap[i].size; 296 parent_bus_addr = submap[i].phys_addr; 297 298 if (off > (~0ULL) - base) { 299 ret = -EINVAL; 300 goto err; 301 } 302 303 pci_addr = base + off; 304 off += size; 305 306 free_win = find_first_zero_bit(ep->ib_window_map, 307 pci->num_ib_windows); 308 if 
(free_win >= pci->num_ib_windows) { 309 ret = -ENOSPC; 310 goto err; 311 } 312 313 ret = dw_pcie_prog_inbound_atu(pci, free_win, type, 314 parent_bus_addr, pci_addr, size); 315 if (ret) 316 goto err; 317 318 set_bit(free_win, ep->ib_window_map); 319 indexes[i] = free_win; 320 ep_func->num_ib_atu_indexes[bar] = i + 1; 321 } 322 return 0; 323 err: 324 dw_pcie_ep_clear_ib_maps(ep, func_no, bar); 325 return ret; 326 } 327 328 static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, 329 struct dw_pcie_ob_atu_cfg *atu) 330 { 331 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 332 u32 free_win; 333 int ret; 334 335 free_win = find_first_zero_bit(ep->ob_window_map, pci->num_ob_windows); 336 if (free_win >= pci->num_ob_windows) { 337 dev_err(pci->dev, "No free outbound window\n"); 338 return -EINVAL; 339 } 340 341 atu->index = free_win; 342 ret = dw_pcie_prog_outbound_atu(pci, atu); 343 if (ret) 344 return ret; 345 346 set_bit(free_win, ep->ob_window_map); 347 ep->outbound_addr[free_win] = atu->parent_bus_addr; 348 349 return 0; 350 } 351 352 static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 353 struct pci_epf_bar *epf_bar) 354 { 355 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 356 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 357 enum pci_barno bar = epf_bar->barno; 358 struct dw_pcie_ep_func *ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); 359 360 if (!ep_func || !ep_func->epf_bar[bar]) 361 return; 362 363 __dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags); 364 365 dw_pcie_ep_clear_ib_maps(ep, func_no, bar); 366 367 ep_func->epf_bar[bar] = NULL; 368 } 369 370 static unsigned int dw_pcie_ep_get_rebar_offset(struct dw_pcie_ep *ep, u8 func_no, 371 enum pci_barno bar) 372 { 373 u32 reg, bar_index; 374 unsigned int offset, nbars; 375 int i; 376 377 offset = dw_pcie_ep_find_ext_capability(ep, func_no, PCI_EXT_CAP_ID_REBAR); 378 if (!offset) 379 return offset; 380 381 reg = dw_pcie_ep_readl_dbi(ep, func_no, offset + PCI_REBAR_CTRL); 382 
nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg); 383 384 for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) { 385 reg = dw_pcie_ep_readl_dbi(ep, func_no, offset + PCI_REBAR_CTRL); 386 bar_index = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, reg); 387 if (bar_index == bar) 388 return offset; 389 } 390 391 return 0; 392 } 393 394 static int dw_pcie_ep_set_bar_resizable(struct dw_pcie_ep *ep, u8 func_no, 395 struct pci_epf_bar *epf_bar) 396 { 397 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 398 enum pci_barno bar = epf_bar->barno; 399 size_t size = epf_bar->size; 400 int flags = epf_bar->flags; 401 u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar); 402 unsigned int rebar_offset; 403 u32 rebar_cap, rebar_ctrl; 404 int ret; 405 406 rebar_offset = dw_pcie_ep_get_rebar_offset(ep, func_no, bar); 407 if (!rebar_offset) 408 return -EINVAL; 409 410 ret = pci_epc_bar_size_to_rebar_cap(size, &rebar_cap); 411 if (ret) 412 return ret; 413 414 dw_pcie_dbi_ro_wr_en(pci); 415 416 /* 417 * A BAR mask should not be written for a resizable BAR. The BAR mask 418 * is automatically derived by the controller every time the "selected 419 * size" bits are updated, see "Figure 3-26 Resizable BAR Example for 420 * 32-bit Memory BAR0" in DWC EP databook 5.96a. We simply need to write 421 * BIT(0) to set the BAR enable bit. 422 */ 423 dw_pcie_ep_writel_dbi2(ep, func_no, reg, BIT(0)); 424 dw_pcie_ep_writel_dbi(ep, func_no, reg, flags); 425 426 if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { 427 dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0); 428 dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0); 429 } 430 431 /* 432 * Bits 31:0 in PCI_REBAR_CAP define "supported sizes" bits for sizes 433 * 1 MB to 128 TB. Bits 31:16 in PCI_REBAR_CTRL define "supported sizes" 434 * bits for sizes 256 TB to 8 EB. Disallow sizes 256 TB to 8 EB. 
435 */ 436 rebar_ctrl = dw_pcie_ep_readl_dbi(ep, func_no, rebar_offset + PCI_REBAR_CTRL); 437 rebar_ctrl &= ~GENMASK(31, 16); 438 dw_pcie_ep_writel_dbi(ep, func_no, rebar_offset + PCI_REBAR_CTRL, rebar_ctrl); 439 440 /* 441 * The "selected size" (bits 13:8) in PCI_REBAR_CTRL are automatically 442 * updated when writing PCI_REBAR_CAP, see "Figure 3-26 Resizable BAR 443 * Example for 32-bit Memory BAR0" in DWC EP databook 5.96a. 444 */ 445 dw_pcie_ep_writel_dbi(ep, func_no, rebar_offset + PCI_REBAR_CAP, rebar_cap); 446 447 dw_pcie_dbi_ro_wr_dis(pci); 448 449 return 0; 450 } 451 452 static int dw_pcie_ep_set_bar_programmable(struct dw_pcie_ep *ep, u8 func_no, 453 struct pci_epf_bar *epf_bar) 454 { 455 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 456 enum pci_barno bar = epf_bar->barno; 457 size_t size = epf_bar->size; 458 int flags = epf_bar->flags; 459 u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar); 460 461 dw_pcie_dbi_ro_wr_en(pci); 462 463 dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1)); 464 dw_pcie_ep_writel_dbi(ep, func_no, reg, flags); 465 466 if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { 467 dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1)); 468 dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0); 469 } 470 471 dw_pcie_dbi_ro_wr_dis(pci); 472 473 return 0; 474 } 475 476 static enum pci_epc_bar_type dw_pcie_ep_get_bar_type(struct dw_pcie_ep *ep, 477 enum pci_barno bar) 478 { 479 const struct pci_epc_features *epc_features; 480 481 if (!ep->ops->get_features) 482 return BAR_PROGRAMMABLE; 483 484 epc_features = ep->ops->get_features(ep); 485 486 return epc_features->bar[bar].type; 487 } 488 489 static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 490 struct pci_epf_bar *epf_bar) 491 { 492 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 493 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 494 struct dw_pcie_ep_func *ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); 495 enum pci_barno bar = epf_bar->barno; 496 size_t 
size = epf_bar->size; 497 enum pci_epc_bar_type bar_type; 498 int flags = epf_bar->flags; 499 int ret, type; 500 501 if (!ep_func) 502 return -EINVAL; 503 504 /* 505 * DWC does not allow BAR pairs to overlap, e.g. you cannot combine BARs 506 * 1 and 2 to form a 64-bit BAR. 507 */ 508 if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1)) 509 return -EINVAL; 510 511 /* 512 * Certain EPF drivers dynamically change the physical address of a BAR 513 * (i.e. they call set_bar() twice, without ever calling clear_bar(), as 514 * calling clear_bar() would clear the BAR's PCI address assigned by the 515 * host). 516 */ 517 if (ep_func->epf_bar[bar]) { 518 /* 519 * We can only dynamically change a BAR if the new BAR size and 520 * BAR flags do not differ from the existing configuration. 521 */ 522 if (ep_func->epf_bar[bar]->barno != bar || 523 ep_func->epf_bar[bar]->size != size || 524 ep_func->epf_bar[bar]->flags != flags) 525 return -EINVAL; 526 527 /* 528 * When dynamically changing a BAR, tear down any existing 529 * mappings before re-programming. 530 */ 531 if (ep_func->epf_bar[bar]->num_submap || epf_bar->num_submap) 532 dw_pcie_ep_clear_ib_maps(ep, func_no, bar); 533 534 /* 535 * When dynamically changing a BAR, skip writing the BAR reg, as 536 * that would clear the BAR's PCI address assigned by the host. 537 */ 538 goto config_atu; 539 } else { 540 /* 541 * Subrange mapping is an update-only operation. The BAR 542 * must have been configured once without submaps so that 543 * subsequent set_bar() calls can update inbound mappings 544 * without touching the BAR register (and clobbering the 545 * host-assigned address). 546 */ 547 if (epf_bar->num_submap) 548 return -EINVAL; 549 } 550 551 bar_type = dw_pcie_ep_get_bar_type(ep, bar); 552 switch (bar_type) { 553 case BAR_FIXED: 554 /* 555 * There is no need to write a BAR mask for a fixed BAR (except 556 * to write 1 to the LSB of the BAR mask register, to enable the 557 * BAR). Write the BAR mask regardless. 
(The fixed bits in the 558 * BAR mask register will be read-only anyway.) 559 */ 560 fallthrough; 561 case BAR_PROGRAMMABLE: 562 ret = dw_pcie_ep_set_bar_programmable(ep, func_no, epf_bar); 563 break; 564 case BAR_RESIZABLE: 565 ret = dw_pcie_ep_set_bar_resizable(ep, func_no, epf_bar); 566 break; 567 default: 568 ret = -EINVAL; 569 dev_err(pci->dev, "Invalid BAR type\n"); 570 break; 571 } 572 573 if (ret) 574 return ret; 575 576 config_atu: 577 if (!(flags & PCI_BASE_ADDRESS_SPACE)) 578 type = PCIE_ATU_TYPE_MEM; 579 else 580 type = PCIE_ATU_TYPE_IO; 581 582 if (epf_bar->num_submap) 583 ret = dw_pcie_ep_ib_atu_addr(ep, func_no, type, epf_bar); 584 else 585 ret = dw_pcie_ep_ib_atu_bar(ep, func_no, type, 586 epf_bar->phys_addr, bar, size); 587 588 if (ret) 589 return ret; 590 591 ep_func->epf_bar[bar] = epf_bar; 592 593 return 0; 594 } 595 596 static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr, 597 u32 *atu_index) 598 { 599 u32 index; 600 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 601 602 for_each_set_bit(index, ep->ob_window_map, pci->num_ob_windows) { 603 if (ep->outbound_addr[index] != addr) 604 continue; 605 *atu_index = index; 606 return 0; 607 } 608 609 return -EINVAL; 610 } 611 612 static u64 dw_pcie_ep_align_addr(struct pci_epc *epc, u64 pci_addr, 613 size_t *pci_size, size_t *offset) 614 { 615 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 616 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 617 u64 mask = pci->region_align - 1; 618 size_t ofst = pci_addr & mask; 619 620 *pci_size = ALIGN(ofst + *pci_size, epc->mem->window.page_size); 621 *offset = ofst; 622 623 return pci_addr & ~mask; 624 } 625 626 static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 627 phys_addr_t addr) 628 { 629 int ret; 630 u32 atu_index; 631 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 632 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 633 634 ret = dw_pcie_find_index(ep, addr - pci->parent_bus_offset, 635 &atu_index); 636 if (ret < 0) 637 
return; 638 639 ep->outbound_addr[atu_index] = 0; 640 dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index); 641 clear_bit(atu_index, ep->ob_window_map); 642 } 643 644 static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 645 phys_addr_t addr, u64 pci_addr, size_t size) 646 { 647 int ret; 648 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 649 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 650 struct dw_pcie_ob_atu_cfg atu = { 0 }; 651 652 atu.func_no = func_no; 653 atu.type = PCIE_ATU_TYPE_MEM; 654 atu.parent_bus_addr = addr - pci->parent_bus_offset; 655 atu.pci_addr = pci_addr; 656 atu.size = size; 657 ret = dw_pcie_ep_outbound_atu(ep, &atu); 658 if (ret) { 659 dev_err(pci->dev, "Failed to enable address\n"); 660 return ret; 661 } 662 663 return 0; 664 } 665 666 static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no) 667 { 668 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 669 struct dw_pcie_ep_func *ep_func; 670 u32 val, reg; 671 672 ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); 673 if (!ep_func || !ep_func->msi_cap) 674 return -EINVAL; 675 676 reg = ep_func->msi_cap + PCI_MSI_FLAGS; 677 val = dw_pcie_ep_readw_dbi(ep, func_no, reg); 678 if (!(val & PCI_MSI_FLAGS_ENABLE)) 679 return -EINVAL; 680 681 val = FIELD_GET(PCI_MSI_FLAGS_QSIZE, val); 682 683 return 1 << val; 684 } 685 686 static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 687 u8 nr_irqs) 688 { 689 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 690 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 691 struct dw_pcie_ep_func *ep_func; 692 u8 mmc = order_base_2(nr_irqs); 693 u32 val, reg; 694 695 ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); 696 if (!ep_func || !ep_func->msi_cap) 697 return -EINVAL; 698 699 reg = ep_func->msi_cap + PCI_MSI_FLAGS; 700 val = dw_pcie_ep_readw_dbi(ep, func_no, reg); 701 val &= ~PCI_MSI_FLAGS_QMASK; 702 val |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, mmc); 703 dw_pcie_dbi_ro_wr_en(pci); 704 
dw_pcie_ep_writew_dbi(ep, func_no, reg, val); 705 dw_pcie_dbi_ro_wr_dis(pci); 706 707 return 0; 708 } 709 710 static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no) 711 { 712 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 713 struct dw_pcie_ep_func *ep_func; 714 u32 val, reg; 715 716 ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); 717 if (!ep_func || !ep_func->msix_cap) 718 return -EINVAL; 719 720 reg = ep_func->msix_cap + PCI_MSIX_FLAGS; 721 val = dw_pcie_ep_readw_dbi(ep, func_no, reg); 722 if (!(val & PCI_MSIX_FLAGS_ENABLE)) 723 return -EINVAL; 724 725 val &= PCI_MSIX_FLAGS_QSIZE; 726 727 return val + 1; 728 } 729 730 static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 731 u16 nr_irqs, enum pci_barno bir, u32 offset) 732 { 733 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 734 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 735 struct dw_pcie_ep_func *ep_func; 736 u32 val, reg; 737 738 ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); 739 if (!ep_func || !ep_func->msix_cap) 740 return -EINVAL; 741 742 dw_pcie_dbi_ro_wr_en(pci); 743 744 reg = ep_func->msix_cap + PCI_MSIX_FLAGS; 745 val = dw_pcie_ep_readw_dbi(ep, func_no, reg); 746 val &= ~PCI_MSIX_FLAGS_QSIZE; 747 val |= nr_irqs - 1; /* encoded as N-1 */ 748 dw_pcie_writew_dbi(pci, reg, val); 749 750 reg = ep_func->msix_cap + PCI_MSIX_TABLE; 751 val = offset | bir; 752 dw_pcie_ep_writel_dbi(ep, func_no, reg, val); 753 754 reg = ep_func->msix_cap + PCI_MSIX_PBA; 755 val = (offset + (nr_irqs * PCI_MSIX_ENTRY_SIZE)) | bir; 756 dw_pcie_ep_writel_dbi(ep, func_no, reg, val); 757 758 dw_pcie_dbi_ro_wr_dis(pci); 759 760 return 0; 761 } 762 763 static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 764 unsigned int type, u16 interrupt_num) 765 { 766 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 767 768 if (!ep->ops->raise_irq) 769 return -EINVAL; 770 771 return ep->ops->raise_irq(ep, func_no, type, interrupt_num); 772 } 773 774 static void 
dw_pcie_ep_stop(struct pci_epc *epc) 775 { 776 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 777 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 778 779 /* 780 * Tear down the dedicated outbound window used for MSI 781 * generation. This avoids leaking an iATU window across 782 * endpoint stop/start cycles. 783 */ 784 if (ep->msi_iatu_mapped) { 785 dw_pcie_ep_unmap_addr(epc, 0, 0, ep->msi_mem_phys); 786 ep->msi_iatu_mapped = false; 787 } 788 789 dw_pcie_stop_link(pci); 790 } 791 792 static int dw_pcie_ep_start(struct pci_epc *epc) 793 { 794 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 795 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 796 797 return dw_pcie_start_link(pci); 798 } 799 800 static const struct pci_epc_features* 801 dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no) 802 { 803 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 804 805 if (!ep->ops->get_features) 806 return NULL; 807 808 return ep->ops->get_features(ep); 809 } 810 811 static const struct pci_epc_ops epc_ops = { 812 .write_header = dw_pcie_ep_write_header, 813 .set_bar = dw_pcie_ep_set_bar, 814 .clear_bar = dw_pcie_ep_clear_bar, 815 .align_addr = dw_pcie_ep_align_addr, 816 .map_addr = dw_pcie_ep_map_addr, 817 .unmap_addr = dw_pcie_ep_unmap_addr, 818 .set_msi = dw_pcie_ep_set_msi, 819 .get_msi = dw_pcie_ep_get_msi, 820 .set_msix = dw_pcie_ep_set_msix, 821 .get_msix = dw_pcie_ep_get_msix, 822 .raise_irq = dw_pcie_ep_raise_irq, 823 .start = dw_pcie_ep_start, 824 .stop = dw_pcie_ep_stop, 825 .get_features = dw_pcie_ep_get_features, 826 }; 827 828 /** 829 * dw_pcie_ep_raise_intx_irq - Raise INTx IRQ to the host 830 * @ep: DWC EP device 831 * @func_no: Function number of the endpoint 832 * 833 * Return: 0 if success, errno otherwise. 
834 */ 835 int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no) 836 { 837 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 838 struct device *dev = pci->dev; 839 840 dev_err(dev, "EP cannot raise INTX IRQs\n"); 841 842 return -EINVAL; 843 } 844 EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq); 845 846 /** 847 * dw_pcie_ep_raise_msi_irq - Raise MSI IRQ to the host 848 * @ep: DWC EP device 849 * @func_no: Function number of the endpoint 850 * @interrupt_num: Interrupt number to be raised 851 * 852 * Return: 0 if success, errno otherwise. 853 */ 854 int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, 855 u8 interrupt_num) 856 { 857 u32 msg_addr_lower, msg_addr_upper, reg; 858 struct dw_pcie_ep_func *ep_func; 859 struct pci_epc *epc = ep->epc; 860 size_t map_size = sizeof(u32); 861 size_t offset; 862 u16 msg_ctrl, msg_data; 863 bool has_upper; 864 u64 msg_addr; 865 int ret; 866 867 ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); 868 if (!ep_func || !ep_func->msi_cap) 869 return -EINVAL; 870 871 /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */ 872 reg = ep_func->msi_cap + PCI_MSI_FLAGS; 873 msg_ctrl = dw_pcie_ep_readw_dbi(ep, func_no, reg); 874 has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT); 875 reg = ep_func->msi_cap + PCI_MSI_ADDRESS_LO; 876 msg_addr_lower = dw_pcie_ep_readl_dbi(ep, func_no, reg); 877 if (has_upper) { 878 reg = ep_func->msi_cap + PCI_MSI_ADDRESS_HI; 879 msg_addr_upper = dw_pcie_ep_readl_dbi(ep, func_no, reg); 880 reg = ep_func->msi_cap + PCI_MSI_DATA_64; 881 msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg); 882 } else { 883 msg_addr_upper = 0; 884 reg = ep_func->msi_cap + PCI_MSI_DATA_32; 885 msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg); 886 } 887 msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower; 888 889 msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset); 890 891 /* 892 * Program the outbound iATU once and keep it enabled. 
893 * 894 * The spec warns that updating iATU registers while there are 895 * operations in flight on the AXI bridge interface is not 896 * supported, so we avoid reprogramming the region on every MSI, 897 * specifically unmapping immediately after writel(). 898 */ 899 if (!ep->msi_iatu_mapped) { 900 ret = dw_pcie_ep_map_addr(epc, func_no, 0, 901 ep->msi_mem_phys, msg_addr, 902 map_size); 903 if (ret) 904 return ret; 905 906 ep->msi_iatu_mapped = true; 907 ep->msi_msg_addr = msg_addr; 908 ep->msi_map_size = map_size; 909 } else if (WARN_ON_ONCE(ep->msi_msg_addr != msg_addr || 910 ep->msi_map_size != map_size)) { 911 /* 912 * The host changed the MSI target address or the required 913 * mapping size changed. Reprogramming the iATU at runtime is 914 * unsafe on this controller, so bail out instead of trying to 915 * update the existing region. 916 */ 917 return -EINVAL; 918 } 919 920 writel(msg_data | (interrupt_num - 1), ep->msi_mem + offset); 921 922 return 0; 923 } 924 EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msi_irq); 925 926 /** 927 * dw_pcie_ep_raise_msix_irq_doorbell - Raise MSI-X to the host using Doorbell 928 * method 929 * @ep: DWC EP device 930 * @func_no: Function number of the endpoint device 931 * @interrupt_num: Interrupt number to be raised 932 * 933 * Return: 0 if success, errno otherwise. 
934 */ 935 int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no, 936 u16 interrupt_num) 937 { 938 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 939 struct dw_pcie_ep_func *ep_func; 940 u32 msg_data; 941 942 ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); 943 if (!ep_func || !ep_func->msix_cap) 944 return -EINVAL; 945 946 msg_data = (func_no << PCIE_MSIX_DOORBELL_PF_SHIFT) | 947 (interrupt_num - 1); 948 949 dw_pcie_writel_dbi(pci, PCIE_MSIX_DOORBELL, msg_data); 950 951 return 0; 952 } 953 954 /** 955 * dw_pcie_ep_raise_msix_irq - Raise MSI-X to the host 956 * @ep: DWC EP device 957 * @func_no: Function number of the endpoint device 958 * @interrupt_num: Interrupt number to be raised 959 * 960 * Return: 0 if success, errno otherwise. 961 */ 962 int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, 963 u16 interrupt_num) 964 { 965 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 966 struct pci_epf_msix_tbl *msix_tbl; 967 struct dw_pcie_ep_func *ep_func; 968 struct pci_epc *epc = ep->epc; 969 size_t map_size = sizeof(u32); 970 size_t offset; 971 u32 reg, msg_data, vec_ctrl; 972 u32 tbl_offset; 973 u64 msg_addr; 974 int ret; 975 u8 bir; 976 977 ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); 978 if (!ep_func || !ep_func->msix_cap) 979 return -EINVAL; 980 981 reg = ep_func->msix_cap + PCI_MSIX_TABLE; 982 tbl_offset = dw_pcie_ep_readl_dbi(ep, func_no, reg); 983 bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset); 984 tbl_offset &= PCI_MSIX_TABLE_OFFSET; 985 986 msix_tbl = ep_func->epf_bar[bir]->addr + tbl_offset; 987 msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr; 988 msg_data = msix_tbl[(interrupt_num - 1)].msg_data; 989 vec_ctrl = msix_tbl[(interrupt_num - 1)].vector_ctrl; 990 991 if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) { 992 dev_dbg(pci->dev, "MSI-X entry ctrl set\n"); 993 return -EPERM; 994 } 995 996 msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset); 997 ret = dw_pcie_ep_map_addr(epc, func_no, 0, 
ep->msi_mem_phys, msg_addr, 998 map_size); 999 if (ret) 1000 return ret; 1001 1002 writel(msg_data, ep->msi_mem + offset); 1003 1004 dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys); 1005 1006 return 0; 1007 } 1008 EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msix_irq); 1009 1010 /** 1011 * dw_pcie_ep_cleanup - Cleanup DWC EP resources after fundamental reset 1012 * @ep: DWC EP device 1013 * 1014 * Cleans up the DWC EP specific resources like eDMA etc... after fundamental 1015 * reset like PERST#. Note that this API is only applicable for drivers 1016 * supporting PERST# or any other methods of fundamental reset. 1017 */ 1018 void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep) 1019 { 1020 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 1021 1022 dwc_pcie_debugfs_deinit(pci); 1023 dw_pcie_edma_remove(pci); 1024 } 1025 EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup); 1026 1027 /** 1028 * dw_pcie_ep_deinit - Deinitialize the endpoint device 1029 * @ep: DWC EP device 1030 * 1031 * Deinitialize the endpoint device. EPC device is not destroyed since that will 1032 * be taken care by Devres. 
1033 */ 1034 void dw_pcie_ep_deinit(struct dw_pcie_ep *ep) 1035 { 1036 struct pci_epc *epc = ep->epc; 1037 1038 dw_pcie_ep_cleanup(ep); 1039 1040 pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem, 1041 epc->mem->window.page_size); 1042 1043 pci_epc_mem_exit(epc); 1044 } 1045 EXPORT_SYMBOL_GPL(dw_pcie_ep_deinit); 1046 1047 static void dw_pcie_ep_init_rebar_registers(struct dw_pcie_ep *ep, u8 func_no) 1048 { 1049 struct dw_pcie_ep_func *ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); 1050 unsigned int offset, nbars; 1051 enum pci_barno bar; 1052 u32 reg, i, val; 1053 1054 if (!ep_func) 1055 return; 1056 1057 offset = dw_pcie_ep_find_ext_capability(ep, func_no, PCI_EXT_CAP_ID_REBAR); 1058 1059 if (offset) { 1060 reg = dw_pcie_ep_readl_dbi(ep, func_no, offset + PCI_REBAR_CTRL); 1061 nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg); 1062 1063 /* 1064 * PCIe r6.0, sec 7.8.6.2 require us to support at least one 1065 * size in the range from 1 MB to 512 GB. Advertise support 1066 * for 1 MB BAR size only. 1067 * 1068 * For a BAR that has been configured via dw_pcie_ep_set_bar(), 1069 * advertise support for only that size instead. 1070 */ 1071 for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) { 1072 /* 1073 * While the RESBAR_CAP_REG_* fields are sticky, the 1074 * RESBAR_CTRL_REG_BAR_SIZE field is non-sticky (it is 1075 * sticky in certain versions of DWC PCIe, but not all). 1076 * 1077 * RESBAR_CTRL_REG_BAR_SIZE is updated automatically by 1078 * the controller when RESBAR_CAP_REG is written, which 1079 * is why RESBAR_CAP_REG is written here. 
			 */
			val = dw_pcie_ep_readl_dbi(ep, func_no, offset + PCI_REBAR_CTRL);
			bar = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, val);
			if (ep_func->epf_bar[bar])
				pci_epc_bar_size_to_rebar_cap(ep_func->epf_bar[bar]->size, &val);
			else
				val = BIT(4); /* advertise the 1 MB BAR size only */

			dw_pcie_ep_writel_dbi(ep, func_no, offset + PCI_REBAR_CAP, val);
		}
	}
}

/*
 * Reprogram the registers that are not sticky, i.e. that are reset by events
 * such as Link Down. Opens the DBI read-only write window for the duration.
 */
static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
{
	struct dw_pcie_ep *ep = &pci->ep;
	u8 funcs = ep->epc->max_functions;
	u8 func_no;

	dw_pcie_dbi_ro_wr_en(pci);

	/* Per-function ReBAR setup, then the common core configuration. */
	for (func_no = 0; func_no < funcs; func_no++)
		dw_pcie_ep_init_rebar_registers(ep, func_no);

	dw_pcie_setup(pci);
	dw_pcie_dbi_ro_wr_dis(pci);
}

/**
 * dw_pcie_ep_init_registers - Initialize DWC EP specific registers
 * @ep: DWC EP device
 *
 * Initialize the registers (CSRs) specific to DWC EP. This API should be called
 * only when the endpoint receives an active refclk (either from host or
 * generated locally).
 */
int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func;
	struct device *dev = pci->dev;
	struct pci_epc *epc = ep->epc;
	u32 ptm_cap_base, reg;
	u8 hdr_type;
	u8 func_no;
	void *addr;
	int ret;

	/* Sanity check: the core must have been strapped into EP mode. */
	hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
		   PCI_HEADER_TYPE_MASK;
	if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
		dev_err(pci->dev,
			"PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
			hdr_type);
		return -EIO;
	}

	dw_pcie_version_detect(pci);

	dw_pcie_iatu_detect(pci);

	ret = dw_pcie_edma_detect(pci);
	if (ret)
		return ret;

	/*
	 * This function may run again after a reset; the window maps, the
	 * outbound address table and the per-function list entries are
	 * devres-managed and only allocated on the first invocation.
	 */
	ret = -ENOMEM;
	if (!ep->ib_window_map) {
		ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
						       GFP_KERNEL);
		if (!ep->ib_window_map)
			goto err_remove_edma;
	}

	if (!ep->ob_window_map) {
		ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
						       GFP_KERNEL);
		if (!ep->ob_window_map)
			goto err_remove_edma;
	}

	if (!ep->outbound_addr) {
		addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
				    GFP_KERNEL);
		if (!addr)
			goto err_remove_edma;
		ep->outbound_addr = addr;
	}

	for (func_no = 0; func_no < epc->max_functions; func_no++) {

		ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
		if (ep_func)
			continue;

		ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
		if (!ep_func)
			goto err_remove_edma;

		/* Cache the MSI/MSI-X capability offsets once per function. */
		ep_func->func_no = func_no;
		ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
							      PCI_CAP_ID_MSI);
		ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
							      PCI_CAP_ID_MSIX);

		list_add_tail(&ep_func->list, &ep->func_list);
	}

	if (ep->ops->init)
		ep->ops->init(ep);

	/*
	 * PCIe r6.0, section 7.9.15
	 * states that for endpoints that support
	 * PTM, this capability structure is required in exactly one
	 * function, which controls the PTM behavior of all PTM capable
	 * functions. This indicates the PTM capability structure
	 * represents controller-level registers rather than per-function
	 * registers.
	 *
	 * Therefore, PTM capability registers are configured using the
	 * standard DBI accessors, instead of func_no indexed per-function
	 * accessors.
	 */
	ptm_cap_base = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);

	/*
	 * PTM responder capability can be disabled only after disabling
	 * PTM root capability.
	 */
	if (ptm_cap_base) {
		dw_pcie_dbi_ro_wr_en(pci);
		reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP);
		reg &= ~PCI_PTM_CAP_ROOT;
		dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg);

		reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP);
		reg &= ~(PCI_PTM_CAP_RES | PCI_PTM_GRANULARITY_MASK);
		dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg);
		dw_pcie_dbi_ro_wr_dis(pci);
	}

	dw_pcie_ep_init_non_sticky_registers(pci);

	dwc_pcie_debugfs_init(pci, DW_PCIE_EP_TYPE);

	return 0;

	/* eDMA is the last resource acquired above; all failures land here. */
err_remove_edma:
	dw_pcie_edma_remove(pci);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init_registers);

/**
 * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event
 * @ep: DWC EP device
 */
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
	struct pci_epc *epc = ep->epc;

	pci_epc_linkup(epc);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);

/**
 * dw_pcie_ep_linkdown - Notify EPF drivers about Link Down event
 * @ep: DWC EP device
 *
 * Non-sticky registers are also initialized before sending the notification to
 * the EPF drivers.
 * This is needed since the registers need to be initialized
 * before the link comes back again.
 */
void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct pci_epc *epc = ep->epc;

	/*
	 * Initialize the non-sticky DWC registers as they would've reset post
	 * Link Down. This is specifically needed for drivers not supporting
	 * PERST# as they have no way to reinitialize the registers before the
	 * link comes back again.
	 */
	dw_pcie_ep_init_non_sticky_registers(pci);

	pci_epc_linkdown(epc);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown);

/*
 * Fetch the platform resources needed by the endpoint: the common DWC
 * resources, the "addr_space" outbound window, and the optional
 * "max-functions" devicetree property.
 */
static int dw_pcie_ep_get_resources(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;
	struct pci_epc *epc = ep->epc;
	struct resource *res;
	int ret;

	ret = dw_pcie_get_resources(pci);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	/*
	 * artpec6_pcie_cpu_addr_fixup() uses ep->phys_base, so call
	 * dw_pcie_parent_bus_offset() after setting ep->phys_base.
	 */
	pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "addr_space",
							   ep->phys_base);

	/* Default to a single physical function when the property is absent. */
	ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
	if (ret < 0)
		epc->max_functions = 1;

	return 0;
}

/**
 * dw_pcie_ep_init - Initialize the endpoint device
 * @ep: DWC EP device
 *
 * Initialize the endpoint device. Allocate resources and create the EPC
 * device with the endpoint framework.
 *
 * Return: 0 if success, errno otherwise.
1313 */ 1314 int dw_pcie_ep_init(struct dw_pcie_ep *ep) 1315 { 1316 int ret; 1317 struct pci_epc *epc; 1318 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 1319 struct device *dev = pci->dev; 1320 1321 INIT_LIST_HEAD(&ep->func_list); 1322 ep->msi_iatu_mapped = false; 1323 ep->msi_msg_addr = 0; 1324 ep->msi_map_size = 0; 1325 1326 epc = devm_pci_epc_create(dev, &epc_ops); 1327 if (IS_ERR(epc)) { 1328 dev_err(dev, "Failed to create epc device\n"); 1329 return PTR_ERR(epc); 1330 } 1331 1332 ep->epc = epc; 1333 epc_set_drvdata(epc, ep); 1334 1335 ret = dw_pcie_ep_get_resources(ep); 1336 if (ret) 1337 return ret; 1338 1339 if (ep->ops->pre_init) 1340 ep->ops->pre_init(ep); 1341 1342 ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size, 1343 ep->page_size); 1344 if (ret < 0) { 1345 dev_err(dev, "Failed to initialize address space\n"); 1346 return ret; 1347 } 1348 1349 ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys, 1350 epc->mem->window.page_size); 1351 if (!ep->msi_mem) { 1352 ret = -ENOMEM; 1353 dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n"); 1354 goto err_exit_epc_mem; 1355 } 1356 1357 return 0; 1358 1359 err_exit_epc_mem: 1360 pci_epc_mem_exit(epc); 1361 1362 return ret; 1363 } 1364 EXPORT_SYMBOL_GPL(dw_pcie_ep_init); 1365