// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/units.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
#include <linux/aer.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"
#include "trace.h"

/**
 * DOC: cxl core pci
 *
 * Compute Express Link protocols are layered on top of PCIe. CXL core provides
 * a set of helpers for CXL interactions which occur via PCIe.
 */

static unsigned short media_ready_timeout = 60;
module_param(media_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(media_ready_timeout, "seconds to wait for media ready");

static int pci_get_port_num(struct pci_dev *pdev)
{
	u32 lnkcap;
	int type;

	type = pci_pcie_type(pdev);
	if (type != PCI_EXP_TYPE_DOWNSTREAM && type != PCI_EXP_TYPE_ROOT_PORT)
		return -EINVAL;

	if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
				  &lnkcap))
		return -ENXIO;

	return FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
}

/**
 * devm_cxl_add_dport_by_dev - allocate a dport by dport device
 * @port: cxl_port that hosts the dport
 * @dport_dev: 'struct device' of the dport
 *
 * Returns the allocated dport on success or ERR_PTR() of -errno on error
 */
struct cxl_dport *devm_cxl_add_dport_by_dev(struct cxl_port *port,
					    struct device *dport_dev)
{
	struct cxl_register_map map;
	struct pci_dev *pdev;
	int port_num, rc;

	if (!dev_is_pci(dport_dev))
		return ERR_PTR(-EINVAL);

	pdev = to_pci_dev(dport_dev);
	port_num = pci_get_port_num(pdev);
	if (port_num < 0)
		return ERR_PTR(port_num);

	rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	if (rc)
		return ERR_PTR(rc);

	device_lock_assert(&port->dev);
	return devm_cxl_add_dport(port, dport_dev, port_num, map.resource);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport_by_dev, "CXL");
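
/*
 * Usage sketch (illustrative only, not part of this file): a port driver
 * walking the secondary bus of its uport could register each downstream
 * port it finds. The "cxl_switch_attach" name and the surrounding bus walk
 * are hypothetical; the one hard requirement shown is that the caller holds
 * the cxl_port device lock, per the device_lock_assert() above.
 *
 *	static int cxl_switch_attach(struct cxl_port *port, struct pci_dev *pdev)
 *	{
 *		struct cxl_dport *dport;
 *
 *		guard(device)(&port->dev);
 *		dport = devm_cxl_add_dport_by_dev(port, &pdev->dev);
 *		if (IS_ERR(dport))
 *			return PTR_ERR(dport);
 *		return 0;
 *	}
 */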

static int cxl_dvsec_mem_range_valid(struct cxl_dev_state *cxlds, int id)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	bool valid = false;
	int rc, i;
	u32 temp;

	if (id > CXL_DVSEC_RANGE_MAX)
		return -EINVAL;

	/* Check MEM INFO VALID bit first, give up after 1s */
	i = 1;
	do {
		rc = pci_read_config_dword(pdev,
					   d + PCI_DVSEC_CXL_RANGE_SIZE_LOW(id),
					   &temp);
		if (rc)
			return rc;

		valid = FIELD_GET(PCI_DVSEC_CXL_MEM_INFO_VALID, temp);
		if (valid)
			break;
		msleep(1000);
	} while (i--);

	if (!valid) {
		dev_err(&pdev->dev,
			"Timeout awaiting memory range %d valid after 1s.\n",
			id);
		return -ETIMEDOUT;
	}

	return 0;
}

static int cxl_dvsec_mem_range_active(struct cxl_dev_state *cxlds, int id)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	bool active = false;
	int rc, i;
	u32 temp;

	if (id > CXL_DVSEC_RANGE_MAX)
		return -EINVAL;

	/* Check MEM ACTIVE bit, up to 60s timeout by default */
	for (i = media_ready_timeout; i; i--) {
		rc = pci_read_config_dword(
			pdev, d + PCI_DVSEC_CXL_RANGE_SIZE_LOW(id), &temp);
		if (rc)
			return rc;

		active = FIELD_GET(PCI_DVSEC_CXL_MEM_ACTIVE, temp);
		if (active)
			break;
		msleep(1000);
	}

	if (!active) {
		dev_err(&pdev->dev,
			"timeout awaiting memory active after %d seconds\n",
			media_ready_timeout);
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * Wait up to @media_ready_timeout for the device to report memory
 * active.
 */
int cxl_await_media_ready(struct cxl_dev_state *cxlds)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	int rc, i, hdm_count;
	u64 md_status;
	u16 cap;

	rc = pci_read_config_word(pdev,
				  d + PCI_DVSEC_CXL_CAP, &cap);
	if (rc)
		return rc;

	hdm_count = FIELD_GET(PCI_DVSEC_CXL_HDM_COUNT, cap);
	for (i = 0; i < hdm_count; i++) {
		rc = cxl_dvsec_mem_range_valid(cxlds, i);
		if (rc)
			return rc;
	}

	for (i = 0; i < hdm_count; i++) {
		rc = cxl_dvsec_mem_range_active(cxlds, i);
		if (rc)
			return rc;
	}

	md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
	if (!CXLMDEV_READY(md_status))
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, "CXL");
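
/*
 * Worked example (illustrative): with the default media_ready_timeout of
 * 60 seconds and an endpoint reporting hdm_count == 2, the worst case
 * spent in cxl_await_media_ready() before giving up is on the order of
 * two minutes: up to ~2 seconds per range polling Memory_Info_Valid plus
 * up to 60 seconds per range polling Memory_Active, after which the
 * memory device status register is checked for CXLMDEV_READY.
 */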

static int cxl_set_mem_enable(struct cxl_dev_state *cxlds, u16 val)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	u16 ctrl;
	int rc;

	rc = pci_read_config_word(pdev, d + PCI_DVSEC_CXL_CTRL, &ctrl);
	if (rc < 0)
		return rc;

	if ((ctrl & PCI_DVSEC_CXL_MEM_ENABLE) == val)
		return 1;
	ctrl &= ~PCI_DVSEC_CXL_MEM_ENABLE;
	ctrl |= val;

	rc = pci_write_config_word(pdev, d + PCI_DVSEC_CXL_CTRL, ctrl);
	if (rc < 0)
		return rc;

	return 0;
}

static void clear_mem_enable(void *cxlds)
{
	cxl_set_mem_enable(cxlds, 0);
}

static int devm_cxl_enable_mem(struct device *host, struct cxl_dev_state *cxlds)
{
	int rc;

	rc = cxl_set_mem_enable(cxlds, PCI_DVSEC_CXL_MEM_ENABLE);
	if (rc < 0)
		return rc;
	if (rc > 0)
		return 0;
	return devm_add_action_or_reset(host, clear_mem_enable, cxlds);
}

/* require dvsec ranges to be covered by a locked platform window */
static int dvsec_range_allowed(struct device *dev, const void *arg)
{
	const struct range *dev_range = arg;
	struct cxl_decoder *cxld;

	if (!is_root_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);

	if (!(cxld->flags & CXL_DECODER_F_RAM))
		return 0;

	return range_contains(&cxld->hpa_range, dev_range);
}

static void disable_hdm(void *_cxlhdm)
{
	u32 global_ctrl;
	struct cxl_hdm *cxlhdm = _cxlhdm;
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;

	global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
	writel(global_ctrl & ~CXL_HDM_DECODER_ENABLE,
	       hdm + CXL_HDM_DECODER_CTRL_OFFSET);
}

static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	u32 global_ctrl;

	global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
	writel(global_ctrl | CXL_HDM_DECODER_ENABLE,
	       hdm + CXL_HDM_DECODER_CTRL_OFFSET);

	return devm_add_action_or_reset(host, disable_hdm, cxlhdm);
}

int cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds,
			struct cxl_endpoint_dvsec_info *info)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	struct device *dev = cxlds->dev;
	int hdm_count, rc, i, ranges = 0;
	int d = cxlds->cxl_dvsec;
	u16 cap, ctrl;

	if (!d) {
		dev_dbg(dev, "No DVSEC Capability\n");
		return -ENXIO;
	}

	rc = pci_read_config_word(pdev, d + PCI_DVSEC_CXL_CAP, &cap);
	if (rc)
		return rc;

	if (!(cap & PCI_DVSEC_CXL_MEM_CAPABLE)) {
		dev_dbg(dev, "Not MEM Capable\n");
		return -ENXIO;
	}

	/*
	 * It is not allowed by spec for MEM.capable to be set and have 0 legacy
	 * HDM decoders (values > 2 are also undefined as of CXL 2.0). As this
	 * driver is for a spec defined class code which must be CXL.mem
	 * capable, there is no point in continuing to enable CXL.mem.
	 */
	hdm_count = FIELD_GET(PCI_DVSEC_CXL_HDM_COUNT, cap);
	if (!hdm_count || hdm_count > 2)
		return -EINVAL;

	/*
	 * The current DVSEC values are moot if the memory capability is
	 * disabled, and they will remain moot after the HDM Decoder
	 * capability is enabled.
	 */
	rc = pci_read_config_word(pdev, d + PCI_DVSEC_CXL_CTRL, &ctrl);
	if (rc)
		return rc;

	info->mem_enabled = FIELD_GET(PCI_DVSEC_CXL_MEM_ENABLE, ctrl);
	if (!info->mem_enabled)
		return 0;

	for (i = 0; i < hdm_count; i++) {
		u64 base, size;
		u32 temp;

		rc = cxl_dvsec_mem_range_valid(cxlds, i);
		if (rc)
			return rc;

		rc = pci_read_config_dword(
			pdev, d + PCI_DVSEC_CXL_RANGE_SIZE_HIGH(i), &temp);
		if (rc)
			return rc;

		size = (u64)temp << 32;

		rc = pci_read_config_dword(
			pdev, d + PCI_DVSEC_CXL_RANGE_SIZE_LOW(i), &temp);
		if (rc)
			return rc;

		size |= temp & PCI_DVSEC_CXL_MEM_SIZE_LOW;
		if (!size)
			continue;

		rc = pci_read_config_dword(
			pdev, d + PCI_DVSEC_CXL_RANGE_BASE_HIGH(i), &temp);
		if (rc)
			return rc;

		base = (u64)temp << 32;

		rc = pci_read_config_dword(
			pdev, d + PCI_DVSEC_CXL_RANGE_BASE_LOW(i), &temp);
		if (rc)
			return rc;

		base |= temp & PCI_DVSEC_CXL_MEM_BASE_LOW;

		info->dvsec_range[ranges++] = (struct range) {
			.start = base,
			.end = base + size - 1
		};
	}

	info->ranges = ranges;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dvsec_rr_decode, "CXL");
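
/*
 * Worked example (illustrative numbers, assuming the CXL 2.0 DVSEC range
 * register layout where only bits 31:28 of the low dwords carry address
 * and size bits): for a 1GB range at host physical address 0xC0_0000_0000
 * the device would report
 *
 *	RANGE_SIZE_HIGH = 0x00000000, RANGE_SIZE_LOW = 0x40000003
 *		(size bits 0x4 << 28, Memory_Info_Valid and Memory_Active set)
 *	RANGE_BASE_HIGH = 0x000000C0, RANGE_BASE_LOW = 0x00000000
 *
 * which cxl_dvsec_rr_decode() turns into
 *
 *	info->dvsec_range[0] = (struct range) {
 *		.start = 0xC000000000,
 *		.end   = 0xC03FFFFFFF,
 *	};
 */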

/**
 * cxl_hdm_decode_init() - Setup HDM decoding for the endpoint
 * @cxlds: Device state
 * @cxlhdm: Mapped HDM decoder Capability
 * @info: Cached DVSEC range registers info
 *
 * Try to enable the endpoint's HDM Decoder Capability
 */
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
			struct cxl_endpoint_dvsec_info *info)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	struct device *dev = cxlds->dev;
	struct cxl_port *root;
	int i, rc, allowed;
	u32 global_ctrl = 0;

	if (hdm)
		global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);

	/*
	 * If the HDM Decoder Capability is already enabled then assume
	 * that some other agent like platform firmware set it up.
	 */
	if (global_ctrl & CXL_HDM_DECODER_ENABLE || (!hdm && info->mem_enabled))
		return devm_cxl_enable_mem(&port->dev, cxlds);

	/*
	 * If the HDM Decoder Capability does not exist and DVSEC was
	 * not setup, the DVSEC based emulation cannot be used.
	 */
	if (!hdm)
		return -ENODEV;

	/* The HDM Decoder Capability exists but is globally disabled. */

	/*
	 * If the DVSEC CXL Range registers are not enabled, just
	 * enable and use the HDM Decoder Capability registers.
	 */
	if (!info->mem_enabled) {
		rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
		if (rc)
			return rc;

		return devm_cxl_enable_mem(&port->dev, cxlds);
	}

	/*
	 * Per CXL 2.0 Sections 8.1.3.8.3 and 8.1.3.8.4, the device ignores
	 * the DVSEC CXL Range 1 Base [High,Low] register values when HDM
	 * operation is enabled, but the spec also recommends matching the
	 * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges
	 * are expected even though Linux does not require or maintain that
	 * match. Check if at least one DVSEC range is enabled and allowed by
	 * the platform. That is, the DVSEC range must be covered by a locked
	 * platform window (CFMWS). Fail otherwise as the endpoint's decoders
	 * cannot be used.
	 */

	root = to_cxl_port(port->dev.parent);
	while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
		root = to_cxl_port(root->dev.parent);
	if (!is_cxl_root(root)) {
		dev_err(dev, "Failed to acquire root port for HDM enable\n");
		return -ENODEV;
	}

	for (i = 0, allowed = 0; i < info->ranges; i++) {
		struct device *cxld_dev;

		cxld_dev = device_find_child(&root->dev, &info->dvsec_range[i],
					     dvsec_range_allowed);
		if (!cxld_dev) {
			dev_dbg(dev, "DVSEC Range%d denied by platform\n", i);
			continue;
		}
		dev_dbg(dev, "DVSEC Range%d allowed by platform\n", i);
		put_device(cxld_dev);
		allowed++;
	}

	if (!allowed) {
		dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n");
		return -ENXIO;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, "CXL");

#define CXL_DOE_TABLE_ACCESS_REQ_CODE		0x000000ff
#define   CXL_DOE_TABLE_ACCESS_REQ_CODE_READ	0
#define CXL_DOE_TABLE_ACCESS_TABLE_TYPE		0x0000ff00
#define   CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA	0
#define CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE	0xffff0000
#define CXL_DOE_TABLE_ACCESS_LAST_ENTRY		0xffff
#define CXL_DOE_PROTOCOL_TABLE_ACCESS 2

#define CDAT_DOE_REQ(entry_handle) cpu_to_le32				\
	(FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE,			\
		    CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) |		\
	 FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE,			\
		    CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA) |		\
	 FIELD_PREP(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, (entry_handle)))

static int cxl_cdat_get_length(struct device *dev,
			       struct pci_doe_mb *doe_mb,
			       size_t *length)
{
	__le32 request = CDAT_DOE_REQ(0);
	__le32 response[2];
	int rc;

	rc = pci_doe(doe_mb, PCI_VENDOR_ID_CXL,
		     CXL_DOE_PROTOCOL_TABLE_ACCESS,
		     &request, sizeof(request),
		     &response, sizeof(response));
	if (rc < 0) {
		dev_err(dev, "DOE failed: %d", rc);
		return rc;
	}
	if (rc < sizeof(response))
		return -EIO;

	*length = le32_to_cpu(response[1]);
	dev_dbg(dev, "CDAT length %zu\n", *length);

	return 0;
}

static int cxl_cdat_read_table(struct device *dev,
			       struct pci_doe_mb *doe_mb,
			       struct cdat_doe_rsp *rsp, size_t *length)
{
	size_t received, remaining = *length;
	unsigned int entry_handle = 0;
	union cdat_data *data;
	__le32 saved_dw = 0;

	do {
		__le32 request = CDAT_DOE_REQ(entry_handle);
		int rc;

		rc = pci_doe(doe_mb, PCI_VENDOR_ID_CXL,
			     CXL_DOE_PROTOCOL_TABLE_ACCESS,
			     &request, sizeof(request),
			     rsp, sizeof(*rsp) + remaining);
		if (rc < 0) {
			dev_err(dev, "DOE failed: %d", rc);
			return rc;
		}

		if (rc < sizeof(*rsp))
			return -EIO;

		data = (union cdat_data *)rsp->data;
		received = rc - sizeof(*rsp);

		if (entry_handle == 0) {
			if (received != sizeof(data->header))
				return -EIO;
		} else {
			if (received < sizeof(data->entry) ||
			    received != le16_to_cpu(data->entry.length))
				return -EIO;
		}

		/* Get the CXL table access header entry handle */
		entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
					 le32_to_cpu(rsp->doe_header));

		/*
		 * Table Access Response Header overwrote the last DW of
		 * previous entry, so restore that DW
		 */
		rsp->doe_header = saved_dw;
		remaining -= received;
		rsp = (void *)rsp + received;
		saved_dw = rsp->doe_header;
	} while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);

	/* Length in CDAT header may exceed concatenation of CDAT entries */
	*length -= remaining;

	return 0;
}
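
/*
 * Buffer layout sketch for cxl_cdat_read_table() (illustrative): each DOE
 * response is a 4-byte Table Access Response header followed by one CDAT
 * entry. Responses are deliberately placed so that the next header lands
 * on top of the last DW of the previous entry, and saved_dw puts that DW
 * back, leaving a contiguous CDAT table at rsp->data:
 *
 *	|hdr0|<-------- entry 0 -------->|
 *	                            |hdr1|<------ entry 1 ------>|
 *	                            ^ overwritten DW restored from saved_dw
 *
 * Only the very first header slot (the doe_header of the original buffer)
 * stays outside the table, which is why read_cdat_data() below publishes
 * buf->data rather than buf.
 */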

static unsigned char cdat_checksum(void *buf, size_t size)
{
	unsigned char sum, *data = buf;
	size_t i;

	for (sum = 0, i = 0; i < size; i++)
		sum += data[i];
	return sum;
}

/**
 * read_cdat_data - Read the CDAT data on this port
 * @port: Port to read data from
 *
 * This call will sleep waiting for responses from the DOE mailbox.
 */
void read_cdat_data(struct cxl_port *port)
{
	struct device *uport = port->uport_dev;
	struct device *dev = &port->dev;
	struct pci_doe_mb *doe_mb;
	struct pci_dev *pdev = NULL;
	struct cxl_memdev *cxlmd;
	struct cdat_doe_rsp *buf;
	size_t table_length, length;
	int rc;

	if (is_cxl_memdev(uport)) {
		struct device *host;

		cxlmd = to_cxl_memdev(uport);
		host = cxlmd->dev.parent;
		if (dev_is_pci(host))
			pdev = to_pci_dev(host);
	} else if (dev_is_pci(uport)) {
		pdev = to_pci_dev(uport);
	}

	if (!pdev)
		return;

	doe_mb = pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_CXL,
				      CXL_DOE_PROTOCOL_TABLE_ACCESS);
	if (!doe_mb) {
		dev_dbg(dev, "No CDAT mailbox\n");
		return;
	}

	port->cdat_available = true;

	if (cxl_cdat_get_length(dev, doe_mb, &length)) {
		dev_dbg(dev, "No CDAT length\n");
		return;
	}

	/*
	 * The beginning of the CDAT buffer needs space for an additional 4
	 * bytes for the DOE header. Table data starts afterwards.
	 */
	buf = devm_kzalloc(dev, sizeof(*buf) + length, GFP_KERNEL);
	if (!buf)
		goto err;

	table_length = length;

	rc = cxl_cdat_read_table(dev, doe_mb, buf, &length);
	if (rc)
		goto err;

	if (table_length != length)
		dev_warn(dev, "Malformed CDAT table length (%zu:%zu), discarding trailing data\n",
			 table_length, length);

	if (cdat_checksum(buf->data, length))
		goto err;

	port->cdat.table = buf->data;
	port->cdat.length = length;

	return;
err:
	/* Don't leave table data allocated on error */
	devm_kfree(dev, buf);
	dev_err(dev, "Failed to read/validate CDAT.\n");
}
EXPORT_SYMBOL_NS_GPL(read_cdat_data, "CXL");

static int cxl_flit_size(struct pci_dev *pdev)
{
	if (cxl_pci_flit_256(pdev))
		return 256;

	return 68;
}

/**
 * cxl_pci_get_latency - calculate the link latency for the PCIe link
 * @pdev: PCI device
 *
 * return: calculated latency or 0 for no latency
 *
 * CXL Memory Device SW Guide v1.0 2.11.4 Link latency calculation
 * Link latency = LinkPropagationLatency + FlitLatency + RetimerLatency
 * LinkPropagationLatency is negligible, so 0 will be used
 * RetimerLatency is assumed to be negligible and 0 will be used
 * FlitLatency = FlitSize / LinkBandwidth
 * FlitSize is defined by spec. CXL rev3.0 4.2.1.
 * 68B flit is used up to 32GT/s. >32GT/s, 256B flit size is used.
 * The FlitLatency is converted to picoseconds.
 */
long cxl_pci_get_latency(struct pci_dev *pdev)
{
	long bw;

	bw = pcie_link_speed_mbps(pdev);
	if (bw < 0)
		return 0;
	bw /= BITS_PER_BYTE;

	return cxl_flit_size(pdev) * MEGA / bw;
}
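
/*
 * Worked example (numbers for illustration only): on a 32 GT/s link
 * pcie_link_speed_mbps() reports 32000 Mb/s, so bw = 32000 / 8 = 4000 MB/s
 * and the 68B flit gives 68 * MEGA / 4000 = 17000 ps (17 ns). At 64 GT/s
 * the 256B flit applies: 256 * MEGA / 8000 = 32000 ps (32 ns).
 */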

static int __cxl_endpoint_decoder_reset_detected(struct device *dev, void *data)
{
	struct cxl_port *port = data;
	struct cxl_decoder *cxld;
	struct cxl_hdm *cxlhdm;
	void __iomem *hdm;
	u32 ctrl;

	if (!is_endpoint_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);
	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return 0;

	cxlhdm = dev_get_drvdata(&port->dev);
	hdm = cxlhdm->regs.hdm_decoder;
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));

	return !FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl);
}

bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port)
{
	return device_for_each_child(&port->dev, port,
				     __cxl_endpoint_decoder_reset_detected);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_reset_detected, "CXL");

int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c)
{
	int speed, bw;
	u16 lnksta;
	u32 width;

	speed = pcie_link_speed_mbps(pdev);
	if (speed < 0)
		return speed;
	speed /= BITS_PER_BYTE;

	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
	width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
	bw = speed * width;

	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
		c[i].read_bandwidth = bw;
		c[i].write_bandwidth = bw;
	}

	return 0;
}

/*
 * Set max timeout such that platforms will optimize GPF flow to avoid
 * the implied worst-case scenario delays. On a sane platform, all
 * devices should always complete GPF within the energy budget of
 * the GPF flow. The kernel does not have enough information to pick
 * anything better than "maximize timeouts and hope it works".
 *
 * A misbehaving device could block forward progress of GPF for all
 * the other devices, exhausting the energy budget of the platform.
 * However, the spec seems to assume that moving on from slow to respond
 * devices is a virtue. It is not possible to know that, in actuality,
 * the slow to respond device is *the* most critical device in the
 * system to wait for.
 */
#define GPF_TIMEOUT_BASE_MAX 2
#define GPF_TIMEOUT_SCALE_MAX 7 /* 10 seconds */

u16 cxl_gpf_get_dvsec(struct device *dev)
{
	struct pci_dev *pdev;
	bool is_port = true;
	u16 dvsec;

	if (!dev_is_pci(dev))
		return 0;

	pdev = to_pci_dev(dev);
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ENDPOINT)
		is_port = false;

	dvsec = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_CXL,
			is_port ? PCI_DVSEC_CXL_PORT_GPF : PCI_DVSEC_CXL_DEVICE_GPF);
	if (!dvsec)
		dev_warn(dev, "%s GPF DVSEC not present\n",
			 is_port ? "Port" : "Device");
	return dvsec;
}
EXPORT_SYMBOL_NS_GPL(cxl_gpf_get_dvsec, "CXL");

static int update_gpf_port_dvsec(struct pci_dev *pdev, int dvsec, int phase)
{
	u64 base, scale;
	int rc, offset;
	u16 ctrl;

	switch (phase) {
	case 1:
		offset = PCI_DVSEC_CXL_PORT_GPF_PHASE_1_CONTROL;
		base = PCI_DVSEC_CXL_PORT_GPF_PHASE_1_TMO_BASE;
		scale = PCI_DVSEC_CXL_PORT_GPF_PHASE_1_TMO_SCALE;
		break;
	case 2:
		offset = PCI_DVSEC_CXL_PORT_GPF_PHASE_2_CONTROL;
		base = PCI_DVSEC_CXL_PORT_GPF_PHASE_2_TMO_BASE;
		scale = PCI_DVSEC_CXL_PORT_GPF_PHASE_2_TMO_SCALE;
		break;
	default:
		return -EINVAL;
	}

	rc = pci_read_config_word(pdev, dvsec + offset, &ctrl);
	if (rc)
		return rc;

	if (FIELD_GET(base, ctrl) == GPF_TIMEOUT_BASE_MAX &&
	    FIELD_GET(scale, ctrl) == GPF_TIMEOUT_SCALE_MAX)
		return 0;

	ctrl = FIELD_PREP(base, GPF_TIMEOUT_BASE_MAX);
	ctrl |= FIELD_PREP(scale, GPF_TIMEOUT_SCALE_MAX);

	rc = pci_write_config_word(pdev, dvsec + offset, ctrl);
	if (!rc)
		pci_dbg(pdev, "Port GPF phase %d timeout: %d0 secs\n",
			phase, GPF_TIMEOUT_BASE_MAX);

	return rc;
}

int cxl_gpf_port_setup(struct cxl_dport *dport)
{
	if (!dport)
		return -EINVAL;

	if (!dport->gpf_dvsec) {
		struct pci_dev *pdev;
		int dvsec;

		dvsec = cxl_gpf_get_dvsec(dport->dport_dev);
		if (!dvsec)
			return -EINVAL;

		dport->gpf_dvsec = dvsec;
		pdev = to_pci_dev(dport->dport_dev);
		update_gpf_port_dvsec(pdev, dport->gpf_dvsec, 1);
		update_gpf_port_dvsec(pdev, dport->gpf_dvsec, 2);
	}

	return 0;
}
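
/*
 * Worked example (assuming the CXL GPF timeout encoding of
 * timeout = Base * 10^Scale microseconds): GPF_TIMEOUT_BASE_MAX = 2 and
 * GPF_TIMEOUT_SCALE_MAX = 7 program 2 * 10^7 us = 20 seconds per phase,
 * which is what the "Port GPF phase %d timeout: 20 secs" debug message
 * above reports.
 */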

struct cxl_walk_context {
	struct pci_bus *bus;
	struct cxl_port *port;
	int type;
	int error;
	int count;
};

static int count_dports(struct pci_dev *pdev, void *data)
{
	struct cxl_walk_context *ctx = data;
	int type = pci_pcie_type(pdev);

	if (pdev->bus != ctx->bus)
		return 0;
	if (!pci_is_pcie(pdev))
		return 0;
	if (type != ctx->type)
		return 0;

	ctx->count++;
	return 0;
}

int cxl_port_get_possible_dports(struct cxl_port *port)
{
	struct pci_bus *bus = cxl_port_to_pci_bus(port);
	struct cxl_walk_context ctx;
	int type;

	if (!bus) {
		dev_err(&port->dev, "No PCI bus found for port %s\n",
			dev_name(&port->dev));
		return -ENXIO;
	}

	if (pci_is_root_bus(bus))
		type = PCI_EXP_TYPE_ROOT_PORT;
	else
		type = PCI_EXP_TYPE_DOWNSTREAM;

	ctx = (struct cxl_walk_context) {
		.bus = bus,
		.type = type,
	};
	pci_walk_bus(bus, count_dports, &ctx);

	return ctx.count;
}