// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"
#include "trace.h"

/**
 * DOC: cxl core pci
 *
 * Compute Express Link protocols are layered on top of PCIe. CXL core provides
 * a set of helpers for CXL interactions which occur via PCIe.
 */

static unsigned short media_ready_timeout = 60;
module_param(media_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(media_ready_timeout, "seconds to wait for media ready");

struct cxl_walk_context {
	struct pci_bus *bus;
	struct cxl_port *port;
	int type;
	int error;
	int count;
};

static int match_add_dports(struct pci_dev *pdev, void *data)
{
	struct cxl_walk_context *ctx = data;
	struct cxl_port *port = ctx->port;
	int type = pci_pcie_type(pdev);
	struct cxl_register_map map;
	struct cxl_dport *dport;
	u32 lnkcap, port_num;
	int rc;

	if (pdev->bus != ctx->bus)
		return 0;
	if (!pci_is_pcie(pdev))
		return 0;
	if (type != ctx->type)
		return 0;
	if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
				  &lnkcap))
		return 0;

	rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	if (rc)
		dev_dbg(&port->dev, "failed to find component registers\n");

	port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
	dport = devm_cxl_add_dport(port, &pdev->dev, port_num, map.resource);
	if (IS_ERR(dport)) {
		ctx->error = PTR_ERR(dport);
		return PTR_ERR(dport);
	}
	ctx->count++;

	return 0;
}

/**
 * devm_cxl_port_enumerate_dports - enumerate downstream ports of the upstream port
 * @port: cxl_port whose ->uport is the upstream of dports to be enumerated
 *
 * Returns a positive number of dports enumerated or a negative error
 * code.
 */
int devm_cxl_port_enumerate_dports(struct cxl_port *port)
{
	struct pci_bus *bus = cxl_port_to_pci_bus(port);
	struct cxl_walk_context ctx;
	int type;

	if (!bus)
		return -ENXIO;

	if (pci_is_root_bus(bus))
		type = PCI_EXP_TYPE_ROOT_PORT;
	else
		type = PCI_EXP_TYPE_DOWNSTREAM;

	ctx = (struct cxl_walk_context) {
		.port = port,
		.bus = bus,
		.type = type,
	};
	pci_walk_bus(bus, match_add_dports, &ctx);

	if (ctx.count == 0)
		return -ENODEV;
	if (ctx.error)
		return ctx.error;
	return ctx.count;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, CXL);
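
/*
 * Example (illustrative sketch, not code in this file): a port driver's
 * probe path is the expected caller, using the returned dport count to
 * decide between full decoder enumeration and a passthrough decoder. The
 * probe wrapper below is hypothetical and simplified.
 *
 *	static int example_port_probe(struct cxl_port *port)
 *	{
 *		int rc;
 *
 *		rc = devm_cxl_port_enumerate_dports(port);
 *		if (rc < 0)
 *			return rc;
 *		if (rc == 1)
 *			return devm_cxl_add_passthrough_decoder(port);
 *		...
 *	}
 */
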
static int cxl_dvsec_mem_range_valid(struct cxl_dev_state *cxlds, int id)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	bool valid = false;
	int rc, i;
	u32 temp;

	if (id > CXL_DVSEC_RANGE_MAX)
		return -EINVAL;

	/* Check MEM INFO VALID bit first, give up after 1s */
	i = 1;
	do {
		rc = pci_read_config_dword(pdev,
					   d + CXL_DVSEC_RANGE_SIZE_LOW(id),
					   &temp);
		if (rc)
			return rc;

		valid = FIELD_GET(CXL_DVSEC_MEM_INFO_VALID, temp);
		if (valid)
			break;
		msleep(1000);
	} while (i--);

	if (!valid) {
		dev_err(&pdev->dev,
			"Timeout awaiting memory range %d valid after 1s.\n",
			id);
		return -ETIMEDOUT;
	}

	return 0;
}

static int cxl_dvsec_mem_range_active(struct cxl_dev_state *cxlds, int id)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	bool active = false;
	int rc, i;
	u32 temp;

	if (id > CXL_DVSEC_RANGE_MAX)
		return -EINVAL;

	/* Check MEM ACTIVE bit, up to 60s timeout by default */
	for (i = media_ready_timeout; i; i--) {
		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(id), &temp);
		if (rc)
			return rc;

		active = FIELD_GET(CXL_DVSEC_MEM_ACTIVE, temp);
		if (active)
			break;
		msleep(1000);
	}

	if (!active) {
		dev_err(&pdev->dev,
			"timeout awaiting memory active after %d seconds\n",
			media_ready_timeout);
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * Wait up to @media_ready_timeout for the device to report memory
 * active.
 */
int cxl_await_media_ready(struct cxl_dev_state *cxlds)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	int rc, i, hdm_count;
	u64 md_status;
	u16 cap;

	rc = pci_read_config_word(pdev,
				  d + CXL_DVSEC_CAP_OFFSET, &cap);
	if (rc)
		return rc;

	hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
	for (i = 0; i < hdm_count; i++) {
		rc = cxl_dvsec_mem_range_valid(cxlds, i);
		if (rc)
			return rc;
	}

	for (i = 0; i < hdm_count; i++) {
		rc = cxl_dvsec_mem_range_active(cxlds, i);
		if (rc)
			return rc;
	}

	md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
	if (!CXLMDEV_READY(md_status))
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, CXL);
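
/*
 * Example (illustrative sketch): a memdev driver would typically call
 * cxl_await_media_ready() from probe, after locating the CXL DVSEC and
 * mapping the memdev registers, since both cxlds->cxl_dvsec and
 * cxlds->regs.memdev are consumed above. The error handling shown is
 * hypothetical and simplified.
 *
 *	rc = cxl_await_media_ready(cxlds);
 *	if (rc)
 *		dev_warn(cxlds->dev, "media not active (%d)\n", rc);
 */
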
static int wait_for_valid(struct pci_dev *pdev, int d)
{
	u32 val;
	int rc;

	/*
	 * Memory_Info_Valid: When set, indicates that the CXL Range 1 Size high
	 * and Size Low registers are valid. Must be set within 1 second of
	 * deassertion of reset to CXL device. Likely it is already set by the
	 * time this runs, but otherwise give a 1.5 second timeout in case of
	 * clock skew.
	 */
	rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
	if (rc)
		return rc;

	if (val & CXL_DVSEC_MEM_INFO_VALID)
		return 0;

	msleep(1500);

	rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
	if (rc)
		return rc;

	if (val & CXL_DVSEC_MEM_INFO_VALID)
		return 0;

	return -ETIMEDOUT;
}

/* Returns 1 if @val was already the MEM_ENABLE state, 0 after updating it */
static int cxl_set_mem_enable(struct cxl_dev_state *cxlds, u16 val)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	u16 ctrl;
	int rc;

	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
	if (rc < 0)
		return rc;

	if ((ctrl & CXL_DVSEC_MEM_ENABLE) == val)
		return 1;
	ctrl &= ~CXL_DVSEC_MEM_ENABLE;
	ctrl |= val;

	rc = pci_write_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, ctrl);
	if (rc < 0)
		return rc;

	return 0;
}

static void clear_mem_enable(void *cxlds)
{
	cxl_set_mem_enable(cxlds, 0);
}

static int devm_cxl_enable_mem(struct device *host, struct cxl_dev_state *cxlds)
{
	int rc;

	rc = cxl_set_mem_enable(cxlds, CXL_DVSEC_MEM_ENABLE);
	if (rc < 0)
		return rc;
	if (rc > 0)
		return 0;
	return devm_add_action_or_reset(host, clear_mem_enable, cxlds);
}

/* require dvsec ranges to be covered by a locked platform window */
static int dvsec_range_allowed(struct device *dev, void *arg)
{
	struct range *dev_range = arg;
	struct cxl_decoder *cxld;

	if (!is_root_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);

	if (!(cxld->flags & CXL_DECODER_F_RAM))
		return 0;

	return range_contains(&cxld->hpa_range, dev_range);
}

static void disable_hdm(void *_cxlhdm)
{
	u32 global_ctrl;
	struct cxl_hdm *cxlhdm = _cxlhdm;
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;

	global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
	writel(global_ctrl & ~CXL_HDM_DECODER_ENABLE,
	       hdm + CXL_HDM_DECODER_CTRL_OFFSET);
}
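
/**
 * devm_cxl_enable_hdm - enable the HDM Decoder Capability for @port
 * @port: cxl_port that owns the disable action registered on success
 * @cxlhdm: mapped HDM Decoder Capability, or an ERR_PTR() if mapping failed
 *
 * Set the global enable bit unless decoding was already enabled on entry,
 * in which case no disable action is registered since the capability may
 * be owned by platform firmware. Returns 0 on success (including when
 * there is nothing to do), or a negative error code if registering the
 * disable action fails.
 */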
int devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm)
{
	void __iomem *hdm;
	u32 global_ctrl;

	/*
	 * If the hdm capability was not mapped there is nothing to enable and
	 * the caller is responsible for what happens next. For example,
	 * emulate a passthrough decoder.
	 */
	if (IS_ERR(cxlhdm))
		return 0;

	hdm = cxlhdm->regs.hdm_decoder;
	global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);

	/*
	 * If the HDM decoder capability was enabled on entry, skip
	 * registering disable_hdm() since this decode capability may be
	 * owned by platform firmware.
	 */
	if (global_ctrl & CXL_HDM_DECODER_ENABLE)
		return 0;

	writel(global_ctrl | CXL_HDM_DECODER_ENABLE,
	       hdm + CXL_HDM_DECODER_CTRL_OFFSET);

	return devm_add_action_or_reset(&port->dev, disable_hdm, cxlhdm);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enable_hdm, CXL);
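
/**
 * cxl_dvsec_rr_decode() - Decode and cache the DVSEC CXL range registers
 * @dev: device backing the endpoint's PCI function
 * @d: offset of the CXL DVSEC in the function's config space, 0 if absent
 * @info: output cache of the decoded range register state
 *
 * Validate MEM capability and the HDM decoder count, wait for
 * Memory_Info_Valid, and record each enabled, non-zero-sized range in
 * @info. Returns 0 on success or a negative error code.
 */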
int cxl_dvsec_rr_decode(struct device *dev, int d,
			struct cxl_endpoint_dvsec_info *info)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int hdm_count, rc, i, ranges = 0;
	u16 cap, ctrl;

	if (!d) {
		dev_dbg(dev, "No DVSEC Capability\n");
		return -ENXIO;
	}

	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CAP_OFFSET, &cap);
	if (rc)
		return rc;

	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
	if (rc)
		return rc;

	if (!(cap & CXL_DVSEC_MEM_CAPABLE)) {
		dev_dbg(dev, "Not MEM Capable\n");
		return -ENXIO;
	}

	/*
	 * It is not allowed by spec for MEM.capable to be set and have 0 legacy
	 * HDM decoders (values > 2 are also undefined as of CXL 2.0). As this
	 * driver is for a spec defined class code which must be CXL.mem
	 * capable, there is no point in continuing to enable CXL.mem.
	 */
	hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
	if (!hdm_count || hdm_count > 2)
		return -EINVAL;

	rc = wait_for_valid(pdev, d);
	if (rc) {
		dev_dbg(dev, "Failure awaiting MEM_INFO_VALID (%d)\n", rc);
		return rc;
	}

	/*
	 * The current DVSEC values are moot if the memory capability is
	 * disabled, and they will remain moot after the HDM Decoder
	 * capability is enabled.
	 */
	info->mem_enabled = FIELD_GET(CXL_DVSEC_MEM_ENABLE, ctrl);
	if (!info->mem_enabled)
		return 0;

	for (i = 0; i < hdm_count; i++) {
		u64 base, size;
		u32 temp;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp);
		if (rc)
			return rc;

		size = (u64)temp << 32;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(i), &temp);
		if (rc)
			return rc;

		size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK;
		if (!size) {
			info->dvsec_range[i] = (struct range) {
				.start = 0,
				.end = CXL_RESOURCE_NONE,
			};
			continue;
		}

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_BASE_HIGH(i), &temp);
		if (rc)
			return rc;

		base = (u64)temp << 32;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_BASE_LOW(i), &temp);
		if (rc)
			return rc;

		base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK;

		info->dvsec_range[i] = (struct range) {
			.start = base,
			.end = base + size - 1
		};

		ranges++;
	}

	info->ranges = ranges;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dvsec_rr_decode, CXL);

/**
 * cxl_hdm_decode_init() - Setup HDM decoding for the endpoint
 * @cxlds: Device state
 * @cxlhdm: Mapped HDM decoder Capability
 * @info: Cached DVSEC range registers info
 *
 * Try to enable the endpoint's HDM Decoder Capability
 */
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
			struct cxl_endpoint_dvsec_info *info)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	struct device *dev = cxlds->dev;
	struct cxl_port *root;
	int i, rc, allowed;
	u32 global_ctrl = 0;

	if (hdm)
		global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);

	/*
	 * If the HDM Decoder Capability is already enabled then assume
	 * that some other agent like platform firmware set it up.
	 */
	if (global_ctrl & CXL_HDM_DECODER_ENABLE || (!hdm && info->mem_enabled))
		return devm_cxl_enable_mem(&port->dev, cxlds);
	else if (!hdm)
		return -ENODEV;

	root = to_cxl_port(port->dev.parent);
	while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
		root = to_cxl_port(root->dev.parent);
	if (!is_cxl_root(root)) {
		dev_err(dev, "Failed to acquire root port for HDM enable\n");
		return -ENODEV;
	}

	for (i = 0, allowed = 0; info->mem_enabled && i < info->ranges; i++) {
		struct device *cxld_dev;

		cxld_dev = device_find_child(&root->dev, &info->dvsec_range[i],
					     dvsec_range_allowed);
		if (!cxld_dev) {
			dev_dbg(dev, "DVSEC Range%d denied by platform\n", i);
			continue;
		}
		dev_dbg(dev, "DVSEC Range%d allowed by platform\n", i);
		put_device(cxld_dev);
		allowed++;
	}

	if (!allowed) {
		cxl_set_mem_enable(cxlds, 0);
		info->mem_enabled = 0;
	}

	/*
	 * Per CXL 2.0 Section 8.1.3.8.3 and 8.1.3.8.4 DVSEC CXL Range 1 Base
	 * [High,Low] when HDM operation is enabled the range register values
	 * are ignored by the device, but the spec also recommends matching the
	 * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges
	 * are expected even though Linux does not require or maintain that
	 * match. If at least one DVSEC range is enabled and allowed, skip HDM
	 * Decoder Capability Enable.
	 */
	if (info->mem_enabled)
		return 0;

	rc = devm_cxl_enable_hdm(port, cxlhdm);
	if (rc)
		return rc;

	return devm_cxl_enable_mem(&port->dev, cxlds);
}
EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL);
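
/*
 * CDAT retrieval over the Table Access DOE protocol. A Table Access Request
 * is a single DW: bits 7:0 are the request code (0 = read), bits 15:8 the
 * table type (0 = CDAT), and bits 31:16 the entry handle. Worked example:
 * CDAT_DOE_REQ(3) below evaluates to cpu_to_le32(0x00030000), i.e. "read
 * the CDAT entry with handle 3"; a returned handle of 0xffff marks the
 * final entry.
 */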
#define CXL_DOE_TABLE_ACCESS_REQ_CODE		0x000000ff
#define   CXL_DOE_TABLE_ACCESS_REQ_CODE_READ	0
#define CXL_DOE_TABLE_ACCESS_TABLE_TYPE		0x0000ff00
#define   CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA	0
#define CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE	0xffff0000
#define CXL_DOE_TABLE_ACCESS_LAST_ENTRY		0xffff
#define CXL_DOE_PROTOCOL_TABLE_ACCESS 2

#define CDAT_DOE_REQ(entry_handle) cpu_to_le32				\
	(FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE,			\
		    CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) |		\
	 FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE,			\
		    CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA) |		\
	 FIELD_PREP(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, (entry_handle)))

static int cxl_cdat_get_length(struct device *dev,
			       struct pci_doe_mb *cdat_doe,
			       size_t *length)
{
	__le32 request = CDAT_DOE_REQ(0);
	__le32 response[2];
	int rc;

	rc = pci_doe(cdat_doe, PCI_DVSEC_VENDOR_ID_CXL,
		     CXL_DOE_PROTOCOL_TABLE_ACCESS,
		     &request, sizeof(request),
		     &response, sizeof(response));
	if (rc < 0) {
		dev_err(dev, "DOE failed: %d", rc);
		return rc;
	}
	if (rc < sizeof(response))
		return -EIO;

	*length = le32_to_cpu(response[1]);
	dev_dbg(dev, "CDAT length %zu\n", *length);

	return 0;
}

static int cxl_cdat_read_table(struct device *dev,
			       struct pci_doe_mb *cdat_doe,
			       void *cdat_table, size_t *cdat_length)
{
	size_t length = *cdat_length + sizeof(__le32);
	__le32 *data = cdat_table;
	int entry_handle = 0;
	__le32 saved_dw = 0;

	do {
		__le32 request = CDAT_DOE_REQ(entry_handle);
		struct cdat_entry_header *entry;
		size_t entry_dw;
		int rc;

		rc = pci_doe(cdat_doe, PCI_DVSEC_VENDOR_ID_CXL,
			     CXL_DOE_PROTOCOL_TABLE_ACCESS,
			     &request, sizeof(request),
			     data, length);
		if (rc < 0) {
			dev_err(dev, "DOE failed: %d", rc);
			return rc;
		}

		/* 1 DW Table Access Response Header + CDAT entry */
		entry = (struct cdat_entry_header *)(data + 1);
		if ((entry_handle == 0 &&
		     rc != sizeof(__le32) + sizeof(struct cdat_header)) ||
		    (entry_handle > 0 &&
		     (rc < sizeof(__le32) + sizeof(*entry) ||
		      rc != sizeof(__le32) + le16_to_cpu(entry->length))))
			return -EIO;

		/* Get the CXL table access header entry handle */
		entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
					 le32_to_cpu(data[0]));
		entry_dw = rc / sizeof(__le32);
		/* Skip Header */
		entry_dw -= 1;
		/*
		 * Table Access Response Header overwrote the last DW of
		 * previous entry, so restore that DW
		 */
		*data = saved_dw;
		length -= entry_dw * sizeof(__le32);
		data += entry_dw;
		saved_dw = *data;
	} while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);

	/* Length in CDAT header may exceed concatenation of CDAT entries */
	*cdat_length -= length - sizeof(__le32);

	return 0;
}

/**
 * read_cdat_data - Read the CDAT data on this port
 * @port: Port to read data from
 *
 * This call will sleep waiting for responses from the DOE mailbox.
 */
void read_cdat_data(struct cxl_port *port)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport);
	struct device *host = cxlmd->dev.parent;
	struct device *dev = &port->dev;
	struct pci_doe_mb *cdat_doe;
	size_t cdat_length;
	void *cdat_table;
	int rc;

	if (!dev_is_pci(host))
		return;
	cdat_doe = pci_find_doe_mailbox(to_pci_dev(host),
					PCI_DVSEC_VENDOR_ID_CXL,
					CXL_DOE_PROTOCOL_TABLE_ACCESS);
	if (!cdat_doe) {
		dev_dbg(dev, "No CDAT mailbox\n");
		return;
	}

	port->cdat_available = true;

	if (cxl_cdat_get_length(dev, cdat_doe, &cdat_length)) {
		dev_dbg(dev, "No CDAT length\n");
		return;
	}

	cdat_table = devm_kzalloc(dev, cdat_length + sizeof(__le32),
				  GFP_KERNEL);
	if (!cdat_table)
		return;

	rc = cxl_cdat_read_table(dev, cdat_doe, cdat_table, &cdat_length);
	if (rc) {
		/* Don't leave table data allocated on error */
		devm_kfree(dev, cdat_table);
		dev_err(dev, "CDAT data read error\n");
		return;
	}

	port->cdat.table = cdat_table + sizeof(__le32);
	port->cdat.length = cdat_length;
}
EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
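
/**
 * cxl_cor_error_detected() - AER correctable error handler for CXL RAS
 * @pdev: PCI device reporting the correctable error
 *
 * Trace and clear any errors logged in the CXL RAS Correctable Error
 * Status register. A no-op if the RAS registers were not mapped.
 */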
void cxl_cor_error_detected(struct pci_dev *pdev)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	void __iomem *addr;
	u32 status;

	if (!cxlds->regs.ras)
		return;

	addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
	status = readl(addr);
	if (status & CXL_RAS_CORRECTABLE_STATUS_MASK) {
		writel(status & CXL_RAS_CORRECTABLE_STATUS_MASK, addr);
		trace_cxl_aer_correctable_error(cxlds->cxlmd, status);
	}
}
EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);

/* CXL spec rev3.0 8.2.4.16.1 */
static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
{
	void __iomem *addr;
	u32 *log_addr;
	int i, log_u32_size = CXL_HEADERLOG_SIZE / sizeof(u32);

	addr = cxlds->regs.ras + CXL_RAS_HEADER_LOG_OFFSET;
	log_addr = log;

	for (i = 0; i < log_u32_size; i++) {
		*log_addr = readl(addr);
		log_addr++;
		addr += sizeof(u32);
	}
}

/*
 * Log the state of the RAS status registers and prepare them to log the
 * next error status. Return true if a reset is needed.
 */
static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
{
	u32 hl[CXL_HEADERLOG_SIZE_U32];
	void __iomem *addr;
	u32 status;
	u32 fe;

	if (!cxlds->regs.ras)
		return false;

	addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
	status = readl(addr);
	if (!(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK))
		return false;

	/* If multiple errors, log header points to first error from ctrl reg */
	if (hweight32(status) > 1) {
		void __iomem *rcc_addr =
			cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET;

		fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
				   readl(rcc_addr)));
	} else {
		fe = status;
	}

	header_log_copy(cxlds, hl);
	trace_cxl_aer_uncorrectable_error(cxlds->cxlmd, status, fe, hl);
	writel(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK, addr);

	return true;
}

pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
				    pci_channel_state_t state)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;
	bool ue;

	/*
	 * A frozen channel indicates an impending reset which is fatal to
	 * CXL.mem operation, and will likely crash the system. On the off
	 * chance the situation is recoverable dump the status of the RAS
	 * capability registers and bounce the active state of the memdev.
	 */
	ue = cxl_report_and_clear(cxlds);

	switch (state) {
	case pci_channel_io_normal:
		if (ue) {
			device_release_driver(dev);
			return PCI_ERS_RESULT_NEED_RESET;
		}
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		dev_warn(&pdev->dev,
			 "%s: frozen state error detected, disable CXL.mem\n",
			 dev_name(dev));
		device_release_driver(dev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_warn(&pdev->dev,
			 "failure state error detected, request disconnect\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
EXPORT_SYMBOL_NS_GPL(cxl_error_detected, CXL);
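
/*
 * Example (illustrative sketch): a PCI memdev driver hooks the handlers
 * above into its struct pci_error_handlers. The reset and resume callbacks
 * named here are hypothetical, not helpers exported by this file.
 *
 *	static const struct pci_error_handlers example_error_handlers = {
 *		.error_detected		= cxl_error_detected,
 *		.cor_error_detected	= cxl_cor_error_detected,
 *		.slot_reset		= example_slot_reset,
 *		.resume			= example_error_resume,
 *	};
 */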