--- pci.c (2f6e9c305127f8dea4e2d697b4bdd33e126ccbf7)
+++ pci.c (2905cb5236cba63a5dc8a83752dcc31f3cc819f9)
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright(c) 2020 Intel Corporation. All rights reserved. */
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/moduleparam.h>
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/sizes.h>
 #include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/pci.h>
 #include <linux/pci-doe.h>
+#include <linux/aer.h>
 #include <linux/io.h>
 #include "cxlmem.h"
 #include "cxlpci.h"
 #include "cxl.h"
 #define CREATE_TRACE_POINTS
 #include <trace/events/cxl.h>

 /**
--- 374 unchanged lines hidden ---
 				off);
 			continue;
 		}

 		dev_dbg(dev, "Created DOE mailbox @%x\n", off);
 	}
 }

+static void disable_aer(void *pdev)
+{
+	pci_disable_pcie_error_reporting(pdev);
+}
+
 static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct cxl_register_map map;
 	struct cxl_memdev *cxlmd;
 	struct cxl_dev_state *cxlds;
 	int rc;

 	/*
--- 5 unchanged lines hidden ---

 	rc = pcim_enable_device(pdev);
 	if (rc)
 		return rc;

 	cxlds = cxl_dev_state_create(&pdev->dev);
 	if (IS_ERR(cxlds))
 		return PTR_ERR(cxlds);
+	pci_set_drvdata(pdev, cxlds);
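The pci_set_drvdata() call added here is what lets the error-handling callbacks introduced further down recover the driver state from nothing but the struct pci_dev the AER core hands them. A minimal sketch of that round-trip, using a hypothetical callback name:

static void my_error_callback(struct pci_dev *pdev)
{
	/* Retrieves the pointer stored by cxl_pci_probe() above. */
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "recovered driver state %p\n", cxlds);
}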
 
 	cxlds->serial = pci_get_dsn(pdev);
 	cxlds->cxl_dvsec = pci_find_dvsec_capability(
 		pdev, PCI_DVSEC_VENDOR_ID_CXL, CXL_DVSEC_PCIE_DEVICE);
 	if (!cxlds->cxl_dvsec)
 		dev_warn(&pdev->dev,
 			 "Device DVSEC not present, skip CXL.mem init\n");

--- 38 unchanged lines hidden ---

 	rc = cxl_mem_create_range_info(cxlds);
 	if (rc)
 		return rc;

 	cxlmd = devm_cxl_add_memdev(cxlds);
 	if (IS_ERR(cxlmd))
 		return PTR_ERR(cxlmd);

+	if (cxlds->regs.ras) {
+		pci_enable_pcie_error_reporting(pdev);
+		rc = devm_add_action_or_reset(&pdev->dev, disable_aer, pdev);
+		if (rc)
+			return rc;
+	}
+	pci_save_state(pdev);
+
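pci_enable_pcie_error_reporting() has no devm-managed variant, so the patch pairs it with devm_add_action_or_reset() and the disable_aer() helper added above: the undo action runs automatically at driver unbind, and if registering the action fails it fires immediately and the error is returned. A stripped-down sketch of the same idiom, with hypothetical names:

static void my_disable_feature(void *data)
{
	struct pci_dev *pdev = data;

	pci_disable_pcie_error_reporting(pdev);
}

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	pci_enable_pcie_error_reporting(pdev);

	/*
	 * On success the action runs at unbind; on allocation failure it
	 * runs right away and an error is returned, so no manual cleanup
	 * path is needed in either case.
	 */
	return devm_add_action_or_reset(&pdev->dev, my_disable_feature, pdev);
}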
 	if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM))
 		rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd);

 	return rc;
 }

 static const struct pci_device_id cxl_mem_pci_tbl[] = {
 	/* PCI class code for CXL.mem Type-3 Devices */
 	{ PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
 	{ /* terminate list */ },
 };
 MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);

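For reference, the PCI_DEVICE_CLASS() match above composes the 24-bit PCI class value from the base-class/sub-class pair and the programming interface. With PCI_CLASS_MEMORY_CXL = 0x0502 and CXL_MEMORY_PROGIF = 0x10 (the values in pci_ids.h and cxlpci.h around this commit), the arithmetic can be checked standalone:

#include <stdio.h>

int main(void)
{
	unsigned int class = 0x0502;	/* PCI_CLASS_MEMORY_CXL */
	unsigned int progif = 0x10;	/* CXL_MEMORY_PROGIF */

	/* A ~0 mask in PCI_DEVICE_CLASS() means only an exact match binds. */
	printf("match value: 0x%06x\n", (class << 8) | progif);	/* 0x050210 */
	return 0;
}

Because no vendor/device IDs are listed, the driver binds to any vendor's CXL.mem type-3 device purely by class code.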
+/* CXL spec rev3.0 8.2.4.16.1 */
+static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
+{
+	void __iomem *addr;
+	u32 *log_addr;
+	int i, log_u32_size = CXL_HEADERLOG_SIZE / sizeof(u32);
+
+	addr = cxlds->regs.ras + CXL_RAS_HEADER_LOG_OFFSET;
+	log_addr = log;
+
+	for (i = 0; i < log_u32_size; i++) {
+		*log_addr = readl(addr);
+		log_addr++;
+		addr += sizeof(u32);
+	}
+}
+
+/*
+ * Log the state of the RAS status registers and prepare them to log the
+ * next error status. Return 1 if reset needed.
+ */
+static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
+{
+	struct cxl_memdev *cxlmd = cxlds->cxlmd;
+	struct device *dev = &cxlmd->dev;
+	u32 hl[CXL_HEADERLOG_SIZE_U32];
+	void __iomem *addr;
+	u32 status;
+	u32 fe;
+
+	if (!cxlds->regs.ras)
+		return false;
+
+	addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
+	status = le32_to_cpu((__force __le32)readl(addr));
+	if (!(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK))
+		return false;
+
+	/* If multiple errors, log header points to first error from ctrl reg */
+	if (hweight32(status) > 1) {
+		addr = cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET;
+		fe = BIT(le32_to_cpu((__force __le32)readl(addr)) &
+			 CXL_RAS_CAP_CONTROL_FE_MASK);
+	} else {
+		fe = status;
+	}
+
+	header_log_copy(cxlds, hl);
+	trace_cxl_aer_uncorrectable_error(dev_name(dev), status, fe, hl);
+	writel(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK, addr);
+
+	return true;
+}
+
+static pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
+					   pci_channel_state_t state)
+{
+	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+	struct cxl_memdev *cxlmd = cxlds->cxlmd;
+	struct device *dev = &cxlmd->dev;
+	bool ue;
+
+	/*
+	 * A frozen channel indicates an impending reset which is fatal to
+	 * CXL.mem operation, and will likely crash the system. On the off
+	 * chance the situation is recoverable dump the status of the RAS
+	 * capability registers and bounce the active state of the memdev.
+	 */
+	ue = cxl_report_and_clear(cxlds);
+
+	switch (state) {
+	case pci_channel_io_normal:
+		if (ue) {
+			device_release_driver(dev);
+			return PCI_ERS_RESULT_NEED_RESET;
+		}
+		return PCI_ERS_RESULT_CAN_RECOVER;
+	case pci_channel_io_frozen:
+		dev_warn(&pdev->dev,
+			 "%s: frozen state error detected, disable CXL.mem\n",
+			 dev_name(dev));
+		device_release_driver(dev);
+		return PCI_ERS_RESULT_NEED_RESET;
+	case pci_channel_io_perm_failure:
+		dev_warn(&pdev->dev,
+			 "failure state error detected, request disconnect\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t cxl_slot_reset(struct pci_dev *pdev)
+{
+	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+	struct cxl_memdev *cxlmd = cxlds->cxlmd;
+	struct device *dev = &cxlmd->dev;
+
+	dev_info(&pdev->dev, "%s: restart CXL.mem after slot reset\n",
+		 dev_name(dev));
+	pci_restore_state(pdev);
+	if (device_attach(dev) <= 0)
+		return PCI_ERS_RESULT_DISCONNECT;
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void cxl_error_resume(struct pci_dev *pdev)
+{
+	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+	struct cxl_memdev *cxlmd = cxlds->cxlmd;
+	struct device *dev = &cxlmd->dev;
+
+	dev_info(&pdev->dev, "%s: error resume %s\n", dev_name(dev),
+		 dev->driver ? "successful" : "failed");
+}
+
+static const struct pci_error_handlers cxl_error_handlers = {
+	.error_detected	= cxl_error_detected,
+	.slot_reset	= cxl_slot_reset,
+	.resume		= cxl_error_resume,
+};
+
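One subtlety in cxl_report_and_clear() above: the RAS status register is a bitmask of latched errors, while the capability control register's First Error Pointer is an index, so BIT() converts the index back into single-bit-mask form before handing it to the tracepoint. A hypothetical worked example of that conversion (the field mask value is assumed here, not quoted from the header):

#include <stdio.h>
#include <stdint.h>

#define BIT(n)				(1u << (n))
#define CXL_RAS_CAP_CONTROL_FE_MASK	0x3f	/* assumed field width */

int main(void)
{
	/* Two errors latched in the status register: bits 3 and 9. */
	uint32_t status = BIT(3) | BIT(9);
	/* The control register's First Error Pointer reads index 3. */
	uint32_t ctrl = 3;
	/* Index 3 becomes the single-bit mask 0x8 for the tracepoint. */
	uint32_t fe = BIT(ctrl & CXL_RAS_CAP_CONTROL_FE_MASK);

	printf("status=0x%x fe=0x%x\n", status, fe);
	return 0;
}

When only one status bit is set, the driver skips the control-register read entirely and reports the status value itself as the first error.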
 static struct pci_driver cxl_pci_driver = {
 	.name			= KBUILD_MODNAME,
 	.id_table		= cxl_mem_pci_tbl,
 	.probe			= cxl_pci_probe,
+	.err_handler		= &cxl_error_handlers,
 	.driver = {
 		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
 	},
 };

 MODULE_LICENSE("GPL v2");
 module_pci_driver(cxl_pci_driver);
 MODULE_IMPORT_NS(CXL);