// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2025 AMD Corporation. All rights reserved. */

#include <linux/types.h>
#include <linux/aer.h>
#include "cxl.h"
#include "core.h"
#include "cxlmem.h"

/**
 * cxl_dport_map_rch_aer() - Map the AER capability within a dport's RCRB
 * @dport: CXL dport whose Root Complex Register Block (RCRB) is searched
 *
 * Locate the AER extended capability inside the dport's RCRB via
 * cxl_rcrb_to_aer() and devm-ioremap one struct aer_capability_regs worth
 * of registers at that offset into dport->regs.dport_aer.
 *
 * If no AER capability is found (offset of 0), dport->regs.dport_aer is
 * left untouched; consumers below treat an unset mapping as "no AER".
 * The mapping lifetime is tied to dport->reg_map.host via devm.
 */
void cxl_dport_map_rch_aer(struct cxl_dport *dport)
{
	resource_size_t aer_phys;
	struct device *host;
	u16 aer_cap;

	aer_cap = cxl_rcrb_to_aer(dport->dport_dev, dport->rcrb.base);
	if (aer_cap) {
		host = dport->reg_map.host;
		aer_phys = aer_cap + dport->rcrb.base;
		dport->regs.dport_aer =
			devm_cxl_iomap_block(host, aer_phys,
					     sizeof(struct aer_capability_regs));
	}
}

/**
 * cxl_disable_rch_root_ints() - Mask RCH root port AER command interrupts
 * @dport: CXL dport with a previously mapped RCRB AER capability
 *
 * Read-modify-write the AER Root Error Command register to clear the
 * correctable, non-fatal, and fatal error reporting interrupt enables.
 * Silently does nothing if the AER block was never mapped
 * (dport->regs.dport_aer unset).
 */
void cxl_disable_rch_root_ints(struct cxl_dport *dport)
{
	void __iomem *aer_base = dport->regs.dport_aer;
	u32 aer_cmd_mask, aer_cmd;

	if (!aer_base)
		return;

	/*
	 * Disable RCH root port command interrupts.
	 * CXL 3.0 12.2.1.1 - RCH Downstream Port-detected Errors
	 *
	 * This sequence may not be necessary. CXL spec states disabling
	 * the root cmd register's interrupts is required. But, PCI spec
	 * shows these are disabled by default on reset.
	 */
	aer_cmd_mask = (PCI_ERR_ROOT_CMD_COR_EN |
			PCI_ERR_ROOT_CMD_NONFATAL_EN |
			PCI_ERR_ROOT_CMD_FATAL_EN);
	aer_cmd = readl(aer_base + PCI_ERR_ROOT_COMMAND);
	aer_cmd &= ~aer_cmd_mask;
	writel(aer_cmd, aer_base + PCI_ERR_ROOT_COMMAND);
}

/*
 * Copy the AER capability registers using 32 bit read accesses.
 * This is necessary because RCRB AER capability is MMIO mapped. Clear the
 * status after copying.
 *
 * @aer_base: base address of AER capability block in RCRB; may be NULL if
 *            the capability was never mapped, in which case nothing is read
 * @aer_regs: destination for copying AER capability
 *
 * Returns true if the registers were copied, false if @aer_base is NULL.
 *
 * The write-back of uncor_status/cor_status relies on the PCIe AER status
 * registers being RW1C: writing the just-read values clears exactly the
 * bits that were latched at copy time.
 */
static bool cxl_rch_get_aer_info(void __iomem *aer_base,
				 struct aer_capability_regs *aer_regs)
{
	int read_cnt = sizeof(struct aer_capability_regs) / sizeof(u32);
	u32 *aer_regs_buf = (u32 *)aer_regs;
	int n;

	if (!aer_base)
		return false;

	/* Use readl() to guarantee 32-bit accesses */
	for (n = 0; n < read_cnt; n++)
		aer_regs_buf[n] = readl(aer_base + n * sizeof(u32));

	writel(aer_regs->uncor_status, aer_base + PCI_ERR_UNCOR_STATUS);
	writel(aer_regs->cor_status, aer_base + PCI_ERR_COR_STATUS);

	return true;
}

/*
 * Get AER severity. Return false if there is no error.
 *
 * Unmasked uncorrectable errors take precedence over correctable ones;
 * only status bits not suppressed by the corresponding mask register are
 * considered.
 *
 * NOTE(review): PCI_ERR_ROOT_FATAL_RCV is defined for the AER Root Error
 * Status register, yet here it is tested against the Uncorrectable Error
 * Status register to pick AER_FATAL vs AER_NONFATAL — confirm this bit
 * overlap is intentional rather than e.g. a per-error severity lookup
 * against PCI_ERR_UNCOR_SEVER.
 */
static bool cxl_rch_get_aer_severity(struct aer_capability_regs *aer_regs,
				     int *severity)
{
	if (aer_regs->uncor_status & ~aer_regs->uncor_mask) {
		if (aer_regs->uncor_status & PCI_ERR_ROOT_FATAL_RCV)
			*severity = AER_FATAL;
		else
			*severity = AER_NONFATAL;
		return true;
	}

	if (aer_regs->cor_status & ~aer_regs->cor_mask) {
		*severity = AER_CORRECTABLE;
		return true;
	}

	return false;
}

/**
 * cxl_handle_rdport_errors() - Handle errors reported by an RCH dport
 * @cxlds: device state of the CXL memory device behind the dport
 *
 * Resolve the dport upstream of @cxlds's PCI device, snapshot-and-clear
 * the dport's RCRB AER registers, and if an unmasked error is latched,
 * log it via pci_print_aer() and dispatch to the correctable or
 * uncorrectable CXL RAS handler based on severity.
 *
 * Returns early (no action) if the port lookup fails, the AER block was
 * never mapped, or no unmasked error is pending. The port reference
 * taken by cxl_pci_find_port() is dropped automatically at scope exit
 * via __free(put_cxl_port).
 */
void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	struct aer_capability_regs aer_regs;
	struct cxl_dport *dport;
	int severity;

	struct cxl_port *port __free(put_cxl_port) =
		cxl_pci_find_port(pdev, &dport);
	if (!port)
		return;

	if (!cxl_rch_get_aer_info(dport->regs.dport_aer, &aer_regs))
		return;

	if (!cxl_rch_get_aer_severity(&aer_regs, &severity))
		return;

	pci_print_aer(pdev, severity, &aer_regs);
	if (severity == AER_CORRECTABLE)
		cxl_handle_cor_ras(&cxlds->cxlmd->dev, dport->regs.ras);
	else
		cxl_handle_ras(&cxlds->cxlmd->dev, dport->regs.ras);
}