/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ras.h"
#include "amdgpu.h"
#include "amdgpu_mca.h"

#include "umc/umc_6_7_0_offset.h"
#include "umc/umc_6_7_0_sh_mask.h"

void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
					      uint64_t mc_status_addr,
					      unsigned long *error_count)
{
	uint64_t mc_status = RREG64_PCIE(mc_status_addr);

	if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
		*error_count += 1;
}

void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
						uint64_t mc_status_addr,
						unsigned long *error_count)
{
	uint64_t mc_status = RREG64_PCIE(mc_status_addr);

	if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
	    (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
	     REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
	     REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
	     REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
	     REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
		*error_count += 1;
}

void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
				  uint64_t mc_status_addr)
{
	WREG64_PCIE(mc_status_addr, 0x0ULL);
}

void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
				      uint64_t mc_status_addr,
				      void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	amdgpu_mca_query_correctable_error_count(adev, mc_status_addr, &(err_data->ce_count));
	amdgpu_mca_query_uncorrectable_error_count(adev, mc_status_addr, &(err_data->ue_count));

	amdgpu_mca_reset_error_count(adev, mc_status_addr);
}
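
/*
 * Illustrative sketch (an assumption, not code from this driver): an IP
 * block with a single MCA status register would typically wire the helpers
 * above into its RAS .query_ras_error_count hook roughly like this, where
 * XXX_MCA_STATUS_ADDR stands in for the block-specific register address:
 *
 *	static void xxx_query_ras_error_count(struct amdgpu_device *adev,
 *					      void *ras_error_status)
 *	{
 *		amdgpu_mca_query_ras_error_count(adev, XXX_MCA_STATUS_ADDR,
 *						 ras_error_status);
 *	}
 *
 * amdgpu_mca_query_ras_error_count() bumps ce_count/ue_count in the passed
 * ras_err_data and then clears the status register via
 * amdgpu_mca_reset_error_count().
 */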

int amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_mca_ras_block *ras;

	if (!adev->mca.mp0.ras)
		return 0;

	ras = adev->mca.mp0.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register mca.mp0 ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "mca.mp0");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->mca.mp0.ras_if = &ras->ras_block.ras_comm;

	return 0;
}

int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_mca_ras_block *ras;

	if (!adev->mca.mp1.ras)
		return 0;

	ras = adev->mca.mp1.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register mca.mp1 ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "mca.mp1");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->mca.mp1.ras_if = &ras->ras_block.ras_comm;

	return 0;
}

int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_mca_ras_block *ras;

	if (!adev->mca.mpio.ras)
		return 0;

	ras = adev->mca.mpio.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register mca.mpio ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "mca.mpio");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->mca.mpio.ras_if = &ras->ras_block.ras_comm;

	return 0;
}

void amdgpu_mca_smu_init_funcs(struct amdgpu_device *adev, const struct amdgpu_mca_smu_funcs *mca_funcs)
{
	struct amdgpu_mca *mca = &adev->mca;

	mca->mca_funcs = mca_funcs;
}

int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

	if (mca_funcs && mca_funcs->mca_set_debug_mode)
		return mca_funcs->mca_set_debug_mode(adev, enable);

	return -EOPNOTSUPP;
}

int amdgpu_mca_smu_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count)
{
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

	if (!count)
		return -EINVAL;

	if (mca_funcs && mca_funcs->mca_get_valid_mca_count)
		return mca_funcs->mca_get_valid_mca_count(adev, type, count);

	return -EOPNOTSUPP;
}

int amdgpu_mca_smu_get_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
				   enum amdgpu_mca_error_type type, uint32_t *count)
{
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

	if (!count)
		return -EINVAL;

	if (mca_funcs && mca_funcs->mca_get_error_count)
		return mca_funcs->mca_get_error_count(adev, blk, type, count);

	return -EOPNOTSUPP;
}

int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
				 int idx, struct mca_bank_entry *entry)
{
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
	int count;

	/* Check the callbacks before dereferencing mca_funcs for the bank limits. */
	if (!mca_funcs || !mca_funcs->mca_get_mca_entry)
		return -EOPNOTSUPP;

	switch (type) {
	case AMDGPU_MCA_ERROR_TYPE_UE:
		count = mca_funcs->max_ue_count;
		break;
	case AMDGPU_MCA_ERROR_TYPE_CE:
		count = mca_funcs->max_ce_count;
		break;
	default:
		return -EINVAL;
	}

	if (idx >= count)
		return -EINVAL;

	return mca_funcs->mca_get_mca_entry(adev, type, idx, entry);
}
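
/*
 * Illustrative sketch (hypothetical names, not code from this driver): an
 * SMU implementation registers its MCA callbacks once during init, after
 * which the amdgpu_mca_smu_*() wrappers above dispatch through them.  Only
 * the members used in this file are shown, and the bank limits are
 * placeholders; the real values are ASIC specific:
 *
 *	static const struct amdgpu_mca_smu_funcs xxx_mca_smu_funcs = {
 *		.max_ue_count            = 12,
 *		.max_ce_count            = 12,
 *		.mca_set_debug_mode      = xxx_mca_set_debug_mode,
 *		.mca_get_valid_mca_count = xxx_mca_get_valid_mca_count,
 *		.mca_get_error_count     = xxx_mca_get_error_count,
 *		.mca_get_mca_entry       = xxx_mca_get_mca_entry,
 *	};
 *
 *	amdgpu_mca_smu_init_funcs(adev, &xxx_mca_smu_funcs);
 */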

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_mca_smu_debug_mode_set(void *data, u64 val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	int ret;

	ret = amdgpu_mca_smu_set_debug_mode(adev, val ? true : false);
	if (ret)
		return ret;

	dev_info(adev->dev, "amdgpu set smu mca debug mode %s success\n", val ? "on" : "off");

	return 0;
}

static void mca_dump_entry(struct seq_file *m, struct mca_bank_entry *entry)
{
	int i, idx = entry->idx;

	seq_printf(m, "mca entry[%d].type: %s\n", idx, entry->type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE");
	seq_printf(m, "mca entry[%d].ip: %d\n", idx, entry->ip);
	seq_printf(m, "mca entry[%d].info: socketid:%d aid:%d hwid:0x%03x mcatype:0x%04x\n",
		   idx, entry->info.socket_id, entry->info.aid, entry->info.hwid, entry->info.mcatype);

	for (i = 0; i < ARRAY_SIZE(entry->regs); i++)
		seq_printf(m, "mca entry[%d].regs[%d]: 0x%016llx\n", idx, i, entry->regs[i]);
}

static int mca_dump_show(struct seq_file *m, enum amdgpu_mca_error_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	struct mca_bank_entry *entry;
	uint32_t count = 0;
	int i, ret;

	ret = amdgpu_mca_smu_get_valid_mca_count(adev, type, &count);
	if (ret)
		return ret;

	seq_printf(m, "amdgpu smu %s valid mca count: %d\n",
		   type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE", count);

	if (!count)
		return 0;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		memset(entry, 0, sizeof(*entry));

		ret = amdgpu_mca_smu_get_mca_entry(adev, type, i, entry);
		if (ret)
			goto err_free_entry;

		mca_dump_entry(m, entry);
	}

err_free_entry:
	kfree(entry);

	return ret;
}

static int mca_dump_ce_show(struct seq_file *m, void *unused)
{
	return mca_dump_show(m, AMDGPU_MCA_ERROR_TYPE_CE);
}

static int mca_dump_ce_open(struct inode *inode, struct file *file)
{
	return single_open(file, mca_dump_ce_show, inode->i_private);
}

static const struct file_operations mca_ce_dump_debug_fops = {
	.owner = THIS_MODULE,
	.open = mca_dump_ce_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int mca_dump_ue_show(struct seq_file *m, void *unused)
{
	return mca_dump_show(m, AMDGPU_MCA_ERROR_TYPE_UE);
}

static int mca_dump_ue_open(struct inode *inode, struct file *file)
{
	return single_open(file, mca_dump_ue_show, inode->i_private);
}

static const struct file_operations mca_ue_dump_debug_fops = {
	.owner = THIS_MODULE,
	.open = mca_dump_ue_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

DEFINE_DEBUGFS_ATTRIBUTE(mca_debug_mode_fops, NULL, amdgpu_mca_smu_debug_mode_set, "%llu\n");
#endif

void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root)
{
#if defined(CONFIG_DEBUG_FS)
	if (!root || adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 6))
		return;

	debugfs_create_file("mca_debug_mode", 0200, root, adev, &mca_debug_mode_fops);
	debugfs_create_file("mca_ue_dump", 0400, root, adev, &mca_ue_dump_debug_fops);
	debugfs_create_file("mca_ce_dump", 0400, root, adev, &mca_ce_dump_debug_fops);
#endif
}
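
/*
 * Debugfs usage sketch (illustrative; the files are created under whatever
 * directory the caller passes as @root, and only when MP1 is IP v13.0.6):
 *
 *	echo 1 > .../mca_debug_mode	# write-only: enable SMU MCA debug mode
 *	echo 0 > .../mca_debug_mode	# ...or disable it again
 *	cat .../mca_ue_dump		# read-only: dump the valid UE bank entries
 *	cat .../mca_ce_dump		# read-only: dump the valid CE bank entries
 */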