/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ras.h"
#include "amdgpu.h"
#include "amdgpu_mca.h"

#include "umc/umc_6_7_0_offset.h"
#include "umc/umc_6_7_0_sh_mask.h"

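/**
 * amdgpu_mca_query_correctable_error_count - tally a correctable UMC error
 * @adev: amdgpu device handle
 * @mc_status_addr: PCIe-indirect address of the UMC MCA status register
 * @error_count: running counter, incremented by one if a CE is latched
 *
 * Read MCA_UMC_UMC0_MCUMC_STATUST0 through the PCIe indirection and count
 * the bank if it is valid (Val) and reports a correctable ECC error (CECC).
 */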
void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
					      uint64_t mc_status_addr,
					      unsigned long *error_count)
{
	uint64_t mc_status = RREG64_PCIE(mc_status_addr);

	if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
		*error_count += 1;
}

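/**
 * amdgpu_mca_query_uncorrectable_error_count - tally an uncorrectable UMC error
 * @adev: amdgpu device handle
 * @mc_status_addr: PCIe-indirect address of the UMC MCA status register
 * @error_count: running counter, incremented by one if a UE is latched
 *
 * A valid bank counts as uncorrectable if any of the Deferred, UECC, PCC,
 * UC or TCC status bits is set.
 */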
void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
						uint64_t mc_status_addr,
						unsigned long *error_count)
{
	uint64_t mc_status = RREG64_PCIE(mc_status_addr);

	if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
	    (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
		*error_count += 1;
}

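/**
 * amdgpu_mca_reset_error_count - clear a latched MCA status register
 * @adev: amdgpu device handle
 * @mc_status_addr: PCIe-indirect address of the UMC MCA status register
 *
 * Write zero to the status register so the bank can latch new errors.
 */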
void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
				  uint64_t mc_status_addr)
{
	WREG64_PCIE(mc_status_addr, 0x0ULL);
}

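/**
 * amdgpu_mca_query_ras_error_count - query and clear CE/UE counts for one bank
 * @adev: amdgpu device handle
 * @mc_status_addr: PCIe-indirect address of the UMC MCA status register
 * @ras_error_status: struct ras_err_data whose ce_count/ue_count are updated
 *
 * Accumulate both error classes from the status register, then reset it so
 * the next query starts from a clean state.
 */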
void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
				      uint64_t mc_status_addr,
				      void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	amdgpu_mca_query_correctable_error_count(adev, mc_status_addr, &(err_data->ce_count));
	amdgpu_mca_query_uncorrectable_error_count(adev, mc_status_addr, &(err_data->ue_count));

	amdgpu_mca_reset_error_count(adev, mc_status_addr);
}

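/**
 * amdgpu_mca_mp0_ras_sw_init - register the MP0 MCA RAS block
 * @adev: amdgpu device handle
 *
 * A no-op returning 0 when IP setup did not populate adev->mca.mp0.ras.
 * On success the common RAS interface is named "mca.mp0", typed
 * multi-uncorrectable and published via adev->mca.mp0.ras_if.
 */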
int amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_mca_ras_block *ras;

	if (!adev->mca.mp0.ras)
		return 0;

	ras = adev->mca.mp0.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register mca.mp0 ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "mca.mp0");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->mca.mp0.ras_if = &ras->ras_block.ras_comm;

	return 0;
}

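/**
 * amdgpu_mca_mp1_ras_sw_init - register the MP1 MCA RAS block
 * @adev: amdgpu device handle
 *
 * Same as the MP0 variant above, operating on adev->mca.mp1.
 */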
int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_mca_ras_block *ras;

	if (!adev->mca.mp1.ras)
		return 0;

	ras = adev->mca.mp1.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register mca.mp1 ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "mca.mp1");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->mca.mp1.ras_if = &ras->ras_block.ras_comm;

	return 0;
}

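/**
 * amdgpu_mca_mpio_ras_sw_init - register the MPIO MCA RAS block
 * @adev: amdgpu device handle
 *
 * Same as the MP0 variant above, operating on adev->mca.mpio.
 */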
int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_mca_ras_block *ras;

	if (!adev->mca.mpio.ras)
		return 0;

	ras = adev->mca.mpio.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register mca.mpio ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "mca.mpio");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->mca.mpio.ras_if = &ras->ras_block.ras_comm;

	return 0;
}

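/**
 * amdgpu_mca_bank_set_init - prepare an empty, list-backed bank set
 * @mca_set: set to initialize; a NULL pointer is silently ignored
 *
 * The caller owns the set's storage. The expected usage pattern,
 * mirroring amdgpu_mca_smu_log_ras_error() below, is:
 *
 *	struct mca_bank_set set;
 *
 *	amdgpu_mca_bank_set_init(&set);
 *	ret = amdgpu_mca_smu_get_mca_set(adev, blk, type, &set);
 *	(iterate set.list here)
 *	amdgpu_mca_bank_set_release(&set);
 */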
void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set)
{
	if (!mca_set)
		return;

	memset(mca_set, 0, sizeof(*mca_set));
	INIT_LIST_HEAD(&mca_set->list);
}

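/**
 * amdgpu_mca_bank_set_add_entry - append a copy of @entry to @mca_set
 * @mca_set: destination set
 * @entry: bank entry to copy; the set keeps its own kvzalloc()ed node
 *
 * Return: 0 on success, -EINVAL for a NULL @entry, -ENOMEM if the node
 * allocation fails.
 */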
int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_entry *entry)
{
	struct mca_bank_node *node;

	if (!entry)
		return -EINVAL;

	node = kvzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	memcpy(&node->entry, entry, sizeof(*entry));

	INIT_LIST_HEAD(&node->node);
	list_add_tail(&node->node, &mca_set->list);

	mca_set->nr_entries++;

	return 0;
}

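/**
 * amdgpu_mca_bank_set_release - free every node queued on @mca_set
 * @mca_set: set to drain; calling this on an already-empty set is harmless
 */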
void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set)
{
	struct mca_bank_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &mca_set->list, node) {
		list_del(&node->node);
		kvfree(node);
	}
}

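/**
 * amdgpu_mca_smu_init_funcs - install the SMU-specific MCA callbacks
 * @adev: amdgpu device handle
 * @mca_funcs: callback table provided by the SMU IP driver
 *
 * Every amdgpu_mca_smu_*() helper below routes through this table and
 * fails with -EOPNOTSUPP while it is unset. A hypothetical backend
 * (names and counts illustrative only) would wire it up like:
 *
 *	static const struct amdgpu_mca_smu_funcs my_mca_funcs = {
 *		.max_ue_count = 12,
 *		.max_ce_count = 12,
 *		.mca_set_debug_mode = my_set_debug_mode,
 *		.mca_get_valid_mca_count = my_get_valid_mca_count,
 *		.mca_get_mca_entry = my_get_mca_entry,
 *		.mca_get_ras_mca_set = my_get_ras_mca_set,
 *		.mca_parse_mca_error_count = my_parse_mca_error_count,
 *	};
 *
 *	amdgpu_mca_smu_init_funcs(adev, &my_mca_funcs);
 */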
void amdgpu_mca_smu_init_funcs(struct amdgpu_device *adev, const struct amdgpu_mca_smu_funcs *mca_funcs)
{
	struct amdgpu_mca *mca = &adev->mca;

	mca->mca_funcs = mca_funcs;
}

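/**
 * amdgpu_mca_smu_set_debug_mode - toggle the SMU's MCA debug mode
 * @adev: amdgpu device handle
 * @enable: true to enter debug mode, false to leave it
 *
 * Return: the SMU callback's result, or -EOPNOTSUPP if the callback
 * table does not implement mca_set_debug_mode.
 */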
int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

	if (mca_funcs && mca_funcs->mca_set_debug_mode)
		return mca_funcs->mca_set_debug_mode(adev, enable);

	return -EOPNOTSUPP;
}

static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry)
{
	dev_info(adev->dev, "[Hardware error] Accelerator Check Architecture events logged\n");
	dev_info(adev->dev, "[Hardware error] aca entry[%02d].STATUS=0x%016llx\n",
		 idx, entry->regs[MCA_REG_IDX_STATUS]);
	dev_info(adev->dev, "[Hardware error] aca entry[%02d].ADDR=0x%016llx\n",
		 idx, entry->regs[MCA_REG_IDX_ADDR]);
	dev_info(adev->dev, "[Hardware error] aca entry[%02d].MISC0=0x%016llx\n",
		 idx, entry->regs[MCA_REG_IDX_MISC0]);
	dev_info(adev->dev, "[Hardware error] aca entry[%02d].IPID=0x%016llx\n",
		 idx, entry->regs[MCA_REG_IDX_IPID]);
	dev_info(adev->dev, "[Hardware error] aca entry[%02d].SYND=0x%016llx\n",
		 idx, entry->regs[MCA_REG_IDX_SYND]);
}

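/**
 * amdgpu_mca_smu_log_ras_error - fold fresh MCA banks into RAS statistics
 * @adev: amdgpu device handle
 * @blk: RAS block the banks are attributed to
 * @type: AMDGPU_MCA_ERROR_TYPE_UE or AMDGPU_MCA_ERROR_TYPE_CE
 * @err_data: per-query RAS error accumulator
 *
 * Fetch the bank set for @blk/@type from the SMU, dump each bank to the
 * kernel log, parse its error count and credit it to the matching
 * socket/die in @err_data. For UMC banks the raw STATUS, IPID and ADDR
 * registers are forwarded as the error address.
 */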
int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data)
{
	struct amdgpu_smuio_mcm_config_info mcm_info;
	struct ras_err_addr err_addr = {0};
	struct mca_bank_set mca_set;
	struct mca_bank_node *node;
	struct mca_bank_entry *entry;
	uint32_t count;
	int ret, i = 0;

	amdgpu_mca_bank_set_init(&mca_set);

	ret = amdgpu_mca_smu_get_mca_set(adev, blk, type, &mca_set);
	if (ret)
		goto out_mca_release;

	list_for_each_entry(node, &mca_set.list, node) {
		entry = &node->entry;

		amdgpu_mca_smu_mca_bank_dump(adev, i++, entry);

		count = 0;
		ret = amdgpu_mca_smu_parse_mca_error_count(adev, blk, type, entry, &count);
		if (ret)
			goto out_mca_release;

		if (!count)
			continue;

		mcm_info.socket_id = entry->info.socket_id;
		mcm_info.die_id = entry->info.aid;

		if (blk == AMDGPU_RAS_BLOCK__UMC) {
			err_addr.err_status = entry->regs[MCA_REG_IDX_STATUS];
			err_addr.err_ipid = entry->regs[MCA_REG_IDX_IPID];
			err_addr.err_addr = entry->regs[MCA_REG_IDX_ADDR];
		}

		if (type == AMDGPU_MCA_ERROR_TYPE_UE)
			amdgpu_ras_error_statistic_ue_count(err_data,
				&mcm_info, &err_addr, (uint64_t)count);
		else
			amdgpu_ras_error_statistic_ce_count(err_data,
				&mcm_info, &err_addr, (uint64_t)count);
	}

out_mca_release:
	amdgpu_mca_bank_set_release(&mca_set);

	return ret;
}

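/**
 * amdgpu_mca_smu_get_valid_mca_count - count banks holding a valid error
 * @adev: amdgpu device handle
 * @type: AMDGPU_MCA_ERROR_TYPE_UE or AMDGPU_MCA_ERROR_TYPE_CE
 * @count: output bank count, written by the SMU callback
 */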
int amdgpu_mca_smu_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count)
{
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

	if (!count)
		return -EINVAL;

	if (mca_funcs && mca_funcs->mca_get_valid_mca_count)
		return mca_funcs->mca_get_valid_mca_count(adev, type, count);

	return -EOPNOTSUPP;
}

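/**
 * amdgpu_mca_smu_get_mca_set_error_count - sum the error counts of a bank set
 * @adev: amdgpu device handle
 * @blk: RAS block to query
 * @type: AMDGPU_MCA_ERROR_TYPE_UE or AMDGPU_MCA_ERROR_TYPE_CE
 * @total: output sum over every bank the SMU reports for @blk/@type
 *
 * Build a temporary bank set, let the SMU parse each entry and accumulate
 * the per-bank counts; the set is released before returning.
 */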
int amdgpu_mca_smu_get_mca_set_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
					    enum amdgpu_mca_error_type type, uint32_t *total)
{
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
	struct mca_bank_set mca_set;
	struct mca_bank_node *node;
	struct mca_bank_entry *entry;
	uint32_t count;
	int ret;

	if (!total)
		return -EINVAL;

	if (!mca_funcs)
		return -EOPNOTSUPP;

	/* Validate the callbacks actually invoked below. */
	if (!mca_funcs->mca_get_ras_mca_set || !mca_funcs->mca_parse_mca_error_count)
		return -EOPNOTSUPP;

	amdgpu_mca_bank_set_init(&mca_set);

	ret = mca_funcs->mca_get_ras_mca_set(adev, blk, type, &mca_set);
	if (ret)
		goto err_mca_set_release;

	*total = 0;
	list_for_each_entry(node, &mca_set.list, node) {
		entry = &node->entry;

		count = 0;
		ret = mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, &count);
		if (ret)
			goto err_mca_set_release;

		*total += count;
	}

err_mca_set_release:
	amdgpu_mca_bank_set_release(&mca_set);

	return ret;
}

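/**
 * amdgpu_mca_smu_parse_mca_error_count - decode one bank's error count
 * @adev: amdgpu device handle
 * @blk: RAS block the bank is matched against
 * @type: AMDGPU_MCA_ERROR_TYPE_UE or AMDGPU_MCA_ERROR_TYPE_CE
 * @entry: raw bank registers as fetched from the SMU
 * @count: output error count extracted from @entry
 */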
int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
					 enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
{
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

	if (!count || !entry)
		return -EINVAL;

	if (!mca_funcs || !mca_funcs->mca_parse_mca_error_count)
		return -EOPNOTSUPP;

	return mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, count);
}

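/**
 * amdgpu_mca_smu_get_mca_set - fetch all banks of a RAS block from the SMU
 * @adev: amdgpu device handle
 * @blk: RAS block to query
 * @type: AMDGPU_MCA_ERROR_TYPE_UE or AMDGPU_MCA_ERROR_TYPE_CE
 * @mca_set: initialized destination set; passing a non-empty set triggers
 *           a WARN, and new entries are appended after the existing ones
 */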
int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
			       enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set)
{
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

	if (!mca_set)
		return -EINVAL;

	if (!mca_funcs || !mca_funcs->mca_get_ras_mca_set)
		return -EOPNOTSUPP;

	WARN_ON(!list_empty(&mca_set->list));

	return mca_funcs->mca_get_ras_mca_set(adev, blk, type, mca_set);
}

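/**
 * amdgpu_mca_smu_get_mca_entry - read a single bank by index
 * @adev: amdgpu device handle
 * @type: selects the max_ue_count/max_ce_count bound used to validate @idx
 * @idx: bank index, rejected with -EINVAL when out of range
 * @entry: output bank registers
 */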
int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
				 int idx, struct mca_bank_entry *entry)
{
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
	int count;

	if (!mca_funcs || !mca_funcs->mca_get_mca_entry)
		return -EOPNOTSUPP;

	switch (type) {
	case AMDGPU_MCA_ERROR_TYPE_UE:
		count = mca_funcs->max_ue_count;
		break;
	case AMDGPU_MCA_ERROR_TYPE_CE:
		count = mca_funcs->max_ce_count;
		break;
	default:
		return -EINVAL;
	}

	if (idx >= count)
		return -EINVAL;

	return mca_funcs->mca_get_mca_entry(adev, type, idx, entry);
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_mca_smu_debug_mode_set(void *data, u64 val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	int ret;

	ret = amdgpu_ras_set_mca_debug_mode(adev, val ? true : false);
	if (ret)
		return ret;

	dev_info(adev->dev, "amdgpu smu mca debug mode set to %s\n", val ? "on" : "off");

	return 0;
}

static void mca_dump_entry(struct seq_file *m, struct mca_bank_entry *entry)
{
	int i, idx = entry->idx;
	int reg_idx_array[] = {
		MCA_REG_IDX_STATUS,
		MCA_REG_IDX_ADDR,
		MCA_REG_IDX_MISC0,
		MCA_REG_IDX_IPID,
		MCA_REG_IDX_SYND,
	};

	seq_printf(m, "mca entry[%d].type: %s\n", idx, entry->type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE");
	seq_printf(m, "mca entry[%d].ip: %d\n", idx, entry->ip);
	seq_printf(m, "mca entry[%d].info: socketid:%d aid:%d hwid:0x%03x mcatype:0x%04x\n",
		   idx, entry->info.socket_id, entry->info.aid, entry->info.hwid, entry->info.mcatype);

	for (i = 0; i < ARRAY_SIZE(reg_idx_array); i++)
		seq_printf(m, "mca entry[%d].regs[%d]: 0x%016llx\n", idx, reg_idx_array[i], entry->regs[reg_idx_array[i]]);
}

static int mca_dump_show(struct seq_file *m, enum amdgpu_mca_error_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	struct mca_bank_entry *entry;
	uint32_t count = 0;
	int i, ret;

	ret = amdgpu_mca_smu_get_valid_mca_count(adev, type, &count);
	if (ret)
		return ret;

	seq_printf(m, "amdgpu smu %s valid mca count: %d\n",
		   type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE", count);

	if (!count)
		return 0;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		memset(entry, 0, sizeof(*entry));

		ret = amdgpu_mca_smu_get_mca_entry(adev, type, i, entry);
		if (ret)
			goto err_free_entry;

		mca_dump_entry(m, entry);
	}

err_free_entry:
	kfree(entry);

	return ret;
}

static int mca_dump_ce_show(struct seq_file *m, void *unused)
{
	return mca_dump_show(m, AMDGPU_MCA_ERROR_TYPE_CE);
}

static int mca_dump_ce_open(struct inode *inode, struct file *file)
{
	return single_open(file, mca_dump_ce_show, inode->i_private);
}

static const struct file_operations mca_ce_dump_debug_fops = {
	.owner = THIS_MODULE,
	.open = mca_dump_ce_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int mca_dump_ue_show(struct seq_file *m, void *unused)
{
	return mca_dump_show(m, AMDGPU_MCA_ERROR_TYPE_UE);
}

static int mca_dump_ue_open(struct inode *inode, struct file *file)
{
	return single_open(file, mca_dump_ue_show, inode->i_private);
}

static const struct file_operations mca_ue_dump_debug_fops = {
	.owner = THIS_MODULE,
	.open = mca_dump_ue_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

DEFINE_DEBUGFS_ATTRIBUTE(mca_debug_mode_fops, NULL, amdgpu_mca_smu_debug_mode_set, "%llu\n");
#endif

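/**
 * amdgpu_mca_smu_debugfs_init - expose the MCA debugfs interface
 * @adev: amdgpu device handle
 * @root: amdgpu debugfs root dentry
 *
 * Only wired up when MP1 is v13.0.6. Creates three files under @root:
 * mca_debug_mode (write 0/1 to toggle debug mode), plus mca_ue_dump and
 * mca_ce_dump (read-only dumps of the valid UE/CE banks). With the usual
 * drm debugfs root these would typically be reached as, e.g.:
 *
 *	echo 1 > /sys/kernel/debug/dri/0/mca_debug_mode
 *	cat /sys/kernel/debug/dri/0/mca_ue_dump
 */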
void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root)
{
#if defined(CONFIG_DEBUG_FS)
	if (!root || amdgpu_ip_version(adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 6))
		return;

	debugfs_create_file("mca_debug_mode", 0200, root, adev, &mca_debug_mode_fops);
	debugfs_create_file("mca_ue_dump", 0400, root, adev, &mca_ue_dump_debug_fops);
	debugfs_create_file("mca_ce_dump", 0400, root, adev, &mca_ce_dump_debug_fops);
#endif
}