/* drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c */
/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ras.h"
#include "amdgpu.h"
#include "amdgpu_mca.h"

#include "umc/umc_6_7_0_offset.h"
#include "umc/umc_6_7_0_sh_mask.h"

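/*
 * A bank counts as a deferred error when the UMC RAS code confirms the
 * deferred-error encoding in MC_STATUS; without a checker callback we
 * conservatively report false.
 */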
static bool amdgpu_mca_is_deferred_error(struct amdgpu_device *adev,
					uint64_t mc_status)
{
	if (adev->umc.ras->check_ecc_err_status)
		return adev->umc.ras->check_ecc_err_status(adev,
				AMDGPU_MCA_ERROR_TYPE_DE, &mc_status);

	return false;
}

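/**
 * amdgpu_mca_query_correctable_error_count - count a correctable error
 * @adev: amdgpu device
 * @mc_status_addr: PCIe-indirect address of the MC_STATUS register
 * @error_count: accumulator that is bumped when a CE is latched
 *
 * Bumps @error_count by one when the bank is valid (Val) and flags a
 * correctable ECC error (CECC).
 */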
void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
					      uint64_t mc_status_addr,
					      unsigned long *error_count)
{
	uint64_t mc_status = RREG64_PCIE(mc_status_addr);

	if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
		*error_count += 1;
}

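/**
 * amdgpu_mca_query_uncorrectable_error_count - count an uncorrectable error
 * @adev: amdgpu device
 * @mc_status_addr: PCIe-indirect address of the MC_STATUS register
 * @error_count: accumulator that is bumped when a UE is latched
 *
 * Bumps @error_count by one when the bank is valid (Val) and any of the
 * Deferred, UECC, PCC, UC or TCC bits is set.
 */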
void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
						uint64_t mc_status_addr,
						unsigned long *error_count)
{
	uint64_t mc_status = RREG64_PCIE(mc_status_addr);

	if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
	    (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
		*error_count += 1;
}

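/**
 * amdgpu_mca_reset_error_count - clear a latched MCA bank
 * @adev: amdgpu device
 * @mc_status_addr: PCIe-indirect address of the MC_STATUS register
 *
 * Writes zero to MC_STATUS so the next query starts from a clean bank.
 */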
void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
				  uint64_t mc_status_addr)
{
	WREG64_PCIE(mc_status_addr, 0x0ULL);
}

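/**
 * amdgpu_mca_query_ras_error_count - fold one bank into RAS statistics
 * @adev: amdgpu device
 * @mc_status_addr: PCIe-indirect address of the MC_STATUS register
 * @ras_error_status: struct ras_err_data to accumulate into
 *
 * Accumulates the CE and UE counts for the bank into @ras_error_status,
 * then clears the bank so the same error is not counted twice.
 */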
void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
				      uint64_t mc_status_addr,
				      void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	amdgpu_mca_query_correctable_error_count(adev, mc_status_addr, &(err_data->ce_count));
	amdgpu_mca_query_uncorrectable_error_count(adev, mc_status_addr, &(err_data->ue_count));

	amdgpu_mca_reset_error_count(adev, mc_status_addr);
}

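/*
 * The mp0/mp1/mpio sw_init helpers below all follow the same pattern:
 * register the IP's RAS block with the RAS core, give it a stable name,
 * mark it as a multi-uncorrectable error source and publish the common
 * RAS interface through adev->mca.<ip>.ras_if.
 */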
int amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_mca_ras_block *ras;

	if (!adev->mca.mp0.ras)
		return 0;

	ras = adev->mca.mp0.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register mca.mp0 ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "mca.mp0");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->mca.mp0.ras_if = &ras->ras_block.ras_comm;

	return 0;
}

int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_mca_ras_block *ras;

	if (!adev->mca.mp1.ras)
		return 0;

	ras = adev->mca.mp1.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register mca.mp1 ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "mca.mp1");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->mca.mp1.ras_if = &ras->ras_block.ras_comm;

	return 0;
}

int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_mca_ras_block *ras;

	if (!adev->mca.mpio.ras)
		return 0;

	ras = adev->mca.mpio.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register mca.mpio ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "mca.mpio");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->mca.mpio.ras_if = &ras->ras_block.ras_comm;

	return 0;
}

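/*
 * mca_bank_set is a simple list-backed container for MCA bank entries.
 * Nodes are allocated with kvzalloc() and each entry is copied by value,
 * so a set can outlive the SMU buffers the entries were read from.
 */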
static void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set)
{
	if (!mca_set)
		return;

	memset(mca_set, 0, sizeof(*mca_set));
	INIT_LIST_HEAD(&mca_set->list);
}

static int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_entry *entry)
{
	struct mca_bank_node *node;

	if (!entry)
		return -EINVAL;

	node = kvzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	memcpy(&node->entry, entry, sizeof(*entry));

	INIT_LIST_HEAD(&node->node);
	list_add_tail(&node->node, &mca_set->list);

	mca_set->nr_entries++;

	return 0;
}

static int amdgpu_mca_bank_set_merge(struct mca_bank_set *mca_set, struct mca_bank_set *new)
{
	struct mca_bank_node *node;

	list_for_each_entry(node, &new->list, node)
		amdgpu_mca_bank_set_add_entry(mca_set, &node->entry);

	return 0;
}

static int amdgpu_mca_bank_set_remove_node(struct mca_bank_set *mca_set, struct mca_bank_node *node)
{
	if (!node)
		return -EINVAL;

	list_del(&node->node);
	kvfree(node);

	mca_set->nr_entries--;

	return 0;
}

static void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set)
{
	struct mca_bank_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &mca_set->list, node) {
		list_del(&node->node);
		kvfree(node);
	}
}

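/**
 * amdgpu_mca_smu_init_funcs - hook up the SMU-specific MCA backend
 * @adev: amdgpu device
 * @mca_funcs: callback table implemented by the SMU IP code
 *
 * All bank queries in this file are routed through this table; until it
 * is installed the query entry points below return -EOPNOTSUPP.
 *
 * A minimal sketch of an SMU driver installing the backend. Only the
 * field names are taken from the uses in this file; the callback names
 * and the counts are hypothetical, not the real SMU implementation:
 *
 *	static const struct amdgpu_mca_smu_funcs smu_v13_mca_funcs = {
 *		.max_ue_count = 12,
 *		.max_ce_count = 12,
 *		.mca_set_debug_mode = smu_v13_mca_set_debug_mode,
 *		.mca_get_valid_mca_count = smu_v13_get_valid_count,
 *		.mca_get_mca_entry = smu_v13_get_entry,
 *		.mca_parse_mca_error_count = smu_v13_parse_count,
 *	};
 *
 *	amdgpu_mca_smu_init_funcs(adev, &smu_v13_mca_funcs);
 */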
void amdgpu_mca_smu_init_funcs(struct amdgpu_device *adev, const struct amdgpu_mca_smu_funcs *mca_funcs)
{
	struct amdgpu_mca *mca = &adev->mca;

	mca->mca_funcs = mca_funcs;
}

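/*
 * amdgpu_mca_init() prepares one bank cache per error type: the UE
 * update flag is cleared and every cache gets a mutex plus an empty
 * bank set.
 */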
int amdgpu_mca_init(struct amdgpu_device *adev)
{
	struct amdgpu_mca *mca = &adev->mca;
	struct mca_bank_cache *mca_cache;
	int i;

	atomic_set(&mca->ue_update_flag, 0);

	for (i = 0; i < ARRAY_SIZE(mca->mca_caches); i++) {
		mca_cache = &mca->mca_caches[i];
		mutex_init(&mca_cache->lock);
		amdgpu_mca_bank_set_init(&mca_cache->mca_set);
	}

	return 0;
}

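/*
 * amdgpu_mca_fini() is the inverse: it drops any cached bank nodes and
 * destroys the per-cache mutexes. amdgpu_mca_reset() simply chains the
 * two to start over with empty caches.
 */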
void amdgpu_mca_fini(struct amdgpu_device *adev)
{
	struct amdgpu_mca *mca = &adev->mca;
	struct mca_bank_cache *mca_cache;
	int i;

	atomic_set(&mca->ue_update_flag, 0);

	for (i = 0; i < ARRAY_SIZE(mca->mca_caches); i++) {
		mca_cache = &mca->mca_caches[i];
		amdgpu_mca_bank_set_release(&mca_cache->mca_set);
		mutex_destroy(&mca_cache->lock);
	}
}

int amdgpu_mca_reset(struct amdgpu_device *adev)
{
	amdgpu_mca_fini(adev);

	return amdgpu_mca_init(adev);
}

int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

	if (mca_funcs && mca_funcs->mca_set_debug_mode)
		return mca_funcs->mca_set_debug_mode(adev, enable);

	return -EOPNOTSUPP;
}

static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry,
					 struct ras_query_context *qctx)
{
	u64 event_id = qctx->event_id;

	RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n");
	RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].STATUS=0x%016llx\n",
		      idx, entry->regs[MCA_REG_IDX_STATUS]);
	RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].ADDR=0x%016llx\n",
		      idx, entry->regs[MCA_REG_IDX_ADDR]);
	RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].MISC0=0x%016llx\n",
		      idx, entry->regs[MCA_REG_IDX_MISC0]);
	RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].IPID=0x%016llx\n",
		      idx, entry->regs[MCA_REG_IDX_IPID]);
	RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].SYND=0x%016llx\n",
		      idx, entry->regs[MCA_REG_IDX_SYND]);
}

static int amdgpu_mca_smu_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count)
{
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

	if (!count)
		return -EINVAL;

	if (mca_funcs && mca_funcs->mca_get_valid_mca_count)
		return mca_funcs->mca_get_valid_mca_count(adev, type, count);

	return -EOPNOTSUPP;
}

static int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
					int idx, struct mca_bank_entry *entry)
{
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
	int count;

	if (!mca_funcs || !mca_funcs->mca_get_mca_entry)
		return -EOPNOTSUPP;

	switch (type) {
	case AMDGPU_MCA_ERROR_TYPE_UE:
		count = mca_funcs->max_ue_count;
		break;
	case AMDGPU_MCA_ERROR_TYPE_CE:
		count = mca_funcs->max_ce_count;
		break;
	default:
		return -EINVAL;
	}

	if (idx >= count)
		return -EINVAL;

	return mca_funcs->mca_get_mca_entry(adev, type, idx, entry);
}

static bool amdgpu_mca_bank_should_update(struct amdgpu_device *adev, enum amdgpu_mca_error_type type)
{
	struct amdgpu_mca *mca = &adev->mca;
	bool ret = true;

	/*
	 * Because the UE valid MCA count is only cleared after reset,
	 * the ACA banks are read only once during the GPU recovery stage
	 * to avoid counting the same errors repeatedly.
	 */
	if (type == AMDGPU_MCA_ERROR_TYPE_UE) {
		if (amdgpu_ras_intr_triggered())
			ret = atomic_cmpxchg(&mca->ue_update_flag, 0, 1) == 0;
		else
			atomic_set(&mca->ue_update_flag, 0);
	}

	return ret;
}

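/*
 * amdgpu_mca_smu_get_mca_set() snapshots the currently valid banks of
 * the given type: it asks the SMU for the valid bank count, copies each
 * bank into @mca_set and mirrors it into the RAS event log.
 */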
static int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set,
				      struct ras_query_context *qctx)
{
	struct mca_bank_entry entry;
	uint32_t count = 0, i;
	int ret;

	if (!mca_set)
		return -EINVAL;

	if (!amdgpu_mca_bank_should_update(adev, type))
		return 0;

	ret = amdgpu_mca_smu_get_valid_mca_count(adev, type, &count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		memset(&entry, 0, sizeof(entry));
		ret = amdgpu_mca_smu_get_mca_entry(adev, type, i, &entry);
		if (ret)
			return ret;

		amdgpu_mca_bank_set_add_entry(mca_set, &entry);

		amdgpu_mca_smu_mca_bank_dump(adev, i, &entry, qctx);
	}

	return 0;
}

static int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
						enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
{
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

	if (!count || !entry)
		return -EINVAL;

	if (!mca_funcs || !mca_funcs->mca_parse_mca_error_count)
		return -EOPNOTSUPP;

	return mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, count);
}

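/*
 * amdgpu_mca_dispatch_mca_set() turns raw banks into RAS statistics for
 * one block: each bank the backend can decode is attributed to its
 * socket/die (plus the raw address registers for UMC), booked as UE, CE
 * or, when the deferred encoding is set, DE, and then dropped from the
 * set. Banks the backend cannot decode yet (zero count or -EOPNOTSUPP)
 * are left in the set so the caller can cache them for a later pass.
 */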
static int amdgpu_mca_dispatch_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
				       struct mca_bank_set *mca_set, struct ras_err_data *err_data)
{
	struct ras_err_addr err_addr;
	struct amdgpu_smuio_mcm_config_info mcm_info;
	struct mca_bank_node *node, *tmp;
	struct mca_bank_entry *entry;
	uint32_t count;
	int ret;

	if (!mca_set)
		return -EINVAL;

	if (!mca_set->nr_entries)
		return 0;

	list_for_each_entry_safe(node, tmp, &mca_set->list, node) {
		entry = &node->entry;

		count = 0;
		ret = amdgpu_mca_smu_parse_mca_error_count(adev, blk, type, entry, &count);
		if (ret && ret != -EOPNOTSUPP)
			return ret;

		if (!count)
			continue;

		memset(&mcm_info, 0, sizeof(mcm_info));
		memset(&err_addr, 0, sizeof(err_addr));

		mcm_info.socket_id = entry->info.socket_id;
		mcm_info.die_id = entry->info.aid;

		if (blk == AMDGPU_RAS_BLOCK__UMC) {
			err_addr.err_status = entry->regs[MCA_REG_IDX_STATUS];
			err_addr.err_ipid = entry->regs[MCA_REG_IDX_IPID];
			err_addr.err_addr = entry->regs[MCA_REG_IDX_ADDR];
		}

		if (type == AMDGPU_MCA_ERROR_TYPE_UE) {
			amdgpu_ras_error_statistic_ue_count(err_data,
							    &mcm_info, &err_addr, (uint64_t)count);
		} else {
			if (amdgpu_mca_is_deferred_error(adev, entry->regs[MCA_REG_IDX_STATUS]))
				amdgpu_ras_error_statistic_de_count(err_data,
								    &mcm_info, &err_addr, (uint64_t)count);
			else
				amdgpu_ras_error_statistic_ce_count(err_data,
								    &mcm_info, &err_addr, (uint64_t)count);
		}

		amdgpu_mca_bank_set_remove_node(mca_set, node);
	}

	return 0;
}

static int amdgpu_mca_add_mca_set_to_cache(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_set *new)
{
	struct mca_bank_cache *mca_cache = &adev->mca.mca_caches[type];
	int ret;

	mutex_lock(&mca_cache->lock);
	ret = amdgpu_mca_bank_set_merge(&mca_cache->mca_set, new);
	mutex_unlock(&mca_cache->lock);

	return ret;
}

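/**
 * amdgpu_mca_smu_log_ras_error - query and account MCA errors for a block
 * @adev: amdgpu device
 * @blk: RAS block the counts are attributed to
 * @type: UE or CE bank type to query
 * @err_data: error statistics to fill
 * @qctx: RAS query context used for event logging
 *
 * Snapshots the fresh banks from the SMU, dispatches them into
 * @err_data, parks any bank that could not be decoded in the per-type
 * cache and finally re-dispatches the cached banks, which may decode
 * for a different block than the one they were first queried for.
 */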
int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
				 struct ras_err_data *err_data, struct ras_query_context *qctx)
{
	struct mca_bank_set mca_set;
	struct mca_bank_cache *mca_cache = &adev->mca.mca_caches[type];
	int ret;

	amdgpu_mca_bank_set_init(&mca_set);

	ret = amdgpu_mca_smu_get_mca_set(adev, type, &mca_set, qctx);
	if (ret)
		goto out_mca_release;

	ret = amdgpu_mca_dispatch_mca_set(adev, blk, type, &mca_set, err_data);
	if (ret)
		goto out_mca_release;

	/* add the remaining mca banks to the mca cache */
	if (mca_set.nr_entries) {
		ret = amdgpu_mca_add_mca_set_to_cache(adev, type, &mca_set);
		if (ret)
			goto out_mca_release;
	}

	/* dispatch the mca set again if the mca cache has valid data */
	mutex_lock(&mca_cache->lock);
	if (mca_cache->mca_set.nr_entries)
		ret = amdgpu_mca_dispatch_mca_set(adev, blk, type, &mca_cache->mca_set, err_data);
	mutex_unlock(&mca_cache->lock);

out_mca_release:
	amdgpu_mca_bank_set_release(&mca_set);

	return ret;
}

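/*
 * Debugfs interface: "mca_debug_mode" is a write-only toggle that is
 * forwarded to amdgpu_ras_set_mca_debug_mode(), while "mca_ue_dump" and
 * "mca_ce_dump" print the currently valid banks of each type (and park
 * them in the bank cache so a later RAS query still sees them).
 */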
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_mca_smu_debug_mode_set(void *data, u64 val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	int ret;

	ret = amdgpu_ras_set_mca_debug_mode(adev, val ? true : false);
	if (ret)
		return ret;

	dev_info(adev->dev, "smu mca debug mode set to %s\n", val ? "on" : "off");

	return 0;
}

static void mca_dump_entry(struct seq_file *m, struct mca_bank_entry *entry)
{
	int i, idx = entry->idx;
	int reg_idx_array[] = {
		MCA_REG_IDX_STATUS,
		MCA_REG_IDX_ADDR,
		MCA_REG_IDX_MISC0,
		MCA_REG_IDX_IPID,
		MCA_REG_IDX_SYND,
	};

	seq_printf(m, "mca entry[%d].type: %s\n", idx, entry->type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE");
	seq_printf(m, "mca entry[%d].ip: %d\n", idx, entry->ip);
	seq_printf(m, "mca entry[%d].info: socketid:%d aid:%d hwid:0x%03x mcatype:0x%04x\n",
		   idx, entry->info.socket_id, entry->info.aid, entry->info.hwid, entry->info.mcatype);

	for (i = 0; i < ARRAY_SIZE(reg_idx_array); i++)
		seq_printf(m, "mca entry[%d].regs[%d]: 0x%016llx\n", idx, reg_idx_array[i], entry->regs[reg_idx_array[i]]);
}

static int mca_dump_show(struct seq_file *m, enum amdgpu_mca_error_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	struct mca_bank_node *node;
	struct mca_bank_set mca_set;
	struct ras_query_context qctx;
	int ret;

	amdgpu_mca_bank_set_init(&mca_set);

	qctx.event_id = 0ULL;
	ret = amdgpu_mca_smu_get_mca_set(adev, type, &mca_set, &qctx);
	if (ret)
		goto err_free_mca_set;

	seq_printf(m, "amdgpu smu %s valid mca count: %d\n",
		   type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE", mca_set.nr_entries);

	if (!mca_set.nr_entries)
		goto err_free_mca_set;

	list_for_each_entry(node, &mca_set.list, node)
		mca_dump_entry(m, &node->entry);

	/* add the mca banks to the mca bank cache */
	ret = amdgpu_mca_add_mca_set_to_cache(adev, type, &mca_set);

err_free_mca_set:
	amdgpu_mca_bank_set_release(&mca_set);

	return ret;
}

static int mca_dump_ce_show(struct seq_file *m, void *unused)
{
	return mca_dump_show(m, AMDGPU_MCA_ERROR_TYPE_CE);
}

static int mca_dump_ce_open(struct inode *inode, struct file *file)
{
	return single_open(file, mca_dump_ce_show, inode->i_private);
}

static const struct file_operations mca_ce_dump_debug_fops = {
	.owner = THIS_MODULE,
	.open = mca_dump_ce_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int mca_dump_ue_show(struct seq_file *m, void *unused)
{
	return mca_dump_show(m, AMDGPU_MCA_ERROR_TYPE_UE);
}

static int mca_dump_ue_open(struct inode *inode, struct file *file)
{
	return single_open(file, mca_dump_ue_show, inode->i_private);
}

static const struct file_operations mca_ue_dump_debug_fops = {
	.owner = THIS_MODULE,
	.open = mca_dump_ue_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

DEFINE_DEBUGFS_ATTRIBUTE(mca_debug_mode_fops, NULL, amdgpu_mca_smu_debug_mode_set, "%llu\n");
#endif

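/*
 * The files are only registered for SMU v13.0.6 and v13.0.14 based
 * parts, the only MP1 versions with an MCA backend here. With debugfs
 * mounted in the usual place they typically appear under
 * /sys/kernel/debug/dri/<card>/ras/ next to the other RAS controls,
 * though the exact location depends on where the caller parked @root.
 */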
void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root)
{
#if defined(CONFIG_DEBUG_FS)
	if (!root ||
	    (amdgpu_ip_version(adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 6) &&
	     amdgpu_ip_version(adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 14)))
		return;

	debugfs_create_file("mca_debug_mode", 0200, root, adev, &mca_debug_mode_fops);
	debugfs_create_file("mca_ue_dump", 0400, root, adev, &mca_ue_dump_debug_fops);
	debugfs_create_file("mca_ce_dump", 0400, root, adev, &mca_ce_dump_debug_fops);
#endif
}