/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/sort.h>
#include "amdgpu.h"
#include "umc_v6_7.h"
#include "amdgpu_ras_mgr.h"

#define MAX_UMC_POISON_POLLING_TIME_SYNC   20  //ms

#define MAX_UMC_HASH_STRING_SIZE  256

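/*
 * Translate a UMC channel address into a retireable physical address via the
 * IP-version-specific helper. Only UMC v6.7.0 is handled here; other versions
 * report the translation as unsupported.
 */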
static int amdgpu_umc_convert_error_address(struct amdgpu_device *adev,
				    struct ras_err_data *err_data, uint64_t err_addr,
				    uint32_t ch_inst, uint32_t umc_inst)
{
	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(6, 7, 0):
		umc_v6_7_convert_error_address(adev,
				err_data, err_addr, ch_inst, umc_inst);
		break;
	default:
		dev_warn(adev->dev,
			 "UMC address to Physical address translation is not supported\n");
		return AMDGPU_RAS_FAIL;
	}

	return AMDGPU_RAS_SUCCESS;
}

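/*
 * Retire pages for an error address reported through the MCA notifier:
 * translate the UMC channel address, then record and persist the bad pages
 * when bad page retirement is enabled (amdgpu_bad_page_threshold != 0).
 */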
int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
			uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst)
{
	struct ras_err_data err_data;
	int ret;

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	err_data.err_addr =
		kcalloc(adev->umc.max_ras_err_cnt_per_query,
			sizeof(struct eeprom_table_record), GFP_KERNEL);
	if (!err_data.err_addr) {
		dev_warn(adev->dev,
			"Failed to alloc memory for umc error record in MCA notifier!\n");
		ret = AMDGPU_RAS_FAIL;
		goto out_fini_err_data;
	}

	err_data.err_addr_len = adev->umc.max_ras_err_cnt_per_query;

	/*
	 * Translate UMC channel address to Physical address
	 */
	ret = amdgpu_umc_convert_error_address(adev, &err_data, err_addr,
					ch_inst, umc_inst);
	if (ret)
		goto out_free_err_addr;

	if (amdgpu_bad_page_threshold != 0) {
		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
						err_data.err_addr_cnt, false);
		amdgpu_ras_save_bad_pages(adev, NULL);
	}

out_free_err_addr:
	kfree(err_data.err_addr);

out_fini_err_data:
	amdgpu_ras_error_data_fini(&err_data);

	return ret;
}

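/*
 * Query UMC error counts and error addresses, then hand any newly reported
 * addresses to the RAS core for bad page retirement. Depending on the query
 * mode, counts and addresses come from the SMU ECC table, from direct
 * register reads through the UMC ras_block hw_ops, or from records newly
 * appended to the bad page EEPROM table. Serialized by
 * con->page_retirement_lock.
 */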
void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
			void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct amdgpu_ras_eeprom_control *control = &con->eeprom_control;
	enum amdgpu_ras_error_query_mode error_query_mode;
	int ret = 0;
	unsigned long err_count;

	amdgpu_ras_get_error_query_mode(adev, &error_query_mode);

	err_data->err_addr =
		kcalloc(adev->umc.max_ras_err_cnt_per_query,
			sizeof(struct eeprom_table_record), GFP_KERNEL);

	/* still call query_ras_error_address to clear error status
	 * even NOMEM error is encountered
	 */
	if (!err_data->err_addr)
		dev_warn(adev->dev,
			"Failed to alloc memory for umc error address record!\n");
	else
		err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;

	mutex_lock(&con->page_retirement_lock);
	if (!amdgpu_ras_smu_eeprom_supported(adev)) {
		ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc));
		if (ret == -EOPNOTSUPP &&
		    error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
			if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
			    adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
				adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev,
								ras_error_status);

			if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
			    adev->umc.ras->ras_block.hw_ops->query_ras_error_address &&
			    adev->umc.max_ras_err_cnt_per_query) {
				err_data->err_addr =
					kcalloc(adev->umc.max_ras_err_cnt_per_query,
						sizeof(struct eeprom_table_record),
						GFP_KERNEL);

				/* still call query_ras_error_address to clear error status
				 * even NOMEM error is encountered
				 */
				if (!err_data->err_addr)
					dev_warn(adev->dev,
						"Failed to alloc memory for umc error address record!\n");
				else
					err_data->err_addr_len =
						adev->umc.max_ras_err_cnt_per_query;

				/* umc query_ras_error_address is also responsible for clearing
				 * error status
				 */
				adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev,
								ras_error_status);
			}
		} else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY ||
		    (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) {
			if (adev->umc.ras &&
			    adev->umc.ras->ecc_info_query_ras_error_count)
				adev->umc.ras->ecc_info_query_ras_error_count(adev,
								ras_error_status);

			if (adev->umc.ras &&
			    adev->umc.ras->ecc_info_query_ras_error_address &&
			    adev->umc.max_ras_err_cnt_per_query) {
				err_data->err_addr =
					kcalloc(adev->umc.max_ras_err_cnt_per_query,
						sizeof(struct eeprom_table_record),
						GFP_KERNEL);

				/* still call query_ras_error_address to clear error status
				 * even NOMEM error is encountered
				 */
				if (!err_data->err_addr)
					dev_warn(adev->dev,
						"Failed to alloc memory for umc error address record!\n");
				else
					err_data->err_addr_len =
						adev->umc.max_ras_err_cnt_per_query;

				/* umc query_ras_error_address is also responsible for clearing
				 * error status
				 */
				adev->umc.ras->ecc_info_query_ras_error_address(adev,
								ras_error_status);
			}
		}
	} else {
		if (!amdgpu_ras_eeprom_update_record_num(control)) {
			err_data->err_addr_cnt = err_data->de_count =
				control->ras_num_recs - control->ras_num_recs_old;
			amdgpu_ras_eeprom_read_idx(control, err_data->err_addr,
				control->ras_num_recs_old, err_data->de_count);
		}
	}

	/* only uncorrectable error needs gpu reset */
	if (err_data->ue_count || err_data->de_count) {
		err_count = err_data->ue_count + err_data->de_count;
		if ((amdgpu_bad_page_threshold != 0) &&
			err_data->err_addr_cnt) {
			amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
				err_data->err_addr_cnt, amdgpu_ras_smu_eeprom_supported(adev));
			amdgpu_ras_save_bad_pages(adev, &err_count);

			amdgpu_dpm_send_hbm_bad_pages_num(adev,
					con->eeprom_control.ras_num_bad_pages);

			if (con->update_channel_flag == true) {
				amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
				con->update_channel_flag = false;
			}
		}
	}

	kfree(err_data->err_addr);
	err_data->err_addr = NULL;

	mutex_unlock(&con->page_retirement_lock);
}

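/*
 * Common page retirement path for poison/ECC events: flag the SRAM ECC event
 * to KFD, retire any bad pages found, and trigger a GPU reset when
 * uncorrectable or deferred errors were seen and a reset was requested (or
 * the device has already reached its bad page threshold, i.e. is RMA).
 */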
static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
		void *ras_error_status,
		struct amdgpu_iv_entry *entry,
		uint32_t reset)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
	amdgpu_umc_handle_bad_pages(adev, ras_error_status);

	if ((err_data->ue_count || err_data->de_count) &&
	    (reset || amdgpu_ras_is_rma(adev))) {
		con->gpu_reset_flags |= reset;
		amdgpu_ras_reset_gpu(adev);
	}

	return AMDGPU_RAS_SUCCESS;
}

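/*
 * Handle a poison consumption event for a given PASID. On devices connected
 * to the CPU (or APP APUs) only a GPU reset is issued here and the MCA
 * notifier handles page retirement. On bare metal, older UMC IPs retire
 * pages synchronously, while newer ones queue the request to the page
 * retirement thread (or to the RAS manager when uniras is enabled). Under
 * SR-IOV the request is forwarded to the host via virt ops.
 */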
int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
			enum amdgpu_ras_block block, uint16_t pasid,
			pasid_notify pasid_fn, void *data, uint32_t reset)
{
	int ret = AMDGPU_RAS_SUCCESS;

	if (adev->gmc.xgmi.connected_to_cpu ||
		adev->gmc.is_app_apu) {
		if (reset) {
			/* MCA poison handler is only responsible for GPU reset,
			 * let MCA notifier do page retirement.
			 */
			kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
			amdgpu_ras_reset_gpu(adev);
		}
		return ret;
	}

	if (!amdgpu_sriov_vf(adev)) {
		if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
			struct ras_err_data err_data;
			struct ras_common_if head = {
				.block = AMDGPU_RAS_BLOCK__UMC,
			};
			struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);

			ret = amdgpu_ras_error_data_init(&err_data);
			if (ret)
				return ret;

			ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);

			if (ret == AMDGPU_RAS_SUCCESS && obj) {
				obj->err_data.ue_count += err_data.ue_count;
				obj->err_data.ce_count += err_data.ce_count;
				obj->err_data.de_count += err_data.de_count;
			}

			amdgpu_ras_error_data_fini(&err_data);
		} else if (amdgpu_uniras_enabled(adev)) {
			struct ras_ih_info ih_info = {0};

			ih_info.block = block;
			ih_info.pasid = pasid;
			ih_info.reset = reset;
			ih_info.pasid_fn = pasid_fn;
			ih_info.data = data;
			amdgpu_ras_mgr_handle_consumer_interrupt(adev, &ih_info);
		} else {
			struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
			int ret;

			ret = amdgpu_ras_put_poison_req(adev,
				block, pasid, pasid_fn, data, reset);
			if (!ret) {
				atomic_inc(&con->page_retirement_req_cnt);
				atomic_inc(&con->poison_consumption_count);
				wake_up(&con->page_retirement_wq);
			}
		}
	} else {
		if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
			adev->virt.ops->ras_poison_handler(adev, block);
		else
			dev_warn(adev->dev,
				"No ras_poison_handler interface in SRIOV!\n");
	}

	return ret;
}

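/* Poison handler for callers that do not track a specific PASID. */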
int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
			enum amdgpu_ras_block block, uint32_t reset)
{
	return amdgpu_umc_pasid_poison_handler(adev,
				block, 0, NULL, NULL, reset);
}

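/* RAS error-data callback: retire pages, requesting a mode-1 GPU reset. */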
int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
		void *ras_error_status,
		struct amdgpu_iv_entry *entry)
{
	return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry,
				AMDGPU_RAS_GPU_RESET_MODE1_RESET);
}

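/*
 * Register the UMC RAS block with the RAS core and fill in common fields
 * (name, block id, error type) plus default late_init/ras_cb callbacks.
 */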
int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_umc_ras *ras;

	if (!adev->umc.ras)
		return 0;

	ras = adev->umc.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register umc ras block!\n");
		return err;
	}

	strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->umc.ras_if = &ras->ras_block.ras_comm;

	if (!ras->ras_block.ras_late_init)
		ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;

	if (!ras->ras_block.ras_cb)
		ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;

	return 0;
}

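/*
 * Late init for the UMC RAS block: run the common RAS late init, enable the
 * ECC interrupt on bare metal when UMC RAS is supported, and let the
 * IP-specific code initialize its error counters.
 */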
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev))
		return r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
		if (r)
			goto late_fini;
	}

	/* ras init of specific umc version */
	if (adev->umc.ras &&
	    adev->umc.ras->err_cnt_init)
		adev->umc.ras->err_cnt_init(adev);

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}

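/* Dispatch a UMC ECC interrupt to the RAS interrupt handler, if registered. */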
int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
		struct amdgpu_irq_src *source,
		struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->umc.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

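/*
 * Append one retired page to err_data->err_addr as an EEPROM table record.
 * Returns -EINVAL if the record array is missing or already full.
 */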
int amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
		uint64_t err_addr,
		uint64_t retired_page,
		uint32_t channel_index,
		uint32_t umc_inst)
{
	struct eeprom_table_record *err_rec;

	if (!err_data ||
	    !err_data->err_addr ||
	    (err_data->err_addr_cnt >= err_data->err_addr_len))
		return -EINVAL;

	err_rec = &err_data->err_addr[err_data->err_addr_cnt];

	err_rec->address = err_addr;
	/* page frame address is saved */
	err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
	err_rec->ts = (uint64_t)ktime_get_real_seconds();
	err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
	err_rec->cu = 0;
	err_rec->mem_channel = channel_index;
	err_rec->mcumc_id = umc_inst;

	err_data->err_addr_cnt++;

	return 0;
}

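/*
 * Invoke func for every channel of every active UMC instance on multi-AID
 * parts, deriving (node_inst, umc_inst) from the flattened active_mask.
 */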
static int amdgpu_umc_loop_all_aid(struct amdgpu_device *adev, umc_func func,
				   void *data)
{
	uint32_t umc_node_inst;
	uint32_t node_inst;
	uint32_t umc_inst;
	uint32_t ch_inst;
	int ret;

	/*
	 * This loop is done based on the following -
	 * umc.active mask = mask of active umc instances across all nodes
	 * umc.umc_inst_num = maximum number of umc instances per node
	 * umc.node_inst_num = maximum number of node instances
	 * Channel instances are not assumed to be harvested.
	 */
	dev_dbg(adev->dev, "active umcs :%lx umc_inst per node: %d",
		adev->umc.active_mask, adev->umc.umc_inst_num);
	for_each_set_bit(umc_node_inst, &(adev->umc.active_mask),
			 adev->umc.node_inst_num * adev->umc.umc_inst_num) {
		node_inst = umc_node_inst / adev->umc.umc_inst_num;
		umc_inst = umc_node_inst % adev->umc.umc_inst_num;
		LOOP_UMC_CH_INST(ch_inst) {
			dev_dbg(adev->dev,
				"node_inst :%d umc_inst: %d ch_inst: %d",
				node_inst, umc_inst, ch_inst);
			ret = func(adev, node_inst, umc_inst, ch_inst, data);
			if (ret) {
				dev_err(adev->dev,
					"Node %d umc %d ch %d func returns %d\n",
					node_inst, umc_inst, ch_inst, ret);
				return ret;
			}
		}
	}

	return 0;
}

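/*
 * Iterate func over all UMC channels: use the AID-aware walk when an AID
 * mask is present, otherwise loop over node/umc/channel instances directly.
 */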
int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
			umc_func func, void *data)
{
	uint32_t node_inst       = 0;
	uint32_t umc_inst        = 0;
	uint32_t ch_inst         = 0;
	int ret = 0;

	if (adev->aid_mask)
		return amdgpu_umc_loop_all_aid(adev, func, data);

	if (adev->umc.node_inst_num) {
		LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
			ret = func(adev, node_inst, umc_inst, ch_inst, data);
			if (ret) {
				dev_err(adev->dev, "Node %d umc %d ch %d func returns %d\n",
					node_inst, umc_inst, ch_inst, ret);
				return ret;
			}
		}
	} else {
		LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
			ret = func(adev, 0, umc_inst, ch_inst, data);
			if (ret) {
				dev_err(adev->dev, "Umc %d ch %d func returns %d\n",
					umc_inst, ch_inst, ret);
				return ret;
			}
		}
	}

	return 0;
}

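/* Forward an ECC status update to the IP-specific handler, if provided. */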
int amdgpu_umc_update_ecc_status(struct amdgpu_device *adev,
				uint64_t status, uint64_t ipid, uint64_t addr)
{
	if (adev->umc.ras->update_ecc_status)
		return adev->umc.ras->update_ecc_status(adev,
					status, ipid, addr);
	return 0;
}

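/*
 * Insert an ECC error into the given radix tree, keyed by the physical page
 * frame number, and tag it as newly detected for later processing.
 */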
int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
		struct radix_tree_root *ecc_tree, struct ras_ecc_err *ecc_err)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_ecc_log_info *ecc_log;
	int ret;

	ecc_log = &con->umc_ecc_log;

	mutex_lock(&ecc_log->lock);
	ret = radix_tree_insert(ecc_tree, ecc_err->pa_pfn, ecc_err);
	if (!ret)
		radix_tree_tag_set(ecc_tree,
			ecc_err->pa_pfn, UMC_ECC_NEW_DETECTED_TAG);
	mutex_unlock(&ecc_log->lock);

	return ret;
}

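/*
 * Expand a physical address into the full set of pages retired as one unit
 * (adev->umc.retire_unit entries), using the IP-specific address conversion
 * callback.
 */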
int amdgpu_umc_pages_in_a_row(struct amdgpu_device *adev,
			struct ras_err_data *err_data, uint64_t pa_addr)
{
	struct ta_ras_query_address_output addr_out;

	/* reinit err_data */
	err_data->err_addr_cnt = 0;
	err_data->err_addr_len = adev->umc.retire_unit;

	addr_out.pa.pa = pa_addr;
	if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
		return adev->umc.ras->convert_ras_err_addr(adev, err_data, NULL,
				&addr_out, false);
	else
		return -EINVAL;
}

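/*
 * Fill pfns[] with the page frame numbers of the retire unit that contains
 * pa_addr. Returns the number of entries copied on success.
 */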
int amdgpu_umc_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
			uint64_t pa_addr, uint64_t *pfns, int len)
{
	int i, ret;
	struct ras_err_data err_data;

	err_data.err_addr = kcalloc(adev->umc.retire_unit,
				    sizeof(struct eeprom_table_record), GFP_KERNEL);
	if (!err_data.err_addr) {
		dev_warn(adev->dev, "Failed to alloc memory in bad page lookup!\n");
		return 0;
	}

	ret = amdgpu_umc_pages_in_a_row(adev, &err_data, pa_addr);
	if (ret)
		goto out;

	for (i = 0; i < adev->umc.retire_unit; i++) {
		if (i >= len)
			goto out;

		pfns[i] = err_data.err_addr[i].retired_page;
	}
	ret = i;
	adev->umc.err_addr_cnt = err_data.err_addr_cnt;

out:
	kfree(err_data.err_addr);
	return ret;
}

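/*
 * Convert an MCA error address (plus channel/umc/node/socket coordinates)
 * into physical address information through the IP-specific callback;
 * devices without the callback simply report success without conversion.
 */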
int amdgpu_umc_mca_to_addr(struct amdgpu_device *adev,
			uint64_t err_addr, uint32_t ch, uint32_t umc,
			uint32_t node, uint32_t socket,
			struct ta_ras_query_address_output *addr_out, bool dump_addr)
{
	struct ta_ras_query_address_input addr_in;
	int ret;

	memset(&addr_in, 0, sizeof(addr_in));
	addr_in.ma.err_addr = err_addr;
	addr_in.ma.ch_inst = ch;
	addr_in.ma.umc_inst = umc;
	addr_in.ma.node_inst = node;
	addr_in.ma.socket_id = socket;

	if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
		ret = adev->umc.ras->convert_ras_err_addr(adev, NULL, &addr_in,
				addr_out, dump_addr);
		if (ret)
			return ret;
	} else {
		return 0;
	}

	return 0;
}

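/*
 * Translate a physical address back to an MCA address for the given NPS
 * (memory partition) mode by querying the RAS TA through the PSP.
 */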
int amdgpu_umc_pa2mca(struct amdgpu_device *adev,
		uint64_t pa, uint64_t *mca, enum amdgpu_memory_partition nps)
{
	struct ta_ras_query_address_input addr_in;
	struct ta_ras_query_address_output addr_out;
	int ret;

	/* nps: the pa belongs to */
	addr_in.pa.pa = pa | ((uint64_t)nps << 58);
	addr_in.addr_type = TA_RAS_PA_TO_MCA;
	ret = psp_ras_query_address(&adev->psp, &addr_in, &addr_out);
	if (ret) {
		dev_warn(adev->dev, "Failed to query RAS MCA address for 0x%llx",
			pa);

		return ret;
	}

	*mca = addr_out.ma.err_addr;

	return 0;
}