xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c (revision d9bc24d263fdb420f61a8a8b8bbb1a68f5a0f803)
1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  *
23  */
24 #include <linux/debugfs.h>
25 #include <linux/list.h>
26 #include <linux/module.h>
27 #include <linux/uaccess.h>
28 #include <linux/reboot.h>
29 #include <linux/syscalls.h>
30 #include <linux/pm_runtime.h>
31 #include <linux/list_sort.h>
32 
33 #include "amdgpu.h"
34 #include "amdgpu_ras.h"
35 #include "amdgpu_atomfirmware.h"
36 #include "amdgpu_xgmi.h"
37 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
38 #include "nbio_v4_3.h"
39 #include "nbif_v6_3_1.h"
40 #include "nbio_v7_9.h"
41 #include "atom.h"
42 #include "amdgpu_reset.h"
43 #include "amdgpu_psp.h"
44 #include "amdgpu_ras_mgr.h"
45 #include "amdgpu_virt_ras_cmd.h"
46 
47 #ifdef CONFIG_X86_MCE_AMD
48 #include <asm/mce.h>
49 
50 static bool notifier_registered;
51 #endif
52 static const char *RAS_FS_NAME = "ras";
53 
54 const char *ras_error_string[] = {
55 	"none",
56 	"parity",
57 	"single_correctable",
58 	"multi_uncorrectable",
59 	"poison",
60 };
61 
62 const char *ras_block_string[] = {
63 	"umc",
64 	"sdma",
65 	"gfx",
66 	"mmhub",
67 	"athub",
68 	"pcie_bif",
69 	"hdp",
70 	"xgmi_wafl",
71 	"df",
72 	"smn",
73 	"sem",
74 	"mp0",
75 	"mp1",
76 	"fuse",
77 	"mca",
78 	"vcn",
79 	"jpeg",
80 	"ih",
81 	"mpio",
82 	"mmsch",
83 };
84 
85 const char *ras_mca_block_string[] = {
86 	"mca_mp0",
87 	"mca_mp1",
88 	"mca_mpio",
89 	"mca_iohc",
90 };
91 
92 struct amdgpu_ras_block_list {
93 	/* ras block link */
94 	struct list_head node;
95 
96 	struct amdgpu_ras_block_object *ras_obj;
97 };
98 
99 const char *get_ras_block_str(struct ras_common_if *ras_block)
100 {
101 	if (!ras_block)
102 		return "NULL";
103 
104 	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
105 	    ras_block->block >= ARRAY_SIZE(ras_block_string))
106 		return "OUT OF RANGE";
107 
108 	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
109 		return ras_mca_block_string[ras_block->sub_block_index];
110 
111 	return ras_block_string[ras_block->block];
112 }
113 
114 #define ras_block_str(_BLOCK_) \
115 	(((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")
116 
117 #define ras_err_str(i) (ras_error_string[ffs(i)])
118 
119 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
120 
121 /* inject address is 52 bits */
122 #define	RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)
123 
124 /* typical ECC bad page rate is 1 bad page per 100MB VRAM */
125 #define RAS_BAD_PAGE_COVER              (100 * 1024 * 1024ULL)
126 
127 #define MAX_UMC_POISON_POLLING_TIME_ASYNC  10
128 
129 #define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100  //ms
130 
131 #define MAX_FLUSH_RETIRE_DWORK_TIMES  100
132 
133 #define BYPASS_ALLOCATED_ADDRESS        0x0
134 #define BYPASS_INITIALIZATION_ADDRESS   0x1
135 
136 enum amdgpu_ras_retire_page_reservation {
137 	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
138 	AMDGPU_RAS_RETIRE_PAGE_PENDING,
139 	AMDGPU_RAS_RETIRE_PAGE_FAULT,
140 };
141 
142 atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
143 
144 static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
145 				uint64_t addr);
146 static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
147 				uint64_t addr);
148 
149 static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev);
150 static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev);
151 
152 #ifdef CONFIG_X86_MCE_AMD
153 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
154 static void
155 amdgpu_unregister_bad_pages_mca_notifier(struct amdgpu_device *adev);
156 struct mce_notifier_adev_list {
157 	struct amdgpu_device *devs[MAX_GPU_INSTANCE];
158 	int num_gpu;
159 };
160 static struct mce_notifier_adev_list mce_adev_list;
161 #endif
162 
163 void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
164 {
165 	if (adev && amdgpu_ras_get_context(adev))
166 		amdgpu_ras_get_context(adev)->error_query_ready = ready;
167 }
168 
169 static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
170 {
171 	if (adev && amdgpu_ras_get_context(adev))
172 		return amdgpu_ras_get_context(adev)->error_query_ready;
173 
174 	return false;
175 }
176 
177 static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
178 {
179 	struct ras_err_data err_data;
180 	struct eeprom_table_record err_rec;
181 	int ret;
182 
183 	ret = amdgpu_ras_check_bad_page(adev, address);
184 	if (ret == -EINVAL) {
185 		dev_warn(adev->dev,
186 			"RAS WARN: input address 0x%llx is invalid.\n",
187 			address);
188 		return -EINVAL;
189 	} else if (ret == 1) {
190 		dev_warn(adev->dev,
191 			"RAS WARN: 0x%llx has already been marked as bad page!\n",
192 			address);
193 		return 0;
194 	}
195 
196 	ret = amdgpu_ras_error_data_init(&err_data);
197 	if (ret)
198 		return ret;
199 
200 	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
201 	err_data.err_addr = &err_rec;
202 	amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);
203 
204 	if (amdgpu_bad_page_threshold != 0) {
205 		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
206 					 err_data.err_addr_cnt, false);
207 		amdgpu_ras_save_bad_pages(adev, NULL);
208 	}
209 
210 	amdgpu_ras_error_data_fini(&err_data);
211 
212 	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
213 	dev_warn(adev->dev, "Clear EEPROM:\n");
214 	dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
215 
216 	return 0;
217 }
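
/*
 * Illustrative only: amdgpu_reserve_page_direct() is reached through the
 * "retire_page" op of the ras_ctrl debugfs file parsed below, e.g. (card 0
 * path assumed, matching the hint printed above):
 *
 *	echo retire_page 0x1000 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 */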
218 
219 static int amdgpu_check_address_validity(struct amdgpu_device *adev,
220 			uint64_t address, uint64_t flags)
221 {
222 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
223 	struct amdgpu_vram_block_info blk_info;
224 	uint64_t page_pfns[32] = {0};
225 	int i, ret, count;
226 	bool hit = false;
227 
228 	if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0))
229 		return 0;
230 
231 	if (amdgpu_sriov_vf(adev)) {
232 		if (amdgpu_uniras_enabled(adev)) {
233 			if (amdgpu_virt_ras_check_address_validity(adev, address, &hit))
234 				return -EPERM;
235 			if (hit)
236 				return -EACCES;
237 		} else {
238 			if (amdgpu_virt_check_vf_critical_region(adev, address, &hit))
239 				return -EPERM;
240 			return hit ? -EACCES : 0;
241 		}
242 	}
243 
244 	if ((address >= adev->gmc.mc_vram_size) ||
245 	    (address >= RAS_UMC_INJECT_ADDR_LIMIT))
246 		return -EFAULT;
247 
248 	if (amdgpu_uniras_enabled(adev)) {
249 		if (amdgpu_sriov_vf(adev))
250 			count = amdgpu_virt_ras_convert_retired_address(adev, address,
251 				page_pfns, ARRAY_SIZE(page_pfns));
252 		else
253 			count = amdgpu_ras_mgr_lookup_bad_pages_in_a_row(adev, address,
254 				page_pfns, ARRAY_SIZE(page_pfns));
255 	} else
256 		count = amdgpu_umc_lookup_bad_pages_in_a_row(adev,
257 				address, page_pfns, ARRAY_SIZE(page_pfns));
258 
259 	if (count <= 0)
260 		return -EPERM;
261 
262 	for (i = 0; i < count; i++) {
263 		memset(&blk_info, 0, sizeof(blk_info));
264 		ret = amdgpu_vram_mgr_query_address_block_info(&adev->mman.vram_mgr,
265 					page_pfns[i] << AMDGPU_GPU_PAGE_SHIFT, &blk_info);
266 		if (!ret) {
267 			/* The input address that needs to be checked is allocated by
268 			 * current calling process, so it is necessary to exclude
269 			 * the current calling process, so it is necessary to exclude
270 			 */
271 			if ((flags == BYPASS_ALLOCATED_ADDRESS) &&
272 			    ((blk_info.task.pid != task_pid_nr(current)) ||
273 				strncmp(blk_info.task.comm, current->comm, TASK_COMM_LEN)))
274 				return -EACCES;
275 			else if ((flags == BYPASS_INITIALIZATION_ADDRESS) &&
276 				(blk_info.task.pid == con->init_task_pid) &&
277 				!strncmp(blk_info.task.comm, con->init_task_comm, TASK_COMM_LEN))
278 				return -EACCES;
279 		}
280 	}
281 
282 	return 0;
283 }
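
/*
 * Illustrative only: amdgpu_check_address_validity() is reached through the
 * "check_address" op of the ras_ctrl debugfs file, which parses an address
 * and a flags value (one of the BYPASS_* values defined above), e.g.:
 *
 *	echo check_address 0x1000 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 */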
284 
285 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
286 					size_t size, loff_t *pos)
287 {
288 	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
289 	struct ras_query_if info = {
290 		.head = obj->head,
291 	};
292 	ssize_t s;
293 	char val[128];
294 
295 	if (amdgpu_ras_query_error_status(obj->adev, &info))
296 		return -EINVAL;
297 
298 	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
299 	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
300 	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
301 		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
302 			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
303 	}
304 
305 	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
306 			"ue", info.ue_count,
307 			"ce", info.ce_count);
308 	if (*pos >= s)
309 		return 0;
310 
311 	s -= *pos;
312 	s = min_t(u64, s, size);
313 
314 
315 	if (copy_to_user(buf, &val[*pos], s))
316 		return -EINVAL;
317 
318 	*pos += s;
319 
320 	return s;
321 }
322 
323 static const struct file_operations amdgpu_ras_debugfs_ops = {
324 	.owner = THIS_MODULE,
325 	.read = amdgpu_ras_debugfs_read,
326 	.write = NULL,
327 	.llseek = default_llseek
328 };
329 
330 static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
331 {
332 	int i;
333 
334 	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
335 		*block_id = i;
336 		if (strcmp(name, ras_block_string[i]) == 0)
337 			return 0;
338 	}
339 	return -EINVAL;
340 }
341 
342 static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
343 		const char __user *buf, size_t size,
344 		loff_t *pos, struct ras_debug_if *data)
345 {
346 	ssize_t s = min_t(u64, 64, size);
347 	char str[65];
348 	char block_name[33];
349 	char err[9] = "ue";
350 	int op = -1;
351 	int block_id;
352 	uint32_t sub_block;
353 	u64 address, value;
354 	/* default value is 0 if the mask is not set by user */
355 	u32 instance_mask = 0;
356 
357 	if (*pos)
358 		return -EINVAL;
359 	*pos = size;
360 
361 	memset(str, 0, sizeof(str));
362 	memset(data, 0, sizeof(*data));
363 
364 	if (copy_from_user(str, buf, s))
365 		return -EINVAL;
366 
367 	if (sscanf(str, "disable %32s", block_name) == 1)
368 		op = 0;
369 	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
370 		op = 1;
371 	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
372 		op = 2;
373 	else if (strstr(str, "retire_page") != NULL)
374 		op = 3;
375 	else if (strstr(str, "check_address") != NULL)
376 		op = 4;
377 	else if (str[0] && str[1] && str[2] && str[3])
378 		/* ascii string, but commands are not matched. */
379 		return -EINVAL;
380 
381 	if (op != -1) {
382 		if (op == 3) {
383 			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
384 			    sscanf(str, "%*s %llu", &address) != 1)
385 				return -EINVAL;
386 
387 			data->op = op;
388 			data->inject.address = address;
389 
390 			return 0;
391 		} else if (op == 4) {
392 			if (sscanf(str, "%*s 0x%llx 0x%llx", &address, &value) != 2 &&
393 			    sscanf(str, "%*s %llu %llu", &address, &value) != 2)
394 				return -EINVAL;
395 
396 			data->op = op;
397 			data->inject.address = address;
398 			data->inject.value = value;
399 			return 0;
400 		}
401 
402 		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
403 			return -EINVAL;
404 
405 		data->head.block = block_id;
406 		/* only ue, ce and poison errors are supported */
407 		if (!memcmp("ue", err, 2))
408 			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
409 		else if (!memcmp("ce", err, 2))
410 			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
411 		else if (!memcmp("poison", err, 6))
412 			data->head.type = AMDGPU_RAS_ERROR__POISON;
413 		else
414 			return -EINVAL;
415 
416 		data->op = op;
417 
418 		if (op == 2) {
419 			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
420 				   &sub_block, &address, &value, &instance_mask) != 4 &&
421 			    sscanf(str, "%*s %*s %*s %u %llu %llu %u",
422 				   &sub_block, &address, &value, &instance_mask) != 4 &&
423 				sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
424 				   &sub_block, &address, &value) != 3 &&
425 			    sscanf(str, "%*s %*s %*s %u %llu %llu",
426 				   &sub_block, &address, &value) != 3)
427 				return -EINVAL;
428 			data->head.sub_block_index = sub_block;
429 			data->inject.address = address;
430 			data->inject.value = value;
431 			data->inject.instance_mask = instance_mask;
432 		}
433 	} else {
434 		if (size < sizeof(*data))
435 			return -EINVAL;
436 
437 		if (copy_from_user(data, buf, sizeof(*data)))
438 			return -EINVAL;
439 	}
440 
441 	return 0;
442 }
443 
444 static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
445 				struct ras_debug_if *data)
446 {
447 	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
448 	uint32_t mask, inst_mask = data->inject.instance_mask;
449 
450 	/* no need to set instance mask if there is only one instance */
451 	if (num_xcc <= 1 && inst_mask) {
452 		data->inject.instance_mask = 0;
453 		dev_dbg(adev->dev,
454 			"RAS inject mask (0x%x) isn't supported, forcing it to 0.\n",
455 			inst_mask);
456 
457 		return;
458 	}
459 
460 	switch (data->head.block) {
461 	case AMDGPU_RAS_BLOCK__GFX:
462 		mask = GENMASK(num_xcc - 1, 0);
463 		break;
464 	case AMDGPU_RAS_BLOCK__SDMA:
465 		mask = GENMASK(adev->sdma.num_instances - 1, 0);
466 		break;
467 	case AMDGPU_RAS_BLOCK__VCN:
468 	case AMDGPU_RAS_BLOCK__JPEG:
469 		mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
470 		break;
471 	default:
472 		mask = inst_mask;
473 		break;
474 	}
475 
476 	/* remove invalid bits in instance mask */
477 	data->inject.instance_mask &= mask;
478 	if (inst_mask != data->inject.instance_mask)
479 		dev_dbg(adev->dev,
480 			"Adjust RAS inject mask 0x%x to 0x%x\n",
481 			inst_mask, data->inject.instance_mask);
482 }
483 
484 /**
485  * DOC: AMDGPU RAS debugfs control interface
486  *
487  * The control interface accepts struct ras_debug_if which has two members.
488  *
489  * First member: ras_debug_if::head or ras_debug_if::inject.
490  *
491  * head is used to indicate which IP block will be under control.
492  *
493  * head has four members: block, type, sub_block_index and name.
494  * block: which IP will be under control.
495  * type: what kind of error will be enabled/disabled/injected.
496  * sub_block_index: some IPs have subcomponents, e.g., GFX, SDMA.
497  * name: the name of the IP.
498  *
499  * inject has three more members than head: address, value and mask.
500  * As their names indicate, the inject operation will write the
501  * value to the address.
502  *
503  * The second member: struct ras_debug_if::op.
504  * It has three kinds of operations.
505  *
506  * - 0: disable RAS on the block. Take ::head as its data.
507  * - 1: enable RAS on the block. Take ::head as its data.
508  * - 2: inject errors on the block. Take ::inject as its data.
509  *
510  * How to use the interface?
511  *
512  * In a program
513  *
514  * Copy the struct ras_debug_if in your code and initialize it.
515  * Write the struct to the control interface.
516  *
517  * From shell
518  *
519  * .. code-block:: bash
520  *
521  *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
522  *	echo "enable  <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
523  *	echo "inject  <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
524  *
525  * Where N is the card you want to affect.
526  *
527  * "disable" requires only the block.
528  * "enable" requires the block and error type.
529  * "inject" requires the block, error type, address, and value.
530  *
531  * The block is one of: umc, sdma, gfx, etc.
532  *	see ras_block_string[] for details
533  *
534  * The error type is one of: ue, ce and poison, where
535  *	ue is multi-uncorrectable
536  *	ce is single-correctable
537  *	poison is poison
538  *
539  * The sub-block is the sub-block index; pass 0 if there is no sub-block.
540  * The address and value are hexadecimal numbers; a leading 0x is optional.
541  * The mask is the instance mask; it is optional and its default value is 0x1.
542  *
543  * For instance,
544  *
545  * .. code-block:: bash
546  *
547  *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
548  *	echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
549  *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
550  *
551  * How to check the result of the operation?
552  *
553  * To check disable/enable, see "ras" features at,
554  * /sys/class/drm/card[0/1/2...]/device/ras/features
555  *
556  * To check inject, see the corresponding error count at,
557  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
558  *
559  * .. note::
560  *	Operations are only allowed on blocks which are supported.
561  *	Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
562  *	to see which blocks support RAS on a particular asic.
563  *
564  */
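
/*
 * A minimal userspace sketch of the "In a program" path described above
 * (illustrative only; it assumes struct ras_debug_if and the enum values
 * below have been copied from the driver, and that card 0 is the target):
 *
 *	struct ras_debug_if data = { 0 };
 *	int fd;
 *
 *	data.op = 1;	// 1 == enable
 *	data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *	data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *
 *	fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *	if (fd >= 0) {
 *		if (write(fd, &data, sizeof(data)) != sizeof(data))
 *			perror("ras_ctrl");
 *		close(fd);
 *	}
 */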
565 static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
566 					     const char __user *buf,
567 					     size_t size, loff_t *pos)
568 {
569 	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
570 	struct ras_debug_if data;
571 	int ret = 0;
572 
573 	if (!amdgpu_ras_get_error_query_ready(adev)) {
574 		dev_warn(adev->dev,
575 			 "RAS WARN: error injection currently inaccessible\n");
576 		return size;
577 	}
578 
579 	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
580 	if (ret)
581 		return ret;
582 
583 	if (data.op == 3) {
584 		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
585 		if (!ret)
586 			return size;
587 		else
588 			return ret;
589 	} else if (data.op == 4) {
590 		ret = amdgpu_check_address_validity(adev, data.inject.address, data.inject.value);
591 		return ret ? ret : size;
592 	}
593 
594 	if (!amdgpu_ras_is_supported(adev, data.head.block))
595 		return -EINVAL;
596 
597 	switch (data.op) {
598 	case 0:
599 		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
600 		break;
601 	case 1:
602 		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
603 		break;
604 	case 2:
605 		/* umc ce/ue error injection for a bad page is not allowed */
606 		if (data.head.block == AMDGPU_RAS_BLOCK__UMC)
607 			ret = amdgpu_ras_check_bad_page(adev, data.inject.address);
608 		if (ret == -EINVAL) {
609 			dev_warn(adev->dev, "RAS WARN: input address 0x%llx is invalid.",
610 					data.inject.address);
611 			break;
612 		} else if (ret == 1) {
613 			dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
614 					data.inject.address);
615 			break;
616 		}
617 
618 		amdgpu_ras_instance_mask_check(adev, &data);
619 
620 		/* data.inject.address is an offset instead of an absolute gpu address */
621 		ret = amdgpu_ras_error_inject(adev, &data.inject);
622 		break;
623 	default:
624 		ret = -EINVAL;
625 		break;
626 	}
627 
628 	if (ret)
629 		return ret;
630 
631 	return size;
632 }
633 
634 static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev);
635 
636 /**
637  * DOC: AMDGPU RAS debugfs EEPROM table reset interface
638  *
639  * Some boards contain an EEPROM which is used to persistently store a list of
640  * bad pages which have experienced ECC errors in VRAM.  This interface provides
641  * a way to reset the EEPROM, e.g., after testing error injection.
642  *
643  * Usage:
644  *
645  * .. code-block:: bash
646  *
647  *	echo 1 > ../ras/ras_eeprom_reset
648  *
649  * will reset EEPROM table to 0 entries.
650  *
651  */
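
/*
 * For example, for card 0 (full debugfs path, matching the hint printed by
 * amdgpu_reserve_page_direct() above):
 *
 *	echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset
 */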
652 static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
653 					       const char __user *buf,
654 					       size_t size, loff_t *pos)
655 {
656 	struct amdgpu_device *adev =
657 		(struct amdgpu_device *)file_inode(f)->i_private;
658 	int ret;
659 
660 	if (amdgpu_uniras_enabled(adev)) {
661 		ret = amdgpu_uniras_clear_badpages_info(adev);
662 		return ret ? ret : size;
663 	}
664 
665 	ret = amdgpu_ras_eeprom_reset_table(
666 		&(amdgpu_ras_get_context(adev)->eeprom_control));
667 
668 	if (!ret) {
669 		/* Something was written to EEPROM.
670 		 */
671 		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
672 		return size;
673 	} else {
674 		return ret;
675 	}
676 }
677 
678 static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
679 	.owner = THIS_MODULE,
680 	.read = NULL,
681 	.write = amdgpu_ras_debugfs_ctrl_write,
682 	.llseek = default_llseek
683 };
684 
685 static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
686 	.owner = THIS_MODULE,
687 	.read = NULL,
688 	.write = amdgpu_ras_debugfs_eeprom_write,
689 	.llseek = default_llseek
690 };
691 
692 /**
693  * DOC: AMDGPU RAS sysfs Error Count Interface
694  *
695  * It allows the user to read the error count for each IP block on the gpu through
696  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
697  *
698  * It outputs multiple lines which report the uncorrected (ue) and corrected
699  * (ce) error counts.
700  *
701  * The format of one line is below,
702  *
703  * [ce|ue]: count
704  *
705  * Example:
706  *
707  * .. code-block:: bash
708  *
709  *	ue: 0
710  *	ce: 1
711  *
712  */
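
/*
 * For example, reading the UMC error counters on card 0 (illustrative only):
 *
 *	cat /sys/class/drm/card0/device/ras/umc_err_count
 */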
713 static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
714 		struct device_attribute *attr, char *buf)
715 {
716 	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
717 	struct ras_query_if info = {
718 		.head = obj->head,
719 	};
720 
721 	if (!amdgpu_ras_get_error_query_ready(obj->adev))
722 		return sysfs_emit(buf, "Query currently inaccessible\n");
723 
724 	if (amdgpu_ras_query_error_status(obj->adev, &info))
725 		return -EINVAL;
726 
727 	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
728 	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
729 		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
730 			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
731 	}
732 
733 	if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
734 		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
735 				"ce", info.ce_count, "de", info.de_count);
736 	else
737 		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
738 				"ce", info.ce_count);
739 }
740 
741 /* obj begin */
742 
743 #define get_obj(obj) do { (obj)->use++; } while (0)
744 #define alive_obj(obj) ((obj)->use)
745 
746 static inline void put_obj(struct ras_manager *obj)
747 {
748 	if (obj && (--obj->use == 0)) {
749 		list_del(&obj->node);
750 		amdgpu_ras_error_data_fini(&obj->err_data);
751 	}
752 
753 	if (obj && (obj->use < 0))
754 		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
755 }
756 
757 /* make one obj and return it. */
758 static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
759 		struct ras_common_if *head)
760 {
761 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
762 	struct ras_manager *obj;
763 
764 	if (!adev->ras_enabled || !con)
765 		return NULL;
766 
767 	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
768 		return NULL;
769 
770 	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
771 		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
772 			return NULL;
773 
774 		obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
775 	} else
776 		obj = &con->objs[head->block];
777 
778 	/* already exists. return obj? */
779 	if (alive_obj(obj))
780 		return NULL;
781 
782 	if (amdgpu_ras_error_data_init(&obj->err_data))
783 		return NULL;
784 
785 	obj->head = *head;
786 	obj->adev = adev;
787 	list_add(&obj->node, &con->head);
788 	get_obj(obj);
789 
790 	return obj;
791 }
792 
793 /* return an obj equal to head, or the first when head is NULL */
794 struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
795 		struct ras_common_if *head)
796 {
797 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
798 	struct ras_manager *obj;
799 	int i;
800 
801 	if (!adev->ras_enabled || !con)
802 		return NULL;
803 
804 	if (head) {
805 		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
806 			return NULL;
807 
808 		if (head->block == AMDGPU_RAS_BLOCK__MCA) {
809 			if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
810 				return NULL;
811 
812 			obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
813 		} else
814 			obj = &con->objs[head->block];
815 
816 		if (alive_obj(obj))
817 			return obj;
818 	} else {
819 		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
820 			obj = &con->objs[i];
821 			if (alive_obj(obj))
822 				return obj;
823 		}
824 	}
825 
826 	return NULL;
827 }
828 /* obj end */
829 
830 /* feature ctl begin */
831 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
832 					 struct ras_common_if *head)
833 {
834 	return adev->ras_hw_enabled & BIT(head->block);
835 }
836 
837 static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
838 		struct ras_common_if *head)
839 {
840 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
841 
842 	return con->features & BIT(head->block);
843 }
844 
845 /*
846  * if obj is not created, then create one.
847  * set feature enable flag.
848  */
849 static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
850 		struct ras_common_if *head, int enable)
851 {
852 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
853 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
854 
855 	/* If the hardware does not support ras, then do not create the obj.
856 	 * But if the hardware supports ras, we can create the obj.
857 	 * The ras framework checks con->hw_supported to see if it needs to do
858 	 * the corresponding initialization.
859 	 * Each IP checks con->support to see if it needs to disable ras.
860 	 */
861 	if (!amdgpu_ras_is_feature_allowed(adev, head))
862 		return 0;
863 
864 	if (enable) {
865 		if (!obj) {
866 			obj = amdgpu_ras_create_obj(adev, head);
867 			if (!obj)
868 				return -EINVAL;
869 		} else {
870 			/* In case we create obj somewhere else */
871 			get_obj(obj);
872 		}
873 		con->features |= BIT(head->block);
874 	} else {
875 		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
876 			con->features &= ~BIT(head->block);
877 			put_obj(obj);
878 		}
879 	}
880 
881 	return 0;
882 }
883 
884 /* wrapper of psp_ras_enable_features */
885 int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
886 		struct ras_common_if *head, bool enable)
887 {
888 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
889 	union ta_ras_cmd_input *info;
890 	int ret;
891 
892 	if (!con)
893 		return -EINVAL;
894 
895 	/* For non-gfx IPs, do not enable the ras feature if it is not allowed. */
896 	/* For the gfx IP, force issuing enable or disable ras feature commands */
897 	/* regardless of the feature support status. */
898 	if (head->block != AMDGPU_RAS_BLOCK__GFX &&
899 	    !amdgpu_ras_is_feature_allowed(adev, head))
900 		return 0;
901 
902 	/* Only enable gfx ras feature from host side */
903 	if (head->block == AMDGPU_RAS_BLOCK__GFX &&
904 	    !amdgpu_sriov_vf(adev) &&
905 	    !amdgpu_ras_intr_triggered()) {
906 		info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
907 		if (!info)
908 			return -ENOMEM;
909 
910 		if (!enable) {
911 			info->disable_features = (struct ta_ras_disable_features_input) {
912 				.block_id =  amdgpu_ras_block_to_ta(head->block),
913 				.error_type = amdgpu_ras_error_to_ta(head->type),
914 			};
915 		} else {
916 			info->enable_features = (struct ta_ras_enable_features_input) {
917 				.block_id =  amdgpu_ras_block_to_ta(head->block),
918 				.error_type = amdgpu_ras_error_to_ta(head->type),
919 			};
920 		}
921 
922 		ret = psp_ras_enable_features(&adev->psp, info, enable);
923 		if (ret) {
924 			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
925 				enable ? "enable":"disable",
926 				get_ras_block_str(head),
927 				amdgpu_ras_is_poison_mode_supported(adev), ret);
928 			kfree(info);
929 			return ret;
930 		}
931 
932 		kfree(info);
933 	}
934 
935 	/* setup the obj */
936 	__amdgpu_ras_feature_enable(adev, head, enable);
937 
938 	return 0;
939 }
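
/*
 * Illustrative in-kernel sketch (hypothetical caller): enable RAS on the
 * gfx block, mirroring how amdgpu_ras_debugfs_ctrl_write() calls this
 * wrapper with a parsed head:
 *
 *	struct ras_common_if head = {
 *		.block = AMDGPU_RAS_BLOCK__GFX,
 *		.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
 *	};
 *
 *	if (amdgpu_ras_feature_enable(adev, &head, true))
 *		dev_warn(adev->dev, "failed to enable gfx ras\n");
 */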
940 
941 /* Only used in device probe stage and called only once. */
942 int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
943 		struct ras_common_if *head, bool enable)
944 {
945 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
946 	int ret;
947 
948 	if (!con)
949 		return -EINVAL;
950 
951 	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
952 		if (enable) {
953 			/* There is no harm in issuing a ras TA cmd regardless of
954 			 * the current ras state.
955 			 * If current state == target state, it will do nothing.
956 			 * But sometimes it requests the driver to reset and repost
957 			 * with error code -EAGAIN.
958 			 */
959 			ret = amdgpu_ras_feature_enable(adev, head, 1);
960 			/* With old ras TA, we might fail to enable ras.
961 			 * Log it and just set up the object.
962 			 * TODO: remove this WA in the future.
963 			 */
964 			if (ret == -EINVAL) {
965 				ret = __amdgpu_ras_feature_enable(adev, head, 1);
966 				if (!ret)
967 					dev_info(adev->dev,
968 						"RAS INFO: %s setup object\n",
969 						get_ras_block_str(head));
970 			}
971 		} else {
972 			/* set up the object, then issue a ras TA disable cmd. */
973 			ret = __amdgpu_ras_feature_enable(adev, head, 1);
974 			if (ret)
975 				return ret;
976 
977 			/* gfx block ras disable cmd must send to ras-ta */
978 			if (head->block == AMDGPU_RAS_BLOCK__GFX)
979 				con->features |= BIT(head->block);
980 
981 			ret = amdgpu_ras_feature_enable(adev, head, 0);
982 
983 			/* clean gfx block ras features flag */
984 			if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
985 				con->features &= ~BIT(head->block);
986 		}
987 	} else
988 		ret = amdgpu_ras_feature_enable(adev, head, enable);
989 
990 	return ret;
991 }
992 
993 static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
994 		bool bypass)
995 {
996 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
997 	struct ras_manager *obj, *tmp;
998 
999 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
1000 		/* bypass psp.
1001 		 * aka just release the obj and corresponding flags
1002 		 */
1003 		if (bypass) {
1004 			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
1005 				break;
1006 		} else {
1007 			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
1008 				break;
1009 		}
1010 	}
1011 
1012 	return con->features;
1013 }
1014 
1015 static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
1016 		bool bypass)
1017 {
1018 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1019 	int i;
1020 	const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;
1021 
1022 	for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
1023 		struct ras_common_if head = {
1024 			.block = i,
1025 			.type = default_ras_type,
1026 			.sub_block_index = 0,
1027 		};
1028 
1029 		if (i == AMDGPU_RAS_BLOCK__MCA)
1030 			continue;
1031 
1032 		if (bypass) {
1033 			/*
1034 			 * bypass psp. vbios enables ras for us,
1035 			 * so just create the obj
1036 			 */
1037 			if (__amdgpu_ras_feature_enable(adev, &head, 1))
1038 				break;
1039 		} else {
1040 			if (amdgpu_ras_feature_enable(adev, &head, 1))
1041 				break;
1042 		}
1043 	}
1044 
1045 	for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
1046 		struct ras_common_if head = {
1047 			.block = AMDGPU_RAS_BLOCK__MCA,
1048 			.type = default_ras_type,
1049 			.sub_block_index = i,
1050 		};
1051 
1052 		if (bypass) {
1053 			/*
1054 			 * bypass psp. vbios enables ras for us,
1055 			 * so just create the obj
1056 			 */
1057 			if (__amdgpu_ras_feature_enable(adev, &head, 1))
1058 				break;
1059 		} else {
1060 			if (amdgpu_ras_feature_enable(adev, &head, 1))
1061 				break;
1062 		}
1063 	}
1064 
1065 	return con->features;
1066 }
1067 /* feature ctl end */
1068 
1069 static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
1070 		enum amdgpu_ras_block block)
1071 {
1072 	if (!block_obj)
1073 		return -EINVAL;
1074 
1075 	if (block_obj->ras_comm.block == block)
1076 		return 0;
1077 
1078 	return -EINVAL;
1079 }
1080 
1081 static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
1082 					enum amdgpu_ras_block block, uint32_t sub_block_index)
1083 {
1084 	struct amdgpu_ras_block_list *node, *tmp;
1085 	struct amdgpu_ras_block_object *obj;
1086 
1087 	if (block >= AMDGPU_RAS_BLOCK__LAST)
1088 		return NULL;
1089 
1090 	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
1091 		if (!node->ras_obj) {
1092 			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
1093 			continue;
1094 		}
1095 
1096 		obj = node->ras_obj;
1097 		if (obj->ras_block_match) {
1098 			if (obj->ras_block_match(obj, block, sub_block_index) == 0)
1099 				return obj;
1100 		} else {
1101 			if (amdgpu_ras_block_match_default(obj, block) == 0)
1102 				return obj;
1103 		}
1104 	}
1105 
1106 	return NULL;
1107 }
1108 
1109 static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
1110 {
1111 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
1112 	int ret = 0;
1113 
1114 	/*
1115 	 * choose the right query method according to
1116 	 * whether the smu supports querying error information
1117 	 */
1118 	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
1119 	if (ret == -EOPNOTSUPP) {
1120 		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1121 			adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
1122 			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
1123 
1124 		/* umc query_ras_error_address is also responsible for clearing
1125 		 * error status
1126 		 */
1127 		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1128 		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
1129 			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
1130 	} else if (!ret) {
1131 		if (adev->umc.ras &&
1132 			adev->umc.ras->ecc_info_query_ras_error_count)
1133 			adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);
1134 
1135 		if (adev->umc.ras &&
1136 			adev->umc.ras->ecc_info_query_ras_error_address)
1137 			adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
1138 	}
1139 }
1140 
1141 static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
1142 					      struct ras_manager *ras_mgr,
1143 					      struct ras_err_data *err_data,
1144 					      struct ras_query_context *qctx,
1145 					      const char *blk_name,
1146 					      bool is_ue,
1147 					      bool is_de)
1148 {
1149 	struct amdgpu_smuio_mcm_config_info *mcm_info;
1150 	struct ras_err_node *err_node;
1151 	struct ras_err_info *err_info;
1152 	u64 event_id = qctx->evid.event_id;
1153 
1154 	if (is_ue) {
1155 		for_each_ras_error(err_node, err_data) {
1156 			err_info = &err_node->err_info;
1157 			mcm_info = &err_info->mcm_info;
1158 			if (err_info->ue_count) {
1159 				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1160 					      "%lld new uncorrectable hardware errors detected in %s block\n",
1161 					      mcm_info->socket_id,
1162 					      mcm_info->die_id,
1163 					      err_info->ue_count,
1164 					      blk_name);
1165 			}
1166 		}
1167 
1168 		for_each_ras_error(err_node, &ras_mgr->err_data) {
1169 			err_info = &err_node->err_info;
1170 			mcm_info = &err_info->mcm_info;
1171 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1172 				      "%lld uncorrectable hardware errors detected in total in %s block\n",
1173 				      mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
1174 		}
1175 
1176 	} else {
1177 		if (is_de) {
1178 			for_each_ras_error(err_node, err_data) {
1179 				err_info = &err_node->err_info;
1180 				mcm_info = &err_info->mcm_info;
1181 				if (err_info->de_count) {
1182 					RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1183 						      "%lld new deferred hardware errors detected in %s block\n",
1184 						      mcm_info->socket_id,
1185 						      mcm_info->die_id,
1186 						      err_info->de_count,
1187 						      blk_name);
1188 				}
1189 			}
1190 
1191 			for_each_ras_error(err_node, &ras_mgr->err_data) {
1192 				err_info = &err_node->err_info;
1193 				mcm_info = &err_info->mcm_info;
1194 				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1195 					      "%lld deferred hardware errors detected in total in %s block\n",
1196 					      mcm_info->socket_id, mcm_info->die_id,
1197 					      err_info->de_count, blk_name);
1198 			}
1199 		} else {
1200 			if (adev->debug_disable_ce_logs)
1201 				return;
1202 
1203 			for_each_ras_error(err_node, err_data) {
1204 				err_info = &err_node->err_info;
1205 				mcm_info = &err_info->mcm_info;
1206 				if (err_info->ce_count) {
1207 					RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1208 						      "%lld new correctable hardware errors detected in %s block\n",
1209 						      mcm_info->socket_id,
1210 						      mcm_info->die_id,
1211 						      err_info->ce_count,
1212 						      blk_name);
1213 				}
1214 			}
1215 
1216 			for_each_ras_error(err_node, &ras_mgr->err_data) {
1217 				err_info = &err_node->err_info;
1218 				mcm_info = &err_info->mcm_info;
1219 				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1220 					      "%lld correctable hardware errors detected in total in %s block\n",
1221 					      mcm_info->socket_id, mcm_info->die_id,
1222 					      err_info->ce_count, blk_name);
1223 			}
1224 		}
1225 	}
1226 }
1227 
1228 static inline bool err_data_has_source_info(struct ras_err_data *data)
1229 {
1230 	return !list_empty(&data->err_node_list);
1231 }
1232 
1233 static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
1234 					     struct ras_query_if *query_if,
1235 					     struct ras_err_data *err_data,
1236 					     struct ras_query_context *qctx)
1237 {
1238 	struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
1239 	const char *blk_name = get_ras_block_str(&query_if->head);
1240 	u64 event_id = qctx->evid.event_id;
1241 
1242 	if (err_data->ce_count) {
1243 		if (err_data_has_source_info(err_data)) {
1244 			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1245 							  blk_name, false, false);
1246 		} else if (!adev->aid_mask &&
1247 			   adev->smuio.funcs &&
1248 			   adev->smuio.funcs->get_socket_id &&
1249 			   adev->smuio.funcs->get_die_id) {
1250 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1251 				      "%ld correctable hardware errors "
1252 				      "detected in %s block\n",
1253 				      adev->smuio.funcs->get_socket_id(adev),
1254 				      adev->smuio.funcs->get_die_id(adev),
1255 				      ras_mgr->err_data.ce_count,
1256 				      blk_name);
1257 		} else {
1258 			RAS_EVENT_LOG(adev, event_id, "%ld correctable hardware errors "
1259 				      "detected in %s block\n",
1260 				      ras_mgr->err_data.ce_count,
1261 				      blk_name);
1262 		}
1263 	}
1264 
1265 	if (err_data->ue_count) {
1266 		if (err_data_has_source_info(err_data)) {
1267 			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1268 							  blk_name, true, false);
1269 		} else if (!adev->aid_mask &&
1270 			   adev->smuio.funcs &&
1271 			   adev->smuio.funcs->get_socket_id &&
1272 			   adev->smuio.funcs->get_die_id) {
1273 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1274 				      "%ld uncorrectable hardware errors "
1275 				      "detected in %s block\n",
1276 				      adev->smuio.funcs->get_socket_id(adev),
1277 				      adev->smuio.funcs->get_die_id(adev),
1278 				      ras_mgr->err_data.ue_count,
1279 				      blk_name);
1280 		} else {
1281 			RAS_EVENT_LOG(adev, event_id, "%ld uncorrectable hardware errors "
1282 				      "detected in %s block\n",
1283 				      ras_mgr->err_data.ue_count,
1284 				      blk_name);
1285 		}
1286 	}
1287 
1288 	if (err_data->de_count) {
1289 		if (err_data_has_source_info(err_data)) {
1290 			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1291 							  blk_name, false, true);
1292 		} else if (!adev->aid_mask &&
1293 			   adev->smuio.funcs &&
1294 			   adev->smuio.funcs->get_socket_id &&
1295 			   adev->smuio.funcs->get_die_id) {
1296 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1297 				      "%ld deferred hardware errors "
1298 				      "detected in %s block\n",
1299 				      adev->smuio.funcs->get_socket_id(adev),
1300 				      adev->smuio.funcs->get_die_id(adev),
1301 				      ras_mgr->err_data.de_count,
1302 				      blk_name);
1303 		} else {
1304 			RAS_EVENT_LOG(adev, event_id, "%ld deferred hardware errors "
1305 				      "detected in %s block\n",
1306 				      ras_mgr->err_data.de_count,
1307 				      blk_name);
1308 		}
1309 	}
1310 }
1311 
1312 static void amdgpu_ras_virt_error_generate_report(struct amdgpu_device *adev,
1313 						  struct ras_query_if *query_if,
1314 						  struct ras_err_data *err_data,
1315 						  struct ras_query_context *qctx)
1316 {
1317 	unsigned long new_ue, new_ce, new_de;
1318 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &query_if->head);
1319 	const char *blk_name = get_ras_block_str(&query_if->head);
1320 	u64 event_id = qctx->evid.event_id;
1321 
1322 	new_ce = err_data->ce_count - obj->err_data.ce_count;
1323 	new_ue = err_data->ue_count - obj->err_data.ue_count;
1324 	new_de = err_data->de_count - obj->err_data.de_count;
1325 
1326 	if (new_ce) {
1327 		RAS_EVENT_LOG(adev, event_id, "%lu correctable hardware errors "
1328 			      "detected in %s block\n",
1329 			      new_ce,
1330 			      blk_name);
1331 	}
1332 
1333 	if (new_ue) {
1334 		RAS_EVENT_LOG(adev, event_id, "%lu uncorrectable hardware errors "
1335 			      "detected in %s block\n",
1336 			      new_ue,
1337 			      blk_name);
1338 	}
1339 
1340 	if (new_de) {
1341 		RAS_EVENT_LOG(adev, event_id, "%lu deferred hardware errors "
1342 			      "detected in %s block\n",
1343 			      new_de,
1344 			      blk_name);
1345 	}
1346 }
1347 
1348 static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
1349 {
1350 	struct ras_err_node *err_node;
1351 	struct ras_err_info *err_info;
1352 
1353 	if (err_data_has_source_info(err_data)) {
1354 		for_each_ras_error(err_node, err_data) {
1355 			err_info = &err_node->err_info;
1356 			amdgpu_ras_error_statistic_de_count(&obj->err_data,
1357 					&err_info->mcm_info, err_info->de_count);
1358 			amdgpu_ras_error_statistic_ce_count(&obj->err_data,
1359 					&err_info->mcm_info, err_info->ce_count);
1360 			amdgpu_ras_error_statistic_ue_count(&obj->err_data,
1361 					&err_info->mcm_info, err_info->ue_count);
1362 		}
1363 	} else {
1364 		/* for the legacy asic path which doesn't have error source info */
1365 		obj->err_data.ue_count += err_data->ue_count;
1366 		obj->err_data.ce_count += err_data->ce_count;
1367 		obj->err_data.de_count += err_data->de_count;
1368 	}
1369 }
1370 
1371 static void amdgpu_ras_mgr_virt_error_data_statistics_update(struct ras_manager *obj,
1372 							     struct ras_err_data *err_data)
1373 {
1374 	/* Host reports absolute counts */
1375 	obj->err_data.ue_count = err_data->ue_count;
1376 	obj->err_data.ce_count = err_data->ce_count;
1377 	obj->err_data.de_count = err_data->de_count;
1378 }
1379 
1380 static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
1381 {
1382 	struct ras_common_if head;
1383 
1384 	memset(&head, 0, sizeof(head));
1385 	head.block = blk;
1386 
1387 	return amdgpu_ras_find_obj(adev, &head);
1388 }
1389 
1390 int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
1391 			const struct aca_info *aca_info, void *data)
1392 {
1393 	struct ras_manager *obj;
1394 
1395 	/* in resume phase, no need to create aca fs node */
1396 	if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
1397 		return 0;
1398 
1399 	obj = get_ras_manager(adev, blk);
1400 	if (!obj)
1401 		return -EINVAL;
1402 
1403 	return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
1404 }
1405 
1406 int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
1407 {
1408 	struct ras_manager *obj;
1409 
1410 	obj = get_ras_manager(adev, blk);
1411 	if (!obj)
1412 		return -EINVAL;
1413 
1414 	amdgpu_aca_remove_handle(&obj->aca_handle);
1415 
1416 	return 0;
1417 }
1418 
1419 static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
1420 					 enum aca_error_type type, struct ras_err_data *err_data,
1421 					 struct ras_query_context *qctx)
1422 {
1423 	struct ras_manager *obj;
1424 
1425 	obj = get_ras_manager(adev, blk);
1426 	if (!obj)
1427 		return -EINVAL;
1428 
1429 	return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data, qctx);
1430 }
1431 
1432 ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
1433 				  struct aca_handle *handle, char *buf, void *data)
1434 {
1435 	struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
1436 	struct ras_query_if info = {
1437 		.head = obj->head,
1438 	};
1439 
1440 	if (!amdgpu_ras_get_error_query_ready(obj->adev))
1441 		return sysfs_emit(buf, "Query currently inaccessible\n");
1442 
1443 	if (amdgpu_ras_query_error_status(obj->adev, &info))
1444 		return -EINVAL;
1445 
1446 	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
1447 			  "ce", info.ce_count, "de", info.de_count);
1448 }
1449 
1450 static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
1451 						struct ras_query_if *info,
1452 						struct ras_err_data *err_data,
1453 						struct ras_query_context *qctx,
1454 						unsigned int error_query_mode)
1455 {
1456 	enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
1457 	struct amdgpu_ras_block_object *block_obj = NULL;
1458 	int ret;
1459 
1460 	if (blk == AMDGPU_RAS_BLOCK_COUNT)
1461 		return -EINVAL;
1462 
1463 	if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
1464 		return -EINVAL;
1465 
1466 	if (error_query_mode == AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
1467 		return amdgpu_virt_req_ras_err_count(adev, blk, err_data);
1468 	} else if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
1469 		if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
1470 			amdgpu_ras_get_ecc_info(adev, err_data);
1471 		} else {
1472 			block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
1473 			if (!block_obj || !block_obj->hw_ops) {
1474 				dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1475 					     get_ras_block_str(&info->head));
1476 				return -EINVAL;
1477 			}
1478 
1479 			if (block_obj->hw_ops->query_ras_error_count)
1480 				block_obj->hw_ops->query_ras_error_count(adev, err_data);
1481 
1482 			if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
1483 			    (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
1484 			    (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
1485 				if (block_obj->hw_ops->query_ras_error_status)
1486 					block_obj->hw_ops->query_ras_error_status(adev);
1487 			}
1488 		}
1489 	} else {
1490 		if (amdgpu_aca_is_enabled(adev)) {
1491 			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data, qctx);
1492 			if (ret)
1493 				return ret;
1494 
1495 			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data, qctx);
1496 			if (ret)
1497 				return ret;
1498 
1499 			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data, qctx);
1500 			if (ret)
1501 				return ret;
1502 		} else {
1503 			/* FIXME: add code to check return value later */
1504 			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx);
1505 			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data, qctx);
1506 		}
1507 	}
1508 
1509 	return 0;
1510 }
1511 
1512 /* query/inject/cure begin */
1513 static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev,
1514 						    struct ras_query_if *info,
1515 						    enum ras_event_type type)
1516 {
1517 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1518 	struct ras_err_data err_data;
1519 	struct ras_query_context qctx;
1520 	unsigned int error_query_mode;
1521 	int ret;
1522 
1523 	if (!obj)
1524 		return -EINVAL;
1525 
1526 	ret = amdgpu_ras_error_data_init(&err_data);
1527 	if (ret)
1528 		return ret;
1529 
1530 	if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
1531 		return -EINVAL;
1532 
1533 	memset(&qctx, 0, sizeof(qctx));
1534 	qctx.evid.type = type;
1535 	qctx.evid.event_id = amdgpu_ras_acquire_event_id(adev, type);
1536 
1537 	if (!down_read_trylock(&adev->reset_domain->sem)) {
1538 		ret = -EIO;
1539 		goto out_fini_err_data;
1540 	}
1541 
1542 	ret = amdgpu_ras_query_error_status_helper(adev, info,
1543 						   &err_data,
1544 						   &qctx,
1545 						   error_query_mode);
1546 	up_read(&adev->reset_domain->sem);
1547 	if (ret)
1548 		goto out_fini_err_data;
1549 
1550 	if (error_query_mode != AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
1551 		amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
1552 		amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
1553 	} else {
1554 		/* Host provides absolute error counts. First generate the report
1555 		 * using the previous VF internal count against new host count.
1556 		 * Then update the VF internal count.
1557 		 */
1558 		amdgpu_ras_virt_error_generate_report(adev, info, &err_data, &qctx);
1559 		amdgpu_ras_mgr_virt_error_data_statistics_update(obj, &err_data);
1560 	}
1561 
1562 	info->ue_count = obj->err_data.ue_count;
1563 	info->ce_count = obj->err_data.ce_count;
1564 	info->de_count = obj->err_data.de_count;
1565 
1566 out_fini_err_data:
1567 	amdgpu_ras_error_data_fini(&err_data);
1568 
1569 	return ret;
1570 }
1571 
1572 static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev)
1573 {
1574 	struct ras_cmd_dev_handle req = {0};
1575 	int ret;
1576 
1577 	ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__CLEAR_BAD_PAGE_INFO,
1578 				&req, sizeof(req), NULL, 0);
1579 	if (ret) {
1580 		dev_err(adev->dev, "Failed to clear bad pages info, ret: %d\n", ret);
1581 		return ret;
1582 	}
1583 
1584 	return 0;
1585 }
1586 
1587 static int amdgpu_uniras_query_block_ecc(struct amdgpu_device *adev,
1588 			struct ras_query_if *info)
1589 {
1590 	struct ras_cmd_block_ecc_info_req req = {0};
1591 	struct ras_cmd_block_ecc_info_rsp rsp = {0};
1592 	int ret;
1593 
1594 	if (!info)
1595 		return -EINVAL;
1596 
1597 	req.block_id = info->head.block;
1598 	req.subblock_id = info->head.sub_block_index;
1599 
1600 	ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BLOCK_ECC_STATUS,
1601 				&req, sizeof(req), &rsp, sizeof(rsp));
1602 	if (!ret) {
1603 		info->ce_count = rsp.ce_count;
1604 		info->ue_count = rsp.ue_count;
1605 		info->de_count = rsp.de_count;
1606 	}
1607 
1608 	return ret;
1609 }
1610 
1611 int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
1612 {
1613 	if (amdgpu_uniras_enabled(adev))
1614 		return amdgpu_uniras_query_block_ecc(adev, info);
1615 	else
1616 		return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
1617 }
1618 
1619 int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
1620 		enum amdgpu_ras_block block)
1621 {
1622 	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1623 	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
1624 	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
1625 
1626 	if (!block_obj || !block_obj->hw_ops) {
1627 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1628 				ras_block_str(block));
1629 		return -EOPNOTSUPP;
1630 	}
1631 
1632 	if (!amdgpu_ras_is_supported(adev, block) ||
1633 	    !amdgpu_ras_get_aca_debug_mode(adev))
1634 		return -EOPNOTSUPP;
1635 
1636 	if (amdgpu_sriov_vf(adev))
1637 		return -EOPNOTSUPP;
1638 
1639 	/* skip ras error reset in gpu reset */
1640 	if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) &&
1641 	    ((smu_funcs && smu_funcs->set_debug_mode) ||
1642 	     (mca_funcs && mca_funcs->mca_set_debug_mode)))
1643 		return -EOPNOTSUPP;
1644 
1645 	if (block_obj->hw_ops->reset_ras_error_count)
1646 		block_obj->hw_ops->reset_ras_error_count(adev);
1647 
1648 	return 0;
1649 }
1650 
1651 int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
1652 		enum amdgpu_ras_block block)
1653 {
1654 	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1655 
1656 	if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
1657 		return 0;
1658 
1659 	if ((block == AMDGPU_RAS_BLOCK__GFX) ||
1660 	    (block == AMDGPU_RAS_BLOCK__MMHUB)) {
1661 		if (block_obj->hw_ops->reset_ras_error_status)
1662 			block_obj->hw_ops->reset_ras_error_status(adev);
1663 	}
1664 
1665 	return 0;
1666 }
1667 
1668 static int amdgpu_uniras_error_inject(struct amdgpu_device *adev,
1669 		struct ras_inject_if *info)
1670 {
1671 	struct ras_cmd_inject_error_req inject_req;
1672 	struct ras_cmd_inject_error_rsp rsp;
1673 
1674 	if (!info)
1675 		return -EINVAL;
1676 
1677 	memset(&inject_req, 0, sizeof(inject_req));
1678 	inject_req.block_id = info->head.block;
1679 	inject_req.subblock_id = info->head.sub_block_index;
1680 	inject_req.address = info->address;
1681 	inject_req.error_type = info->head.type;
1682 	inject_req.instance_mask = info->instance_mask;
1683 	inject_req.method = info->value;
1684 
1685 	return amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__INJECT_ERROR,
1686 			&inject_req, sizeof(inject_req), &rsp, sizeof(rsp));
1687 }
1688 
1689 /* wrapper of psp_ras_trigger_error */
1690 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
1691 		struct ras_inject_if *info)
1692 {
1693 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1694 	struct ta_ras_trigger_error_input block_info = {
1695 		.block_id =  amdgpu_ras_block_to_ta(info->head.block),
1696 		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
1697 		.sub_block_index = info->head.sub_block_index,
1698 		.address = info->address,
1699 		.value = info->value,
1700 	};
1701 	int ret = -EINVAL;
1702 	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
1703 							info->head.block,
1704 							info->head.sub_block_index);
1705 
1706 	if (amdgpu_uniras_enabled(adev))
1707 		return amdgpu_uniras_error_inject(adev, info);
1708 
1709 	/* inject on guest isn't allowed, return success directly */
1710 	if (amdgpu_sriov_vf(adev))
1711 		return 0;
1712 
1713 	if (!obj)
1714 		return -EINVAL;
1715 
1716 	if (!block_obj || !block_obj->hw_ops)	{
1717 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1718 			     get_ras_block_str(&info->head));
1719 		return -EINVAL;
1720 	}
1721 
1722 	/* Calculate XGMI relative offset */
1723 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
1724 	    info->head.block != AMDGPU_RAS_BLOCK__GFX) {
1725 		block_info.address =
1726 			amdgpu_xgmi_get_relative_phy_addr(adev,
1727 							  block_info.address);
1728 	}
1729 
1730 	if (block_obj->hw_ops->ras_error_inject) {
1731 		if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
1732 			ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
1733 		else /* Special ras_error_inject is defined (e.g., xgmi) */
1734 			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
1735 						info->instance_mask);
1736 	} else {
1737 		/* default path */
1738 		ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
1739 	}
1740 
1741 	if (ret)
1742 		dev_err(adev->dev, "ras inject %s failed %d\n",
1743 			get_ras_block_str(&info->head), ret);
1744 
1745 	return ret;
1746 }
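
/*
 * Example (an illustrative sketch, not used elsewhere in this file): a
 * hypothetical in-kernel caller requesting injection of a correctable
 * error into the first UMC instance; the address/value pair is a
 * placeholder that depends on the target block:
 *
 *	struct ras_inject_if inject = {
 *		.head = {
 *			.block = AMDGPU_RAS_BLOCK__UMC,
 *			.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
 *			.sub_block_index = 0,
 *		},
 *		.address = 0x0,
 *		.value = 0x0,
 *		.instance_mask = 0x1,
 *	};
 *
 *	if (amdgpu_ras_error_inject(adev, &inject))
 *		dev_warn(adev->dev, "RAS inject request failed\n");
 */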
1747 
1748 /**
1749  * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
1750  * @adev: pointer to AMD GPU device
1751  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1752  * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
1753  * @query_info: pointer to ras_query_if
1754  *
1755  * Return 0 on query success or when there is nothing to do, otherwise
1756  * return an error on failure
1757  */
1758 static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
1759 					       unsigned long *ce_count,
1760 					       unsigned long *ue_count,
1761 					       struct ras_query_if *query_info)
1762 {
1763 	int ret;
1764 
1765 	if (!query_info)
1766 		/* do nothing if query_info is not specified */
1767 		return 0;
1768 
1769 	ret = amdgpu_ras_query_error_status(adev, query_info);
1770 	if (ret)
1771 		return ret;
1772 
1773 	*ce_count += query_info->ce_count;
1774 	*ue_count += query_info->ue_count;
1775 
1776 	/* some hardware/IPs support read-to-clear, so there is
1777 	 * no need to explicitly reset the error status after the query call */
1778 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
1779 	    amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
1780 		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
1781 			dev_warn(adev->dev,
1782 				 "Failed to reset error counter and error status\n");
1783 	}
1784 
1785 	return 0;
1786 }
1787 
1788 /**
1789  * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
1790  * @adev: pointer to AMD GPU device
1791  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1792  * @ue_count: pointer to an integer to be set to the count of uncorrectable
1793  * errors.
1794  * @query_info: pointer to ras_query_if if the query request is only for a
1795  * specific ip block; if info is NULL, then the query request is for
1796  * all the ip blocks that support querying ras error counters/status
1797  *
1798  * If @ce_count or @ue_count is set, count and return the corresponding
1799  * error counts in those integer pointers. Return 0 if the device
1800  * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
1801  */
1802 int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
1803 				 unsigned long *ce_count,
1804 				 unsigned long *ue_count,
1805 				 struct ras_query_if *query_info)
1806 {
1807 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1808 	struct ras_manager *obj;
1809 	unsigned long ce, ue;
1810 	int ret = 0;
1811 
1812 	if (!adev->ras_enabled || !con)
1813 		return -EOPNOTSUPP;
1814 
1815 	/* Don't count since the caller has nowhere to report the counts. */
1817 	if (!ce_count && !ue_count)
1818 		return 0;
1819 
1820 	ce = 0;
1821 	ue = 0;
1822 	if (!query_info) {
1823 		/* query all the ip blocks that support ras query interface */
1824 		list_for_each_entry(obj, &con->head, node) {
1825 			struct ras_query_if info = {
1826 				.head = obj->head,
1827 			};
1828 
1829 			ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
1830 		}
1831 	} else {
1832 		/* query specific ip block */
1833 		ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
1834 	}
1835 
1836 	if (ret)
1837 		return ret;
1838 
1839 	if (ce_count)
1840 		*ce_count = ce;
1841 
1842 	if (ue_count)
1843 		*ue_count = ue;
1844 
1845 	return 0;
1846 }
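
/*
 * Example (an illustrative sketch): passing a NULL query_info totals the
 * errors across every ip block that supports the query interface:
 *
 *	unsigned long ce = 0, ue = 0;
 *
 *	if (!amdgpu_ras_query_error_count(adev, &ce, &ue, NULL))
 *		dev_info(adev->dev, "ce: %lu, ue: %lu\n", ce, ue);
 */
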
1847 /* query/inject/cure end */
1848 
1849 
1850 /* sysfs begin */
1851 
1852 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1853 		struct ras_badpage *bps, uint32_t count, uint32_t start);
1854 static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev,
1855 		struct ras_badpage *bps, uint32_t count, uint32_t start);
1856 
1857 static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
1858 {
1859 	switch (flags) {
1860 	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
1861 		return "R";
1862 	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
1863 		return "P";
1864 	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
1865 	default:
1866 		return "F";
1867 	}
1868 }
1869 
1870 /**
1871  * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
1872  *
1873  * It allows the user to read the bad pages of vram on the gpu through
1874  * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
1875  *
1876  * It outputs multiple lines, and each line describes one gpu page.
1877  *
1878  * The format of one line is as follows,
1879  * gpu pfn : gpu page size : flags
1880  *
1881  * gpu pfn and gpu page size are printed in hex format.
1882  * flags can be one of the characters below,
1883  *
1884  * R: reserved, this gpu page is reserved and cannot be used.
1885  *
1886  * P: pending for reserve, this gpu page is marked as bad and will be
1887  * reserved in the next window of page_reserve.
1888  *
1889  * F: unable to reserve. This gpu page cannot be reserved for some reason.
1890  *
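 * Usage:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
 *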
1891  * Examples:
1892  *
1893  * .. code-block:: bash
1894  *
1895  *	0x00000001 : 0x00001000 : R
1896  *	0x00000002 : 0x00001000 : P
1897  *
1898  */
1899 
1900 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
1901 		struct kobject *kobj, const struct bin_attribute *attr,
1902 		char *buf, loff_t ppos, size_t count)
1903 {
1904 	struct amdgpu_ras *con =
1905 		container_of(attr, struct amdgpu_ras, badpages_attr);
1906 	struct amdgpu_device *adev = con->adev;
1907 	const unsigned int element_size =
1908 		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
1909 	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
1910 	unsigned int end = div64_ul(ppos + count - 1, element_size);
1911 	ssize_t s = 0;
1912 	struct ras_badpage *bps = NULL;
1913 	int bps_count = 0, i, status;
1914 	uint64_t address;
1915 
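	/*
	 * Each record is emitted as one fixed-width line of element_size
	 * bytes, so the file offset and requested byte count map linearly
	 * onto the [start, end) range of record indices computed above.
	 */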
1916 	memset(buf, 0, count);
1917 
1918 	bps_count = end - start;
1919 	bps = kmalloc_objs(*bps, bps_count);
1920 	if (!bps)
1921 		return 0;
1922 
1923 	memset(bps, 0, sizeof(*bps) * bps_count);
1924 
1925 	if (amdgpu_uniras_enabled(adev))
1926 		bps_count = amdgpu_uniras_badpages_read(adev, bps, bps_count, start);
1927 	else
1928 		bps_count = amdgpu_ras_badpages_read(adev, bps, bps_count, start);
1929 
1930 	if (bps_count <= 0) {
1931 		kfree(bps);
1932 		return 0;
1933 	}
1934 
1935 	for (i = 0; i < bps_count; i++) {
1936 		address = ((uint64_t)bps[i].bp) << AMDGPU_GPU_PAGE_SHIFT;
1937 
1938 		bps[i].size = AMDGPU_GPU_PAGE_SIZE;
1939 
1940 		status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
1941 					address);
1942 		if (status == -EBUSY)
1943 			bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
1944 		else if (status == -ENOENT)
1945 			bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
1946 		else
1947 			bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED;
1948 
1949 		if ((bps[i].flags != AMDGPU_RAS_RETIRE_PAGE_RESERVED) &&
1950 		    amdgpu_ras_check_critical_address(adev, address))
1951 			bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED;
1952 
1953 		s += scnprintf(&buf[s], element_size + 1,
1954 				"0x%08x : 0x%08x : %1s\n",
1955 				bps[i].bp,
1956 				bps[i].size,
1957 				amdgpu_ras_badpage_flags_str(bps[i].flags));
1958 	}
1959 
1960 	kfree(bps);
1961 
1962 	return s;
1963 }
1964 
1965 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
1966 		struct device_attribute *attr, char *buf)
1967 {
1968 	struct amdgpu_ras *con =
1969 		container_of(attr, struct amdgpu_ras, features_attr);
1970 
1971 	return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
1972 }
1973 
1974 static bool amdgpu_ras_get_version_info(struct amdgpu_device *adev, u32 *major,
1975 			u32 *minor, u32 *rev)
1976 {
1977 	int i;
1978 
1979 	if (!adev || !major || !minor || !rev || !amdgpu_uniras_enabled(adev))
1980 		return false;
1981 
1982 	for (i = 0; i < adev->num_ip_blocks; i++) {
1983 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_RAS) {
1984 			*major = adev->ip_blocks[i].version->major;
1985 			*minor = adev->ip_blocks[i].version->minor;
1986 			*rev = adev->ip_blocks[i].version->rev;
1987 			return true;
1988 		}
1989 	}
1990 
1991 	return false;
1992 }
1993 
1994 static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
1995 		struct device_attribute *attr, char *buf)
1996 {
1997 	struct amdgpu_ras *con =
1998 		container_of(attr, struct amdgpu_ras, version_attr);
1999 	u32 major, minor, rev;
2000 	ssize_t size = 0;
2001 
2002 	size += sysfs_emit_at(buf, size, "table version: 0x%x\n",
2003 			con->eeprom_control.tbl_hdr.version);
2004 
2005 	if (amdgpu_ras_get_version_info(con->adev, &major, &minor, &rev))
2006 		size += sysfs_emit_at(buf, size, "ras version: %u.%u.%u\n",
2007 			major, minor, rev);
2008 
2009 	return size;
2010 }
2011 
2012 static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
2013 		struct device_attribute *attr, char *buf)
2014 {
2015 	struct amdgpu_ras *con =
2016 		container_of(attr, struct amdgpu_ras, schema_attr);
2017 	return sysfs_emit(buf, "schema: 0x%x\n", con->schema);
2018 }
2019 
2020 static struct {
2021 	enum ras_event_type type;
2022 	const char *name;
2023 } dump_event[] = {
2024 	{RAS_EVENT_TYPE_FATAL, "Fatal Error"},
2025 	{RAS_EVENT_TYPE_POISON_CREATION, "Poison Creation"},
2026 	{RAS_EVENT_TYPE_POISON_CONSUMPTION, "Poison Consumption"},
2027 };
2028 
2029 static ssize_t amdgpu_ras_sysfs_event_state_show(struct device *dev,
2030 						 struct device_attribute *attr, char *buf)
2031 {
2032 	struct amdgpu_ras *con =
2033 		container_of(attr, struct amdgpu_ras, event_state_attr);
2034 	struct ras_event_manager *event_mgr = con->event_mgr;
2035 	struct ras_event_state *event_state;
2036 	int i, size = 0;
2037 
2038 	if (!event_mgr)
2039 		return -EINVAL;
2040 
2041 	size += sysfs_emit_at(buf, size, "current seqno: %llu\n", atomic64_read(&event_mgr->seqno));
2042 	for (i = 0; i < ARRAY_SIZE(dump_event); i++) {
2043 		event_state = &event_mgr->event_state[dump_event[i].type];
2044 		size += sysfs_emit_at(buf, size, "%s: count:%llu, last_seqno:%llu\n",
2045 				      dump_event[i].name,
2046 				      atomic64_read(&event_state->count),
2047 				      event_state->last_seqno);
2048 	}
2049 
2050 	return (ssize_t)size;
2051 }
2052 
2053 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
2054 {
2055 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2056 
2057 	if (adev->dev->kobj.sd)
2058 		sysfs_remove_file_from_group(&adev->dev->kobj,
2059 				&con->badpages_attr.attr,
2060 				RAS_FS_NAME);
2061 }
2062 
2063 static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev)
2064 {
2065 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2066 	struct attribute *attrs[] = {
2067 		&con->features_attr.attr,
2068 		&con->version_attr.attr,
2069 		&con->schema_attr.attr,
2070 		&con->event_state_attr.attr,
2071 		NULL
2072 	};
2073 	struct attribute_group group = {
2074 		.name = RAS_FS_NAME,
2075 		.attrs = attrs,
2076 	};
2077 
2078 	if (adev->dev->kobj.sd)
2079 		sysfs_remove_group(&adev->dev->kobj, &group);
2080 
2081 	return 0;
2082 }
2083 
2084 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
2085 		struct ras_common_if *head)
2086 {
2087 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2088 
2089 	if (amdgpu_aca_is_enabled(adev))
2090 		return 0;
2091 
2092 	if (!obj || obj->attr_inuse)
2093 		return -EINVAL;
2094 
2095 	if (amdgpu_sriov_vf(adev) && !amdgpu_virt_ras_telemetry_block_en(adev, head->block))
2096 		return 0;
2097 
2098 	get_obj(obj);
2099 
2100 	snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
2101 		"%s_err_count", head->name);
2102 
2103 	obj->sysfs_attr = (struct device_attribute){
2104 		.attr = {
2105 			.name = obj->fs_data.sysfs_name,
2106 			.mode = S_IRUGO,
2107 		},
2108 		.show = amdgpu_ras_sysfs_read,
2109 	};
2110 	sysfs_attr_init(&obj->sysfs_attr.attr);
2111 
2112 	if (sysfs_add_file_to_group(&adev->dev->kobj,
2113 				&obj->sysfs_attr.attr,
2114 				RAS_FS_NAME)) {
2115 		put_obj(obj);
2116 		return -EINVAL;
2117 	}
2118 
2119 	obj->attr_inuse = 1;
2120 
2121 	return 0;
2122 }
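
/*
 * For example, with head->name "umc" the per-block node created above is
 * visible to userspace as:
 *
 *	/sys/class/drm/card[0/1/2...]/device/ras/umc_err_count
 */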
2123 
2124 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
2125 		struct ras_common_if *head)
2126 {
2127 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2128 
2129 	if (amdgpu_aca_is_enabled(adev))
2130 		return 0;
2131 
2132 	if (!obj || !obj->attr_inuse)
2133 		return -EINVAL;
2134 
2135 	if (adev->dev->kobj.sd)
2136 		sysfs_remove_file_from_group(&adev->dev->kobj,
2137 				&obj->sysfs_attr.attr,
2138 				RAS_FS_NAME);
2139 	obj->attr_inuse = 0;
2140 	put_obj(obj);
2141 
2142 	return 0;
2143 }
2144 
2145 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
2146 {
2147 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2148 	struct ras_manager *obj, *tmp;
2149 
2150 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
2151 		amdgpu_ras_sysfs_remove(adev, &obj->head);
2152 	}
2153 
2154 	if (amdgpu_bad_page_threshold != 0)
2155 		amdgpu_ras_sysfs_remove_bad_page_node(adev);
2156 
2157 	amdgpu_ras_sysfs_remove_dev_attr_node(adev);
2158 
2159 	return 0;
2160 }
2161 /* sysfs end */
2162 
2163 /**
2164  * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
2165  *
2166  * Normally when there is an uncorrectable error, the driver will reset
2167  * the GPU to recover.  However, in the event of an unrecoverable error,
2168  * the driver provides an interface to reboot the system automatically
2169  * in that event.
2170  *
2171  * The following file in debugfs provides that interface:
2172  * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
2173  *
2174  * Usage:
2175  *
2176  * .. code-block:: bash
2177  *
2178  *	echo true > .../ras/auto_reboot
2179  *
2180  */
2181 /* debugfs begin */
2182 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
2183 {
2184 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2185 	struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
2186 	struct drm_minor  *minor = adev_to_drm(adev)->primary;
2187 	struct dentry     *dir;
2188 
2189 	dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
2190 	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
2191 			    &amdgpu_ras_debugfs_ctrl_ops);
2192 	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
2193 			    &amdgpu_ras_debugfs_eeprom_ops);
2194 	debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
2195 			   &con->bad_page_cnt_threshold);
2196 	debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
2197 	debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
2198 	debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
2199 	debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
2200 			    &amdgpu_ras_debugfs_eeprom_size_ops);
2201 	con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
2202 						       S_IRUGO, dir, adev,
2203 						       &amdgpu_ras_debugfs_eeprom_table_ops);
2204 	amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
2205 
2206 	/*
2207 	 * After an uncorrectable error happens, GPU recovery will usually
2208 	 * be scheduled. But due to a known problem where GPU recovery can
2209 	 * fail to bring the GPU back, the interface below gives the user a
2210 	 * direct way to reboot the system automatically when an
2211 	 * ERREVENT_ATHUB_INTERRUPT is generated. In that case the normal
2212 	 * GPU recovery routine will never be called.
2213 	 */
2214 	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
2215 
2216 	/*
2217 	 * The user can set this so that the hardware error count registers
2218 	 * of the RAS IPs are not cleaned up during ras recovery.
2219 	 */
2220 	debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
2221 			    &con->disable_ras_err_cnt_harvest);
2222 	return dir;
2223 }
2224 
2225 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
2226 				      struct ras_fs_if *head,
2227 				      struct dentry *dir)
2228 {
2229 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
2230 
2231 	if (!obj || !dir)
2232 		return;
2233 
2234 	get_obj(obj);
2235 
2236 	memcpy(obj->fs_data.debugfs_name,
2237 			head->debugfs_name,
2238 			sizeof(obj->fs_data.debugfs_name));
2239 
2240 	debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
2241 			    obj, &amdgpu_ras_debugfs_ops);
2242 }
2243 
2244 static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev)
2245 {
2246 	bool ret;
2247 
2248 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2249 	case IP_VERSION(13, 0, 6):
2250 	case IP_VERSION(13, 0, 12):
2251 	case IP_VERSION(13, 0, 14):
2252 		ret = true;
2253 		break;
2254 	default:
2255 		ret = false;
2256 		break;
2257 	}
2258 
2259 	return ret;
2260 }
2261 
2262 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
2263 {
2264 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2265 	struct dentry *dir;
2266 	struct ras_manager *obj;
2267 	struct ras_fs_if fs_info;
2268 
2269 	/*
2270 	 * this function won't be called in the resume path, so there is
2271 	 * no need to check the suspend and gpu reset status
2272 	 */
2273 	if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
2274 		return;
2275 
2276 	dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
2277 
2278 	list_for_each_entry(obj, &con->head, node) {
2279 		if (amdgpu_ras_is_supported(adev, obj->head.block) &&
2280 			(obj->attr_inuse == 1)) {
2281 			sprintf(fs_info.debugfs_name, "%s_err_inject",
2282 					get_ras_block_str(&obj->head));
2283 			fs_info.head = obj->head;
2284 			amdgpu_ras_debugfs_create(adev, &fs_info, dir);
2285 		}
2286 	}
2287 
2288 	if (amdgpu_ras_aca_is_supported(adev)) {
2289 		if (amdgpu_aca_is_enabled(adev))
2290 			amdgpu_aca_smu_debugfs_init(adev, dir);
2291 		else
2292 			amdgpu_mca_smu_debugfs_init(adev, dir);
2293 	}
2294 }
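
/*
 * With the above in place, a device whose gfx block supports RAS ends up
 * with a debugfs layout along these lines (a sketch; the exact set of
 * nodes depends on the supported blocks):
 *
 *	/sys/kernel/debug/dri/[0/1/2...]/ras/ras_ctrl
 *	/sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
 *	/sys/kernel/debug/dri/[0/1/2...]/ras/gfx_err_inject
 */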
2295 
2296 /* debugfs end */
2297 
2298 /* ras fs */
2299 static const BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
2300 		      amdgpu_ras_sysfs_badpages_read, NULL, 0);
2301 static DEVICE_ATTR(features, S_IRUGO,
2302 		amdgpu_ras_sysfs_features_read, NULL);
2303 static DEVICE_ATTR(version, 0444,
2304 		amdgpu_ras_sysfs_version_show, NULL);
2305 static DEVICE_ATTR(schema, 0444,
2306 		amdgpu_ras_sysfs_schema_show, NULL);
2307 static DEVICE_ATTR(event_state, 0444,
2308 		   amdgpu_ras_sysfs_event_state_show, NULL);
2309 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
2310 {
2311 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2312 	struct attribute_group group = {
2313 		.name = RAS_FS_NAME,
2314 	};
2315 	struct attribute *attrs[] = {
2316 		&con->features_attr.attr,
2317 		&con->version_attr.attr,
2318 		&con->schema_attr.attr,
2319 		&con->event_state_attr.attr,
2320 		NULL
2321 	};
2322 	const struct bin_attribute *bin_attrs[] = {
2323 		NULL,
2324 		NULL,
2325 	};
2326 	int r;
2327 
2328 	group.attrs = attrs;
2329 
2330 	/* add features entry */
2331 	con->features_attr = dev_attr_features;
2332 	sysfs_attr_init(attrs[0]);
2333 
2334 	/* add version entry */
2335 	con->version_attr = dev_attr_version;
2336 	sysfs_attr_init(attrs[1]);
2337 
2338 	/* add schema entry */
2339 	con->schema_attr = dev_attr_schema;
2340 	sysfs_attr_init(attrs[2]);
2341 
2342 	/* add event_state entry */
2343 	con->event_state_attr = dev_attr_event_state;
2344 	sysfs_attr_init(attrs[3]);
2345 
2346 	if (amdgpu_bad_page_threshold != 0) {
2347 		/* add bad_page_features entry */
2348 		con->badpages_attr = bin_attr_gpu_vram_bad_pages;
2349 		sysfs_bin_attr_init(&con->badpages_attr);
2350 		bin_attrs[0] = &con->badpages_attr;
2351 		group.bin_attrs = bin_attrs;
2352 	}
2353 
2354 	r = sysfs_create_group(&adev->dev->kobj, &group);
2355 	if (r)
2356 		dev_err(adev->dev, "Failed to create RAS sysfs group!");
2357 
2358 	return 0;
2359 }
2360 
2361 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
2362 {
2363 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2364 	struct ras_manager *con_obj, *ip_obj, *tmp;
2365 
2366 	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2367 		list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
2368 			ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
2369 			if (ip_obj)
2370 				put_obj(ip_obj);
2371 		}
2372 	}
2373 
2374 	amdgpu_ras_sysfs_remove_all(adev);
2375 	return 0;
2376 }
2377 /* ras fs end */
2378 
2379 /* ih begin */
2380 
2381 /* For the hardware that cannot enable bif ring for both ras_controller_irq
2382  * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
2383  * register to check whether the interrupt is triggered or not, and properly
2384  * ack the interrupt if it is there
2385  */
2386 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
2387 {
2388 	/* Fatal error events are handled on host side */
2389 	if (amdgpu_sriov_vf(adev))
2390 		return;
2391 	/*
2392 	 * If the current interrupt is caused by a non-fatal RAS error, skip
2393 	 * the check for fatal errors. For fatal errors, FED status of all devices
2394 	 * in XGMI hive gets set when the first device gets fatal error
2395 	 * interrupt. The error gets propagated to other devices as well, so
2396 	 * make sure to ack the interrupt regardless of FED status.
2397 	 */
2398 	if (!amdgpu_ras_get_fed_status(adev) &&
2399 	    amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY))
2400 		return;
2401 
2402 	if (amdgpu_uniras_enabled(adev)) {
2403 		amdgpu_ras_mgr_handle_fatal_interrupt(adev, NULL);
2404 		return;
2405 	}
2406 
2407 	if (adev->nbio.ras &&
2408 	    adev->nbio.ras->handle_ras_controller_intr_no_bifring)
2409 		adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
2410 
2411 	if (adev->nbio.ras &&
2412 	    adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
2413 		adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
2414 }
2415 
2416 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
2417 				struct amdgpu_iv_entry *entry)
2418 {
2419 	bool poison_stat = false;
2420 	struct amdgpu_device *adev = obj->adev;
2421 	struct amdgpu_ras_block_object *block_obj =
2422 		amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
2423 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2424 	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
2425 	u64 event_id;
2426 	int ret;
2427 
2428 	if (!block_obj || !con)
2429 		return;
2430 
2431 	ret = amdgpu_ras_mark_ras_event(adev, type);
2432 	if (ret)
2433 		return;
2434 
2435 	amdgpu_ras_set_err_poison(adev, block_obj->ras_comm.block);
2436 	/* both query_poison_status and handle_poison_consumption are optional,
2437 	 * but at least one of them should be implemented if we need a poison
2438 	 * consumption handler
2439 	 */
2440 	if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
2441 		poison_stat = block_obj->hw_ops->query_poison_status(adev);
2442 		if (!poison_stat) {
2443 			/* Not poison consumption interrupt, no need to handle it */
2444 			dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
2445 					block_obj->ras_comm.name);
2446 
2447 			return;
2448 		}
2449 	}
2450 
2451 	amdgpu_umc_poison_handler(adev, obj->head.block, 0);
2452 
2453 	if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
2454 		poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
2455 
2456 	/* gpu reset is the fallback for the failed and default cases.
2457 	 * For the RMA case, amdgpu_umc_poison_handler will handle the gpu reset.
2458 	 */
2459 	if (poison_stat && !amdgpu_ras_is_rma(adev)) {
2460 		event_id = amdgpu_ras_acquire_event_id(adev, type);
2461 		RAS_EVENT_LOG(adev, event_id,
2462 			      "GPU reset for %s RAS poison consumption is issued!\n",
2463 			      block_obj->ras_comm.name);
2464 		amdgpu_ras_reset_gpu(adev);
2465 	}
2466 
2467 	if (!poison_stat)
2468 		amdgpu_gfx_poison_consumption_handler(adev, entry);
2469 }
2470 
2471 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
2472 				struct amdgpu_iv_entry *entry)
2473 {
2474 	struct amdgpu_device *adev = obj->adev;
2475 	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
2476 	u64 event_id;
2477 	int ret;
2478 
2479 	ret = amdgpu_ras_mark_ras_event(adev, type);
2480 	if (ret)
2481 		return;
2482 
2483 	event_id = amdgpu_ras_acquire_event_id(adev, type);
2484 	RAS_EVENT_LOG(adev, event_id, "Poison is created\n");
2485 
2486 	if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) {
2487 		struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev);
2488 
2489 		atomic_inc(&con->page_retirement_req_cnt);
2490 		atomic_inc(&con->poison_creation_count);
2491 
2492 		wake_up(&con->page_retirement_wq);
2493 	}
2494 }
2495 
2496 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
2497 				struct amdgpu_iv_entry *entry)
2498 {
2499 	struct ras_ih_data *data = &obj->ih_data;
2500 	struct ras_err_data err_data;
2501 	int ret;
2502 
2503 	if (!data->cb)
2504 		return;
2505 
2506 	ret = amdgpu_ras_error_data_init(&err_data);
2507 	if (ret)
2508 		return;
2509 
2510 	/* Let the IP handle its data; maybe we need to get the output
2511 	 * from the callback to update the error type/count, etc.
2512 	 */
2513 	amdgpu_ras_set_fed(obj->adev, true);
2514 	ret = data->cb(obj->adev, &err_data, entry);
2515 	/* A ue will trigger an interrupt, and in that case
2516 	 * we need to do a reset to recover the whole system.
2517 	 * But leave the IP to do that recovery; here we just
2518 	 * dispatch the error.
2519 	 */
2520 	if (ret == AMDGPU_RAS_SUCCESS) {
2521 		/* these counts could be left as 0 if
2522 		 * some blocks do not count error number
2523 		 */
2524 		obj->err_data.ue_count += err_data.ue_count;
2525 		obj->err_data.ce_count += err_data.ce_count;
2526 		obj->err_data.de_count += err_data.de_count;
2527 	}
2528 
2529 	amdgpu_ras_error_data_fini(&err_data);
2530 }
2531 
2532 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
2533 {
2534 	struct ras_ih_data *data = &obj->ih_data;
2535 	struct amdgpu_iv_entry entry;
2536 
2537 	while (data->rptr != data->wptr) {
2538 		rmb();
2539 		memcpy(&entry, &data->ring[data->rptr],
2540 				data->element_size);
2541 
2542 		wmb();
2543 		data->rptr = (data->aligned_element_size +
2544 				data->rptr) % data->ring_size;
2545 
2546 		if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
2547 			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2548 				amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
2549 			else
2550 				amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
2551 		} else {
2552 			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2553 				amdgpu_ras_interrupt_umc_handler(obj, &entry);
2554 			else
2555 				dev_warn(obj->adev->dev,
2556 					"No RAS interrupt handler for non-UMC block with poison disabled.\n");
2557 		}
2558 	}
2559 }
2560 
2561 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
2562 {
2563 	struct ras_ih_data *data =
2564 		container_of(work, struct ras_ih_data, ih_work);
2565 	struct ras_manager *obj =
2566 		container_of(data, struct ras_manager, ih_data);
2567 
2568 	amdgpu_ras_interrupt_handler(obj);
2569 }
2570 
2571 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
2572 		struct ras_dispatch_if *info)
2573 {
2574 	struct ras_manager *obj;
2575 	struct ras_ih_data *data;
2576 
2577 	if (amdgpu_uniras_enabled(adev)) {
2578 		struct ras_ih_info ih_info;
2579 
2580 		memset(&ih_info, 0, sizeof(ih_info));
2581 		ih_info.block = info->head.block;
2582 		memcpy(&ih_info.iv_entry, info->entry, sizeof(struct amdgpu_iv_entry));
2583 
2584 		return amdgpu_ras_mgr_handle_controller_interrupt(adev, &ih_info);
2585 	}
2586 
2587 	obj = amdgpu_ras_find_obj(adev, &info->head);
2588 	if (!obj)
2589 		return -EINVAL;
2590 
2591 	data = &obj->ih_data;
2592 
2593 	if (data->inuse == 0)
2594 		return 0;
2595 
2596 	/* The ring may overflow here, overwriting entries not yet processed */
2597 	memcpy(&data->ring[data->wptr], info->entry,
2598 			data->element_size);
2599 
2600 	wmb();
2601 	data->wptr = (data->aligned_element_size +
2602 			data->wptr) % data->ring_size;
2603 
2604 	schedule_work(&data->ih_work);
2605 
2606 	return 0;
2607 }
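
/*
 * Example (an illustrative sketch): an IP interrupt handler typically
 * forwards its iv entry to the matching ras manager like this, where
 * ras_if is the block's registered ras_common_if:
 *
 *	struct ras_dispatch_if ih_data = {
 *		.head = *ras_if,
 *		.entry = entry,
 *	};
 *
 *	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
 */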
2608 
2609 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
2610 		struct ras_common_if *head)
2611 {
2612 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2613 	struct ras_ih_data *data;
2614 
2615 	if (!obj)
2616 		return -EINVAL;
2617 
2618 	data = &obj->ih_data;
2619 	if (data->inuse == 0)
2620 		return 0;
2621 
2622 	cancel_work_sync(&data->ih_work);
2623 
2624 	kfree(data->ring);
2625 	memset(data, 0, sizeof(*data));
2626 	put_obj(obj);
2627 
2628 	return 0;
2629 }
2630 
2631 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
2632 		struct ras_common_if *head)
2633 {
2634 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2635 	struct ras_ih_data *data;
2636 	struct amdgpu_ras_block_object *ras_obj;
2637 
2638 	if (!obj) {
2639 		/* in case we register the IH before enabling the ras feature */
2640 		obj = amdgpu_ras_create_obj(adev, head);
2641 		if (!obj)
2642 			return -EINVAL;
2643 	} else
2644 		get_obj(obj);
2645 
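	/*
	 * @head is expected to be the ras_comm member embedded in an
	 * amdgpu_ras_block_object; the container_of() below relies on that
	 * layout to locate the block's ras callback.
	 */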
2646 	ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
2647 
2648 	data = &obj->ih_data;
2649 	/* add the callback, etc. */
2650 	*data = (struct ras_ih_data) {
2651 		.inuse = 0,
2652 		.cb = ras_obj->ras_cb,
2653 		.element_size = sizeof(struct amdgpu_iv_entry),
2654 		.rptr = 0,
2655 		.wptr = 0,
2656 	};
2657 
2658 	INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
2659 
2660 	data->aligned_element_size = ALIGN(data->element_size, 8);
2661 	/* the ring can store 64 iv entries. */
2662 	data->ring_size = 64 * data->aligned_element_size;
2663 	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
2664 	if (!data->ring) {
2665 		put_obj(obj);
2666 		return -ENOMEM;
2667 	}
2668 
2669 	/* IH is ready */
2670 	data->inuse = 1;
2671 
2672 	return 0;
2673 }
2674 
2675 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
2676 {
2677 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2678 	struct ras_manager *obj, *tmp;
2679 
2680 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
2681 		amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
2682 	}
2683 
2684 	return 0;
2685 }
2686 /* ih end */
2687 
2688 /* traverse all IPs except NBIO to query the error counters */
2689 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev, enum ras_event_type type)
2690 {
2691 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2692 	struct ras_manager *obj;
2693 
2694 	if (!adev->ras_enabled || !con)
2695 		return;
2696 
2697 	list_for_each_entry(obj, &con->head, node) {
2698 		struct ras_query_if info = {
2699 			.head = obj->head,
2700 		};
2701 
2702 		/*
2703 		 * The PCIE_BIF IP has a separate isr for the ras controller
2704 		 * interrupt, and the specific ras counter query will be
2705 		 * done in that isr. So skip such blocks in the common
2706 		 * sync flood interrupt isr path.
2707 		 */
2708 		if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
2709 			continue;
2710 
2711 		/*
2712 		 * this is a workaround for aldebaran: skip sending the msg to
2713 		 * the smu to get the ecc_info table, because the smu currently
2714 		 * fails to handle that request. It should be removed once the
2715 		 * smu fixes ecc_info table handling.
2716 		 */
2717 		if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
2718 		    (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2719 		     IP_VERSION(13, 0, 2)))
2720 			continue;
2721 
2722 		amdgpu_ras_query_error_status_with_event(adev, &info, type);
2723 
2724 		if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2725 			    IP_VERSION(11, 0, 2) &&
2726 		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2727 			    IP_VERSION(11, 0, 4) &&
2728 		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2729 			    IP_VERSION(13, 0, 0)) {
2730 			if (amdgpu_ras_reset_error_status(adev, info.head.block))
2731 				dev_warn(adev->dev, "Failed to reset error counter and error status");
2732 		}
2733 	}
2734 }
2735 
2736 /* Parse RdRspStatus and WrRspStatus */
2737 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
2738 					  struct ras_query_if *info)
2739 {
2740 	struct amdgpu_ras_block_object *block_obj;
2741 	/*
2742 	 * Only two blocks need to query the read/write
2743 	 * RspStatus in the current state
2744 	 */
2745 	if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
2746 		(info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
2747 		return;
2748 
2749 	block_obj = amdgpu_ras_get_ras_block(adev,
2750 					info->head.block,
2751 					info->head.sub_block_index);
2752 
2753 	if (!block_obj || !block_obj->hw_ops) {
2754 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
2755 			     get_ras_block_str(&info->head));
2756 		return;
2757 	}
2758 
2759 	if (block_obj->hw_ops->query_ras_error_status)
2760 		block_obj->hw_ops->query_ras_error_status(adev);
2761 
2762 }
2763 
2764 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
2765 {
2766 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2767 	struct ras_manager *obj;
2768 
2769 	if (!adev->ras_enabled || !con)
2770 		return;
2771 
2772 	list_for_each_entry(obj, &con->head, node) {
2773 		struct ras_query_if info = {
2774 			.head = obj->head,
2775 		};
2776 
2777 		amdgpu_ras_error_status_query(adev, &info);
2778 	}
2779 }
2780 
2781 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
2782 		struct ras_badpage *bps, uint32_t count, uint32_t start)
2783 {
2784 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2785 	struct ras_err_handler_data *data;
2786 	int r = 0;
2787 	uint32_t i;
2788 
2789 	if (!con || !con->eh_data || !bps || !count)
2790 		return -EINVAL;
2791 
2792 	mutex_lock(&con->recovery_lock);
2793 	data = con->eh_data;
2794 	if (start < data->count) {
2795 		for (i = start; i < data->count; i++) {
2796 			if (!data->bps[i].ts)
2797 				continue;
2798 
2799 			/* U64_MAX is used to mark the record as invalid */
2800 			if (data->bps[i].retired_page == U64_MAX)
2801 				continue;
2802 
2803 			bps[r].bp = data->bps[i].retired_page;
2804 			r++;
2805 			if (r >= count)
2806 				break;
2807 		}
2808 	}
2809 	mutex_unlock(&con->recovery_lock);
2810 
2811 	return r;
2812 }
2813 
2814 static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev,
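/*
 * Bad-page records are fetched from the RAS manager in fixed-size groups
 * of RAS_CMD_MAX_BAD_PAGES_PER_GROUP records. Assuming a group size of
 * 256 (an illustrative value only), a request with start = 300 and
 * count = 100 maps to start_group = 1 and end_group = 2, and the first
 * record is taken at pos_in_group = 44 within group 1.
 */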
2815 		struct ras_badpage *bps, uint32_t count, uint32_t start)
2816 {
2817 	struct ras_cmd_bad_pages_info_req cmd_input;
2818 	struct ras_cmd_bad_pages_info_rsp *output;
2819 	uint32_t group, start_group, end_group;
2820 	uint32_t pos, pos_in_group;
2821 	int r = 0, i;
2822 
2823 	if (!bps || !count)
2824 		return -EINVAL;
2825 
2826 	output = kmalloc_obj(*output);
2827 	if (!output)
2828 		return -ENOMEM;
2829 
2830 	memset(&cmd_input, 0, sizeof(cmd_input));
2831 
2832 	start_group = start / RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
2833 	end_group = (start + count + RAS_CMD_MAX_BAD_PAGES_PER_GROUP - 1) /
2834 				RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
2835 
2836 	pos = start;
2837 	for (group = start_group; group < end_group; group++) {
2838 		memset(output, 0, sizeof(*output));
2839 		cmd_input.group_index = group;
2840 		if (amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BAD_PAGES,
2841 			&cmd_input, sizeof(cmd_input), output, sizeof(*output)))
2842 			goto out;
2843 
2844 		if (pos >= output->bp_total_cnt)
2845 			goto out;
2846 
2847 		pos_in_group = pos - group * RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
2848 		for (i = pos_in_group; i < output->bp_in_group; i++, pos++) {
2849 			if (!output->records[i].ts)
2850 				continue;
2851 
2852 			bps[r].bp = output->records[i].retired_page;
2853 			r++;
2854 			if (r >= count)
2855 				goto out;
2856 		}
2857 	}
2858 
2859 out:
2860 	kfree(output);
2861 	return r;
2862 }
2863 
2864 static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
2865 				   struct amdgpu_hive_info *hive, bool status)
2866 {
2867 	struct amdgpu_device *tmp_adev;
2868 
2869 	if (hive) {
2870 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
2871 			amdgpu_ras_set_fed(tmp_adev, status);
2872 	} else {
2873 		amdgpu_ras_set_fed(adev, status);
2874 	}
2875 }
2876 
2877 bool amdgpu_ras_in_recovery(struct amdgpu_device *adev)
2878 {
2879 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2880 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2881 	int hive_ras_recovery = 0;
2882 
2883 	if (hive) {
2884 		hive_ras_recovery = atomic_read(&hive->ras_recovery);
2885 		amdgpu_put_xgmi_hive(hive);
2886 	}
2887 
2888 	if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
2889 		return true;
2890 
2891 	return false;
2892 }
2893 
2894 static enum ras_event_type amdgpu_ras_get_fatal_error_event(struct amdgpu_device *adev)
2895 {
2896 	if (amdgpu_ras_intr_triggered())
2897 		return RAS_EVENT_TYPE_FATAL;
2898 	else
2899 		return RAS_EVENT_TYPE_POISON_CONSUMPTION;
2900 }
2901 
2902 static void amdgpu_ras_do_recovery(struct work_struct *work)
2903 {
2904 	struct amdgpu_ras *ras =
2905 		container_of(work, struct amdgpu_ras, recovery_work);
2906 	struct amdgpu_device *remote_adev = NULL;
2907 	struct amdgpu_device *adev = ras->adev;
2908 	struct list_head device_list, *device_list_handle =  NULL;
2909 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2910 	unsigned int error_query_mode;
2911 	enum ras_event_type type;
2912 
2913 	if (hive) {
2914 		atomic_set(&hive->ras_recovery, 1);
2915 
2916 		/* If any device which is part of the hive received RAS fatal
2917 		 * error interrupt, set fatal error status on all. This
2918 		 * condition will need a recovery, and flag will be cleared
2919 		 * as part of recovery.
2920 		 */
2921 		list_for_each_entry(remote_adev, &hive->device_list,
2922 				    gmc.xgmi.head)
2923 			if (amdgpu_ras_get_fed_status(remote_adev)) {
2924 				amdgpu_ras_set_fed_all(adev, hive, true);
2925 				break;
2926 			}
2927 	}
2928 	if (!ras->disable_ras_err_cnt_harvest) {
2929 
2930 		/* Build list of devices to query RAS related errors */
2931 		if  (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
2932 			device_list_handle = &hive->device_list;
2933 		} else {
2934 			INIT_LIST_HEAD(&device_list);
2935 			list_add_tail(&adev->gmc.xgmi.head, &device_list);
2936 			device_list_handle = &device_list;
2937 		}
2938 
2939 		if (amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) {
2940 			if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY) {
2941 				/* wait 500ms to ensure the pmfw has finished polling the mca bank info */
2942 				msleep(500);
2943 			}
2944 		}
2945 
2946 		type = amdgpu_ras_get_fatal_error_event(adev);
2947 		list_for_each_entry(remote_adev,
2948 				device_list_handle, gmc.xgmi.head) {
2949 			if (amdgpu_uniras_enabled(remote_adev)) {
2950 				amdgpu_ras_mgr_update_ras_ecc(remote_adev);
2951 			} else {
2952 				amdgpu_ras_query_err_status(remote_adev);
2953 				amdgpu_ras_log_on_err_counter(remote_adev, type);
2954 			}
2955 		}
2956 
2957 	}
2958 
2959 	if (amdgpu_device_should_recover_gpu(ras->adev)) {
2960 		struct amdgpu_reset_context reset_context;

2961 		memset(&reset_context, 0, sizeof(reset_context));
2962 
2963 		reset_context.method = AMD_RESET_METHOD_NONE;
2964 		reset_context.reset_req_dev = adev;
2965 		reset_context.src = AMDGPU_RESET_SRC_RAS;
2966 		set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
2967 
2968 		/* Perform full reset in fatal error mode */
2969 		if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
2970 			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2971 		else {
2972 			clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2973 
2974 			if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
2975 				ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
2976 				reset_context.method = AMD_RESET_METHOD_MODE2;
2977 			}
2978 
2979 			/* If a fatal error occurs in poison mode, a mode1 reset is
2980 			 * used to recover the gpu.
2981 			 */
2982 			if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
2983 				ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2984 				set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2985 
2986 				psp_fatal_error_recovery_quirk(&adev->psp);
2987 			}
2988 		}
2989 
2990 		amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2991 	}
2992 	atomic_set(&ras->in_recovery, 0);
2993 	if (hive) {
2994 		atomic_set(&hive->ras_recovery, 0);
2995 		amdgpu_put_xgmi_hive(hive);
2996 	}
2997 }
2998 
2999 /* alloc/realloc bps array */
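/*
 * The capacity is rounded up to a multiple of 512 records; e.g. with
 * count = 100, space_left = 0 and pages = 256, old_space = 100,
 * new_space = 356 and align_space = 512, so the array grows to 512
 * slots and space_left becomes 412.
 */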
3000 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
3001 		struct ras_err_handler_data *data, int pages)
3002 {
3003 	unsigned int old_space = data->count + data->space_left;
3004 	unsigned int new_space = old_space + pages;
3005 	unsigned int align_space = ALIGN(new_space, 512);
3006 	void *bps = kmalloc_objs(*data->bps, align_space);
3007 
3008 	if (!bps)
3009 		return -ENOMEM;
3011 
3012 	if (data->bps) {
3013 		memcpy(bps, data->bps,
3014 				data->count * sizeof(*data->bps));
3015 		kfree(data->bps);
3016 	}
3017 
3018 	data->bps = bps;
3019 	data->space_left += align_space - old_space;
3020 	return 0;
3021 }
3022 
3023 static int amdgpu_ras_mca2pa_by_idx(struct amdgpu_device *adev,
3024 			struct eeprom_table_record *bps,
3025 			struct ras_err_data *err_data)
3026 {
3027 	struct ta_ras_query_address_input addr_in;
3028 	uint32_t socket = 0;
3029 	int ret = 0;
3030 
3031 	if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
3032 		socket = adev->smuio.funcs->get_socket_id(adev);
3033 
3034 	/* reinit err_data */
3035 	err_data->err_addr_cnt = 0;
3036 	err_data->err_addr_len = adev->umc.retire_unit;
3037 
3038 	memset(&addr_in, 0, sizeof(addr_in));
3039 	addr_in.ma.err_addr = bps->address;
3040 	addr_in.ma.socket_id = socket;
3041 	addr_in.ma.ch_inst = bps->mem_channel;
3042 	if (!amdgpu_ras_smu_eeprom_supported(adev)) {
3043 		/* tell RAS TA the node instance is not used */
3044 		addr_in.ma.node_inst = TA_RAS_INV_NODE;
3045 	} else {
3046 		addr_in.ma.umc_inst = bps->mcumc_id;
3047 		addr_in.ma.node_inst = bps->cu;
3048 	}
3049 
3050 	if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
3051 		ret = adev->umc.ras->convert_ras_err_addr(adev, err_data,
3052 				&addr_in, NULL, false);
3053 
3054 	return ret;
3055 }
3056 
3057 static int amdgpu_ras_mca2pa(struct amdgpu_device *adev,
3058 			struct eeprom_table_record *bps,
3059 			struct ras_err_data *err_data)
3060 {
3061 	struct ta_ras_query_address_input addr_in;
3062 	uint32_t die_id, socket = 0;
3063 
3064 	if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
3065 		socket = adev->smuio.funcs->get_socket_id(adev);
3066 
3067 	/* although the die id is obtained from the PA in nps1 mode, the id
3068 	 * is usable in any nps mode
3069 	 */
3070 	if (adev->umc.ras && adev->umc.ras->get_die_id_from_pa)
3071 		die_id = adev->umc.ras->get_die_id_from_pa(adev, bps->address,
3072 					bps->retired_page << AMDGPU_GPU_PAGE_SHIFT);
3073 	else
3074 		return -EINVAL;
3075 
3076 	/* reinit err_data */
3077 	err_data->err_addr_cnt = 0;
3078 	err_data->err_addr_len = adev->umc.retire_unit;
3079 
3080 	memset(&addr_in, 0, sizeof(addr_in));
3081 	addr_in.ma.err_addr = bps->address;
3082 	addr_in.ma.ch_inst = bps->mem_channel;
3083 	addr_in.ma.umc_inst = bps->mcumc_id;
3084 	addr_in.ma.node_inst = die_id;
3085 	addr_in.ma.socket_id = socket;
3086 
3087 	if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
3088 		return adev->umc.ras->convert_ras_err_addr(adev, err_data,
3089 					&addr_in, NULL, false);
3090 	else
3091 		return -EINVAL;
3092 }
3093 
3094 static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev,
3095 					struct eeprom_table_record *bps, int count)
3096 {
3097 	int j;
3098 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3099 	struct ras_err_handler_data *data = con->eh_data;
3100 
3101 	for (j = 0; j < count; j++) {
3102 		if (!data->space_left &&
3103 		    amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
3104 			return -ENOMEM;
3105 		}
3106 
3107 		if (amdgpu_ras_check_bad_page_unlock(con,
3108 			bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT)) {
3109 			/* set to U64_MAX to mark it as invalid */
3110 			data->bps[data->count].retired_page = U64_MAX;
3111 			data->count++;
3112 			data->space_left--;
3113 			continue;
3114 		}
3115 
3116 		amdgpu_ras_reserve_page(adev, bps[j].retired_page);
3117 
3118 		memcpy(&data->bps[data->count], &(bps[j]),
3119 				sizeof(struct eeprom_table_record));
3120 		data->count++;
3121 		data->space_left--;
3122 		con->bad_page_num++;
3123 	}
3124 
3125 	return 0;
3126 }
3127 
3128 static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev,
3129 				struct eeprom_table_record *bps, struct ras_err_data *err_data,
3130 				enum amdgpu_memory_partition nps)
3131 {
3132 	int i = 0;
3133 	enum amdgpu_memory_partition save_nps;
3134 
3135 	save_nps = (bps[0].retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
3136 
3137 	/* old asics just have the pa in eeprom */
3138 	if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
3139 		memcpy(err_data->err_addr, bps,
3140 			sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
3141 		goto out;
3142 	}
3143 
3144 	for (i = 0; i < adev->umc.retire_unit; i++)
3145 		bps[i].retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
3146 
3147 	if (save_nps) {
3148 		if (save_nps == nps) {
3149 			if (amdgpu_umc_pages_in_a_row(adev, err_data,
3150 					bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT))
3151 				return -EINVAL;
3152 			for (i = 0; i < adev->umc.retire_unit; i++) {
3153 				err_data->err_addr[i].address = bps[0].address;
3154 				err_data->err_addr[i].mem_channel = bps[0].mem_channel;
3155 				err_data->err_addr[i].bank = bps[0].bank;
3156 				err_data->err_addr[i].err_type = bps[0].err_type;
3157 				err_data->err_addr[i].mcumc_id = bps[0].mcumc_id;
3158 			}
3159 		} else {
3160 			if (amdgpu_ras_mca2pa_by_idx(adev, &bps[0], err_data))
3161 				return -EINVAL;
3162 		}
3163 	} else {
3164 		if (bps[0].address == 0) {
3165 			/* for specific old eeprom data, mca address is not stored,
3166 			 * calc it from pa
3167 			 */
3168 			if (amdgpu_umc_pa2mca(adev, bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT,
3169 				&(bps[0].address), AMDGPU_NPS1_PARTITION_MODE))
3170 				return -EINVAL;
3171 		}
3172 
3173 		if (amdgpu_ras_mca2pa(adev, &bps[0], err_data)) {
3174 			if (nps == AMDGPU_NPS1_PARTITION_MODE)
3175 				memcpy(err_data->err_addr, bps,
3176 					sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
3177 			else
3178 				return -EOPNOTSUPP;
3179 		}
3180 	}
3181 
3182 out:
3183 	return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr, adev->umc.retire_unit);
3184 }
3185 
3186 static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
3187 				struct eeprom_table_record *bps, struct ras_err_data *err_data,
3188 				enum amdgpu_memory_partition nps)
3189 {
3190 	int i = 0;
3191 	enum amdgpu_memory_partition save_nps;
3192 
3193 	if (!amdgpu_ras_smu_eeprom_supported(adev)) {
3194 		save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
3195 		bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
3196 	} else {
3197 		/* if the pmfw manages the eeprom, save_nps is not stored on the
3198 		 * eeprom and we should always convert the mca address into a
3199 		 * physical address, so make save_nps different from nps
3200 		 */
3201 		save_nps = nps + 1;
3202 	}
3203 
3204 	if (save_nps == nps) {
3205 		if (amdgpu_umc_pages_in_a_row(adev, err_data,
3206 				bps->retired_page << AMDGPU_GPU_PAGE_SHIFT))
3207 			return -EINVAL;
3208 		for (i = 0; i < adev->umc.retire_unit; i++) {
3209 			err_data->err_addr[i].address = bps->address;
3210 			err_data->err_addr[i].mem_channel = bps->mem_channel;
3211 			err_data->err_addr[i].bank = bps->bank;
3212 			err_data->err_addr[i].err_type = bps->err_type;
3213 			err_data->err_addr[i].mcumc_id = bps->mcumc_id;
3214 		}
3215 	} else {
3216 		if (bps->address) {
3217 			if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
3218 				return -EINVAL;
3219 		} else {
3220 			/* for specific old eeprom data, the mca address is not
3221 			 * stored; calculate it from the pa
3222 			 */
3223 			if (amdgpu_umc_pa2mca(adev, bps->retired_page << AMDGPU_GPU_PAGE_SHIFT,
3224 				&(bps->address), AMDGPU_NPS1_PARTITION_MODE))
3225 				return -EINVAL;
3226 
3227 			if (amdgpu_ras_mca2pa(adev, bps, err_data))
3228 				return -EOPNOTSUPP;
3229 		}
3230 	}
3231 
3232 	return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr,
3233 									adev->umc.retire_unit);
3234 }
3235 
3236 /* it deals with vram only. */
3237 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
3238 		struct eeprom_table_record *bps, int pages, bool from_rom)
3239 {
3240 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3241 	struct ras_err_data err_data;
3242 	struct amdgpu_ras_eeprom_control *control =
3243 			&adev->psp.ras_context.ras->eeprom_control;
3244 	enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
3245 	int ret = 0;
3246 	uint32_t i = 0;
3247 
3248 	if (!con || !con->eh_data || !bps || pages <= 0)
3249 		return 0;
3250 
3251 	if (from_rom) {
3252 		err_data.err_addr =
3253 			kzalloc_objs(struct eeprom_table_record,
3254 				     adev->umc.retire_unit);
3255 		if (!err_data.err_addr) {
3256 			dev_warn(adev->dev, "Failed to alloc UMC error address record in mca2pa conversion!\n");
3257 			return -ENOMEM;
3258 		}
3259 
3260 		if (adev->gmc.gmc_funcs->query_mem_partition_mode)
3261 			nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
3262 	}
3263 
3264 	mutex_lock(&con->recovery_lock);
3265 
3266 	if (from_rom) {
3267 		/* there are no pa recs in V3, so skip pa recs processing */
3268 		if ((control->tbl_hdr.version < RAS_TABLE_VER_V3) &&
3269 		    !amdgpu_ras_smu_eeprom_supported(adev)) {
3270 			for (i = 0; i < pages; i++) {
3271 				if (control->ras_num_recs - i >= adev->umc.retire_unit) {
3272 					if ((bps[i].address == bps[i + 1].address) &&
3273 						(bps[i].mem_channel == bps[i + 1].mem_channel)) {
3274 						/* deal with retire_unit records at a time */
3275 						ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
3276 										&bps[i], &err_data, nps);
3277 						i += (adev->umc.retire_unit - 1);
3278 					} else {
3279 						break;
3280 					}
3281 				} else {
3282 					break;
3283 				}
3284 			}
3285 		}
3286 		for (; i < pages; i++) {
3287 			ret = __amdgpu_ras_convert_rec_from_rom(adev,
3288 				&bps[i], &err_data, nps);
3289 		}
3290 
3291 		con->eh_data->count_saved = con->eh_data->count;
3292 	} else {
3293 		ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
3294 	}
3295 
3296 	if (from_rom)
3297 		kfree(err_data.err_addr);
3298 	mutex_unlock(&con->recovery_lock);
3299 
3300 	return ret;
3301 }
3302 
3303 /*
3304  * write error record array to eeprom, the function should be
3305  * protected by recovery_lock
3306  * new_cnt: newly added UE count, excluding reserved bad pages, can be NULL
3307  */
3308 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
3309 		unsigned long *new_cnt)
3310 {
3311 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3312 	struct ras_err_handler_data *data;
3313 	struct amdgpu_ras_eeprom_control *control;
3314 	int save_count, unit_num, i;
3315 
3316 	if (!con || !con->eh_data) {
3317 		if (new_cnt)
3318 			*new_cnt = 0;
3319 
3320 		return 0;
3321 	}
3322 
3323 	if (!con->eeprom_control.is_eeprom_valid) {
3324 		dev_warn(adev->dev,
3325 			"Failed to save EEPROM table data because of EEPROM data corruption!");
3326 		if (new_cnt)
3327 			*new_cnt = 0;
3328 
3329 		return 0;
3330 	}
3331 
3332 	mutex_lock(&con->recovery_lock);
3333 	control = &con->eeprom_control;
3334 	data = con->eh_data;
3335 	if (amdgpu_ras_smu_eeprom_supported(adev))
3336 		unit_num = control->ras_num_recs -
3337 			control->ras_num_recs_old;
3338 	else
3339 		unit_num = data->count / adev->umc.retire_unit -
3340 			control->ras_num_recs;
3341 
3342 	save_count = con->bad_page_num - control->ras_num_bad_pages;
3343 	mutex_unlock(&con->recovery_lock);
3344 
3345 	if (new_cnt)
3346 		*new_cnt = unit_num;
3347 
3348 	/* only new entries are saved */
3349 	if (unit_num && save_count) {
3350 		/*old asics only save pa to eeprom like before*/
3351 		if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
3352 			if (amdgpu_ras_eeprom_append(control,
3353 					&data->bps[data->count_saved], unit_num)) {
3354 				dev_err(adev->dev, "Failed to save EEPROM table data!");
3355 				return -EIO;
3356 			}
3357 		} else {
3358 			for (i = 0; i < unit_num; i++) {
3359 				if (amdgpu_ras_eeprom_append(control,
3360 						&data->bps[data->count_saved +
3361 						i * adev->umc.retire_unit], 1)) {
3362 					dev_err(adev->dev, "Failed to save EEPROM table data!");
3363 					return -EIO;
3364 				}
3365 			}
3366 		}
3367 
3368 		dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
3369 		data->count_saved = data->count;
3370 	}
3371 
3372 	return 0;
3373 }
3374 
3375 /*
3376  * read error record array in eeprom and reserve enough space for
3377  * storing new bad pages
3378  */
3379 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
3380 {
3381 	struct amdgpu_ras_eeprom_control *control =
3382 		&adev->psp.ras_context.ras->eeprom_control;
3383 	struct eeprom_table_record *bps;
3384 	int ret, i = 0;
3385 
3386 	/* no bad page record, skip eeprom access */
3387 	if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
3388 		return 0;
3389 
3390 	bps = kzalloc_objs(*bps, control->ras_num_recs);
3391 	if (!bps)
3392 		return -ENOMEM;
3393 
3394 	ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
3395 	if (ret) {
3396 		dev_err(adev->dev, "Failed to load EEPROM table records!");
3397 	} else {
3398 		if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
3399 			/* In V3 there are no pa recs, and some cases (when address == 0)
3400 			 * may be parsed as pa recs, so add a version check to avoid that.
3401 			 */
3402 			if ((control->tbl_hdr.version < RAS_TABLE_VER_V3) &&
3403 			    !amdgpu_ras_smu_eeprom_supported(adev)) {
3404 				for (i = 0; i < control->ras_num_recs; i++) {
3405 					if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
3406 						if ((bps[i].address == bps[i + 1].address) &&
3407 							(bps[i].mem_channel == bps[i + 1].mem_channel)) {
3408 							control->ras_num_pa_recs += adev->umc.retire_unit;
3409 							i += (adev->umc.retire_unit - 1);
3410 						} else {
3411 							control->ras_num_mca_recs +=
3412 										(control->ras_num_recs - i);
3413 							break;
3414 						}
3415 					} else {
3416 						control->ras_num_mca_recs += (control->ras_num_recs - i);
3417 						break;
3418 					}
3419 				}
3420 			} else {
3421 				control->ras_num_mca_recs = control->ras_num_recs;
3422 			}
3423 		}
3424 
3425 		ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
3426 		if (ret)
3427 			goto out;
3428 
3429 		ret = amdgpu_ras_eeprom_check(control);
3430 		if (ret)
3431 			goto out;
3432 
3433 		/* HW not usable */
3434 		if (amdgpu_ras_is_rma(adev))
3435 			ret = -EHWPOISON;
3436 	}
3437 
3438 out:
3439 	kfree(bps);
3440 	return ret;
3441 }
3442 
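/*
 * Returns 1 if @addr is already recorded as a retired page, 0 if it is
 * not, and -EINVAL if @addr lies outside vram or beyond the 52-bit
 * inject address limit.
 */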
3443 static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
3444 				uint64_t addr)
3445 {
3446 	struct ras_err_handler_data *data = con->eh_data;
3447 	struct amdgpu_device *adev = con->adev;
3448 	int i;
3449 
3450 	if ((addr >= adev->gmc.mc_vram_size &&
3451 	    adev->gmc.mc_vram_size) ||
3452 	    (addr >= RAS_UMC_INJECT_ADDR_LIMIT))
3453 		return -EINVAL;
3454 
3455 	addr >>= AMDGPU_GPU_PAGE_SHIFT;
3456 	for (i = 0; i < data->count; i++)
3457 		if (addr == data->bps[i].retired_page)
3458 			return 1;
3459 
3460 	return 0;
3461 }
3462 
3463 /*
3464  * check if an address belongs to a bad page
3465  *
3466  * Note: this check is only for the umc block
3467  */
3468 static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
3469 				uint64_t addr)
3470 {
3471 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3472 	int ret = 0;
3473 
3474 	if (!con || !con->eh_data)
3475 		return ret;
3476 
3477 	mutex_lock(&con->recovery_lock);
3478 	ret = amdgpu_ras_check_bad_page_unlock(con, addr);
3479 	mutex_unlock(&con->recovery_lock);
3480 	return ret;
3481 }
3482 
3483 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
3484 					  uint32_t max_count)
3485 {
3486 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3487 
3488 	/*
3489 	 * amdgpu_bad_page_threshold is used to config
3490 	 * the threshold for the number of bad pages.
3491 	 * -1:  Threshold is set to default value
3492 	 *      Driver will issue a warning message when threshold is reached
3493 	 *      and continue runtime services.
3494 	 * 0:   Disable bad page retirement
3495 	 *      Driver will not retire bad pages,
3496 	 *      which is intended for debugging purposes.
3497 	 * -2:  Threshold is determined by a formula
3498 	 *      that assumes 1 bad page per 100M of local memory.
3499 	 *      Driver will continue runtime services when the threshold is reached.
3500 	 * 0 < threshold < max number of bad page records in EEPROM,
3501 	 *      A user-defined threshold is set
3502 	 *      Driver will halt runtime services when this custom threshold is reached.
3503 	 */
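	/*
	 * Worked example for -2, using the 1-bad-page-per-100M rule from
	 * the comment above: a 64G vram part yields roughly
	 * 64G / 100M ~= 655 records, clamped to max_count.
	 */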
3504 	if (amdgpu_bad_page_threshold == -2) {
3505 		u64 val = adev->gmc.mc_vram_size;
3506 
3507 		do_div(val, RAS_BAD_PAGE_COVER);
3508 		con->bad_page_cnt_threshold = min(lower_32_bits(val),
3509 						  max_count);
3510 	} else if (amdgpu_bad_page_threshold == -1) {
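		/* default: i.e. 16 bad-page records per 2M of reserved vram */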
3511 		con->bad_page_cnt_threshold = ((con->reserved_pages_in_bytes) >> 21) << 4;
3512 	} else {
3513 		con->bad_page_cnt_threshold = min_t(int, max_count,
3514 						    amdgpu_bad_page_threshold);
3515 	}
3516 }
3517 
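/**
 * amdgpu_ras_put_poison_req - queue a poison consumption message
 * @adev: pointer to the amdgpu device
 * @block: RAS block which consumed the poison
 * @pasid: PASID handed to @pasid_fn by the consumption handler
 * @pasid_fn: optional callback, invoked as pasid_fn(adev, pasid, data)
 * @data: opaque argument passed to @pasid_fn
 * @reset: requested gpu reset type, ORed into the pending reset flags
 *
 * The message is drained later by the page retirement thread. Returns
 * 0 on success, or -ENOSPC when the poison fifo is full.
 */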
3518 int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
3519 		enum amdgpu_ras_block block, uint16_t pasid,
3520 		pasid_notify pasid_fn, void *data, uint32_t reset)
3521 {
3522 	int ret = 0;
3523 	struct ras_poison_msg poison_msg;
3524 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3525 
3526 	memset(&poison_msg, 0, sizeof(poison_msg));
3527 	poison_msg.block = block;
3528 	poison_msg.pasid = pasid;
3529 	poison_msg.reset = reset;
3530 	poison_msg.pasid_fn = pasid_fn;
3531 	poison_msg.data = data;
3532 
3533 	ret = kfifo_put(&con->poison_fifo, poison_msg);
3534 	if (!ret) {
3535 		dev_err(adev->dev, "Poison message fifo is full!\n");
3536 		return -ENOSPC;
3537 	}
3538 
3539 	return 0;
3540 }
3541 
3542 static int amdgpu_ras_get_poison_req(struct amdgpu_device *adev,
3543 		struct ras_poison_msg *poison_msg)
3544 {
3545 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3546 
3547 	return kfifo_get(&con->poison_fifo, poison_msg);
3548 }
3549 
3550 static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
3551 {
3552 	mutex_init(&ecc_log->lock);
3553 
3554 	INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
3555 	ecc_log->de_queried_count = 0;
3556 	ecc_log->consumption_q_count = 0;
3557 }
3558 
3559 static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
3560 {
3561 	struct radix_tree_iter iter;
3562 	void __rcu **slot;
3563 	struct ras_ecc_err *ecc_err;
3564 
3565 	mutex_lock(&ecc_log->lock);
3566 	radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) {
3567 		ecc_err = radix_tree_deref_slot(slot);
3568 		kfree(ecc_err->err_pages.pfn);
3569 		kfree(ecc_err);
3570 		radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot);
3571 	}
3572 	mutex_unlock(&ecc_log->lock);
3573 
3574 	mutex_destroy(&ecc_log->lock);
3575 	ecc_log->de_queried_count = 0;
3576 	ecc_log->consumption_q_count = 0;
3577 }
3578 
3579 static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con,
3580 				uint32_t delayed_ms)
3581 {
3582 	int ret;
3583 
3584 	mutex_lock(&con->umc_ecc_log.lock);
3585 	ret = radix_tree_tagged(&con->umc_ecc_log.de_page_tree,
3586 			UMC_ECC_NEW_DETECTED_TAG);
3587 	mutex_unlock(&con->umc_ecc_log.lock);
3588 
3589 	if (ret)
3590 		schedule_delayed_work(&con->page_retirement_dwork,
3591 			msecs_to_jiffies(delayed_ms));
3592 
3593 	return ret ? true : false;
3594 }
3595 
3596 static void amdgpu_ras_do_page_retirement(struct work_struct *work)
3597 {
3598 	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
3599 					      page_retirement_dwork.work);
3600 	struct amdgpu_device *adev = con->adev;
3601 	struct ras_err_data err_data;
3602 
3603 	/* If gpu reset is ongoing, delay retiring the bad pages */
3604 	if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
3605 		amdgpu_ras_schedule_retirement_dwork(con,
3606 				AMDGPU_RAS_RETIRE_PAGE_INTERVAL * 3);
3607 		return;
3608 	}
3609 
3610 	amdgpu_ras_error_data_init(&err_data);
3611 
3612 	amdgpu_umc_handle_bad_pages(adev, &err_data);
3613 
3614 	amdgpu_ras_error_data_fini(&err_data);
3615 
3616 	amdgpu_ras_schedule_retirement_dwork(con,
3617 			AMDGPU_RAS_RETIRE_PAGE_INTERVAL);
3618 }
3619 
3620 static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
3621 				uint32_t poison_creation_count)
3622 {
3623 	int ret = 0;
3624 	struct ras_ecc_log_info *ecc_log;
3625 	struct ras_query_if info;
3626 	u32 timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
3627 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3628 	u64 de_queried_count;
3629 	u64 consumption_q_count;
3630 	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
3631 
3632 	memset(&info, 0, sizeof(info));
3633 	info.head.block = AMDGPU_RAS_BLOCK__UMC;
3634 
3635 	ecc_log = &ras->umc_ecc_log;
3636 	ecc_log->de_queried_count = 0;
3637 	ecc_log->consumption_q_count = 0;
3638 
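	/*
	 * Poll (up to MAX_UMC_POISON_POLLING_TIME_ASYNC iterations of
	 * 100 ms each) until at least one deferred error has been both
	 * queried and queued for consumption.
	 */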
3639 	do {
3640 		ret = amdgpu_ras_query_error_status_with_event(adev, &info, type);
3641 		if (ret)
3642 			return ret;
3643 
3644 		de_queried_count = ecc_log->de_queried_count;
3645 		consumption_q_count = ecc_log->consumption_q_count;
3646 
3647 		if (de_queried_count && consumption_q_count)
3648 			break;
3649 
3650 		msleep(100);
3651 	} while (--timeout);
3652 
3653 	if (de_queried_count)
3654 		schedule_delayed_work(&ras->page_retirement_dwork, 0);
3655 
3656 	if (amdgpu_ras_is_rma(adev) && atomic_cmpxchg(&ras->rma_in_recovery, 0, 1) == 0)
3657 		amdgpu_ras_reset_gpu(adev);
3658 
3659 	return 0;
3660 }
3661 
3662 static void amdgpu_ras_clear_poison_fifo(struct amdgpu_device *adev)
3663 {
3664 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3665 	struct ras_poison_msg msg;
3666 	int ret;
3667 
3668 	do {
3669 		ret = kfifo_get(&con->poison_fifo, &msg);
3670 	} while (ret);
3671 }
3672 
3673 static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
3674 			uint32_t msg_count, uint32_t *gpu_reset)
3675 {
3676 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3677 	uint32_t reset_flags = 0, reset = 0;
3678 	struct ras_poison_msg msg;
3679 	int ret, i;
3680 
3681 	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
3682 
3683 	for (i = 0; i < msg_count; i++) {
3684 		ret = amdgpu_ras_get_poison_req(adev, &msg);
3685 		if (!ret)
3686 			continue;
3687 
3688 		if (msg.pasid_fn)
3689 			msg.pasid_fn(adev, msg.pasid, msg.data);
3690 
3691 		reset_flags |= msg.reset;
3692 	}
3693 
3694 	/*
3695 	 * Try to ensure the poison creation handler has completed first,
3696 	 * so that RMA state is set if bad pages exceed the threshold.
3697 	 */
3698 	flush_delayed_work(&con->page_retirement_dwork);
3699 
3700 	/* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
3701 	if (reset_flags && !amdgpu_ras_is_rma(adev)) {
3702 		if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
3703 			reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
3704 		else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET)
3705 			reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
3706 		else
3707 			reset = reset_flags;
3708 
3709 		con->gpu_reset_flags |= reset;
3710 		amdgpu_ras_reset_gpu(adev);
3711 
3712 		*gpu_reset = reset;
3713 
3714 		/* Wait for gpu recovery to complete */
3715 		flush_work(&con->recovery_work);
3716 	}
3717 
3718 	return 0;
3719 }
3720 
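/*
 * Background thread that drains poison requests. Creation requests (new
 * UMC poison) are handled first, so that the bad page count and RMA
 * state are up to date before any consumption request can trigger a gpu
 * reset. After a mode-1 reset (or -EIO from the creation handler) all
 * pending requests are dropped, as the reset already handled them.
 */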
3721 static int amdgpu_ras_page_retirement_thread(void *param)
3722 {
3723 	struct amdgpu_device *adev = (struct amdgpu_device *)param;
3724 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3725 	uint32_t poison_creation_count, msg_count;
3726 	uint32_t gpu_reset;
3727 	int ret;
3728 
3729 	while (!kthread_should_stop()) {
3730 
3731 		wait_event_interruptible(con->page_retirement_wq,
3732 				kthread_should_stop() ||
3733 				atomic_read(&con->page_retirement_req_cnt));
3734 
3735 		if (kthread_should_stop())
3736 			break;
3737 
3738 		mutex_lock(&con->poison_lock);
3739 		gpu_reset = 0;
3740 
3741 		do {
3742 			poison_creation_count = atomic_read(&con->poison_creation_count);
3743 			ret = amdgpu_ras_poison_creation_handler(adev, poison_creation_count);
3744 			if (ret == -EIO)
3745 				break;
3746 
3747 			if (poison_creation_count) {
3748 				atomic_sub(poison_creation_count, &con->poison_creation_count);
3749 				atomic_sub(poison_creation_count, &con->page_retirement_req_cnt);
3750 			}
3751 		} while (atomic_read(&con->poison_creation_count) &&
3752 			!atomic_read(&con->poison_consumption_count));
3753 
3754 		if (ret != -EIO) {
3755 			msg_count = kfifo_len(&con->poison_fifo);
3756 			if (msg_count) {
3757 				ret = amdgpu_ras_poison_consumption_handler(adev,
3758 						msg_count, &gpu_reset);
3759 				if ((ret != -EIO) &&
3760 				    (gpu_reset != AMDGPU_RAS_GPU_RESET_MODE1_RESET))
3761 					atomic_sub(msg_count, &con->page_retirement_req_cnt);
3762 			}
3763 		}
3764 
3765 		if ((ret == -EIO) || (gpu_reset == AMDGPU_RAS_GPU_RESET_MODE1_RESET)) {
3766 			/* a gpu mode-1 reset is ongoing or a ras mode-1 reset just completed */
3767 			/* Clear poison creation request */
3768 			atomic_set(&con->poison_creation_count, 0);
3769 			atomic_set(&con->poison_consumption_count, 0);
3770 
3771 			/* Clear poison fifo */
3772 			amdgpu_ras_clear_poison_fifo(adev);
3773 
3774 			/* Clear all poison requests */
3775 			atomic_set(&con->page_retirement_req_cnt, 0);
3776 
3777 			if (ret == -EIO) {
3778 				/* Wait for mode-1 reset to complete */
3779 				down_read(&adev->reset_domain->sem);
3780 				up_read(&adev->reset_domain->sem);
3781 			}
3782 
3783 			/* Wake up work to save bad pages to eeprom */
3784 			schedule_delayed_work(&con->page_retirement_dwork, 0);
3785 		} else if (gpu_reset) {
3786 			/* gpu just completed mode-2 reset or other reset */
3787 			/* Clear poison consumption messages cached in fifo */
3788 			msg_count = kfifo_len(&con->poison_fifo);
3789 			if (msg_count) {
3790 				amdgpu_ras_clear_poison_fifo(adev);
3791 				atomic_sub(msg_count, &con->page_retirement_req_cnt);
3792 			}
3793 
3794 			atomic_set(&con->poison_consumption_count, 0);
3795 
3796 			/* Wake up work to save bad pages to eeprom */
3797 			schedule_delayed_work(&con->page_retirement_dwork, 0);
3798 		}
3799 		mutex_unlock(&con->poison_lock);
3800 	}
3801 
3802 	return 0;
3803 }
3804 
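/*
 * Load bad page records from the RAS EEPROM, hand them to the bad page
 * handler, and report the bad page count (and, once, the bad channel
 * bitmap) to the SMU. On newer ASICs still carrying a pre-V3 table, the
 * table is reset and re-saved in the V3 format. This is a no-op on
 * SRIOV and when the unified RAS path is enabled.
 */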
3805 int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
3806 {
3807 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3808 	struct amdgpu_ras_eeprom_control *control;
3809 	int ret;
3810 
3811 	if (!con || amdgpu_sriov_vf(adev))
3812 		return 0;
3813 
3814 	if (amdgpu_uniras_enabled(adev))
3815 		return 0;
3816 
3817 	control = &con->eeprom_control;
3818 	con->ras_smu_drv = amdgpu_dpm_get_ras_smu_driver(adev);
3819 
3820 	ret = amdgpu_ras_eeprom_init(control);
3821 	control->is_eeprom_valid = !ret;
3822 
3823 	if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
3824 		control->ras_num_pa_recs = control->ras_num_recs;
3825 
3826 	if (adev->umc.ras &&
3827 	    adev->umc.ras->get_retire_flip_bits)
3828 		adev->umc.ras->get_retire_flip_bits(adev);
3829 
3830 	if (control->ras_num_recs && control->is_eeprom_valid) {
3831 		ret = amdgpu_ras_load_bad_pages(adev);
3832 		if (ret) {
3833 			control->is_eeprom_valid = false;
3834 			return 0;
3835 		}
3836 
3837 		amdgpu_dpm_send_hbm_bad_pages_num(
3838 			adev, control->ras_num_bad_pages);
3839 
3840 		if (con->update_channel_flag) {
3841 			amdgpu_dpm_send_hbm_bad_channel_flag(
3842 				adev, control->bad_channel_bitmap);
3843 			con->update_channel_flag = false;
3844 		}
3845 
3846 		/* The format action is only applied to new ASICs */
3847 		if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) >= 12 &&
3848 		    control->tbl_hdr.version < RAS_TABLE_VER_V3)
3849 			if (!amdgpu_ras_eeprom_reset_table(control))
3850 				if (amdgpu_ras_save_bad_pages(adev, NULL))
3851 					dev_warn(adev->dev, "Failed to format RAS EEPROM data in V3 version!\n");
3852 	}
3853 
3854 	return 0;
3855 }
3856 
3857 int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
3858 {
3859 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3860 	struct ras_err_handler_data **data;
3861 	u32  max_eeprom_records_count = 0;
3862 	int ret;
3863 
3864 	if (!con || amdgpu_sriov_vf(adev))
3865 		return 0;
3866 
3867 	/* Allow access to the RAS EEPROM via debugfs when the ASIC
3868 	 * supports RAS and debugfs is enabled, even when
3869 	 * adev->ras_enabled is unset, i.e. when the "ras_enable"
3870 	 * module parameter is set to 0.
3871 	 */
3872 	con->adev = adev;
3873 
3874 	if (!adev->ras_enabled)
3875 		return 0;
3876 
3877 	data = &con->eh_data;
3878 	*data = kzalloc_obj(**data);
3879 	if (!*data) {
3880 		ret = -ENOMEM;
3881 		goto out;
3882 	}
3883 
3884 	mutex_init(&con->recovery_lock);
3885 	mutex_init(&con->poison_lock);
3886 	INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
3887 	atomic_set(&con->in_recovery, 0);
3888 	atomic_set(&con->rma_in_recovery, 0);
3889 	con->eeprom_control.bad_channel_bitmap = 0;
3890 
3891 	max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
3892 	amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
3893 
3894 	if (init_bp_info) {
3895 		ret = amdgpu_ras_init_badpage_info(adev);
3896 		if (ret)
3897 			goto free;
3898 	}
3899 
3900 	mutex_init(&con->page_rsv_lock);
3901 	INIT_KFIFO(con->poison_fifo);
3902 	mutex_init(&con->page_retirement_lock);
3903 	init_waitqueue_head(&con->page_retirement_wq);
3904 	atomic_set(&con->page_retirement_req_cnt, 0);
3905 	atomic_set(&con->poison_creation_count, 0);
3906 	atomic_set(&con->poison_consumption_count, 0);
3907 	con->page_retirement_thread =
3908 		kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
3909 	if (IS_ERR(con->page_retirement_thread)) {
3910 		con->page_retirement_thread = NULL;
3911 		dev_warn(adev->dev, "Failed to create umc_page_retirement thread!\n");
3912 	}
3913 
3914 	INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement);
3915 	amdgpu_ras_ecc_log_init(&con->umc_ecc_log);
3916 #ifdef CONFIG_X86_MCE_AMD
3917 	if ((adev->asic_type == CHIP_ALDEBARAN) &&
3918 	    (adev->gmc.xgmi.connected_to_cpu))
3919 		amdgpu_register_bad_pages_mca_notifier(adev);
3920 #endif
3921 	return 0;
3922 
3923 free:
3924 	kfree((*data)->bps);
3925 	kfree(*data);
3926 	con->eh_data = NULL;
3927 out:
3928 	dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
3929 
3930 	/*
3931 	 * Except for the error-threshold-exceeded case, other failures in
3932 	 * this function do not fail amdgpu driver init.
3933 	 */
3934 	if (!amdgpu_ras_is_rma(adev))
3935 		ret = 0;
3936 	else
3937 		ret = -EINVAL;
3938 
3939 	return ret;
3940 }
3941 
3942 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
3943 {
3944 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3945 	struct ras_err_handler_data *data = con->eh_data;
3946 	int max_flush_timeout = MAX_FLUSH_RETIRE_DWORK_TIMES;
3947 	bool ret;
3948 
3949 	/* recovery_init failed to init it, fini is useless */
3950 	if (!data)
3951 		return 0;
3952 
3953 	/* Save all cached bad pages to eeprom */
3954 	do {
3955 		flush_delayed_work(&con->page_retirement_dwork);
3956 		ret = amdgpu_ras_schedule_retirement_dwork(con, 0);
3957 	} while (ret && max_flush_timeout--);
3958 
3959 	if (con->page_retirement_thread)
3960 		kthread_stop(con->page_retirement_thread);
3961 
3962 	atomic_set(&con->page_retirement_req_cnt, 0);
3963 	atomic_set(&con->poison_creation_count, 0);
3964 
3965 	mutex_destroy(&con->page_rsv_lock);
3966 
3967 	cancel_work_sync(&con->recovery_work);
3968 
3969 	cancel_delayed_work_sync(&con->page_retirement_dwork);
3970 
3971 	amdgpu_ras_ecc_log_fini(&con->umc_ecc_log);
3972 
3973 	mutex_lock(&con->recovery_lock);
3974 	con->eh_data = NULL;
3975 	kfree(data->bps);
3976 	kfree(data);
3977 	mutex_unlock(&con->recovery_lock);
3978 
3979 	amdgpu_ras_critical_region_init(adev);
3980 #ifdef CONFIG_X86_MCE_AMD
3981 	amdgpu_unregister_bad_pages_mca_notifier(adev);
3982 #endif
3983 	return 0;
3984 }
3985 /* recovery end */
3986 
3987 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
3988 {
3989 	if (amdgpu_sriov_vf(adev)) {
3990 		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3991 		case IP_VERSION(13, 0, 2):
3992 		case IP_VERSION(13, 0, 6):
3993 		case IP_VERSION(13, 0, 12):
3994 		case IP_VERSION(13, 0, 14):
3995 			return true;
3996 		default:
3997 			return false;
3998 		}
3999 	}
4000 
4001 	if (adev->asic_type == CHIP_IP_DISCOVERY) {
4002 		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
4003 		case IP_VERSION(13, 0, 0):
4004 		case IP_VERSION(13, 0, 6):
4005 		case IP_VERSION(13, 0, 10):
4006 		case IP_VERSION(13, 0, 12):
4007 		case IP_VERSION(13, 0, 14):
4008 		case IP_VERSION(14, 0, 3):
4009 			return true;
4010 		default:
4011 			return false;
4012 		}
4013 	}
4014 
4015 	return adev->asic_type == CHIP_VEGA10 ||
4016 		adev->asic_type == CHIP_VEGA20 ||
4017 		adev->asic_type == CHIP_ARCTURUS ||
4018 		adev->asic_type == CHIP_ALDEBARAN ||
4019 		adev->asic_type == CHIP_SIENNA_CICHLID;
4020 }
4021 
4022 /*
4023  * This is a workaround for the vega20 workstation sku:
4024  * force enable gfx ras and ignore the vbios gfx ras flag,
4025  * because GC EDC cannot be written.
4026  */
4027 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
4028 {
4029 	struct atom_context *ctx = adev->mode_info.atom_context;
4030 
4031 	if (!ctx)
4032 		return;
4033 
4034 	if (strnstr(ctx->vbios_pn, "D16406",
4035 		    sizeof(ctx->vbios_pn)) ||
4036 		strnstr(ctx->vbios_pn, "D36002",
4037 			sizeof(ctx->vbios_pn)))
4038 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
4039 }
4040 
4041 /* Query ras capability via the atomfirmware interface */
4042 static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev)
4043 {
4044 	/* mem_ecc cap */
4045 	if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
4046 		dev_info(adev->dev, "MEM ECC is active.\n");
4047 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
4048 					 1 << AMDGPU_RAS_BLOCK__DF);
4049 	} else {
4050 		dev_info(adev->dev, "MEM ECC is not present.\n");
4051 	}
4052 
4053 	/* sram_ecc cap */
4054 	if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
4055 		dev_info(adev->dev, "SRAM ECC is active.\n");
4056 		if (!amdgpu_sriov_vf(adev))
4057 			adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
4058 						  1 << AMDGPU_RAS_BLOCK__DF);
4059 		else
4060 			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
4061 						 1 << AMDGPU_RAS_BLOCK__SDMA |
4062 						 1 << AMDGPU_RAS_BLOCK__GFX);
4063 
4064 		/*
4065 		 * VCN/JPEG RAS can be supported on both bare metal and
4066 		 * SRIOV environments
4067 		 */
4068 		if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
4069 		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
4070 		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3) ||
4071 		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(5, 0, 1))
4072 			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
4073 						 1 << AMDGPU_RAS_BLOCK__JPEG);
4074 		else
4075 			adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
4076 						  1 << AMDGPU_RAS_BLOCK__JPEG);
4077 
4078 		/*
4079 		 * XGMI RAS is not supported if xgmi num physical nodes
4080 		 * is zero
4081 		 */
4082 		if (!adev->gmc.xgmi.num_physical_nodes)
4083 			adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
4084 	} else {
4085 		dev_info(adev->dev, "SRAM ECC is not present.\n");
4086 	}
4087 }
4088 
4089 /* Query poison mode from umc/df IP callbacks */
4090 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
4091 {
4092 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4093 	bool df_poison, umc_poison;
4094 
4095 	/* poison setting is useless on SRIOV guest */
4096 	if (amdgpu_sriov_vf(adev) || !con)
4097 		return;
4098 
4099 	/* Init poison supported flag, the default value is false */
4100 	if (adev->gmc.xgmi.connected_to_cpu ||
4101 	    adev->gmc.is_app_apu) {
4102 		/* enabled by default when GPU is connected to CPU */
4103 		con->poison_supported = true;
4104 	} else if (adev->df.funcs &&
4105 	    adev->df.funcs->query_ras_poison_mode &&
4106 	    adev->umc.ras &&
4107 	    adev->umc.ras->query_ras_poison_mode) {
4108 		df_poison =
4109 			adev->df.funcs->query_ras_poison_mode(adev);
4110 		umc_poison =
4111 			adev->umc.ras->query_ras_poison_mode(adev);
4112 
4113 		/* Only if poison is set in both DF and UMC can we support it */
4114 		if (df_poison && umc_poison)
4115 			con->poison_supported = true;
4116 		else if (df_poison != umc_poison)
4117 			dev_warn(adev->dev,
4118 				"Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
4119 				df_poison, umc_poison);
4120 	}
4121 }
4122 
4123 /*
4124  * Check the hardware's ras ability, which will be saved in hw_supported.
4125  * If the hardware does not support ras, we can skip some ras initialization
4126  * and forbid some ras operations from IPs.
4127  * If software itself (say, a boot parameter) limits the ras ability, we
4128  * still need to allow IPs to do some limited operations, like disable. In
4129  * such a case we have to initialize ras as normal, but check in each
4130  * function whether the operation is allowed or not.
4131  */
4132 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
4133 {
4134 	adev->ras_hw_enabled = adev->ras_enabled = 0;
4135 
4136 	if (!amdgpu_ras_asic_supported(adev))
4137 		return;
4138 
4139 	if (amdgpu_sriov_vf(adev)) {
4140 		if (amdgpu_virt_get_ras_capability(adev))
4141 			goto init_ras_enabled_flag;
4142 	}
4143 
4144 	/* query ras capability from psp */
4145 	if (amdgpu_psp_get_ras_capability(&adev->psp))
4146 		goto init_ras_enabled_flag;
4147 
4148 	/* query ras capability from vbios */
4149 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
4150 		amdgpu_ras_query_ras_capablity_from_vbios(adev);
4151 	} else {
4152 		/* the driver only manages a few IP blocks' RAS features
4153 		 * when the GPU is connected to the CPU through XGMI */
4154 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
4155 					   1 << AMDGPU_RAS_BLOCK__SDMA |
4156 					   1 << AMDGPU_RAS_BLOCK__MMHUB);
4157 	}
4158 
4159 	/* apply asic specific settings (vega20 only for now) */
4160 	amdgpu_ras_get_quirks(adev);
4161 
4162 	/* query poison mode from umc/df ip callback */
4163 	amdgpu_ras_query_poison_mode(adev);
4164 
4165 init_ras_enabled_flag:
4166 	/* hw_supported needs to be aligned with RAS block mask. */
4167 	adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
4168 
4169 	adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
4170 		adev->ras_hw_enabled & amdgpu_ras_mask;
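	/*
	 * E.g. booting with the module parameter ras_mask=0x1 keeps only
	 * bit 0 (umc) of the hardware-supported blocks enabled.
	 */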
4171 
4172 	/* aca is disabled by default except for psp v13_0_6/v13_0_12/v13_0_14 */
4173 	if (!amdgpu_sriov_vf(adev)) {
4174 		adev->aca.is_enabled =
4175 			(amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
4176 			amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
4177 			amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14));
4178 	}
4179 
4180 	/* bad page feature is not applicable to specific app platform */
4181 	if (adev->gmc.is_app_apu &&
4182 	    amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(12, 0, 0))
4183 		amdgpu_bad_page_threshold = 0;
4184 }
4185 
4186 static void amdgpu_ras_counte_dw(struct work_struct *work)
4187 {
4188 	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
4189 					      ras_counte_delay_work.work);
4190 	struct amdgpu_device *adev = con->adev;
4191 	struct drm_device *dev = adev_to_drm(adev);
4192 	unsigned long ce_count, ue_count;
4193 	int res;
4194 
4195 	res = pm_runtime_get_sync(dev->dev);
4196 	if (res < 0)
4197 		goto Out;
4198 
4199 	/* Cache new values.
4200 	 */
4201 	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
4202 		atomic_set(&con->ras_ce_count, ce_count);
4203 		atomic_set(&con->ras_ue_count, ue_count);
4204 	}
4205 
4206 Out:
4207 	pm_runtime_put_autosuspend(dev->dev);
4208 }
4209 
4210 static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
4211 {
4212 	return (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) |
4213 			AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
4214 			AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
4215 			AMDGPU_RAS_ERROR__PARITY;
4216 }
4217 
4218 static void ras_event_mgr_init(struct ras_event_manager *mgr)
4219 {
4220 	struct ras_event_state *event_state;
4221 	int i;
4222 
4223 	memset(mgr, 0, sizeof(*mgr));
4224 	atomic64_set(&mgr->seqno, 0);
4225 
4226 	for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) {
4227 		event_state = &mgr->event_state[i];
4228 		event_state->last_seqno = RAS_EVENT_INVALID_ID;
4229 		atomic64_set(&event_state->count, 0);
4230 	}
4231 }
4232 
4233 static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev)
4234 {
4235 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4236 	struct amdgpu_hive_info *hive;
4237 
4238 	if (!ras)
4239 		return;
4240 
4241 	hive = amdgpu_get_xgmi_hive(adev);
4242 	ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr;
4243 
4244 	/* init event manager with node 0 on xgmi system */
4245 	if (!amdgpu_reset_in_recovery(adev)) {
4246 		if (!hive || adev->gmc.xgmi.node_id == 0)
4247 			ras_event_mgr_init(ras->event_mgr);
4248 	}
4249 
4250 	if (hive)
4251 		amdgpu_put_xgmi_hive(hive);
4252 }
4253 
4254 static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev)
4255 {
4256 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4257 
4258 	if (!con || (adev->flags & AMD_IS_APU))
4259 		return;
4260 
4261 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
4262 	case IP_VERSION(13, 0, 2):
4263 	case IP_VERSION(13, 0, 6):
4264 	case IP_VERSION(13, 0, 12):
4265 		con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT;
4266 		break;
4267 	case IP_VERSION(13, 0, 14):
4268 		con->reserved_pages_in_bytes = (AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT << 1);
4269 		break;
4270 	default:
4271 		break;
4272 	}
4273 }
4274 
4275 int amdgpu_ras_init(struct amdgpu_device *adev)
4276 {
4277 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4278 	int r;
4279 
4280 	if (con)
4281 		return 0;
4282 
4283 	con = kzalloc(sizeof(*con) +
4284 			sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
4285 			sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
4286 			GFP_KERNEL);
4287 	if (!con)
4288 		return -ENOMEM;
4289 
4290 	con->adev = adev;
4291 	INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
4292 	atomic_set(&con->ras_ce_count, 0);
4293 	atomic_set(&con->ras_ue_count, 0);
4294 
4295 	con->objs = (struct ras_manager *)(con + 1);
4296 
4297 	amdgpu_ras_set_context(adev, con);
4298 
4299 	amdgpu_ras_check_supported(adev);
4300 
4301 	if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
4302 		/* set gfx block ras context feature for VEGA20 Gaming,
4303 		 * to send a ras disable cmd to the ras ta during ras late init.
4304 		 */
4305 		if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
4306 			con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
4307 
4308 			return 0;
4309 		}
4310 
4311 		r = 0;
4312 		goto release_con;
4313 	}
4314 
4315 	con->update_channel_flag = false;
4316 	con->features = 0;
4317 	con->schema = 0;
4318 	INIT_LIST_HEAD(&con->head);
4319 	/* Might need to get this flag from vbios. */
4320 	con->flags = RAS_DEFAULT_FLAGS;
4321 
4322 	/* initialize nbio ras function ahead of any other
4323 	 * ras functions so hardware fatal error interrupt
4324 	 * can be enabled as early as possible */
4325 	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
4326 	case IP_VERSION(7, 4, 0):
4327 	case IP_VERSION(7, 4, 1):
4328 	case IP_VERSION(7, 4, 4):
4329 		if (!adev->gmc.xgmi.connected_to_cpu)
4330 			adev->nbio.ras = &nbio_v7_4_ras;
4331 		break;
4332 	case IP_VERSION(4, 3, 0):
4333 		if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
4334 			/* unlike other generations of nbio ras,
4335 			 * nbio v4_3 only supports the fatal error interrupt
4336 			 * to inform software that DF is frozen due to a
4337 			 * system fatal error event. The driver should not
4338 			 * enable nbio ras in such a case. Instead,
4339 			 * check DF RAS */
4340 			adev->nbio.ras = &nbio_v4_3_ras;
4341 		break;
4342 	case IP_VERSION(6, 3, 1):
4343 		if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
4344 			/* unlike other generations of nbio ras,
4345 			 * nbif v6_3_1 only supports the fatal error interrupt
4346 			 * to inform software that DF is frozen due to a
4347 			 * system fatal error event. The driver should not
4348 			 * enable nbio ras in such a case. Instead,
4349 			 * check DF RAS
4350 			 */
4351 			adev->nbio.ras = &nbif_v6_3_1_ras;
4352 		break;
4353 	case IP_VERSION(7, 9, 0):
4354 	case IP_VERSION(7, 9, 1):
4355 		if (!adev->gmc.is_app_apu)
4356 			adev->nbio.ras = &nbio_v7_9_ras;
4357 		break;
4358 	default:
4359 		/* nbio ras is not available */
4360 		break;
4361 	}
4362 
4363 	/* nbio ras block needs to be enabled ahead of other ras blocks
4364 	 * to handle fatal error */
4365 	r = amdgpu_nbio_ras_sw_init(adev);
4366 	if (r)
4367 		goto release_con;
4368 
4369 	if (adev->nbio.ras &&
4370 	    adev->nbio.ras->init_ras_controller_interrupt) {
4371 		r = adev->nbio.ras->init_ras_controller_interrupt(adev);
4372 		if (r)
4373 			goto release_con;
4374 	}
4375 
4376 	if (adev->nbio.ras &&
4377 	    adev->nbio.ras->init_ras_err_event_athub_interrupt) {
4378 		r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
4379 		if (r)
4380 			goto release_con;
4381 	}
4382 
4383 	/* Pack socket_id into ras feature mask bits [31:29] */
4384 	if (adev->smuio.funcs &&
4385 	    adev->smuio.funcs->get_socket_id)
4386 		con->features |= ((adev->smuio.funcs->get_socket_id(adev)) <<
4387 					AMDGPU_RAS_FEATURES_SOCKETID_SHIFT);
4388 
4389 	/* Get RAS schema for particular SOC */
4390 	con->schema = amdgpu_get_ras_schema(adev);
4391 
4392 	amdgpu_ras_init_reserved_vram_size(adev);
4393 
4394 	if (amdgpu_ras_fs_init(adev)) {
4395 		r = -EINVAL;
4396 		goto release_con;
4397 	}
4398 
4399 	if (amdgpu_ras_aca_is_supported(adev)) {
4400 		if (amdgpu_aca_is_enabled(adev))
4401 			r = amdgpu_aca_init(adev);
4402 		else
4403 			r = amdgpu_mca_init(adev);
4404 		if (r)
4405 			goto release_con;
4406 	}
4407 
4408 	con->init_task_pid = task_pid_nr(current);
4409 	get_task_comm(con->init_task_comm, current);
4410 
4411 	mutex_init(&con->critical_region_lock);
4412 	INIT_LIST_HEAD(&con->critical_region_head);
4413 
4414 	dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
4415 		 "hardware ability[%x] ras_mask[%x]\n",
4416 		 adev->ras_hw_enabled, adev->ras_enabled);
4417 
4418 	return 0;
4419 release_con:
4420 	amdgpu_ras_set_context(adev, NULL);
4421 	kfree(con);
4422 
4423 	return r;
4424 }
4425 
4426 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
4427 {
4428 	if (adev->gmc.xgmi.connected_to_cpu ||
4429 	    adev->gmc.is_app_apu)
4430 		return 1;
4431 	return 0;
4432 }
4433 
4434 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
4435 					struct ras_common_if *ras_block)
4436 {
4437 	struct ras_query_if info = {
4438 		.head = *ras_block,
4439 	};
4440 
4441 	if (!amdgpu_persistent_edc_harvesting_supported(adev))
4442 		return 0;
4443 
4444 	if (amdgpu_ras_query_error_status(adev, &info) != 0)
4445 		drm_warn(adev_to_drm(adev), "RAS init query failure");
4446 
4447 	if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
4448 		drm_warn(adev_to_drm(adev), "RAS init harvest reset failure");
4449 
4450 	return 0;
4451 }
4452 
4453 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
4454 {
4455 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4456 
4457 	if (!con)
4458 		return false;
4459 
4460 	return con->poison_supported;
4461 }
4462 
4463 /* helper function to handle common stuff in ip late init phase */
4464 int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
4465 			 struct ras_common_if *ras_block)
4466 {
4467 	struct amdgpu_ras_block_object *ras_obj = NULL;
4468 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4469 	struct ras_query_if *query_info;
4470 	unsigned long ue_count, ce_count;
4471 	int r;
4472 
4473 	/* disable RAS feature per IP block if it is not supported */
4474 	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
4475 		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
4476 		return 0;
4477 	}
4478 
4479 	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
4480 	if (r) {
4481 		if (adev->in_suspend || amdgpu_reset_in_recovery(adev)) {
4482 			/* in resume phase, if we fail to enable ras,
4483 			 * clean up all ras fs nodes and disable ras */
4484 			goto cleanup;
4485 		} else
4486 			return r;
4487 	}
4488 
4489 	/* check for errors on ASICs that support persistent EDC across warm reset */
4490 	amdgpu_persistent_edc_harvesting(adev, ras_block);
4491 
4492 	/* in resume phase, no need to create ras fs node */
4493 	if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
4494 		return 0;
4495 
4496 	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
4497 	if (ras_obj->ras_cb || (ras_obj->hw_ops &&
4498 	    (ras_obj->hw_ops->query_poison_status ||
4499 	    ras_obj->hw_ops->handle_poison_consumption))) {
4500 		r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
4501 		if (r)
4502 			goto cleanup;
4503 	}
4504 
4505 	if (ras_obj->hw_ops &&
4506 	    (ras_obj->hw_ops->query_ras_error_count ||
4507 	     ras_obj->hw_ops->query_ras_error_status)) {
4508 		r = amdgpu_ras_sysfs_create(adev, ras_block);
4509 		if (r)
4510 			goto interrupt;
4511 
4512 		/* Those are the cached values at init.
4513 		 */
4514 		query_info = kzalloc_obj(*query_info);
4515 		if (!query_info)
4516 			return -ENOMEM;
4517 		memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
4518 
4519 		if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
4520 			atomic_set(&con->ras_ce_count, ce_count);
4521 			atomic_set(&con->ras_ue_count, ue_count);
4522 		}
4523 
4524 		kfree(query_info);
4525 	}
4526 
4527 	return 0;
4528 
4529 interrupt:
4530 	if (ras_obj->ras_cb)
4531 		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
4532 cleanup:
4533 	amdgpu_ras_feature_enable(adev, ras_block, 0);
4534 	return r;
4535 }
4536 
4537 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
4538 			 struct ras_common_if *ras_block)
4539 {
4540 	return amdgpu_ras_block_late_init(adev, ras_block);
4541 }
4542 
4543 /* helper function to remove ras fs node and interrupt handler */
4544 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
4545 			  struct ras_common_if *ras_block)
4546 {
4547 	struct amdgpu_ras_block_object *ras_obj;
4548 	if (!ras_block)
4549 		return;
4550 
4551 	amdgpu_ras_sysfs_remove(adev, ras_block);
4552 
4553 	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
4554 	if (ras_obj->ras_cb)
4555 		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
4556 }
4557 
4558 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
4559 			  struct ras_common_if *ras_block)
4560 {
4561 	return amdgpu_ras_block_late_fini(adev, ras_block);
4562 }
4563 
4564 /* do some init work after IP late init, as a dependence.
4565  * It runs in the resume/gpu reset/boot-up cases.
4566  */
4567 void amdgpu_ras_resume(struct amdgpu_device *adev)
4568 {
4569 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4570 	struct ras_manager *obj, *tmp;
4571 
4572 	if (!adev->ras_enabled || !con) {
4573 		/* clean ras context for VEGA20 Gaming after send ras disable cmd */
4574 		amdgpu_release_ras_context(adev);
4575 
4576 		return;
4577 	}
4578 
4579 	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
4580 		/* Set up all other IPs which are not implemented. One tricky
4581 		 * thing is that an IP's actual ras error type should be
4582 		 * MULTI_UNCORRECTABLE, but since the driver does not handle
4583 		 * it, ERROR_NONE makes sense anyway.
4584 		 */
4585 		amdgpu_ras_enable_all_features(adev, 1);
4586 
4587 		/* We enable ras on all hw_supported blocks, but a boot
4588 		 * parameter might disable some of them, and one or more IPs
4589 		 * may not be implemented yet. So we disable those on their behalf.
4590 		 */
4591 		list_for_each_entry_safe(obj, tmp, &con->head, node) {
4592 			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
4593 				amdgpu_ras_feature_enable(adev, &obj->head, 0);
4594 				/* there should be no any reference. */
4595 				/* there should not be any reference. */
4596 			}
4597 		}
4598 	}
4599 }
4600 
4601 void amdgpu_ras_suspend(struct amdgpu_device *adev)
4602 {
4603 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4604 
4605 	if (!adev->ras_enabled || !con)
4606 		return;
4607 
4608 	amdgpu_ras_disable_all_features(adev, 0);
4609 	/* Make sure all ras objects are disabled. */
4610 	if (AMDGPU_RAS_GET_FEATURES(con->features))
4611 		amdgpu_ras_disable_all_features(adev, 1);
4612 }
4613 
4614 int amdgpu_ras_late_init(struct amdgpu_device *adev)
4615 {
4616 	struct amdgpu_ras_block_list *node, *tmp;
4617 	struct amdgpu_ras_block_object *obj;
4618 	int r;
4619 
4620 	amdgpu_ras_event_mgr_init(adev);
4621 
4622 	if (amdgpu_ras_aca_is_supported(adev)) {
4623 		if (amdgpu_reset_in_recovery(adev)) {
4624 			if (amdgpu_aca_is_enabled(adev))
4625 				r = amdgpu_aca_reset(adev);
4626 			else
4627 				r = amdgpu_mca_reset(adev);
4628 			if (r)
4629 				return r;
4630 		}
4631 
4632 		if (!amdgpu_sriov_vf(adev)) {
4633 			if (amdgpu_aca_is_enabled(adev))
4634 				amdgpu_ras_set_aca_debug_mode(adev, false);
4635 			else
4636 				amdgpu_ras_set_mca_debug_mode(adev, false);
4637 		}
4638 	}
4639 
4640 	/* Guest side doesn't need to init ras features */
4641 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_ras_telemetry_en(adev))
4642 		return 0;
4643 
4644 	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
4645 		obj = node->ras_obj;
4646 		if (!obj) {
4647 			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
4648 			continue;
4649 		}
4650 
4651 		if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block))
4652 			continue;
4653 
4654 		if (obj->ras_late_init) {
4655 			r = obj->ras_late_init(adev, &obj->ras_comm);
4656 			if (r) {
4657 				dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
4658 					obj->ras_comm.name, r);
4659 				return r;
4660 			}
4661 		} else
4662 			amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
4663 	}
4664 
4665 	amdgpu_ras_check_bad_page_status(adev);
4666 
4667 	return 0;
4668 }
4669 
4670 /* do some fini work before IP fini as dependence */
4671 /* do some fini work before IP fini, as a dependence */
4672 {
4673 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4674 
4675 	if (!adev->ras_enabled || !con)
4676 		return 0;
4677 
4678 
4679 	/* Need to disable ras on all IPs here before ip [hw/sw]fini */
4680 	if (AMDGPU_RAS_GET_FEATURES(con->features))
4681 		amdgpu_ras_disable_all_features(adev, 0);
4682 	amdgpu_ras_recovery_fini(adev);
4683 	return 0;
4684 }
4685 
4686 int amdgpu_ras_fini(struct amdgpu_device *adev)
4687 {
4688 	struct amdgpu_ras_block_list *ras_node, *tmp;
4689 	struct amdgpu_ras_block_object *obj = NULL;
4690 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4691 
4692 	if (!adev->ras_enabled || !con)
4693 		return 0;
4694 
4695 	amdgpu_ras_critical_region_fini(adev);
4696 	mutex_destroy(&con->critical_region_lock);
4697 
4698 	list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
4699 		if (ras_node->ras_obj) {
4700 			obj = ras_node->ras_obj;
4701 			if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
4702 			    obj->ras_fini)
4703 				obj->ras_fini(adev, &obj->ras_comm);
4704 			else
4705 				amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
4706 		}
4707 
4708 		/* Clear ras blocks from ras_list and free ras block list node */
4709 		list_del(&ras_node->node);
4710 		kfree(ras_node);
4711 	}
4712 
4713 	amdgpu_ras_fs_fini(adev);
4714 	amdgpu_ras_interrupt_remove_all(adev);
4715 
4716 	if (amdgpu_ras_aca_is_supported(adev)) {
4717 		if (amdgpu_aca_is_enabled(adev))
4718 			amdgpu_aca_fini(adev);
4719 		else
4720 			amdgpu_mca_fini(adev);
4721 	}
4722 
4723 	WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");
4724 
4725 	if (AMDGPU_RAS_GET_FEATURES(con->features))
4726 		amdgpu_ras_disable_all_features(adev, 0);
4727 
4728 	cancel_delayed_work_sync(&con->ras_counte_delay_work);
4729 
4730 	amdgpu_ras_set_context(adev, NULL);
4731 	kfree(con);
4732 
4733 	return 0;
4734 }
4735 
4736 bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
4737 {
4738 	struct amdgpu_ras *ras;
4739 
4740 	ras = amdgpu_ras_get_context(adev);
4741 	if (!ras)
4742 		return false;
4743 
4744 	return test_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
4745 }
4746 
4747 void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
4748 {
4749 	struct amdgpu_ras *ras;
4750 
4751 	ras = amdgpu_ras_get_context(adev);
4752 	if (ras) {
4753 		if (status)
4754 			set_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
4755 		else
4756 			clear_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
4757 	}
4758 }
4759 
4760 void amdgpu_ras_clear_err_state(struct amdgpu_device *adev)
4761 {
4762 	struct amdgpu_ras *ras;
4763 
4764 	ras = amdgpu_ras_get_context(adev);
4765 	if (ras) {
4766 		ras->ras_err_state = 0;
4767 		ras->gpu_reset_flags = 0;
4768 	}
4769 }
4770 
4771 void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
4772 			       enum amdgpu_ras_block block)
4773 {
4774 	struct amdgpu_ras *ras;
4775 
4776 	ras = amdgpu_ras_get_context(adev);
4777 	if (ras)
4778 		set_bit(block, &ras->ras_err_state);
4779 }
4780 
4781 bool amdgpu_ras_is_err_state(struct amdgpu_device *adev, int block)
4782 {
4783 	struct amdgpu_ras *ras;
4784 
4785 	ras = amdgpu_ras_get_context(adev);
4786 	if (ras) {
4787 		if (block == AMDGPU_RAS_BLOCK__ANY)
4788 			return (ras->ras_err_state != 0);
4789 		else
4790 			return test_bit(block, &ras->ras_err_state) ||
4791 			       test_bit(AMDGPU_RAS_BLOCK__LAST,
4792 					&ras->ras_err_state);
4793 	}
4794 
4795 	return false;
4796 }
4797 
4798 static struct ras_event_manager *__get_ras_event_mgr(struct amdgpu_device *adev)
4799 {
4800 	struct amdgpu_ras *ras;
4801 
4802 	ras = amdgpu_ras_get_context(adev);
4803 	if (!ras)
4804 		return NULL;
4805 
4806 	return ras->event_mgr;
4807 }
4808 
4809 int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type,
4810 				     const void *caller)
4811 {
4812 	struct ras_event_manager *event_mgr;
4813 	struct ras_event_state *event_state;
4814 	int ret = 0;
4815 
4816 	if (amdgpu_uniras_enabled(adev))
4817 		return 0;
4818 
4819 	if (type >= RAS_EVENT_TYPE_COUNT) {
4820 		ret = -EINVAL;
4821 		goto out;
4822 	}
4823 
4824 	event_mgr = __get_ras_event_mgr(adev);
4825 	if (!event_mgr) {
4826 		ret = -EINVAL;
4827 		goto out;
4828 	}
4829 
4830 	event_state = &event_mgr->event_state[type];
4831 	event_state->last_seqno = atomic64_inc_return(&event_mgr->seqno);
4832 	atomic64_inc(&event_state->count);
4833 
4834 out:
4835 	if (ret && caller)
4836 		dev_warn(adev->dev, "failed to mark ras event (%d) in %ps, ret:%d\n",
4837 			 (int)type, caller, ret);
4838 
4839 	return ret;
4840 }
4841 
4842 u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type)
4843 {
4844 	struct ras_event_manager *event_mgr;
4845 	u64 id;
4846 
4847 	if (type >= RAS_EVENT_TYPE_COUNT)
4848 		return RAS_EVENT_INVALID_ID;
4849 
4850 	switch (type) {
4851 	case RAS_EVENT_TYPE_FATAL:
4852 	case RAS_EVENT_TYPE_POISON_CREATION:
4853 	case RAS_EVENT_TYPE_POISON_CONSUMPTION:
4854 		event_mgr = __get_ras_event_mgr(adev);
4855 		if (!event_mgr)
4856 			return RAS_EVENT_INVALID_ID;
4857 
4858 		id = event_mgr->event_state[type].last_seqno;
4859 		break;
4860 	case RAS_EVENT_TYPE_INVALID:
4861 	default:
4862 		id = RAS_EVENT_INVALID_ID;
4863 		break;
4864 	}
4865 
4866 	return id;
4867 }
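/*
 * Typical event-id usage (see amdgpu_ras_global_ras_isr() below): mark
 * the event first, then acquire the id for logging:
 *
 *	if (!amdgpu_ras_mark_ras_event(adev, type))
 *		event_id = amdgpu_ras_acquire_event_id(adev, type);
 */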
4868 
4869 int amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
4870 {
4871 	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
4872 		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4873 		enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
4874 		u64 event_id = RAS_EVENT_INVALID_ID;
4875 
4876 		if (amdgpu_uniras_enabled(adev))
4877 			return 0;
4878 
4879 		if (!amdgpu_ras_mark_ras_event(adev, type))
4880 			event_id = amdgpu_ras_acquire_event_id(adev, type);
4881 
4882 		RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error "
4883 			      "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
4884 
4885 		amdgpu_ras_set_fed(adev, true);
4886 		ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
4887 		amdgpu_ras_reset_gpu(adev);
4888 	}
4889 
4890 	return -EBUSY;
4891 }
4892 
4893 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
4894 {
4895 	if (adev->asic_type == CHIP_VEGA20 &&
4896 	    adev->pm.fw_version <= 0x283400) {
4897 		return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
4898 				amdgpu_ras_intr_triggered();
4899 	}
4900 
4901 	return false;
4902 }
4903 
4904 void amdgpu_release_ras_context(struct amdgpu_device *adev)
4905 {
4906 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4907 
4908 	if (!con)
4909 		return;
4910 
4911 	if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
4912 		con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
4913 		amdgpu_ras_set_context(adev, NULL);
4914 		kfree(con);
4915 	}
4916 }
4917 
4918 #ifdef CONFIG_X86_MCE_AMD
4919 static struct amdgpu_device *find_adev(uint32_t node_id)
4920 {
4921 	int i;
4922 	struct amdgpu_device *adev = NULL;
4923 
4924 	for (i = 0; i < mce_adev_list.num_gpu; i++) {
4925 		adev = mce_adev_list.devs[i];
4926 
4927 		if (adev && adev->gmc.xgmi.connected_to_cpu &&
4928 		    adev->gmc.xgmi.physical_node_id == node_id)
4929 			break;
4930 		adev = NULL;
4931 	}
4932 
4933 	return adev;
4934 }
4935 
4936 #define GET_MCA_IPID_GPUID(m)	(((m) >> 44) & 0xF)
4937 #define GET_UMC_INST(m)		(((m) >> 21) & 0x7)
4938 #define GET_CHAN_INDEX(m)	((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
4939 #define GPU_ID_OFFSET		8
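/*
 * Example: an MCA_IPID value whose bits [47:44] read 0x9 decodes to
 * gpu_id = 0x9 - GPU_ID_OFFSET = 1, which find_adev() above matches
 * against gmc.xgmi.physical_node_id.
 */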
4940 
4941 static int amdgpu_bad_page_notifier(struct notifier_block *nb,
4942 				    unsigned long val, void *data)
4943 {
4944 	struct mce *m = (struct mce *)data;
4945 	struct amdgpu_device *adev = NULL;
4946 	uint32_t gpu_id = 0;
4947 	uint32_t umc_inst = 0, ch_inst = 0;
4948 
4949 	/*
4950 	 * Only process the error if it was generated in UMC_V2 (which
4951 	 * belongs to the GPU UMCs) and occurred in DramECC (Extended error
4952 	 * code = 0); otherwise bail out.
4953 	 */
4954 	if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
4955 		    (XEC(m->status, 0x3f) == 0x0)))
4956 		return NOTIFY_DONE;
4957 
4958 	/*
4959 	 * If it is a correctable error, return.
4960 	 */
4961 	if (mce_is_correctable(m))
4962 		return NOTIFY_OK;
4963 
4964 	/*
4965 	 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
4966 	 */
4967 	gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
4968 
4969 	adev = find_adev(gpu_id);
4970 	if (!adev) {
4971 		DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
4972 								gpu_id);
4973 		return NOTIFY_DONE;
4974 	}
4975 
4976 	/*
4977 	 * If it is an uncorrectable error, then find out the UMC instance and
4978 	 * channel index.
4979 	 */
4980 	umc_inst = GET_UMC_INST(m->ipid);
4981 	ch_inst = GET_CHAN_INDEX(m->ipid);
4982 
4983 	dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
4984 			     umc_inst, ch_inst);
4985 
4986 	if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
4987 		return NOTIFY_OK;
4988 	else
4989 		return NOTIFY_DONE;
4990 }
4991 
4992 static struct notifier_block amdgpu_bad_page_nb = {
4993 	.notifier_call  = amdgpu_bad_page_notifier,
4994 	.priority       = MCE_PRIO_UC,
4995 };
4996 
4997 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
4998 {
4999 	/*
5000 	 * Add the adev to the mce_adev_list.
5001 	 * During mode2 reset, amdgpu device is temporarily
5002 	 * removed from the mgpu_info list which can cause
5003 	 * page retirement to fail.
5004 	 * Use this list instead of mgpu_info to find the amdgpu
5005 	 * device on which the UMC error was reported.
5006 	 */
5007 	mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
5008 
5009 	/*
5010 	 * Register the x86 notifier only once
5011 	 * with MCE subsystem.
5012 	 */
5013 	if (!notifier_registered) {
5014 		mce_register_decode_chain(&amdgpu_bad_page_nb);
5015 		notifier_registered = true;
5016 	}
5017 }
5018 static void amdgpu_unregister_bad_pages_mca_notifier(struct amdgpu_device *adev)
5019 {
5020 	int i, j;
5021 
5022 	if (!notifier_registered && !mce_adev_list.num_gpu)
5023 		return;
5024 	for (i = 0, j = 0; i < mce_adev_list.num_gpu; i++) {
5025 		if (mce_adev_list.devs[i] == adev)
5026 			mce_adev_list.devs[i] = NULL;
5027 		if (!mce_adev_list.devs[i])
5028 			++j;
5029 	}
5030 
5031 	if (j == mce_adev_list.num_gpu) {
5032 		mce_adev_list.num_gpu = 0;
5033 		/* Unregister x86 notifier with MCE subsystem. */
5034 		if (notifier_registered) {
5035 			mce_unregister_decode_chain(&amdgpu_bad_page_nb);
5036 			notifier_registered = false;
5037 		}
5038 	}
5039 }
5040 #endif
5041 
5042 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
5043 {
5044 	if (!adev)
5045 		return NULL;
5046 
5047 	return adev->psp.ras_context.ras;
5048 }
5049 
5050 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
5051 {
5052 	if (!adev)
5053 		return -EINVAL;
5054 
5055 	adev->psp.ras_context.ras = ras_con;
5056 	return 0;
5057 }
5058 
5059 /* check if ras is supported on block, say, sdma, gfx */
5060 int amdgpu_ras_is_supported(struct amdgpu_device *adev,
5061 		unsigned int block)
5062 {
5063 	int ret = 0;
5064 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5065 
5066 	if (block >= AMDGPU_RAS_BLOCK_COUNT)
5067 		return 0;
5068 
5069 	ret = ras && (adev->ras_enabled & (1 << block));
5070 
5071 	/* For the special asic with mem ecc enabled but sram ecc
5072 	 * not enabled: even if the ras block is not marked supported
5073 	 * in .ras_enabled, the block can be considered to support the
5074 	 * ras function as long as the asic supports poison mode and
5075 	 * the ras block has a ras configuration.
5076 	 */
5077 	if (!ret &&
5078 	    (block == AMDGPU_RAS_BLOCK__GFX ||
5079 	     block == AMDGPU_RAS_BLOCK__SDMA ||
5080 	     block == AMDGPU_RAS_BLOCK__VCN ||
5081 	     block == AMDGPU_RAS_BLOCK__JPEG) &&
5082 		(amdgpu_ras_mask & (1 << block)) &&
5083 	    amdgpu_ras_is_poison_mode_supported(adev) &&
5084 	    amdgpu_ras_get_ras_block(adev, block, 0))
5085 		ret = 1;
5086 
5087 	return ret;
5088 }
5089 
5090 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
5091 {
5092 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5093 
5094 	/* mode1 is the only selection for RMA status */
5095 	if (amdgpu_ras_is_rma(adev)) {
5096 		ras->gpu_reset_flags = 0;
5097 		ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
5098 	}
5099 
5100 	if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) {
5101 		struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
5102 		int hive_ras_recovery = 0;
5103 
5104 		if (hive) {
5105 			hive_ras_recovery = atomic_read(&hive->ras_recovery);
5106 			amdgpu_put_xgmi_hive(hive);
5107 		}
5108 		/* In the case of multiple GPUs, after a GPU has started
5109 		 * resetting all GPUs on hive, other GPUs do not need to
5110 		 * resetting all GPUs on the hive, other GPUs do not need to
5111 		 */
5112 		if (!hive_ras_recovery)
5113 			amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
5114 		else
5115 			atomic_set(&ras->in_recovery, 0);
5116 	} else {
5117 		flush_work(&ras->recovery_work);
5118 		amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
5119 	}
5120 
5121 	return 0;
5122 }
5123 
5124 int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
5125 {
5126 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5127 	int ret = 0;
5128 
5129 	if (con) {
5130 		ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
5131 		if (!ret)
5132 			con->is_aca_debug_mode = enable;
5133 	}
5134 
5135 	return ret;
5136 }
5137 
5138 int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable)
5139 {
5140 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5141 	int ret = 0;
5142 
5143 	if (con) {
5144 		if (amdgpu_aca_is_enabled(adev))
5145 			ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
5146 		else
5147 			ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
5148 		if (!ret)
5149 			con->is_aca_debug_mode = enable;
5150 	}
5151 
5152 	return ret;
5153 }
5154 
5155 bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev)
5156 {
5157 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5158 	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
5159 	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
5160 
5161 	if (!con)
5162 		return false;
5163 
5164 	if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) ||
5165 	    (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode))
5166 		return con->is_aca_debug_mode;
5167 	else
5168 		return true;
5169 }
5170 
5171 bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
5172 				     unsigned int *error_query_mode)
5173 {
5174 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5175 	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
5176 	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
5177 
5178 	if (!con) {
5179 		*error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
5180 		return false;
5181 	}
5182 
5183 	if (amdgpu_sriov_vf(adev)) {
5184 		*error_query_mode = AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY;
5185 	} else if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) {
5186 		*error_query_mode =
5187 			(con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
5188 	} else {
5189 		*error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
5190 	}
5191 
5192 	return true;
5193 }
5194 
5195 /* Register each ip ras block into amdgpu ras */
5196 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
5197 		struct amdgpu_ras_block_object *ras_block_obj)
5198 {
5199 	struct amdgpu_ras_block_list *ras_node;
5200 	if (!adev || !ras_block_obj)
5201 		return -EINVAL;
5202 
5203 	ras_node = kzalloc_obj(*ras_node);
5204 	if (!ras_node)
5205 		return -ENOMEM;
5206 
5207 	INIT_LIST_HEAD(&ras_node->node);
5208 	ras_node->ras_obj = ras_block_obj;
5209 	list_add_tail(&ras_node->node, &adev->ras_list);
5210 
5211 	return 0;
5212 }
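/*
 * Illustrative call pattern (identifiers are examples only): an IP
 * block registers its RAS object during sw init, e.g.
 *
 *	r = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
 *	if (r)
 *		return r;
 */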
5213 
5214 void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
5215 {
5216 	if (!err_type_name)
5217 		return;
5218 
5219 	switch (err_type) {
5220 	case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
5221 		sprintf(err_type_name, "correctable");
5222 		break;
5223 	case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
5224 		sprintf(err_type_name, "uncorrectable");
5225 		break;
5226 	default:
5227 		sprintf(err_type_name, "unknown");
5228 		break;
5229 	}
5230 }
5231 
5232 bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
5233 					 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
5234 					 uint32_t instance,
5235 					 uint32_t *memory_id)
5236 {
5237 	uint32_t err_status_lo_data, err_status_lo_offset;
5238 
5239 	if (!reg_entry)
5240 		return false;
5241 
5242 	err_status_lo_offset =
5243 		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
5244 					    reg_entry->seg_lo, reg_entry->reg_lo);
5245 	err_status_lo_data = RREG32(err_status_lo_offset);
5246 
5247 	if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
5248 	    !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
5249 		return false;
5250 
5251 	*memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);
5252 
5253 	return true;
5254 }
5255 
5256 bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
5257 				       const struct amdgpu_ras_err_status_reg_entry *reg_entry,
5258 				       uint32_t instance,
5259 				       unsigned long *err_cnt)
5260 {
5261 	uint32_t err_status_hi_data, err_status_hi_offset;
5262 
5263 	if (!reg_entry)
5264 		return false;
5265 
5266 	err_status_hi_offset =
5267 		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
5268 					    reg_entry->seg_hi, reg_entry->reg_hi);
5269 	err_status_hi_data = RREG32(err_status_hi_offset);
5270 
5271 	if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
5272 	    !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
5273 		/* keep the check here in case we need to refer to the result later */
5274 		dev_dbg(adev->dev, "Invalid err_info field\n");
5275 
5276 	/* read err count */
5277 	*err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);
5278 
5279 	return true;
5280 }
5281 
5282 void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
5283 					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
5284 					   uint32_t reg_list_size,
5285 					   const struct amdgpu_ras_memory_id_entry *mem_list,
5286 					   uint32_t mem_list_size,
5287 					   uint32_t instance,
5288 					   uint32_t err_type,
5289 					   unsigned long *err_count)
5290 {
5291 	uint32_t memory_id;
5292 	unsigned long err_cnt;
5293 	char err_type_name[16];
5294 	uint32_t i, j;
5295 
5296 	for (i = 0; i < reg_list_size; i++) {
5297 		/* query memory_id from err_status_lo */
5298 		if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
5299 							 instance, &memory_id))
5300 			continue;
5301 
5302 		/* query err_cnt from err_status_hi */
5303 		if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
5304 						       instance, &err_cnt) ||
5305 		    !err_cnt)
5306 			continue;
5307 
5308 		*err_count += err_cnt;
5309 
5310 		/* log the errors */
5311 		amdgpu_ras_get_error_type_name(err_type, err_type_name);
5312 		if (!mem_list) {
5313 			/* no memory-id table provided; log the raw memory_id */
5314 			dev_info(adev->dev,
5315 				 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
5316 				 err_cnt, err_type_name,
5317 				 reg_list[i].block_name,
5318 				 instance, memory_id);
5319 		} else {
5320 			for (j = 0; j < mem_list_size; j++) {
5321 				if (memory_id == mem_list[j].memory_id) {
5322 					dev_info(adev->dev,
5323 						 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
5324 						 err_cnt, err_type_name,
5325 						 reg_list[i].block_name,
5326 						 instance, mem_list[j].name);
5327 					break;
5328 				}
5329 			}
5330 		}
5331 	}
5332 }
5333 
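/*
 * Illustrative sketch of a caller (hypothetical tables; they only show the
 * shape of the amdgpu_ras_err_status_reg_entry and
 * amdgpu_ras_memory_id_entry arrays an IP block would supply):
 *
 *	static const struct amdgpu_ras_err_status_reg_entry my_reg_list[] = {
 *		{ .block_name = "my_block", ... },
 *	};
 *	static const struct amdgpu_ras_memory_id_entry my_mem_list[] = {
 *		{ .memory_id = 0x0, .name = "mem0" },
 *	};
 *
 *	unsigned long err_count = 0;
 *
 *	amdgpu_ras_inst_query_ras_error_count(adev,
 *					      my_reg_list, ARRAY_SIZE(my_reg_list),
 *					      my_mem_list, ARRAY_SIZE(my_mem_list),
 *					      instance,
 *					      AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
 *					      &err_count);
 *
 * Note that *err_count is accumulated rather than reset, so callers must
 * initialize it before the first call.
 */
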
5334 void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
5335 					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
5336 					   uint32_t reg_list_size,
5337 					   uint32_t instance)
5338 {
5339 	uint32_t err_status_lo_offset, err_status_hi_offset;
5340 	uint32_t i;
5341 
5342 	for (i = 0; i < reg_list_size; i++) {
5343 		err_status_lo_offset =
5344 			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
5345 						    reg_list[i].seg_lo, reg_list[i].reg_lo);
5346 		err_status_hi_offset =
5347 			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
5348 						    reg_list[i].seg_hi, reg_list[i].reg_hi);
5349 		WREG32(err_status_lo_offset, 0);
5350 		WREG32(err_status_hi_offset, 0);
5351 	}
5352 }
5353 
5354 int amdgpu_ras_error_data_init(struct ras_err_data *err_data)
5355 {
5356 	memset(err_data, 0, sizeof(*err_data));
5357 
5358 	INIT_LIST_HEAD(&err_data->err_node_list);
5359 
5360 	return 0;
5361 }
5362 
5363 static void amdgpu_ras_error_node_release(struct ras_err_node *err_node)
5364 {
5365 	if (!err_node)
5366 		return;
5367 
5368 	list_del(&err_node->node);
5369 	kvfree(err_node);
5370 }
5371 
5372 void amdgpu_ras_error_data_fini(struct ras_err_data *err_data)
5373 {
5374 	struct ras_err_node *err_node, *tmp;
5375 
5376 	list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
5377 		amdgpu_ras_error_node_release(err_node);
5378 }
5379 
5380 static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data,
5381 							     struct amdgpu_smuio_mcm_config_info *mcm_info)
5382 {
5383 	struct ras_err_node *err_node;
5384 	struct amdgpu_smuio_mcm_config_info *ref_id;
5385 
5386 	if (!err_data || !mcm_info)
5387 		return NULL;
5388 
5389 	for_each_ras_error(err_node, err_data) {
5390 		ref_id = &err_node->err_info.mcm_info;
5391 
5392 		if (mcm_info->socket_id == ref_id->socket_id &&
5393 		    mcm_info->die_id == ref_id->die_id)
5394 			return err_node;
5395 	}
5396 
5397 	return NULL;
5398 }
5399 
5400 static struct ras_err_node *amdgpu_ras_error_node_new(void)
5401 {
5402 	struct ras_err_node *err_node;
5403 
5404 	err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL);
5405 	if (!err_node)
5406 		return NULL;
5407 
5408 	INIT_LIST_HEAD(&err_node->node);
5409 
5410 	return err_node;
5411 }
5412 
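/* list_sort() comparator: order error nodes by socket_id first, then by
 * die_id, so the per-(socket, die) error info list stays sorted.
 */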
5413 static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
5414 {
5415 	struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
5416 	struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
5417 	struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
5418 	struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;
5419 
5420 	if (unlikely(infoa->socket_id != infob->socket_id))
5421 		return infoa->socket_id - infob->socket_id;
5422 	else
5423 		return infoa->die_id - infob->die_id;
5426 }
5427 
5428 static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
5429 				struct amdgpu_smuio_mcm_config_info *mcm_info)
5430 {
5431 	struct ras_err_node *err_node;
5432 
5433 	err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info);
5434 	if (err_node)
5435 		return &err_node->err_info;
5436 
5437 	err_node = amdgpu_ras_error_node_new();
5438 	if (!err_node)
5439 		return NULL;
5440 
5441 	memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
5442 
5443 	err_data->err_list_count++;
5444 	list_add_tail(&err_node->node, &err_data->err_node_list);
5445 	list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
5446 
5447 	return &err_node->err_info;
5448 }
5449 
5450 int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
5451 					struct amdgpu_smuio_mcm_config_info *mcm_info,
5452 					u64 count)
5453 {
5454 	struct ras_err_info *err_info;
5455 
5456 	if (!err_data || !mcm_info)
5457 		return -EINVAL;
5458 
5459 	if (!count)
5460 		return 0;
5461 
5462 	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
5463 	if (!err_info)
5464 		return -EINVAL;
5465 
5466 	err_info->ue_count += count;
5467 	err_data->ue_count += count;
5468 
5469 	return 0;
5470 }
5471 
5472 int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
5473 					struct amdgpu_smuio_mcm_config_info *mcm_info,
5474 					u64 count)
5475 {
5476 	struct ras_err_info *err_info;
5477 
5478 	if (!err_data || !mcm_info)
5479 		return -EINVAL;
5480 
5481 	if (!count)
5482 		return 0;
5483 
5484 	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
5485 	if (!err_info)
5486 		return -EINVAL;
5487 
5488 	err_info->ce_count += count;
5489 	err_data->ce_count += count;
5490 
5491 	return 0;
5492 }
5493 
5494 int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
5495 					struct amdgpu_smuio_mcm_config_info *mcm_info,
5496 					u64 count)
5497 {
5498 	struct ras_err_info *err_info;
5499 
5500 	if (!err_data || !mcm_info)
5501 		return -EINVAL;
5502 
5503 	if (!count)
5504 		return 0;
5505 
5506 	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
5507 	if (!err_info)
5508 		return -EINVAL;
5509 
5510 	err_info->de_count += count;
5511 	err_data->de_count += count;
5512 
5513 	return 0;
5514 }
5515 
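/*
 * Typical ras_err_data lifecycle (illustrative sketch; the mcm values are
 * placeholders):
 *
 *	struct ras_err_data err_data;
 *	struct amdgpu_smuio_mcm_config_info mcm = {
 *		.socket_id = 0,
 *		.die_id = 0,
 *	};
 *
 *	amdgpu_ras_error_data_init(&err_data);
 *
 *	amdgpu_ras_error_statistic_ce_count(&err_data, &mcm, 1);
 *	amdgpu_ras_error_statistic_ue_count(&err_data, &mcm, 2);
 *
 *	... consume err_data.ce_count / err_data.ue_count ...
 *
 *	amdgpu_ras_error_data_fini(&err_data);
 *
 * A bucket (ras_err_info) is created on demand per (socket_id, die_id)
 * pair and the list is kept sorted by ras_err_info_cmp().
 */
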
5516 #define mmMP0_SMN_C2PMSG_92	0x1609C
5517 #define mmMP0_SMN_C2PMSG_126	0x160BE
5518 static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
5519 						 u32 instance)
5520 {
5521 	u32 socket_id, aid_id, hbm_id;
5522 	u32 fw_status;
5523 	u32 boot_error;
5524 	u64 reg_addr;
5525 
5526 	/* The SMN addressing pattern on other SOCs may differ from the one
5527 	 * used by aqua_vanjaram. Revisit this code if the pattern changes;
5528 	 * in that case, replace the aqua_vanjaram implementation with a
5529 	 * more generic helper. */
5530 	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
5531 		   aqua_vanjaram_encode_ext_smn_addressing(instance);
5532 	fw_status = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
5533 
5534 	reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
5535 		   aqua_vanjaram_encode_ext_smn_addressing(instance);
5536 	boot_error = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
5537 
5538 	socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
5539 	aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error);
5540 	hbm_id = ((AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error) == 1) ? 0 : 1);
5541 
5542 	if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
5543 		dev_info(adev->dev,
5544 			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, memory training failed\n",
5545 			 socket_id, aid_id, hbm_id, fw_status);
5546 
5547 	if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
5548 		dev_info(adev->dev,
5549 			 "socket: %d, aid: %d, fw_status: 0x%x, firmware load failed at boot time\n",
5550 			 socket_id, aid_id, fw_status);
5551 
5552 	if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
5553 		dev_info(adev->dev,
5554 			 "socket: %d, aid: %d, fw_status: 0x%x, wafl link training failed\n",
5555 			 socket_id, aid_id, fw_status);
5556 
5557 	if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
5558 		dev_info(adev->dev,
5559 			 "socket: %d, aid: %d, fw_status: 0x%x, xgmi link training failed\n",
5560 			 socket_id, aid_id, fw_status);
5561 
5562 	if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
5563 		dev_info(adev->dev,
5564 			 "socket: %d, aid: %d, fw_status: 0x%x, usr cp link training failed\n",
5565 			 socket_id, aid_id, fw_status);
5566 
5567 	if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
5568 		dev_info(adev->dev,
5569 			 "socket: %d, aid: %d, fw_status: 0x%x, usr dp link training failed\n",
5570 			 socket_id, aid_id, fw_status);
5571 
5572 	if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
5573 		dev_info(adev->dev,
5574 			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm memory test failed\n",
5575 			 socket_id, aid_id, hbm_id, fw_status);
5576 
5577 	if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
5578 		dev_info(adev->dev,
5579 			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n",
5580 			 socket_id, aid_id, hbm_id, fw_status);
5581 
5582 	if (AMDGPU_RAS_GPU_ERR_DATA_ABORT(boot_error))
5583 		dev_info(adev->dev,
5584 			 "socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
5585 			 socket_id, aid_id, fw_status);
5586 
5587 	if (AMDGPU_RAS_GPU_ERR_GENERIC(boot_error))
5588 		dev_info(adev->dev,
5589 			 "socket: %d, aid: %d, fw_status: 0x%x, Boot Controller Generic Error\n",
5590 			 socket_id, aid_id, fw_status);
5591 }
5592 
5593 static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev,
5594 					   u32 instance)
5595 {
5596 	u64 reg_addr;
5597 	u32 reg_data;
5598 	int retry_loop;
5599 
5600 	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
5601 		   aqua_vanjaram_encode_ext_smn_addressing(instance);
5602 
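	/* Poll C2PMSG_92 until the boot status field reports steady state;
	 * if it does not within the polling limit, report a boot error on
	 * this instance.
	 */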
5603 	for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
5604 		reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
5605 		if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS)
5606 			return false;
5607 		msleep(1);
5609 	}
5610 
5611 	return true;
5612 }
5613 
5614 void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
5615 {
5616 	u32 i;
5617 
5618 	for (i = 0; i < num_instances; i++) {
5619 		if (amdgpu_ras_boot_error_detected(adev, i))
5620 			amdgpu_ras_boot_time_error_reporting(adev, i);
5621 	}
5622 }
5623 
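/* Reserve the VRAM page containing a retired pfn, unless it falls inside a
 * registered critical region. -ENOENT from the page-status query means the
 * page has not been reserved yet; any other status is returned as-is.
 */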
5624 int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
5625 {
5626 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5627 	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
5628 	uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
5629 	int ret = 0;
5630 
5631 	if (amdgpu_ras_check_critical_address(adev, start))
5632 		return 0;
5633 
5634 	mutex_lock(&con->page_rsv_lock);
5635 	ret = amdgpu_vram_mgr_query_page_status(mgr, start);
5636 	if (ret == -ENOENT)
5637 		ret = amdgpu_vram_mgr_reserve_range(mgr, start, AMDGPU_GPU_PAGE_SIZE);
5638 	mutex_unlock(&con->page_rsv_lock);
5639 
5640 	return ret;
5641 }
5642 
5643 void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
5644 				const char *fmt, ...)
5645 {
5646 	struct va_format vaf;
5647 	va_list args;
5648 
5649 	va_start(args, fmt);
5650 	vaf.fmt = fmt;
5651 	vaf.va = &args;
5652 
5653 	if (RAS_EVENT_ID_IS_VALID(event_id))
5654 		dev_printk(KERN_INFO, adev->dev, "{%llu}%pV", event_id, &vaf);
5655 	else
5656 		dev_printk(KERN_INFO, adev->dev, "%pV", &vaf);
5657 
5658 	va_end(args);
5659 }
5660 
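/*
 * Illustrative usage ("event_id" and the message are placeholders; a real
 * caller obtains the id from the RAS event management code):
 *
 *	amdgpu_ras_event_log_print(adev, event_id,
 *				   "%ld correctable hardware errors detected\n",
 *				   ce_count);
 *
 * With a valid event id the message is prefixed with "{<id>}" so related
 * log lines can be correlated.
 */
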
5661 bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
5662 {
5663 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5664 
5665 	if (amdgpu_uniras_enabled(adev))
5666 		return amdgpu_ras_mgr_is_rma(adev);
5667 
5668 	if (!con)
5669 		return false;
5670 
5671 	return con->is_rma;
5672 }
5673 
5674 int amdgpu_ras_add_critical_region(struct amdgpu_device *adev,
5675 			struct amdgpu_bo *bo)
5676 {
5677 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5678 	struct amdgpu_vram_mgr_resource *vres;
5679 	struct ras_critical_region *region;
5680 	struct drm_buddy_block *block;
5681 	int ret = 0;
5682 
5683 	if (!bo || !bo->tbo.resource)
5684 		return -EINVAL;
5685 
5686 	vres = to_amdgpu_vram_mgr_resource(bo->tbo.resource);
5687 
5688 	mutex_lock(&con->critical_region_lock);
5689 
5690 	/* Check whether this bo has already been recorded */
5691 	list_for_each_entry(region, &con->critical_region_head, node)
5692 		if (region->bo == bo)
5693 			goto out;
5694 
5695 	/* Record each VRAM block backing the new critical bo */
5696 	list_for_each_entry(block, &vres->blocks, link) {
5697 		region = kzalloc(sizeof(*region), GFP_KERNEL);
5698 		if (!region) {
5699 			ret = -ENOMEM;
5700 			goto out;
5701 		}
5702 		region->bo = bo;
5703 		region->start = amdgpu_vram_mgr_block_start(block);
5704 		region->size = amdgpu_vram_mgr_block_size(block);
5705 		list_add_tail(&region->node, &con->critical_region_head);
5706 	}
5707 
5708 out:
5709 	mutex_unlock(&con->critical_region_lock);
5710 
5711 	return ret;
5712 }
5713 
5714 static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev)
5715 {
5716 	amdgpu_ras_add_critical_region(adev, adev->mman.fw_reserved_memory);
5717 }
5718 
5719 static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev)
5720 {
5721 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5722 	struct ras_critical_region *region, *tmp;
5723 
5724 	mutex_lock(&con->critical_region_lock);
5725 	list_for_each_entry_safe(region, tmp, &con->critical_region_head, node) {
5726 		list_del(&region->node);
5727 		kfree(region);
5728 	}
5729 	mutex_unlock(&con->critical_region_lock);
5730 }
5731 
5732 bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr)
5733 {
5734 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5735 	struct ras_critical_region *region;
5736 	bool ret = false;
5737 
5738 	mutex_lock(&con->critical_region_lock);
5739 	list_for_each_entry(region, &con->critical_region_head, node) {
5740 		if ((region->start <= addr) &&
5741 		    (addr < (region->start + region->size))) {
5742 			ret = true;
5743 			break;
5744 		}
5745 	}
5746 	mutex_unlock(&con->critical_region_lock);
5747 
5748 	return ret;
5749 }
5750 
5751 void amdgpu_ras_pre_reset(struct amdgpu_device *adev,
5752 					  struct list_head *device_list)
5753 {
5754 	struct amdgpu_device *tmp_adev = NULL;
5755 
5756 	list_for_each_entry(tmp_adev, device_list, reset_list) {
5757 		if (amdgpu_uniras_enabled(tmp_adev))
5758 			amdgpu_ras_mgr_pre_reset(tmp_adev);
5759 	}
5760 }
5761 
5762 void amdgpu_ras_post_reset(struct amdgpu_device *adev,
5763 					  struct list_head *device_list)
5764 {
5765 	struct amdgpu_device *tmp_adev = NULL;
5766 
5767 	list_for_each_entry(tmp_adev, device_list, reset_list) {
5768 		if (amdgpu_uniras_enabled(tmp_adev))
5769 			amdgpu_ras_mgr_post_reset(tmp_adev);
5770 	}
5771 }
5772