xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c (revision 5946dbe1c802efef3b12a4eecab1471f725f4ca9)
1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  *
23  */
24 #include <linux/debugfs.h>
25 #include <linux/list.h>
26 #include <linux/module.h>
27 #include <linux/uaccess.h>
28 #include <linux/reboot.h>
29 #include <linux/syscalls.h>
30 #include <linux/pm_runtime.h>
31 #include <linux/list_sort.h>
32 
33 #include "amdgpu.h"
34 #include "amdgpu_ras.h"
35 #include "amdgpu_atomfirmware.h"
36 #include "amdgpu_xgmi.h"
37 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
38 #include "nbio_v4_3.h"
39 #include "nbif_v6_3_1.h"
40 #include "nbio_v7_9.h"
41 #include "atom.h"
42 #include "amdgpu_reset.h"
43 #include "amdgpu_psp.h"
44 #include "amdgpu_ras_mgr.h"
45 
46 #ifdef CONFIG_X86_MCE_AMD
47 #include <asm/mce.h>
48 
49 static bool notifier_registered;
50 #endif
51 static const char *RAS_FS_NAME = "ras";
52 
53 const char *ras_error_string[] = {
54 	"none",
55 	"parity",
56 	"single_correctable",
57 	"multi_uncorrectable",
58 	"poison",
59 };
60 
61 const char *ras_block_string[] = {
62 	"umc",
63 	"sdma",
64 	"gfx",
65 	"mmhub",
66 	"athub",
67 	"pcie_bif",
68 	"hdp",
69 	"xgmi_wafl",
70 	"df",
71 	"smn",
72 	"sem",
73 	"mp0",
74 	"mp1",
75 	"fuse",
76 	"mca",
77 	"vcn",
78 	"jpeg",
79 	"ih",
80 	"mpio",
81 	"mmsch",
82 };
83 
84 const char *ras_mca_block_string[] = {
85 	"mca_mp0",
86 	"mca_mp1",
87 	"mca_mpio",
88 	"mca_iohc",
89 };
90 
91 struct amdgpu_ras_block_list {
92 	/* ras block link */
93 	struct list_head node;
94 
95 	struct amdgpu_ras_block_object *ras_obj;
96 };
97 
98 const char *get_ras_block_str(struct ras_common_if *ras_block)
99 {
100 	if (!ras_block)
101 		return "NULL";
102 
103 	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
104 	    ras_block->block >= ARRAY_SIZE(ras_block_string))
105 		return "OUT OF RANGE";
106 
107 	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
108 		return ras_mca_block_string[ras_block->sub_block_index];
109 
110 	return ras_block_string[ras_block->block];
111 }
112 
113 #define ras_block_str(_BLOCK_) \
114 	(((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")
115 
116 #define ras_err_str(i) (ras_error_string[ffs(i)])
117 
118 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
119 
120 /* inject address is 52 bits */
121 #define	RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)
122 
123 /* typical ECC bad page rate is 1 bad page per 100MB VRAM */
124 #define RAS_BAD_PAGE_COVER              (100 * 1024 * 1024ULL)
125 
126 #define MAX_UMC_POISON_POLLING_TIME_ASYNC  10
127 
128 #define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100  //ms
129 
130 #define MAX_FLUSH_RETIRE_DWORK_TIMES  100
131 
132 #define BYPASS_ALLOCATED_ADDRESS        0x0
133 #define BYPASS_INITIALIZATION_ADDRESS   0x1
134 
135 enum amdgpu_ras_retire_page_reservation {
136 	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
137 	AMDGPU_RAS_RETIRE_PAGE_PENDING,
138 	AMDGPU_RAS_RETIRE_PAGE_FAULT,
139 };
140 
141 atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
142 
143 static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
144 				uint64_t addr);
145 static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
146 				uint64_t addr);
147 
148 static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev);
149 static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev);
150 
151 #ifdef CONFIG_X86_MCE_AMD
152 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
153 static void
154 amdgpu_unregister_bad_pages_mca_notifier(struct amdgpu_device *adev);
155 struct mce_notifier_adev_list {
156 	struct amdgpu_device *devs[MAX_GPU_INSTANCE];
157 	int num_gpu;
158 };
159 static struct mce_notifier_adev_list mce_adev_list;
160 #endif
161 
162 void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
163 {
164 	if (adev && amdgpu_ras_get_context(adev))
165 		amdgpu_ras_get_context(adev)->error_query_ready = ready;
166 }
167 
168 static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
169 {
170 	if (adev && amdgpu_ras_get_context(adev))
171 		return amdgpu_ras_get_context(adev)->error_query_ready;
172 
173 	return false;
174 }
175 
176 static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
177 {
178 	struct ras_err_data err_data;
179 	struct eeprom_table_record err_rec;
180 	int ret;
181 
182 	ret = amdgpu_ras_check_bad_page(adev, address);
183 	if (ret == -EINVAL) {
184 		dev_warn(adev->dev,
185 			"RAS WARN: input address 0x%llx is invalid.\n",
186 			address);
187 		return -EINVAL;
188 	} else if (ret == 1) {
189 		dev_warn(adev->dev,
190 			"RAS WARN: 0x%llx has already been marked as bad page!\n",
191 			address);
192 		return 0;
193 	}
194 
195 	ret = amdgpu_ras_error_data_init(&err_data);
196 	if (ret)
197 		return ret;
198 
199 	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
200 	err_data.err_addr = &err_rec;
201 	amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);
202 
203 	if (amdgpu_bad_page_threshold != 0) {
204 		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
205 					 err_data.err_addr_cnt, false);
206 		amdgpu_ras_save_bad_pages(adev, NULL);
207 	}
208 
209 	amdgpu_ras_error_data_fini(&err_data);
210 
211 	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
212 	dev_warn(adev->dev, "Clear EEPROM:\n");
213 	dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
214 
215 	return 0;
216 }
217 
218 static int amdgpu_check_address_validity(struct amdgpu_device *adev,
219 			uint64_t address, uint64_t flags)
220 {
221 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
222 	struct amdgpu_vram_block_info blk_info;
223 	uint64_t page_pfns[32] = {0};
224 	int i, ret, count;
225 	bool hit = false;
226 
227 	if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0))
228 		return 0;
229 
230 	if (amdgpu_sriov_vf(adev)) {
231 		if (amdgpu_virt_check_vf_critical_region(adev, address, &hit))
232 			return -EPERM;
233 		return hit ? -EACCES : 0;
234 	}
235 
236 	if ((address >= adev->gmc.mc_vram_size) ||
237 	    (address >= RAS_UMC_INJECT_ADDR_LIMIT))
238 		return -EFAULT;
239 
240 	if (amdgpu_uniras_enabled(adev))
241 		count = amdgpu_ras_mgr_lookup_bad_pages_in_a_row(adev, address,
242 			page_pfns, ARRAY_SIZE(page_pfns));
243 	else
244 		count = amdgpu_umc_lookup_bad_pages_in_a_row(adev,
245 				address, page_pfns, ARRAY_SIZE(page_pfns));
246 
247 	if (count <= 0)
248 		return -EPERM;
249 
250 	for (i = 0; i < count; i++) {
251 		memset(&blk_info, 0, sizeof(blk_info));
252 		ret = amdgpu_vram_mgr_query_address_block_info(&adev->mman.vram_mgr,
253 					page_pfns[i] << AMDGPU_GPU_PAGE_SHIFT, &blk_info);
254 		if (!ret) {
255 			/* The input address that needs to be checked is allocated by
256 			 * the current calling process, so it is necessary to exclude
257 			 * the calling process.
258 			 */
259 			if ((flags == BYPASS_ALLOCATED_ADDRESS) &&
260 			    ((blk_info.task.pid != task_pid_nr(current)) ||
261 				strncmp(blk_info.task.comm, current->comm, TASK_COMM_LEN)))
262 				return -EACCES;
263 			else if ((flags == BYPASS_INITIALIZATION_ADDRESS) &&
264 				(blk_info.task.pid == con->init_task_pid) &&
265 				!strncmp(blk_info.task.comm, con->init_task_comm, TASK_COMM_LEN))
266 				return -EACCES;
267 		}
268 	}
269 
270 	return 0;
271 }
272 
273 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
274 					size_t size, loff_t *pos)
275 {
276 	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
277 	struct ras_query_if info = {
278 		.head = obj->head,
279 	};
280 	ssize_t s;
281 	char val[128];
282 
283 	if (amdgpu_ras_query_error_status(obj->adev, &info))
284 		return -EINVAL;
285 
286 	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
287 	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
288 	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
289 		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
290 			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
291 	}
292 
293 	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
294 			"ue", info.ue_count,
295 			"ce", info.ce_count);
296 	if (*pos >= s)
297 		return 0;
298 
299 	s -= *pos;
300 	s = min_t(u64, s, size);
301 
302 
303 	if (copy_to_user(buf, &val[*pos], s))
304 		return -EINVAL;
305 
306 	*pos += s;
307 
308 	return s;
309 }
310 
311 static const struct file_operations amdgpu_ras_debugfs_ops = {
312 	.owner = THIS_MODULE,
313 	.read = amdgpu_ras_debugfs_read,
314 	.write = NULL,
315 	.llseek = default_llseek
316 };
317 
318 static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
319 {
320 	int i;
321 
322 	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
323 		*block_id = i;
324 		if (strcmp(name, ras_block_string[i]) == 0)
325 			return 0;
326 	}
327 	return -EINVAL;
328 }
329 
330 static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
331 		const char __user *buf, size_t size,
332 		loff_t *pos, struct ras_debug_if *data)
333 {
334 	ssize_t s = min_t(u64, 64, size);
335 	char str[65];
336 	char block_name[33];
337 	char err[9] = "ue";
338 	int op = -1;
339 	int block_id;
340 	uint32_t sub_block;
341 	u64 address, value;
342 	/* default value is 0 if the mask is not set by user */
343 	u32 instance_mask = 0;
344 
345 	if (*pos)
346 		return -EINVAL;
347 	*pos = size;
348 
349 	memset(str, 0, sizeof(str));
350 	memset(data, 0, sizeof(*data));
351 
352 	if (copy_from_user(str, buf, s))
353 		return -EINVAL;
354 
355 	if (sscanf(str, "disable %32s", block_name) == 1)
356 		op = 0;
357 	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
358 		op = 1;
359 	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
360 		op = 2;
361 	else if (strstr(str, "retire_page") != NULL)
362 		op = 3;
363 	else if (strstr(str, "check_address") != NULL)
364 		op = 4;
365 	else if (str[0] && str[1] && str[2] && str[3])
366 		/* ASCII string, but no command matched. */
367 		return -EINVAL;
368 
369 	if (op != -1) {
370 		if (op == 3) {
371 			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
372 			    sscanf(str, "%*s %llu", &address) != 1)
373 				return -EINVAL;
374 
375 			data->op = op;
376 			data->inject.address = address;
377 
378 			return 0;
379 		} else if (op == 4) {
380 			if (sscanf(str, "%*s 0x%llx 0x%llx", &address, &value) != 2 &&
381 			    sscanf(str, "%*s %llu %llu", &address, &value) != 2)
382 				return -EINVAL;
383 
384 			data->op = op;
385 			data->inject.address = address;
386 			data->inject.value = value;
387 			return 0;
388 		}
389 
390 		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
391 			return -EINVAL;
392 
393 		data->head.block = block_id;
394 		/* only ue, ce and poison errors are supported */
395 		if (!memcmp("ue", err, 2))
396 			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
397 		else if (!memcmp("ce", err, 2))
398 			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
399 		else if (!memcmp("poison", err, 6))
400 			data->head.type = AMDGPU_RAS_ERROR__POISON;
401 		else
402 			return -EINVAL;
403 
404 		data->op = op;
405 
406 		if (op == 2) {
407 			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
408 				   &sub_block, &address, &value, &instance_mask) != 4 &&
409 			    sscanf(str, "%*s %*s %*s %u %llu %llu %u",
410 				   &sub_block, &address, &value, &instance_mask) != 4 &&
411 				sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
412 				   &sub_block, &address, &value) != 3 &&
413 			    sscanf(str, "%*s %*s %*s %u %llu %llu",
414 				   &sub_block, &address, &value) != 3)
415 				return -EINVAL;
416 			data->head.sub_block_index = sub_block;
417 			data->inject.address = address;
418 			data->inject.value = value;
419 			data->inject.instance_mask = instance_mask;
420 		}
421 	} else {
422 		if (size < sizeof(*data))
423 			return -EINVAL;
424 
425 		if (copy_from_user(data, buf, sizeof(*data)))
426 			return -EINVAL;
427 	}
428 
429 	return 0;
430 }
431 
432 static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
433 				struct ras_debug_if *data)
434 {
435 	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
436 	uint32_t mask, inst_mask = data->inject.instance_mask;
437 
438 	/* no need to set instance mask if there is only one instance */
439 	if (num_xcc <= 1 && inst_mask) {
440 		data->inject.instance_mask = 0;
441 		dev_dbg(adev->dev,
442 			"RAS inject mask (0x%x) isn't supported; forcing it to 0.\n",
443 			inst_mask);
444 
445 		return;
446 	}
447 
448 	switch (data->head.block) {
449 	case AMDGPU_RAS_BLOCK__GFX:
450 		mask = GENMASK(num_xcc - 1, 0);
451 		break;
452 	case AMDGPU_RAS_BLOCK__SDMA:
453 		mask = GENMASK(adev->sdma.num_instances - 1, 0);
454 		break;
455 	case AMDGPU_RAS_BLOCK__VCN:
456 	case AMDGPU_RAS_BLOCK__JPEG:
457 		mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
458 		break;
459 	default:
460 		mask = inst_mask;
461 		break;
462 	}
463 
464 	/* remove invalid bits in instance mask */
465 	data->inject.instance_mask &= mask;
466 	if (inst_mask != data->inject.instance_mask)
467 		dev_dbg(adev->dev,
468 			"Adjust RAS inject mask 0x%x to 0x%x\n",
469 			inst_mask, data->inject.instance_mask);
470 }
471 
472 /**
473  * DOC: AMDGPU RAS debugfs control interface
474  *
475  * The control interface accepts struct ras_debug_if which has two members.
476  *
477  * First member: ras_debug_if::head or ras_debug_if::inject.
478  *
479  * head is used to indicate which IP block will be under control.
480  *
481  * head has four members: block, type, sub_block_index, and name.
482  * block: which IP will be under control.
483  * type: what kind of error will be enabled/disabled/injected.
484  * sub_block_index: some IPs have subcomponents, e.g., GFX, SDMA.
485  * name: the name of the IP.
486  *
487  * inject has three more members than head: address, value, and mask.
488  * As their names indicate, the inject operation will write the
489  * value to the address.
490  *
491  * The second member: struct ras_debug_if::op.
492  * It has three kinds of operations.
493  *
494  * - 0: disable RAS on the block. Take ::head as its data.
495  * - 1: enable RAS on the block. Take ::head as its data.
496  * - 2: inject errors on the block. Take ::inject as its data.
497  *
498  * How to use the interface?
499  *
500  * In a program
501  *
502  * Copy the struct ras_debug_if in your code and initialize it.
503  * Write the struct to the control interface.
504  *
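 * A minimal userspace sketch of the binary path is shown below. This is
 * illustrative only: the struct ras_debug_if layout must match the
 * running kernel's definition, and includes and error handling are
 * omitted.
 *
 * .. code-block:: c
 *
 *	struct ras_debug_if data = { 0 };
 *	int fd;
 *
 *	data.op = 1;	/* 1 == enable RAS on the block */
 *	data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *	data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *
 *	fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *	write(fd, &data, sizeof(data));
 *	close(fd);
 *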
505  * From shell
506  *
507  * .. code-block:: bash
508  *
509  *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
510  *	echo "enable  <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
511  *	echo "inject  <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
512  *
513  * Where N is the card you want to affect.
514  *
515  * "disable" requires only the block.
516  * "enable" requires the block and error type.
517  * "inject" requires the block, error type, address, and value.
518  *
519  * The block is one of: umc, sdma, gfx, etc.
520  *	see ras_block_string[] for details
521  *
522  * The error type is one of: ue, ce and poison, where
523  *	ue is multi-uncorrectable
524  *	ce is single-correctable
525  *	poison is poison
526  *
527  * The sub-block is the sub-block index; pass 0 if there is no sub-block.
528  * The address and value are hexadecimal numbers; the leading 0x is optional.
529  * The mask is the instance mask; it is optional, with a default value of 0x1.
530  *
531  * For instance,
532  *
533  * .. code-block:: bash
534  *
535  *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
536  *	echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
537  *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
538  *
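 * Two additional debug-only commands are accepted by the parser above
 * (see amdgpu_ras_debugfs_ctrl_parse_data()): "retire_page <address>"
 * directly marks the given page as bad, and "check_address <address>
 * <flags>" checks whether the address may be used for injection.
 * For example,
 *
 * .. code-block:: bash
 *
 *	echo retire_page 0x1000 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *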
539  * How to check the result of the operation?
540  *
541  * To check disable/enable, see "ras" features at,
542  * /sys/class/drm/card[0/1/2...]/device/ras/features
543  *
544  * To check inject, see the corresponding error count at,
545  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
546  *
547  * .. note::
548  *	Operations are only allowed on blocks which are supported.
549  *	Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
550  *	to see which blocks support RAS on a particular asic.
551  *
552  */
553 static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
554 					     const char __user *buf,
555 					     size_t size, loff_t *pos)
556 {
557 	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
558 	struct ras_debug_if data;
559 	int ret = 0;
560 
561 	if (!amdgpu_ras_get_error_query_ready(adev)) {
562 		dev_warn(adev->dev, "RAS WARN: error injection "
563 				"currently inaccessible\n");
564 		return size;
565 	}
566 
567 	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
568 	if (ret)
569 		return ret;
570 
571 	if (data.op == 3) {
572 		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
573 		if (!ret)
574 			return size;
575 		else
576 			return ret;
577 	} else if (data.op == 4) {
578 		ret = amdgpu_check_address_validity(adev, data.inject.address, data.inject.value);
579 		return ret ? ret : size;
580 	}
581 
582 	if (!amdgpu_ras_is_supported(adev, data.head.block))
583 		return -EINVAL;
584 
585 	switch (data.op) {
586 	case 0:
587 		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
588 		break;
589 	case 1:
590 		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
591 		break;
592 	case 2:
593 		/* umc ce/ue error injection for a bad page is not allowed */
594 		if (data.head.block == AMDGPU_RAS_BLOCK__UMC)
595 			ret = amdgpu_ras_check_bad_page(adev, data.inject.address);
596 		if (ret == -EINVAL) {
597 			dev_warn(adev->dev, "RAS WARN: input address 0x%llx is invalid.",
598 					data.inject.address);
599 			break;
600 		} else if (ret == 1) {
601 			dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
602 					data.inject.address);
603 			break;
604 		}
605 
606 		amdgpu_ras_instance_mask_check(adev, &data);
607 
608 		/* data.inject.address is an offset instead of an absolute gpu address */
609 		ret = amdgpu_ras_error_inject(adev, &data.inject);
610 		break;
611 	default:
612 		ret = -EINVAL;
613 		break;
614 	}
615 
616 	if (ret)
617 		return ret;
618 
619 	return size;
620 }
621 
622 static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev);
623 
624 /**
625  * DOC: AMDGPU RAS debugfs EEPROM table reset interface
626  *
627  * Some boards contain an EEPROM which is used to persistently store a list of
628  * bad pages which experiences ECC errors in vram.  This interface provides
629  * a way to reset the EEPROM, e.g., after testing error injection.
630  *
631  * Usage:
632  *
633  * .. code-block:: bash
634  *
635  *	echo 1 > ../ras/ras_eeprom_reset
636  *
637  * will reset EEPROM table to 0 entries.
638  *
639  */
640 static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
641 					       const char __user *buf,
642 					       size_t size, loff_t *pos)
643 {
644 	struct amdgpu_device *adev =
645 		(struct amdgpu_device *)file_inode(f)->i_private;
646 	int ret;
647 
648 	if (amdgpu_uniras_enabled(adev)) {
649 		ret = amdgpu_uniras_clear_badpages_info(adev);
650 		return ret ? ret : size;
651 	}
652 
653 	ret = amdgpu_ras_eeprom_reset_table(
654 		&(amdgpu_ras_get_context(adev)->eeprom_control));
655 
656 	if (!ret) {
657 		/* Something was written to EEPROM.
658 		 */
659 		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
660 		return size;
661 	} else {
662 		return ret;
663 	}
664 }
665 
666 static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
667 	.owner = THIS_MODULE,
668 	.read = NULL,
669 	.write = amdgpu_ras_debugfs_ctrl_write,
670 	.llseek = default_llseek
671 };
672 
673 static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
674 	.owner = THIS_MODULE,
675 	.read = NULL,
676 	.write = amdgpu_ras_debugfs_eeprom_write,
677 	.llseek = default_llseek
678 };
679 
680 /**
681  * DOC: AMDGPU RAS sysfs Error Count Interface
682  *
683  * It allows the user to read the error count for each IP block on the gpu through
684  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
685  *
686  * It outputs multiple lines which report the uncorrected (ue) and corrected
687  * (ce) error counts.
688  *
689  * The format of one line is below,
690  *
691  * [ce|ue]: count
692  *
693  * Example:
694  *
695  * .. code-block:: bash
696  *
697  *	ue: 0
698  *	ce: 1
699  *
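 * For example, reading the UMC counters from a shell (the card index
 * varies by system):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/umc_err_count
 *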
700  */
701 static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
702 		struct device_attribute *attr, char *buf)
703 {
704 	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
705 	struct ras_query_if info = {
706 		.head = obj->head,
707 	};
708 
709 	if (!amdgpu_ras_get_error_query_ready(obj->adev))
710 		return sysfs_emit(buf, "Query currently inaccessible\n");
711 
712 	if (amdgpu_ras_query_error_status(obj->adev, &info))
713 		return -EINVAL;
714 
715 	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
716 	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
717 		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
718 			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
719 	}
720 
721 	if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
722 		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
723 				"ce", info.ce_count, "de", info.de_count);
724 	else
725 		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
726 				"ce", info.ce_count);
727 }
728 
729 /* obj begin */
730 
731 #define get_obj(obj) do { (obj)->use++; } while (0)
732 #define alive_obj(obj) ((obj)->use)
733 
734 static inline void put_obj(struct ras_manager *obj)
735 {
736 	if (obj && (--obj->use == 0)) {
737 		list_del(&obj->node);
738 		amdgpu_ras_error_data_fini(&obj->err_data);
739 	}
740 
741 	if (obj && (obj->use < 0))
742 		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
743 }
744 
745 /* make one obj and return it. */
746 static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
747 		struct ras_common_if *head)
748 {
749 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
750 	struct ras_manager *obj;
751 
752 	if (!adev->ras_enabled || !con)
753 		return NULL;
754 
755 	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
756 		return NULL;
757 
758 	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
759 		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
760 			return NULL;
761 
762 		obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
763 	} else
764 		obj = &con->objs[head->block];
765 
766 	/* already exists; do not create it again */
767 	if (alive_obj(obj))
768 		return NULL;
769 
770 	if (amdgpu_ras_error_data_init(&obj->err_data))
771 		return NULL;
772 
773 	obj->head = *head;
774 	obj->adev = adev;
775 	list_add(&obj->node, &con->head);
776 	get_obj(obj);
777 
778 	return obj;
779 }
780 
781 /* return an obj equal to head, or the first when head is NULL */
782 struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
783 		struct ras_common_if *head)
784 {
785 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
786 	struct ras_manager *obj;
787 	int i;
788 
789 	if (!adev->ras_enabled || !con)
790 		return NULL;
791 
792 	if (head) {
793 		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
794 			return NULL;
795 
796 		if (head->block == AMDGPU_RAS_BLOCK__MCA) {
797 			if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
798 				return NULL;
799 
800 			obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
801 		} else
802 			obj = &con->objs[head->block];
803 
804 		if (alive_obj(obj))
805 			return obj;
806 	} else {
807 		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
808 			obj = &con->objs[i];
809 			if (alive_obj(obj))
810 				return obj;
811 		}
812 	}
813 
814 	return NULL;
815 }
816 /* obj end */
817 
818 /* feature ctl begin */
819 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
820 					 struct ras_common_if *head)
821 {
822 	return adev->ras_hw_enabled & BIT(head->block);
823 }
824 
825 static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
826 		struct ras_common_if *head)
827 {
828 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
829 
830 	return con->features & BIT(head->block);
831 }
832 
833 /*
834  * if obj is not created, then create one.
835  * set feature enable flag.
836  */
837 static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
838 		struct ras_common_if *head, int enable)
839 {
840 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
841 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
842 
843 	/* If the hardware does not support ras, then do not create the obj.
844 	 * But if the hardware supports ras, we can create the obj.
845 	 * The ras framework checks con->hw_supported to see if it needs to do
846 	 * the corresponding initialization.
847 	 * The IP checks con->support to see if it needs to disable ras.
848 	 */
849 	if (!amdgpu_ras_is_feature_allowed(adev, head))
850 		return 0;
851 
852 	if (enable) {
853 		if (!obj) {
854 			obj = amdgpu_ras_create_obj(adev, head);
855 			if (!obj)
856 				return -EINVAL;
857 		} else {
858 			/* In case the obj was created somewhere else */
859 			get_obj(obj);
860 		}
861 		con->features |= BIT(head->block);
862 	} else {
863 		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
864 			con->features &= ~BIT(head->block);
865 			put_obj(obj);
866 		}
867 	}
868 
869 	return 0;
870 }
871 
872 /* wrapper of psp_ras_enable_features */
873 int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
874 		struct ras_common_if *head, bool enable)
875 {
876 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
877 	union ta_ras_cmd_input *info;
878 	int ret;
879 
880 	if (!con)
881 		return -EINVAL;
882 
883 	/* For non-gfx IPs, do not enable the ras feature if it is not allowed. */
884 	/* For the gfx IP, force-issue the enable or disable ras feature */
885 	/* commands regardless of the feature support status. */
886 	if (head->block != AMDGPU_RAS_BLOCK__GFX &&
887 	    !amdgpu_ras_is_feature_allowed(adev, head))
888 		return 0;
889 
890 	/* Only enable gfx ras feature from host side */
891 	if (head->block == AMDGPU_RAS_BLOCK__GFX &&
892 	    !amdgpu_sriov_vf(adev) &&
893 	    !amdgpu_ras_intr_triggered()) {
894 		info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
895 		if (!info)
896 			return -ENOMEM;
897 
898 		if (!enable) {
899 			info->disable_features = (struct ta_ras_disable_features_input) {
900 				.block_id =  amdgpu_ras_block_to_ta(head->block),
901 				.error_type = amdgpu_ras_error_to_ta(head->type),
902 			};
903 		} else {
904 			info->enable_features = (struct ta_ras_enable_features_input) {
905 				.block_id =  amdgpu_ras_block_to_ta(head->block),
906 				.error_type = amdgpu_ras_error_to_ta(head->type),
907 			};
908 		}
909 
910 		ret = psp_ras_enable_features(&adev->psp, info, enable);
911 		if (ret) {
912 			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
913 				enable ? "enable":"disable",
914 				get_ras_block_str(head),
915 				amdgpu_ras_is_poison_mode_supported(adev), ret);
916 			kfree(info);
917 			return ret;
918 		}
919 
920 		kfree(info);
921 	}
922 
923 	/* setup the obj */
924 	__amdgpu_ras_feature_enable(adev, head, enable);
925 
926 	return 0;
927 }
928 
929 /* Only used in device probe stage and called only once. */
930 int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
931 		struct ras_common_if *head, bool enable)
932 {
933 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
934 	int ret;
935 
936 	if (!con)
937 		return -EINVAL;
938 
939 	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
940 		if (enable) {
941 			/* There is no harm in issuing a ras TA cmd regardless of
942 			 * the current ras state.
943 			 * If current state == target state, it will do nothing.
944 			 * But sometimes it requests the driver to reset and repost
945 			 * with error code -EAGAIN.
946 			 */
947 			ret = amdgpu_ras_feature_enable(adev, head, 1);
948 			/* With an old ras TA, we might fail to enable ras.
949 			 * Log it and just set up the object.
950 			 * TODO: remove this workaround in the future.
951 			 */
952 			if (ret == -EINVAL) {
953 				ret = __amdgpu_ras_feature_enable(adev, head, 1);
954 				if (!ret)
955 					dev_info(adev->dev,
956 						"RAS INFO: %s setup object\n",
957 						get_ras_block_str(head));
958 			}
959 		} else {
960 			/* setup the object then issue a ras TA disable cmd.*/
961 			/* set up the object, then issue a ras TA disable cmd. */
962 			if (ret)
963 				return ret;
964 
965 			/* gfx block ras disable cmd must send to ras-ta */
966 			/* the gfx block ras disable cmd must be sent to the ras TA */
967 				con->features |= BIT(head->block);
968 
969 			ret = amdgpu_ras_feature_enable(adev, head, 0);
970 
971 			/* clean gfx block ras features flag */
972 			/* clear the gfx block ras features flag */
973 				con->features &= ~BIT(head->block);
974 		}
975 	} else
976 		ret = amdgpu_ras_feature_enable(adev, head, enable);
977 
978 	return ret;
979 }
980 
981 static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
982 		bool bypass)
983 {
984 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
985 	struct ras_manager *obj, *tmp;
986 
987 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
988 		/* bypass psp.
989 		 * aka just release the obj and corresponding flags
990 		 */
991 		if (bypass) {
992 			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
993 				break;
994 		} else {
995 			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
996 				break;
997 		}
998 	}
999 
1000 	return con->features;
1001 }
1002 
1003 static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
1004 		bool bypass)
1005 {
1006 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1007 	int i;
1008 	const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;
1009 
1010 	for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
1011 		struct ras_common_if head = {
1012 			.block = i,
1013 			.type = default_ras_type,
1014 			.sub_block_index = 0,
1015 		};
1016 
1017 		if (i == AMDGPU_RAS_BLOCK__MCA)
1018 			continue;
1019 
1020 		if (bypass) {
1021 			/*
1022 			 * bypass psp; the vbios enables ras for us,
1023 			 * so just create the obj
1024 			 */
1025 			if (__amdgpu_ras_feature_enable(adev, &head, 1))
1026 				break;
1027 		} else {
1028 			if (amdgpu_ras_feature_enable(adev, &head, 1))
1029 				break;
1030 		}
1031 	}
1032 
1033 	for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
1034 		struct ras_common_if head = {
1035 			.block = AMDGPU_RAS_BLOCK__MCA,
1036 			.type = default_ras_type,
1037 			.sub_block_index = i,
1038 		};
1039 
1040 		if (bypass) {
1041 			/*
1042 			 * bypass psp; the vbios enables ras for us,
1043 			 * so just create the obj
1044 			 */
1045 			if (__amdgpu_ras_feature_enable(adev, &head, 1))
1046 				break;
1047 		} else {
1048 			if (amdgpu_ras_feature_enable(adev, &head, 1))
1049 				break;
1050 		}
1051 	}
1052 
1053 	return con->features;
1054 }
1055 /* feature ctl end */
1056 
1057 static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
1058 		enum amdgpu_ras_block block)
1059 {
1060 	if (!block_obj)
1061 		return -EINVAL;
1062 
1063 	if (block_obj->ras_comm.block == block)
1064 		return 0;
1065 
1066 	return -EINVAL;
1067 }
1068 
1069 static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
1070 					enum amdgpu_ras_block block, uint32_t sub_block_index)
1071 {
1072 	struct amdgpu_ras_block_list *node, *tmp;
1073 	struct amdgpu_ras_block_object *obj;
1074 
1075 	if (block >= AMDGPU_RAS_BLOCK__LAST)
1076 		return NULL;
1077 
1078 	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
1079 		if (!node->ras_obj) {
1080 			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
1081 			continue;
1082 		}
1083 
1084 		obj = node->ras_obj;
1085 		if (obj->ras_block_match) {
1086 			if (obj->ras_block_match(obj, block, sub_block_index) == 0)
1087 				return obj;
1088 		} else {
1089 			if (amdgpu_ras_block_match_default(obj, block) == 0)
1090 				return obj;
1091 		}
1092 	}
1093 
1094 	return NULL;
1095 }
1096 
1097 static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
1098 {
1099 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
1100 	int ret = 0;
1101 
1102 	/*
1103 	 * choose the right query method according to
1104 	 * whether the smu supports querying error information
1105 	 */
1106 	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
1107 	if (ret == -EOPNOTSUPP) {
1108 		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1109 			adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
1110 			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
1111 
1112 		/* umc query_ras_error_address is also responsible for clearing
1113 		 * error status
1114 		 */
1115 		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1116 		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
1117 			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
1118 	} else if (!ret) {
1119 		if (adev->umc.ras &&
1120 			adev->umc.ras->ecc_info_query_ras_error_count)
1121 			adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);
1122 
1123 		if (adev->umc.ras &&
1124 			adev->umc.ras->ecc_info_query_ras_error_address)
1125 			adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
1126 	}
1127 }
1128 
1129 static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
1130 					      struct ras_manager *ras_mgr,
1131 					      struct ras_err_data *err_data,
1132 					      struct ras_query_context *qctx,
1133 					      const char *blk_name,
1134 					      bool is_ue,
1135 					      bool is_de)
1136 {
1137 	struct amdgpu_smuio_mcm_config_info *mcm_info;
1138 	struct ras_err_node *err_node;
1139 	struct ras_err_info *err_info;
1140 	u64 event_id = qctx->evid.event_id;
1141 
1142 	if (is_ue) {
1143 		for_each_ras_error(err_node, err_data) {
1144 			err_info = &err_node->err_info;
1145 			mcm_info = &err_info->mcm_info;
1146 			if (err_info->ue_count) {
1147 				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1148 					      "%lld new uncorrectable hardware errors detected in %s block\n",
1149 					      mcm_info->socket_id,
1150 					      mcm_info->die_id,
1151 					      err_info->ue_count,
1152 					      blk_name);
1153 			}
1154 		}
1155 
1156 		for_each_ras_error(err_node, &ras_mgr->err_data) {
1157 			err_info = &err_node->err_info;
1158 			mcm_info = &err_info->mcm_info;
1159 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1160 				      "%lld uncorrectable hardware errors detected in total in %s block\n",
1161 				      mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
1162 		}
1163 
1164 	} else {
1165 		if (is_de) {
1166 			for_each_ras_error(err_node, err_data) {
1167 				err_info = &err_node->err_info;
1168 				mcm_info = &err_info->mcm_info;
1169 				if (err_info->de_count) {
1170 					RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1171 						      "%lld new deferred hardware errors detected in %s block\n",
1172 						      mcm_info->socket_id,
1173 						      mcm_info->die_id,
1174 						      err_info->de_count,
1175 						      blk_name);
1176 				}
1177 			}
1178 
1179 			for_each_ras_error(err_node, &ras_mgr->err_data) {
1180 				err_info = &err_node->err_info;
1181 				mcm_info = &err_info->mcm_info;
1182 				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1183 					      "%lld deferred hardware errors detected in total in %s block\n",
1184 					      mcm_info->socket_id, mcm_info->die_id,
1185 					      err_info->de_count, blk_name);
1186 			}
1187 		} else {
1188 			if (adev->debug_disable_ce_logs)
1189 				return;
1190 
1191 			for_each_ras_error(err_node, err_data) {
1192 				err_info = &err_node->err_info;
1193 				mcm_info = &err_info->mcm_info;
1194 				if (err_info->ce_count) {
1195 					RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1196 						      "%lld new correctable hardware errors detected in %s block\n",
1197 						      mcm_info->socket_id,
1198 						      mcm_info->die_id,
1199 						      err_info->ce_count,
1200 						      blk_name);
1201 				}
1202 			}
1203 
1204 			for_each_ras_error(err_node, &ras_mgr->err_data) {
1205 				err_info = &err_node->err_info;
1206 				mcm_info = &err_info->mcm_info;
1207 				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1208 					      "%lld correctable hardware errors detected in total in %s block\n",
1209 					      mcm_info->socket_id, mcm_info->die_id,
1210 					      err_info->ce_count, blk_name);
1211 			}
1212 		}
1213 	}
1214 }
1215 
1216 static inline bool err_data_has_source_info(struct ras_err_data *data)
1217 {
1218 	return !list_empty(&data->err_node_list);
1219 }
1220 
1221 static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
1222 					     struct ras_query_if *query_if,
1223 					     struct ras_err_data *err_data,
1224 					     struct ras_query_context *qctx)
1225 {
1226 	struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
1227 	const char *blk_name = get_ras_block_str(&query_if->head);
1228 	u64 event_id = qctx->evid.event_id;
1229 
1230 	if (err_data->ce_count) {
1231 		if (err_data_has_source_info(err_data)) {
1232 			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1233 							  blk_name, false, false);
1234 		} else if (!adev->aid_mask &&
1235 			   adev->smuio.funcs &&
1236 			   adev->smuio.funcs->get_socket_id &&
1237 			   adev->smuio.funcs->get_die_id) {
1238 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1239 				      "%ld correctable hardware errors "
1240 				      "detected in %s block\n",
1241 				      adev->smuio.funcs->get_socket_id(adev),
1242 				      adev->smuio.funcs->get_die_id(adev),
1243 				      ras_mgr->err_data.ce_count,
1244 				      blk_name);
1245 		} else {
1246 			RAS_EVENT_LOG(adev, event_id, "%ld correctable hardware errors "
1247 				      "detected in %s block\n",
1248 				      ras_mgr->err_data.ce_count,
1249 				      blk_name);
1250 		}
1251 	}
1252 
1253 	if (err_data->ue_count) {
1254 		if (err_data_has_source_info(err_data)) {
1255 			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1256 							  blk_name, true, false);
1257 		} else if (!adev->aid_mask &&
1258 			   adev->smuio.funcs &&
1259 			   adev->smuio.funcs->get_socket_id &&
1260 			   adev->smuio.funcs->get_die_id) {
1261 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1262 				      "%ld uncorrectable hardware errors "
1263 				      "detected in %s block\n",
1264 				      adev->smuio.funcs->get_socket_id(adev),
1265 				      adev->smuio.funcs->get_die_id(adev),
1266 				      ras_mgr->err_data.ue_count,
1267 				      blk_name);
1268 		} else {
1269 			RAS_EVENT_LOG(adev, event_id, "%ld uncorrectable hardware errors "
1270 				      "detected in %s block\n",
1271 				      ras_mgr->err_data.ue_count,
1272 				      blk_name);
1273 		}
1274 	}
1275 
1276 	if (err_data->de_count) {
1277 		if (err_data_has_source_info(err_data)) {
1278 			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1279 							  blk_name, false, true);
1280 		} else if (!adev->aid_mask &&
1281 			   adev->smuio.funcs &&
1282 			   adev->smuio.funcs->get_socket_id &&
1283 			   adev->smuio.funcs->get_die_id) {
1284 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1285 				      "%ld deferred hardware errors "
1286 				      "detected in %s block\n",
1287 				      adev->smuio.funcs->get_socket_id(adev),
1288 				      adev->smuio.funcs->get_die_id(adev),
1289 				      ras_mgr->err_data.de_count,
1290 				      blk_name);
1291 		} else {
1292 			RAS_EVENT_LOG(adev, event_id, "%ld deferred hardware errors "
1293 				      "detected in %s block\n",
1294 				      ras_mgr->err_data.de_count,
1295 				      blk_name);
1296 		}
1297 	}
1298 }
1299 
1300 static void amdgpu_ras_virt_error_generate_report(struct amdgpu_device *adev,
1301 						  struct ras_query_if *query_if,
1302 						  struct ras_err_data *err_data,
1303 						  struct ras_query_context *qctx)
1304 {
1305 	unsigned long new_ue, new_ce, new_de;
1306 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &query_if->head);
1307 	const char *blk_name = get_ras_block_str(&query_if->head);
1308 	u64 event_id = qctx->evid.event_id;
1309 
1310 	new_ce = err_data->ce_count - obj->err_data.ce_count;
1311 	new_ue = err_data->ue_count - obj->err_data.ue_count;
1312 	new_de = err_data->de_count - obj->err_data.de_count;
1313 
1314 	if (new_ce) {
1315 		RAS_EVENT_LOG(adev, event_id, "%lu correctable hardware errors "
1316 			      "detected in %s block\n",
1317 			      new_ce,
1318 			      blk_name);
1319 	}
1320 
1321 	if (new_ue) {
1322 		RAS_EVENT_LOG(adev, event_id, "%lu uncorrectable hardware errors "
1323 			      "detected in %s block\n",
1324 			      new_ue,
1325 			      blk_name);
1326 	}
1327 
1328 	if (new_de) {
1329 		RAS_EVENT_LOG(adev, event_id, "%lu deferred hardware errors "
1330 			      "detected in %s block\n",
1331 			      new_de,
1332 			      blk_name);
1333 	}
1334 }
1335 
1336 static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
1337 {
1338 	struct ras_err_node *err_node;
1339 	struct ras_err_info *err_info;
1340 
1341 	if (err_data_has_source_info(err_data)) {
1342 		for_each_ras_error(err_node, err_data) {
1343 			err_info = &err_node->err_info;
1344 			amdgpu_ras_error_statistic_de_count(&obj->err_data,
1345 					&err_info->mcm_info, err_info->de_count);
1346 			amdgpu_ras_error_statistic_ce_count(&obj->err_data,
1347 					&err_info->mcm_info, err_info->ce_count);
1348 			amdgpu_ras_error_statistic_ue_count(&obj->err_data,
1349 					&err_info->mcm_info, err_info->ue_count);
1350 		}
1351 	} else {
1352 		/* for the legacy asic path which doesn't have error source info */
1353 		obj->err_data.ue_count += err_data->ue_count;
1354 		obj->err_data.ce_count += err_data->ce_count;
1355 		obj->err_data.de_count += err_data->de_count;
1356 	}
1357 }
1358 
1359 static void amdgpu_ras_mgr_virt_error_data_statistics_update(struct ras_manager *obj,
1360 							     struct ras_err_data *err_data)
1361 {
1362 	/* Host reports absolute counts */
1363 	obj->err_data.ue_count = err_data->ue_count;
1364 	obj->err_data.ce_count = err_data->ce_count;
1365 	obj->err_data.de_count = err_data->de_count;
1366 }
1367 
1368 static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
1369 {
1370 	struct ras_common_if head;
1371 
1372 	memset(&head, 0, sizeof(head));
1373 	head.block = blk;
1374 
1375 	return amdgpu_ras_find_obj(adev, &head);
1376 }
1377 
1378 int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
1379 			const struct aca_info *aca_info, void *data)
1380 {
1381 	struct ras_manager *obj;
1382 
1383 	/* in resume phase, no need to create aca fs node */
1384 	if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
1385 		return 0;
1386 
1387 	obj = get_ras_manager(adev, blk);
1388 	if (!obj)
1389 		return -EINVAL;
1390 
1391 	return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
1392 }
1393 
1394 int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
1395 {
1396 	struct ras_manager *obj;
1397 
1398 	obj = get_ras_manager(adev, blk);
1399 	if (!obj)
1400 		return -EINVAL;
1401 
1402 	amdgpu_aca_remove_handle(&obj->aca_handle);
1403 
1404 	return 0;
1405 }
1406 
1407 static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
1408 					 enum aca_error_type type, struct ras_err_data *err_data,
1409 					 struct ras_query_context *qctx)
1410 {
1411 	struct ras_manager *obj;
1412 
1413 	obj = get_ras_manager(adev, blk);
1414 	if (!obj)
1415 		return -EINVAL;
1416 
1417 	return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data, qctx);
1418 }
1419 
1420 ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
1421 				  struct aca_handle *handle, char *buf, void *data)
1422 {
1423 	struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
1424 	struct ras_query_if info = {
1425 		.head = obj->head,
1426 	};
1427 
1428 	if (!amdgpu_ras_get_error_query_ready(obj->adev))
1429 		return sysfs_emit(buf, "Query currently inaccessible\n");
1430 
1431 	if (amdgpu_ras_query_error_status(obj->adev, &info))
1432 		return -EINVAL;
1433 
1434 	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
1435 			  "ce", info.ce_count, "de", info.de_count);
1436 }
1437 
1438 static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
1439 						struct ras_query_if *info,
1440 						struct ras_err_data *err_data,
1441 						struct ras_query_context *qctx,
1442 						unsigned int error_query_mode)
1443 {
1444 	enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
1445 	struct amdgpu_ras_block_object *block_obj = NULL;
1446 	int ret;
1447 
1448 	if (blk == AMDGPU_RAS_BLOCK_COUNT)
1449 		return -EINVAL;
1450 
1451 	if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
1452 		return -EINVAL;
1453 
1454 	if (error_query_mode == AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
1455 		return amdgpu_virt_req_ras_err_count(adev, blk, err_data);
1456 	} else if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
1457 		if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
1458 			amdgpu_ras_get_ecc_info(adev, err_data);
1459 		} else {
1460 			block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
1461 			if (!block_obj || !block_obj->hw_ops) {
1462 				dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1463 					     get_ras_block_str(&info->head));
1464 				return -EINVAL;
1465 			}
1466 
1467 			if (block_obj->hw_ops->query_ras_error_count)
1468 				block_obj->hw_ops->query_ras_error_count(adev, err_data);
1469 
1470 			if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
1471 			    (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
1472 			    (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
1473 				if (block_obj->hw_ops->query_ras_error_status)
1474 					block_obj->hw_ops->query_ras_error_status(adev);
1475 			}
1476 		}
1477 	} else {
1478 		if (amdgpu_aca_is_enabled(adev)) {
1479 			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data, qctx);
1480 			if (ret)
1481 				return ret;
1482 
1483 			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data, qctx);
1484 			if (ret)
1485 				return ret;
1486 
1487 			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data, qctx);
1488 			if (ret)
1489 				return ret;
1490 		} else {
1491 			/* FIXME: add code to check return value later */
1492 			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx);
1493 			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data, qctx);
1494 		}
1495 	}
1496 
1497 	return 0;
1498 }
1499 
1500 /* query/inject/cure begin */
1501 static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev,
1502 						    struct ras_query_if *info,
1503 						    enum ras_event_type type)
1504 {
1505 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1506 	struct ras_err_data err_data;
1507 	struct ras_query_context qctx;
1508 	unsigned int error_query_mode;
1509 	int ret;
1510 
1511 	if (!obj)
1512 		return -EINVAL;
1513 
1514 	ret = amdgpu_ras_error_data_init(&err_data);
1515 	if (ret)
1516 		return ret;
1517 
1518 	if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
1519 		return -EINVAL;
1520 
1521 	memset(&qctx, 0, sizeof(qctx));
1522 	qctx.evid.type = type;
1523 	qctx.evid.event_id = amdgpu_ras_acquire_event_id(adev, type);
1524 
1525 	if (!down_read_trylock(&adev->reset_domain->sem)) {
1526 		ret = -EIO;
1527 		goto out_fini_err_data;
1528 	}
1529 
1530 	ret = amdgpu_ras_query_error_status_helper(adev, info,
1531 						   &err_data,
1532 						   &qctx,
1533 						   error_query_mode);
1534 	up_read(&adev->reset_domain->sem);
1535 	if (ret)
1536 		goto out_fini_err_data;
1537 
1538 	if (error_query_mode != AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
1539 		amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
1540 		amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
1541 	} else {
1542 		/* The host provides absolute error counts. First generate the
1543 		 * report using the previous VF internal count against the new
1544 		 * host count, then update the VF internal count.
1545 		 */
1546 		amdgpu_ras_virt_error_generate_report(adev, info, &err_data, &qctx);
1547 		amdgpu_ras_mgr_virt_error_data_statistics_update(obj, &err_data);
1548 	}
1549 
1550 	info->ue_count = obj->err_data.ue_count;
1551 	info->ce_count = obj->err_data.ce_count;
1552 	info->de_count = obj->err_data.de_count;
1553 
1554 out_fini_err_data:
1555 	amdgpu_ras_error_data_fini(&err_data);
1556 
1557 	return ret;
1558 }
1559 
1560 static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev)
1561 {
1562 	struct ras_cmd_dev_handle req = {0};
1563 	int ret;
1564 
1565 	ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__CLEAR_BAD_PAGE_INFO,
1566 				&req, sizeof(req), NULL, 0);
1567 	if (ret) {
1568 		dev_err(adev->dev, "Failed to clear bad pages info, ret: %d\n", ret);
1569 		return ret;
1570 	}
1571 
1572 	return 0;
1573 }
1574 
1575 static int amdgpu_uniras_query_block_ecc(struct amdgpu_device *adev,
1576 			struct ras_query_if *info)
1577 {
1578 	struct ras_cmd_block_ecc_info_req req = {0};
1579 	struct ras_cmd_block_ecc_info_rsp rsp = {0};
1580 	int ret;
1581 
1582 	if (!info)
1583 		return -EINVAL;
1584 
1585 	req.block_id = info->head.block;
1586 	req.subblock_id = info->head.sub_block_index;
1587 
1588 	ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BLOCK_ECC_STATUS,
1589 				&req, sizeof(req), &rsp, sizeof(rsp));
1590 	if (!ret) {
1591 		info->ce_count = rsp.ce_count;
1592 		info->ue_count = rsp.ue_count;
1593 		info->de_count = rsp.de_count;
1594 	}
1595 
1596 	return ret;
1597 }
1598 
1599 int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
1600 {
1601 	if (amdgpu_uniras_enabled(adev))
1602 		return amdgpu_uniras_query_block_ecc(adev, info);
1603 	else
1604 		return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
1605 }
1606 
1607 int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
1608 		enum amdgpu_ras_block block)
1609 {
1610 	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1611 	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
1612 	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
1613 
1614 	if (!block_obj || !block_obj->hw_ops) {
1615 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1616 				ras_block_str(block));
1617 		return -EOPNOTSUPP;
1618 	}
1619 
1620 	if (!amdgpu_ras_is_supported(adev, block) ||
1621 	    !amdgpu_ras_get_aca_debug_mode(adev))
1622 		return -EOPNOTSUPP;
1623 
1624 	if (amdgpu_sriov_vf(adev))
1625 		return -EOPNOTSUPP;
1626 
1627 	/* skip ras error reset in gpu reset */
1628 	if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) &&
1629 	    ((smu_funcs && smu_funcs->set_debug_mode) ||
1630 	     (mca_funcs && mca_funcs->mca_set_debug_mode)))
1631 		return -EOPNOTSUPP;
1632 
1633 	if (block_obj->hw_ops->reset_ras_error_count)
1634 		block_obj->hw_ops->reset_ras_error_count(adev);
1635 
1636 	return 0;
1637 }
1638 
1639 int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
1640 		enum amdgpu_ras_block block)
1641 {
1642 	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1643 
1644 	if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
1645 		return 0;
1646 
1647 	if ((block == AMDGPU_RAS_BLOCK__GFX) ||
1648 	    (block == AMDGPU_RAS_BLOCK__MMHUB)) {
1649 		if (block_obj->hw_ops->reset_ras_error_status)
1650 			block_obj->hw_ops->reset_ras_error_status(adev);
1651 	}
1652 
1653 	return 0;
1654 }
1655 
1656 static int amdgpu_uniras_error_inject(struct amdgpu_device *adev,
1657 		struct ras_inject_if *info)
1658 {
1659 	struct ras_cmd_inject_error_req inject_req;
1660 	struct ras_cmd_inject_error_rsp rsp;
1661 
1662 	if (!info)
1663 		return -EINVAL;
1664 
1665 	memset(&inject_req, 0, sizeof(inject_req));
1666 	inject_req.block_id = info->head.block;
1667 	inject_req.subblock_id = info->head.sub_block_index;
1668 	inject_req.address = info->address;
1669 	inject_req.error_type = info->head.type;
1670 	inject_req.instance_mask = info->instance_mask;
1671 	inject_req.method = info->value;
1672 
1673 	return amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__INJECT_ERROR,
1674 			&inject_req, sizeof(inject_req), &rsp, sizeof(rsp));
1675 }
1676 
1677 /* wrapper of psp_ras_trigger_error */
1678 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
1679 		struct ras_inject_if *info)
1680 {
1681 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1682 	struct ta_ras_trigger_error_input block_info = {
1683 		.block_id =  amdgpu_ras_block_to_ta(info->head.block),
1684 		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
1685 		.sub_block_index = info->head.sub_block_index,
1686 		.address = info->address,
1687 		.value = info->value,
1688 	};
1689 	int ret = -EINVAL;
1690 	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
1691 							info->head.block,
1692 							info->head.sub_block_index);
1693 
1694 	if (amdgpu_uniras_enabled(adev))
1695 		return amdgpu_uniras_error_inject(adev, info);
1696 
1697 	/* injection on the guest isn't allowed; return success directly */
1698 	if (amdgpu_sriov_vf(adev))
1699 		return 0;
1700 
1701 	if (!obj)
1702 		return -EINVAL;
1703 
1704 	if (!block_obj || !block_obj->hw_ops)	{
1705 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1706 			     get_ras_block_str(&info->head));
1707 		return -EINVAL;
1708 	}
1709 
1710 	/* Calculate XGMI relative offset */
1711 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
1712 	    info->head.block != AMDGPU_RAS_BLOCK__GFX) {
1713 		block_info.address =
1714 			amdgpu_xgmi_get_relative_phy_addr(adev,
1715 							  block_info.address);
1716 	}
1717 
1718 	if (block_obj->hw_ops->ras_error_inject) {
1719 		if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
1720 			ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
1721 		else /* Special ras_error_inject is defined (e.g., xgmi) */
1722 			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
1723 						info->instance_mask);
1724 	} else {
1725 		/* default path */
1726 		ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
1727 	}
1728 
1729 	if (ret)
1730 		dev_err(adev->dev, "ras inject %s failed %d\n",
1731 			get_ras_block_str(&info->head), ret);
1732 
1733 	return ret;
1734 }
1735 
1736 /**
1737  * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
1738  * @adev: pointer to AMD GPU device
1739  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1740  * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
1741  * @query_info: pointer to ras_query_if
1742  *
1743  * Return 0 for query success or do nothing, otherwise return an error
1744  * on failures
1745  */
1746 static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
1747 					       unsigned long *ce_count,
1748 					       unsigned long *ue_count,
1749 					       struct ras_query_if *query_info)
1750 {
1751 	int ret;
1752 
1753 	if (!query_info)
1754 		/* do nothing if query_info is not specified */
1755 		return 0;
1756 
1757 	ret = amdgpu_ras_query_error_status(adev, query_info);
1758 	if (ret)
1759 		return ret;
1760 
1761 	*ce_count += query_info->ce_count;
1762 	*ue_count += query_info->ue_count;
1763 
1764 	/* some hardware/IP supports read to clear,
1765 	 * no need to explicitly reset the err status after the query call */
1766 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
1767 	    amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
1768 		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
1769 			dev_warn(adev->dev,
1770 				 "Failed to reset error counter and error status\n");
1771 	}
1772 
1773 	return 0;
1774 }
1775 
1776 /**
1777  * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
1778  * @adev: pointer to AMD GPU device
1779  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1780  * @ue_count: pointer to an integer to be set to the count of uncorrectable
1781  * errors.
1782  * @query_info: pointer to ras_query_if if the query request is only for a
1783  * specific ip block; if it is NULL, then the query request is for
1784  * all the ip blocks that support querying ras error counters/status
1785  *
1786  * If set, @ce_count and @ue_count are filled with the corresponding
1787  * error counts in those integer pointers. Return 0 if the device
1788  * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
1789  */
1790 int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
1791 				 unsigned long *ce_count,
1792 				 unsigned long *ue_count,
1793 				 struct ras_query_if *query_info)
1794 {
1795 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1796 	struct ras_manager *obj;
1797 	unsigned long ce, ue;
1798 	int ret = 0;
1799 
1800 	if (!adev->ras_enabled || !con)
1801 		return -EOPNOTSUPP;
1802 
1803 	/* Nothing to count if the caller requested no reporting.
1804 	 */
1805 	if (!ce_count && !ue_count)
1806 		return 0;
1807 
1808 	ce = 0;
1809 	ue = 0;
1810 	if (!query_info) {
1811 		/* query all the ip blocks that support ras query interface */
1812 		list_for_each_entry(obj, &con->head, node) {
1813 			struct ras_query_if info = {
1814 				.head = obj->head,
1815 			};
1816 
1817 			ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
1818 		}
1819 	} else {
1820 		/* query specific ip block */
1821 		ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
1822 	}
1823 
1824 	if (ret)
1825 		return ret;
1826 
1827 	if (ce_count)
1828 		*ce_count = ce;
1829 
1830 	if (ue_count)
1831 		*ue_count = ue;
1832 
1833 	return 0;
1834 }
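
/*
 * Illustrative sketch: query the accumulated counts across every
 * RAS-capable IP block (passing query_info == NULL selects the
 * "all blocks" path above).
 *
 *	unsigned long ce_count = 0, ue_count = 0;
 *
 *	if (!amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL))
 *		dev_info(adev->dev, "ce %lu, ue %lu\n", ce_count, ue_count);
 */
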
1835 /* query/inject/cure end */
1836 
1837 
1838 /* sysfs begin */
1839 
1840 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1841 		struct ras_badpage *bps, uint32_t count, uint32_t start);
1842 static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev,
1843 		struct ras_badpage *bps, uint32_t count, uint32_t start);
1844 
1845 static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
1846 {
1847 	switch (flags) {
1848 	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
1849 		return "R";
1850 	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
1851 		return "P";
1852 	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
1853 	default:
1854 		return "F";
1855 	}
1856 }
1857 
1858 /**
1859  * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
1860  *
1861  * It allows the user to read the bad pages of vram on the gpu through
1862  * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
1863  *
1864  * It outputs multiple lines, and each line stands for one gpu page.
1865  *
1866  * The format of one line is below,
1867  * gpu pfn : gpu page size : flags
1868  *
1869  * gpu pfn and gpu page size are printed in hex format.
1870  * flags can be one of the characters below,
1871  *
1872  * R: reserved, this gpu page is reserved and not available for use.
1873  *
1874  * P: pending for reserve, this gpu page is marked as bad and will be
1875  * reserved in the next window of page_reserve.
1876  *
1877  * F: unable to reserve. This gpu page can't be reserved for some reason.
1878  *
1879  * Examples:
1880  *
1881  * .. code-block:: bash
1882  *
1883  *	0x00000001 : 0x00001000 : R
1884  *	0x00000002 : 0x00001000 : P
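 *
 * The file can be read like any other sysfs attribute, e.g.:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/gpu_vram_bad_pages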
1885  *
1886  */
1887 
1888 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
1889 		struct kobject *kobj, const struct bin_attribute *attr,
1890 		char *buf, loff_t ppos, size_t count)
1891 {
1892 	struct amdgpu_ras *con =
1893 		container_of(attr, struct amdgpu_ras, badpages_attr);
1894 	struct amdgpu_device *adev = con->adev;
1895 	const unsigned int element_size =
1896 		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
1897 	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
1898 	unsigned int end = div64_ul(ppos + count - 1, element_size);
1899 	ssize_t s = 0;
1900 	struct ras_badpage *bps = NULL;
1901 	int bps_count = 0, i, status;
1902 	uint64_t address;
1903 
1904 	memset(buf, 0, count);
1905 
1906 	bps_count = end - start;
1907 	bps = kcalloc(bps_count, sizeof(*bps), GFP_KERNEL);
1908 	if (!bps)
1909 		return 0;
1912 
1913 	if (amdgpu_uniras_enabled(adev))
1914 		bps_count = amdgpu_uniras_badpages_read(adev, bps, bps_count, start);
1915 	else
1916 		bps_count = amdgpu_ras_badpages_read(adev, bps, bps_count, start);
1917 
1918 	if (bps_count <= 0) {
1919 		kfree(bps);
1920 		return 0;
1921 	}
1922 
1923 	for (i = 0; i < bps_count; i++) {
1924 		address = ((uint64_t)bps[i].bp) << AMDGPU_GPU_PAGE_SHIFT;
1925 		if (amdgpu_ras_check_critical_address(adev, address))
1926 			continue;
1927 
1928 		bps[i].size = AMDGPU_GPU_PAGE_SIZE;
1929 
1930 		status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
1931 					address);
1932 		if (status == -EBUSY)
1933 			bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
1934 		else if (status == -ENOENT)
1935 			bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
1936 		else
1937 			bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED;
1938 
1939 		s += scnprintf(&buf[s], element_size + 1,
1940 				"0x%08x : 0x%08x : %1s\n",
1941 				bps[i].bp,
1942 				bps[i].size,
1943 				amdgpu_ras_badpage_flags_str(bps[i].flags));
1944 	}
1945 
1946 	kfree(bps);
1947 
1948 	return s;
1949 }
1950 
1951 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
1952 		struct device_attribute *attr, char *buf)
1953 {
1954 	struct amdgpu_ras *con =
1955 		container_of(attr, struct amdgpu_ras, features_attr);
1956 
1957 	return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
1958 }
1959 
1960 static bool amdgpu_ras_get_version_info(struct amdgpu_device *adev, u32 *major,
1961 			u32 *minor, u32 *rev)
1962 {
1963 	int i;
1964 
1965 	if (!adev || !major || !minor || !rev || !amdgpu_uniras_enabled(adev))
1966 		return false;
1967 
1968 	for (i = 0; i < adev->num_ip_blocks; i++) {
1969 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_RAS) {
1970 			*major = adev->ip_blocks[i].version->major;
1971 			*minor = adev->ip_blocks[i].version->minor;
1972 			*rev = adev->ip_blocks[i].version->rev;
1973 			return true;
1974 		}
1975 	}
1976 
1977 	return false;
1978 }
1979 
1980 static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
1981 		struct device_attribute *attr, char *buf)
1982 {
1983 	struct amdgpu_ras *con =
1984 		container_of(attr, struct amdgpu_ras, version_attr);
1985 	u32 major, minor, rev;
1986 	ssize_t size = 0;
1987 
1988 	size += sysfs_emit_at(buf, size, "table version: 0x%x\n",
1989 			con->eeprom_control.tbl_hdr.version);
1990 
1991 	if (amdgpu_ras_get_version_info(con->adev, &major, &minor, &rev))
1992 		size += sysfs_emit_at(buf, size, "ras version: %u.%u.%u\n",
1993 			major, minor, rev);
1994 
1995 	return size;
1996 }
1997 
1998 static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
1999 		struct device_attribute *attr, char *buf)
2000 {
2001 	struct amdgpu_ras *con =
2002 		container_of(attr, struct amdgpu_ras, schema_attr);
2003 	return sysfs_emit(buf, "schema: 0x%x\n", con->schema);
2004 }
2005 
2006 static struct {
2007 	enum ras_event_type type;
2008 	const char *name;
2009 } dump_event[] = {
2010 	{RAS_EVENT_TYPE_FATAL, "Fatal Error"},
2011 	{RAS_EVENT_TYPE_POISON_CREATION, "Poison Creation"},
2012 	{RAS_EVENT_TYPE_POISON_CONSUMPTION, "Poison Consumption"},
2013 };
2014 
2015 static ssize_t amdgpu_ras_sysfs_event_state_show(struct device *dev,
2016 						 struct device_attribute *attr, char *buf)
2017 {
2018 	struct amdgpu_ras *con =
2019 		container_of(attr, struct amdgpu_ras, event_state_attr);
2020 	struct ras_event_manager *event_mgr = con->event_mgr;
2021 	struct ras_event_state *event_state;
2022 	int i, size = 0;
2023 
2024 	if (!event_mgr)
2025 		return -EINVAL;
2026 
2027 	size += sysfs_emit_at(buf, size, "current seqno: %llu\n", atomic64_read(&event_mgr->seqno));
2028 	for (i = 0; i < ARRAY_SIZE(dump_event); i++) {
2029 		event_state = &event_mgr->event_state[dump_event[i].type];
2030 		size += sysfs_emit_at(buf, size, "%s: count:%llu, last_seqno:%llu\n",
2031 				      dump_event[i].name,
2032 				      atomic64_read(&event_state->count),
2033 				      event_state->last_seqno);
2034 	}
2035 
2036 	return (ssize_t)size;
2037 }
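
/*
 * A read of the event_state attribute above emits one line per tracked
 * event type, following the sysfs_emit_at() format strings; e.g. (the
 * numbers are illustrative only):
 *
 *	current seqno: 27
 *	Fatal Error: count:0, last_seqno:0
 *	Poison Creation: count:2, last_seqno:21
 *	Poison Consumption: count:1, last_seqno:27
 */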
2038 
2039 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
2040 {
2041 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2042 
2043 	if (adev->dev->kobj.sd)
2044 		sysfs_remove_file_from_group(&adev->dev->kobj,
2045 				&con->badpages_attr.attr,
2046 				RAS_FS_NAME);
2047 }
2048 
2049 static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev)
2050 {
2051 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2052 	struct attribute *attrs[] = {
2053 		&con->features_attr.attr,
2054 		&con->version_attr.attr,
2055 		&con->schema_attr.attr,
2056 		&con->event_state_attr.attr,
2057 		NULL
2058 	};
2059 	struct attribute_group group = {
2060 		.name = RAS_FS_NAME,
2061 		.attrs = attrs,
2062 	};
2063 
2064 	if (adev->dev->kobj.sd)
2065 		sysfs_remove_group(&adev->dev->kobj, &group);
2066 
2067 	return 0;
2068 }
2069 
2070 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
2071 		struct ras_common_if *head)
2072 {
2073 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2074 
2075 	if (amdgpu_aca_is_enabled(adev))
2076 		return 0;
2077 
2078 	if (!obj || obj->attr_inuse)
2079 		return -EINVAL;
2080 
2081 	if (amdgpu_sriov_vf(adev) && !amdgpu_virt_ras_telemetry_block_en(adev, head->block))
2082 		return 0;
2083 
2084 	get_obj(obj);
2085 
2086 	snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
2087 		"%s_err_count", head->name);
2088 
2089 	obj->sysfs_attr = (struct device_attribute){
2090 		.attr = {
2091 			.name = obj->fs_data.sysfs_name,
2092 			.mode = S_IRUGO,
2093 		},
2094 		.show = amdgpu_ras_sysfs_read,
2095 	};
2096 	sysfs_attr_init(&obj->sysfs_attr.attr);
2097 
2098 	if (sysfs_add_file_to_group(&adev->dev->kobj,
2099 				&obj->sysfs_attr.attr,
2100 				RAS_FS_NAME)) {
2101 		put_obj(obj);
2102 		return -EINVAL;
2103 	}
2104 
2105 	obj->attr_inuse = 1;
2106 
2107 	return 0;
2108 }
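
/*
 * Illustrative sketch: an IP block that wants a "<name>_err_count" node
 * registers it with a populated ras_common_if; the values here are
 * examples only.
 *
 *	struct ras_common_if head = {
 *		.block = AMDGPU_RAS_BLOCK__GFX,
 *		.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
 *		.name = "gfx",
 *	};
 *	int err = amdgpu_ras_sysfs_create(adev, &head);
 *
 * On success a "gfx_err_count" attribute appears in the ras sysfs group.
 */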
2109 
2110 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
2111 		struct ras_common_if *head)
2112 {
2113 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2114 
2115 	if (amdgpu_aca_is_enabled(adev))
2116 		return 0;
2117 
2118 	if (!obj || !obj->attr_inuse)
2119 		return -EINVAL;
2120 
2121 	if (adev->dev->kobj.sd)
2122 		sysfs_remove_file_from_group(&adev->dev->kobj,
2123 				&obj->sysfs_attr.attr,
2124 				RAS_FS_NAME);
2125 	obj->attr_inuse = 0;
2126 	put_obj(obj);
2127 
2128 	return 0;
2129 }
2130 
2131 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
2132 {
2133 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2134 	struct ras_manager *obj, *tmp;
2135 
2136 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
2137 		amdgpu_ras_sysfs_remove(adev, &obj->head);
2138 	}
2139 
2140 	if (amdgpu_bad_page_threshold != 0)
2141 		amdgpu_ras_sysfs_remove_bad_page_node(adev);
2142 
2143 	amdgpu_ras_sysfs_remove_dev_attr_node(adev);
2144 
2145 	return 0;
2146 }
2147 /* sysfs end */
2148 
2149 /**
2150  * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
2151  *
2152  * Normally when there is an uncorrectable error, the driver will reset
2153  * the GPU to recover.  However, in the event of an unrecoverable error,
2154  * the driver provides an interface to reboot the system automatically
2155  * instead.
2156  *
2157  * The following file in debugfs provides that interface:
2158  * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
2159  *
2160  * Usage:
2161  *
2162  * .. code-block:: bash
2163  *
2164  *	echo true > .../ras/auto_reboot
2165  *
2166  */
2167 /* debugfs begin */
2168 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
2169 {
2170 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2171 	struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
2172 	struct drm_minor  *minor = adev_to_drm(adev)->primary;
2173 	struct dentry     *dir;
2174 
2175 	dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
2176 	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
2177 			    &amdgpu_ras_debugfs_ctrl_ops);
2178 	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
2179 			    &amdgpu_ras_debugfs_eeprom_ops);
2180 	debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
2181 			   &con->bad_page_cnt_threshold);
2182 	debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
2183 	debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
2184 	debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
2185 	debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
2186 			    &amdgpu_ras_debugfs_eeprom_size_ops);
2187 	con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
2188 						       S_IRUGO, dir, adev,
2189 						       &amdgpu_ras_debugfs_eeprom_table_ops);
2190 	amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
2191 
2192 	/*
2193 	 * After an uncorrectable error happens, GPU recovery is usually
2194 	 * scheduled. But due to a known problem where GPU recovery fails
2195 	 * to bring the GPU back, the interface below provides a direct way
2196 	 * for the user to have the system rebooted automatically when an
2197 	 * ERREVENT_ATHUB_INTERRUPT is generated. In that case the normal
2198 	 * GPU recovery routine will never be called.
2199 	 */
2200 	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
2201 
2202 	/*
2203 	 * The user can set this to skip cleaning up the RAS IPs' hardware
2204 	 * error count registers during ras recovery.
2205 	 */
2206 	debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
2207 			    &con->disable_ras_err_cnt_harvest);
2208 	return dir;
2209 }
2210 
2211 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
2212 				      struct ras_fs_if *head,
2213 				      struct dentry *dir)
2214 {
2215 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
2216 
2217 	if (!obj || !dir)
2218 		return;
2219 
2220 	get_obj(obj);
2221 
2222 	memcpy(obj->fs_data.debugfs_name,
2223 			head->debugfs_name,
2224 			sizeof(obj->fs_data.debugfs_name));
2225 
2226 	debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
2227 			    obj, &amdgpu_ras_debugfs_ops);
2228 }
2229 
2230 static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev)
2231 {
2232 	bool ret;
2233 
2234 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2235 	case IP_VERSION(13, 0, 6):
2236 	case IP_VERSION(13, 0, 12):
2237 	case IP_VERSION(13, 0, 14):
2238 		ret = true;
2239 		break;
2240 	default:
2241 		ret = false;
2242 		break;
2243 	}
2244 
2245 	return ret;
2246 }
2247 
2248 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
2249 {
2250 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2251 	struct dentry *dir;
2252 	struct ras_manager *obj;
2253 	struct ras_fs_if fs_info;
2254 
2255 	/*
2256 	 * this function won't be called in the resume path, so there is
2257 	 * no need to check suspend and gpu reset status
2258 	 */
2259 	if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
2260 		return;
2261 
2262 	dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
2263 
2264 	list_for_each_entry(obj, &con->head, node) {
2265 		if (amdgpu_ras_is_supported(adev, obj->head.block) &&
2266 			(obj->attr_inuse == 1)) {
2267 			sprintf(fs_info.debugfs_name, "%s_err_inject",
2268 					get_ras_block_str(&obj->head));
2269 			fs_info.head = obj->head;
2270 			amdgpu_ras_debugfs_create(adev, &fs_info, dir);
2271 		}
2272 	}
2273 
2274 	if (amdgpu_ras_aca_is_supported(adev)) {
2275 		if (amdgpu_aca_is_enabled(adev))
2276 			amdgpu_aca_smu_debugfs_init(adev, dir);
2277 		else
2278 			amdgpu_mca_smu_debugfs_init(adev, dir);
2279 	}
2280 }
2281 
2282 /* debugfs end */
2283 
2284 /* ras fs */
2285 static const BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
2286 		      amdgpu_ras_sysfs_badpages_read, NULL, 0);
2287 static DEVICE_ATTR(features, S_IRUGO,
2288 		amdgpu_ras_sysfs_features_read, NULL);
2289 static DEVICE_ATTR(version, 0444,
2290 		amdgpu_ras_sysfs_version_show, NULL);
2291 static DEVICE_ATTR(schema, 0444,
2292 		amdgpu_ras_sysfs_schema_show, NULL);
2293 static DEVICE_ATTR(event_state, 0444,
2294 		   amdgpu_ras_sysfs_event_state_show, NULL);
2295 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
2296 {
2297 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2298 	struct attribute_group group = {
2299 		.name = RAS_FS_NAME,
2300 	};
2301 	struct attribute *attrs[] = {
2302 		&con->features_attr.attr,
2303 		&con->version_attr.attr,
2304 		&con->schema_attr.attr,
2305 		&con->event_state_attr.attr,
2306 		NULL
2307 	};
2308 	const struct bin_attribute *bin_attrs[] = {
2309 		NULL,
2310 		NULL,
2311 	};
2312 	int r;
2313 
2314 	group.attrs = attrs;
2315 
2316 	/* add features entry */
2317 	con->features_attr = dev_attr_features;
2318 	sysfs_attr_init(attrs[0]);
2319 
2320 	/* add version entry */
2321 	con->version_attr = dev_attr_version;
2322 	sysfs_attr_init(attrs[1]);
2323 
2324 	/* add schema entry */
2325 	con->schema_attr = dev_attr_schema;
2326 	sysfs_attr_init(attrs[2]);
2327 
2328 	/* add event_state entry */
2329 	con->event_state_attr = dev_attr_event_state;
2330 	sysfs_attr_init(attrs[3]);
2331 
2332 	if (amdgpu_bad_page_threshold != 0) {
2333 		/* add bad_page_features entry */
2334 		con->badpages_attr = bin_attr_gpu_vram_bad_pages;
2335 		sysfs_bin_attr_init(&con->badpages_attr);
2336 		bin_attrs[0] = &con->badpages_attr;
2337 		group.bin_attrs = bin_attrs;
2338 	}
2339 
2340 	r = sysfs_create_group(&adev->dev->kobj, &group);
2341 	if (r)
2342 		dev_err(adev->dev, "Failed to create RAS sysfs group!");
2343 
2344 	return 0;
2345 }
2346 
2347 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
2348 {
2349 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2350 	struct ras_manager *con_obj, *ip_obj, *tmp;
2351 
2352 	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2353 		list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
2354 			ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
2355 			if (ip_obj)
2356 				put_obj(ip_obj);
2357 		}
2358 	}
2359 
2360 	amdgpu_ras_sysfs_remove_all(adev);
2361 	return 0;
2362 }
2363 /* ras fs end */
2364 
2365 /* ih begin */
2366 
2367 /* For the hardware that cannot enable the bif ring for both ras_controller_irq
2368  * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
2369  * register to check whether the interrupt is triggered or not, and properly
2370  * ack the interrupt if it is there
2371  */
2372 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
2373 {
2374 	/* Fatal error events are handled on host side */
2375 	if (amdgpu_sriov_vf(adev))
2376 		return;
2377 	/*
2378 	 * If the current interrupt is caused by a non-fatal RAS error, skip
2379 	 * check for fatal error. For fatal errors, FED status of all devices
2380 	 * in XGMI hive gets set when the first device gets fatal error
2381 	 * interrupt. The error gets propagated to other devices as well, so
2382 	 * make sure to ack the interrupt regardless of FED status.
2383 	 */
2384 	if (!amdgpu_ras_get_fed_status(adev) &&
2385 	    amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY))
2386 		return;
2387 
2388 	if (amdgpu_uniras_enabled(adev)) {
2389 		amdgpu_ras_mgr_handle_fatal_interrupt(adev, NULL);
2390 		return;
2391 	}
2392 
2393 	if (adev->nbio.ras &&
2394 	    adev->nbio.ras->handle_ras_controller_intr_no_bifring)
2395 		adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
2396 
2397 	if (adev->nbio.ras &&
2398 	    adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
2399 		adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
2400 }
2401 
2402 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
2403 				struct amdgpu_iv_entry *entry)
2404 {
2405 	bool poison_stat = false;
2406 	struct amdgpu_device *adev = obj->adev;
2407 	struct amdgpu_ras_block_object *block_obj =
2408 		amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
2409 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2410 	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
2411 	u64 event_id;
2412 	int ret;
2413 
2414 	if (!block_obj || !con)
2415 		return;
2416 
2417 	ret = amdgpu_ras_mark_ras_event(adev, type);
2418 	if (ret)
2419 		return;
2420 
2421 	amdgpu_ras_set_err_poison(adev, block_obj->ras_comm.block);
2422 	/* both query_poison_status and handle_poison_consumption are optional,
2423 	 * but at least one of them should be implemented if we need a poison
2424 	 * consumption handler
2425 	 */
2426 	if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
2427 		poison_stat = block_obj->hw_ops->query_poison_status(adev);
2428 		if (!poison_stat) {
2429 			/* Not poison consumption interrupt, no need to handle it */
2430 			dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
2431 					block_obj->ras_comm.name);
2432 
2433 			return;
2434 		}
2435 	}
2436 
2437 	amdgpu_umc_poison_handler(adev, obj->head.block, 0);
2438 
2439 	if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
2440 		poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
2441 
2442 	/* gpu reset is the fallback for failed and default cases.
2443 	 * For the RMA case, amdgpu_umc_poison_handler will handle the gpu reset.
2444 	 */
2445 	if (poison_stat && !amdgpu_ras_is_rma(adev)) {
2446 		event_id = amdgpu_ras_acquire_event_id(adev, type);
2447 		RAS_EVENT_LOG(adev, event_id,
2448 			      "GPU reset for %s RAS poison consumption is issued!\n",
2449 			      block_obj->ras_comm.name);
2450 		amdgpu_ras_reset_gpu(adev);
2451 	}
2452 
2453 	if (!poison_stat)
2454 		amdgpu_gfx_poison_consumption_handler(adev, entry);
2455 }
2456 
2457 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
2458 				struct amdgpu_iv_entry *entry)
2459 {
2460 	struct amdgpu_device *adev = obj->adev;
2461 	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
2462 	u64 event_id;
2463 	int ret;
2464 
2465 	ret = amdgpu_ras_mark_ras_event(adev, type);
2466 	if (ret)
2467 		return;
2468 
2469 	event_id = amdgpu_ras_acquire_event_id(adev, type);
2470 	RAS_EVENT_LOG(adev, event_id, "Poison is created\n");
2471 
2472 	if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) {
2473 		struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev);
2474 
2475 		atomic_inc(&con->page_retirement_req_cnt);
2476 		atomic_inc(&con->poison_creation_count);
2477 
2478 		wake_up(&con->page_retirement_wq);
2479 	}
2480 }
2481 
2482 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
2483 				struct amdgpu_iv_entry *entry)
2484 {
2485 	struct ras_ih_data *data = &obj->ih_data;
2486 	struct ras_err_data err_data;
2487 	int ret;
2488 
2489 	if (!data->cb)
2490 		return;
2491 
2492 	ret = amdgpu_ras_error_data_init(&err_data);
2493 	if (ret)
2494 		return;
2495 
2496 	/* Let the IP handle its data; maybe we need to get the output
2497 	 * from the callback to update the error type/count, etc.
2498 	 */
2499 	amdgpu_ras_set_fed(obj->adev, true);
2500 	ret = data->cb(obj->adev, &err_data, entry);
2501 	/* A ue will trigger an interrupt, and in that case
2502 	 * we need to do a reset to recover the whole system.
2503 	 * But leave it to the IP to do that recovery; here we just
2504 	 * dispatch the error.
2505 	 */
2506 	if (ret == AMDGPU_RAS_SUCCESS) {
2507 		/* these counts could be left as 0 if
2508 		 * some blocks do not count error numbers
2509 		 */
2510 		obj->err_data.ue_count += err_data.ue_count;
2511 		obj->err_data.ce_count += err_data.ce_count;
2512 		obj->err_data.de_count += err_data.de_count;
2513 	}
2514 
2515 	amdgpu_ras_error_data_fini(&err_data);
2516 }
2517 
2518 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
2519 {
2520 	struct ras_ih_data *data = &obj->ih_data;
2521 	struct amdgpu_iv_entry entry;
2522 
2523 	while (data->rptr != data->wptr) {
2524 		rmb();
2525 		memcpy(&entry, &data->ring[data->rptr],
2526 				data->element_size);
2527 
2528 		wmb();
2529 		data->rptr = (data->aligned_element_size +
2530 				data->rptr) % data->ring_size;
2531 
2532 		if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
2533 			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2534 				amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
2535 			else
2536 				amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
2537 		} else {
2538 			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2539 				amdgpu_ras_interrupt_umc_handler(obj, &entry);
2540 			else
2541 				dev_warn(obj->adev->dev,
2542 					"No RAS interrupt handler for non-UMC block with poison disabled.\n");
2543 		}
2544 	}
2545 }
2546 
2547 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
2548 {
2549 	struct ras_ih_data *data =
2550 		container_of(work, struct ras_ih_data, ih_work);
2551 	struct ras_manager *obj =
2552 		container_of(data, struct ras_manager, ih_data);
2553 
2554 	amdgpu_ras_interrupt_handler(obj);
2555 }
2556 
2557 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
2558 		struct ras_dispatch_if *info)
2559 {
2560 	struct ras_manager *obj;
2561 	struct ras_ih_data *data;
2562 
2563 	if (amdgpu_uniras_enabled(adev)) {
2564 		struct ras_ih_info ih_info;
2565 
2566 		memset(&ih_info, 0, sizeof(ih_info));
2567 		ih_info.block = info->head.block;
2568 		memcpy(&ih_info.iv_entry, info->entry, sizeof(struct amdgpu_iv_entry));
2569 
2570 		return amdgpu_ras_mgr_handle_controller_interrupt(adev, &ih_info);
2571 	}
2572 
2573 	obj = amdgpu_ras_find_obj(adev, &info->head);
2574 	if (!obj)
2575 		return -EINVAL;
2576 
2577 	data = &obj->ih_data;
2578 
2579 	if (data->inuse == 0)
2580 		return 0;
2581 
2582 	/* The ring might overflow here... */
2583 	memcpy(&data->ring[data->wptr], info->entry,
2584 			data->element_size);
2585 
2586 	wmb();
2587 	data->wptr = (data->aligned_element_size +
2588 			data->wptr) % data->ring_size;
2589 
2590 	schedule_work(&data->ih_work);
2591 
2592 	return 0;
2593 }
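
/*
 * Illustrative sketch: an IP interrupt handler forwards an IV entry into
 * the per-block ring above; "entry" stands for whatever amdgpu_iv_entry
 * the IH callback received and "obj" for the block's ras manager.
 *
 *	struct ras_dispatch_if info = {
 *		.head = obj->head,
 *		.entry = entry,
 *	};
 *
 *	amdgpu_ras_interrupt_dispatch(adev, &info);
 */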
2594 
2595 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
2596 		struct ras_common_if *head)
2597 {
2598 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2599 	struct ras_ih_data *data;
2600 
2601 	if (!obj)
2602 		return -EINVAL;
2603 
2604 	data = &obj->ih_data;
2605 	if (data->inuse == 0)
2606 		return 0;
2607 
2608 	cancel_work_sync(&data->ih_work);
2609 
2610 	kfree(data->ring);
2611 	memset(data, 0, sizeof(*data));
2612 	put_obj(obj);
2613 
2614 	return 0;
2615 }
2616 
2617 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
2618 		struct ras_common_if *head)
2619 {
2620 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2621 	struct ras_ih_data *data;
2622 	struct amdgpu_ras_block_object *ras_obj;
2623 
2624 	if (!obj) {
2625 		/* in case we register the IH before enabling the ras feature */
2626 		obj = amdgpu_ras_create_obj(adev, head);
2627 		if (!obj)
2628 			return -EINVAL;
2629 	} else {
2630 		get_obj(obj);
2631 	}
2631 
2632 	ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
2633 
2634 	data = &obj->ih_data;
2635 	/* add the callback, etc. */
2636 	*data = (struct ras_ih_data) {
2637 		.inuse = 0,
2638 		.cb = ras_obj->ras_cb,
2639 		.element_size = sizeof(struct amdgpu_iv_entry),
2640 		.rptr = 0,
2641 		.wptr = 0,
2642 	};
2643 
2644 	INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
2645 
2646 	data->aligned_element_size = ALIGN(data->element_size, 8);
2647 	/* the ring can store 64 iv entries. */
2648 	data->ring_size = 64 * data->aligned_element_size;
2649 	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
2650 	if (!data->ring) {
2651 		put_obj(obj);
2652 		return -ENOMEM;
2653 	}
2654 
2655 	/* IH is ready */
2656 	data->inuse = 1;
2657 
2658 	return 0;
2659 }
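
/*
 * Sizing example for the IH ring above (the element size depends on the
 * actual sizeof(struct amdgpu_iv_entry) and is illustrative here): with
 * element_size == 52, aligned_element_size == ALIGN(52, 8) == 56 and
 * ring_size == 64 * 56 == 3584 bytes, i.e. room for 64 aligned entries.
 */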
2660 
2661 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
2662 {
2663 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2664 	struct ras_manager *obj, *tmp;
2665 
2666 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
2667 		amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
2668 	}
2669 
2670 	return 0;
2671 }
2672 /* ih end */
2673 
2674 /* traverse all IPs except NBIO to query error counters */
2675 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev, enum ras_event_type type)
2676 {
2677 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2678 	struct ras_manager *obj;
2679 
2680 	if (!adev->ras_enabled || !con)
2681 		return;
2682 
2683 	list_for_each_entry(obj, &con->head, node) {
2684 		struct ras_query_if info = {
2685 			.head = obj->head,
2686 		};
2687 
2688 		/*
2689 		 * The PCIE_BIF IP has a different isr for the ras controller
2690 		 * interrupt; the specific ras counter query will be
2691 		 * done in that isr. So skip such blocks in the common
2692 		 * sync flood interrupt isr call.
2693 		 */
2694 		if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
2695 			continue;
2696 
2697 		/*
2698 		 * this is a workaround for aldebaran: skip sending the msg to
2699 		 * smu to get the ecc_info table, because smu temporarily
2700 		 * fails to handle the ecc_info table query.
2701 		 * It should be removed once smu fixes ecc_info table handling.
2702 		 */
2703 		if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
2704 		    (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2705 		     IP_VERSION(13, 0, 2)))
2706 			continue;
2707 
2708 		amdgpu_ras_query_error_status_with_event(adev, &info, type);
2709 
2710 		if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2711 			    IP_VERSION(11, 0, 2) &&
2712 		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2713 			    IP_VERSION(11, 0, 4) &&
2714 		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2715 			    IP_VERSION(13, 0, 0)) {
2716 			if (amdgpu_ras_reset_error_status(adev, info.head.block))
2717 				dev_warn(adev->dev, "Failed to reset error counter and error status");
2718 		}
2719 	}
2720 }
2721 
2722 /* Parse RdRspStatus and WrRspStatus */
2723 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
2724 					  struct ras_query_if *info)
2725 {
2726 	struct amdgpu_ras_block_object *block_obj;
2727 	/*
2728 	 * Only two blocks need to query the read/write
2729 	 * RspStatus in the current state
2730 	 */
2731 	if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
2732 		(info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
2733 		return;
2734 
2735 	block_obj = amdgpu_ras_get_ras_block(adev,
2736 					info->head.block,
2737 					info->head.sub_block_index);
2738 
2739 	if (!block_obj || !block_obj->hw_ops) {
2740 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
2741 			     get_ras_block_str(&info->head));
2742 		return;
2743 	}
2744 
2745 	if (block_obj->hw_ops->query_ras_error_status)
2746 		block_obj->hw_ops->query_ras_error_status(adev);
2748 }
2749 
2750 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
2751 {
2752 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2753 	struct ras_manager *obj;
2754 
2755 	if (!adev->ras_enabled || !con)
2756 		return;
2757 
2758 	list_for_each_entry(obj, &con->head, node) {
2759 		struct ras_query_if info = {
2760 			.head = obj->head,
2761 		};
2762 
2763 		amdgpu_ras_error_status_query(adev, &info);
2764 	}
2765 }
2766 
2767 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
2768 		struct ras_badpage *bps, uint32_t count, uint32_t start)
2769 {
2770 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2771 	struct ras_err_handler_data *data;
2772 	int r = 0;
2773 	uint32_t i;
2774 
2775 	if (!con || !con->eh_data || !bps || !count)
2776 		return -EINVAL;
2777 
2778 	mutex_lock(&con->recovery_lock);
2779 	data = con->eh_data;
2780 	if (start < data->count) {
2781 		for (i = start; i < data->count; i++) {
2782 			if (!data->bps[i].ts)
2783 				continue;
2784 
2785 			bps[r].bp = data->bps[i].retired_page;
2786 			r++;
2787 			if (r >= count)
2788 				break;
2789 		}
2790 	}
2791 	mutex_unlock(&con->recovery_lock);
2792 
2793 	return r;
2794 }
2795 
2796 static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev,
2797 		struct ras_badpage *bps, uint32_t count, uint32_t start)
2798 {
2799 	struct ras_cmd_bad_pages_info_req cmd_input;
2800 	struct ras_cmd_bad_pages_info_rsp *output;
2801 	uint32_t group, start_group, end_group;
2802 	uint32_t pos, pos_in_group;
2803 	int r = 0, i;
2804 
2805 	if (!bps || !count)
2806 		return -EINVAL;
2807 
2808 	output = kmalloc(sizeof(*output), GFP_KERNEL);
2809 	if (!output)
2810 		return -ENOMEM;
2811 
2812 	memset(&cmd_input, 0, sizeof(cmd_input));
2813 
2814 	start_group = start / RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
2815 	end_group = (start + count + RAS_CMD_MAX_BAD_PAGES_PER_GROUP - 1) /
2816 				RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
2817 
2818 	pos = start;
2819 	for (group = start_group; group < end_group; group++) {
2820 		memset(output, 0, sizeof(*output));
2821 		cmd_input.group_index = group;
2822 		if (amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BAD_PAGES,
2823 			&cmd_input, sizeof(cmd_input), output, sizeof(*output)))
2824 			goto out;
2825 
2826 		if (pos >= output->bp_total_cnt)
2827 			goto out;
2828 
2829 		pos_in_group = pos - group * RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
2830 		for (i = pos_in_group; i < output->bp_in_group; i++, pos++) {
2831 			if (!output->records[i].ts)
2832 				continue;
2833 
2834 			bps[r].bp = output->records[i].retired_page;
2835 			r++;
2836 			if (r >= count)
2837 				goto out;
2838 		}
2839 	}
2840 
2841 out:
2842 	kfree(output);
2843 	return r;
2844 }
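
/*
 * Worked example for the group math above, assuming
 * RAS_CMD_MAX_BAD_PAGES_PER_GROUP == 64 (the real value comes from the
 * ras mgr headers): reading count == 100 records from start == 70 gives
 * start_group == 70 / 64 == 1 and end_group == (70 + 100 + 63) / 64 == 3,
 * so groups 1 and 2 are fetched, and within group 1 the copy starts at
 * pos_in_group == 70 - 1 * 64 == 6.
 */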
2845 
2846 static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
2847 				   struct amdgpu_hive_info *hive, bool status)
2848 {
2849 	struct amdgpu_device *tmp_adev;
2850 
2851 	if (hive) {
2852 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
2853 			amdgpu_ras_set_fed(tmp_adev, status);
2854 	} else {
2855 		amdgpu_ras_set_fed(adev, status);
2856 	}
2857 }
2858 
2859 bool amdgpu_ras_in_recovery(struct amdgpu_device *adev)
2860 {
2861 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2862 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2863 	int hive_ras_recovery = 0;
2864 
2865 	if (hive) {
2866 		hive_ras_recovery = atomic_read(&hive->ras_recovery);
2867 		amdgpu_put_xgmi_hive(hive);
2868 	}
2869 
2870 	if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
2871 		return true;
2872 
2873 	return false;
2874 }
2875 
2876 static enum ras_event_type amdgpu_ras_get_fatal_error_event(struct amdgpu_device *adev)
2877 {
2878 	if (amdgpu_ras_intr_triggered())
2879 		return RAS_EVENT_TYPE_FATAL;
2880 	else
2881 		return RAS_EVENT_TYPE_POISON_CONSUMPTION;
2882 }
2883 
2884 static void amdgpu_ras_do_recovery(struct work_struct *work)
2885 {
2886 	struct amdgpu_ras *ras =
2887 		container_of(work, struct amdgpu_ras, recovery_work);
2888 	struct amdgpu_device *remote_adev = NULL;
2889 	struct amdgpu_device *adev = ras->adev;
2890 	struct list_head device_list, *device_list_handle =  NULL;
2891 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2892 	unsigned int error_query_mode;
2893 	enum ras_event_type type;
2894 
2895 	if (hive) {
2896 		atomic_set(&hive->ras_recovery, 1);
2897 
2898 		/* If any device which is part of the hive received RAS fatal
2899 		 * error interrupt, set fatal error status on all. This
2900 		 * condition will need a recovery, and flag will be cleared
2901 		 * as part of recovery.
2902 		 */
2903 		list_for_each_entry(remote_adev, &hive->device_list,
2904 				    gmc.xgmi.head)
2905 			if (amdgpu_ras_get_fed_status(remote_adev)) {
2906 				amdgpu_ras_set_fed_all(adev, hive, true);
2907 				break;
2908 			}
2909 	}
2910 	if (!ras->disable_ras_err_cnt_harvest) {
2911 
2912 		/* Build list of devices to query RAS related errors */
2913 		if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
2914 			device_list_handle = &hive->device_list;
2915 		} else {
2916 			INIT_LIST_HEAD(&device_list);
2917 			list_add_tail(&adev->gmc.xgmi.head, &device_list);
2918 			device_list_handle = &device_list;
2919 		}
2920 
2921 		if (amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) {
2922 			if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY) {
2923 				/* wait 500ms to ensure pmfw has finished polling the mca bank info */
2924 				msleep(500);
2925 			}
2926 		}
2927 
2928 		type = amdgpu_ras_get_fatal_error_event(adev);
2929 		list_for_each_entry(remote_adev,
2930 				device_list_handle, gmc.xgmi.head) {
2931 			if (amdgpu_uniras_enabled(remote_adev)) {
2932 				amdgpu_ras_mgr_update_ras_ecc(remote_adev);
2933 			} else {
2934 				amdgpu_ras_query_err_status(remote_adev);
2935 				amdgpu_ras_log_on_err_counter(remote_adev, type);
2936 			}
2937 		}
2938 
2939 	}
2940 
2941 	if (amdgpu_device_should_recover_gpu(ras->adev)) {
2942 		struct amdgpu_reset_context reset_context;
2943 		memset(&reset_context, 0, sizeof(reset_context));
2944 
2945 		reset_context.method = AMD_RESET_METHOD_NONE;
2946 		reset_context.reset_req_dev = adev;
2947 		reset_context.src = AMDGPU_RESET_SRC_RAS;
2948 		set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
2949 
2950 		/* Perform full reset in fatal error mode */
2951 		if (!amdgpu_ras_is_poison_mode_supported(ras->adev)) {
2952 			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2953 		} else {
2954 			clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2955 
2956 			if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
2957 				ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
2958 				reset_context.method = AMD_RESET_METHOD_MODE2;
2959 			}
2960 
2961 			/* If a fatal error occurs in poison mode, a mode1 reset
2962 			 * is used to recover the gpu.
2963 			 */
2964 			if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
2965 				ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2966 				set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2967 
2968 				psp_fatal_error_recovery_quirk(&adev->psp);
2969 			}
2970 		}
2971 
2972 		amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2973 	}
2974 	atomic_set(&ras->in_recovery, 0);
2975 	if (hive) {
2976 		atomic_set(&hive->ras_recovery, 0);
2977 		amdgpu_put_xgmi_hive(hive);
2978 	}
2979 }
2980 
2981 /* alloc/realloc bps array */
2982 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2983 		struct ras_err_handler_data *data, int pages)
2984 {
2985 	unsigned int old_space = data->count + data->space_left;
2986 	unsigned int new_space = old_space + pages;
2987 	unsigned int align_space = ALIGN(new_space, 512);
2988 	void *bps = kmalloc_array(align_space, sizeof(*data->bps), GFP_KERNEL);
2989 
2990 	if (!bps)
2991 		return -ENOMEM;
2993 
2994 	if (data->bps) {
2995 		memcpy(bps, data->bps,
2996 				data->count * sizeof(*data->bps));
2997 		kfree(data->bps);
2998 	}
2999 
3000 	data->bps = bps;
3001 	data->space_left += align_space - old_space;
3002 	return 0;
3003 }
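
/*
 * Worked example: with data->count == 100, data->space_left == 0 and
 * pages == 256, old_space == 100, new_space == 356 and align_space ==
 * ALIGN(356, 512) == 512, so space_left grows by 512 - 100 == 412 slots.
 */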
3004 
3005 static int amdgpu_ras_mca2pa_by_idx(struct amdgpu_device *adev,
3006 			struct eeprom_table_record *bps,
3007 			struct ras_err_data *err_data)
3008 {
3009 	struct ta_ras_query_address_input addr_in;
3010 	uint32_t socket = 0;
3011 	int ret = 0;
3012 
3013 	if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
3014 		socket = adev->smuio.funcs->get_socket_id(adev);
3015 
3016 	/* reinit err_data */
3017 	err_data->err_addr_cnt = 0;
3018 	err_data->err_addr_len = adev->umc.retire_unit;
3019 
3020 	memset(&addr_in, 0, sizeof(addr_in));
3021 	addr_in.ma.err_addr = bps->address;
3022 	addr_in.ma.socket_id = socket;
3023 	addr_in.ma.ch_inst = bps->mem_channel;
3024 	if (!amdgpu_ras_smu_eeprom_supported(adev)) {
3025 		/* tell RAS TA the node instance is not used */
3026 		addr_in.ma.node_inst = TA_RAS_INV_NODE;
3027 	} else {
3028 		addr_in.ma.umc_inst = bps->mcumc_id;
3029 		addr_in.ma.node_inst = bps->cu;
3030 	}
3031 
3032 	if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
3033 		ret = adev->umc.ras->convert_ras_err_addr(adev, err_data,
3034 				&addr_in, NULL, false);
3035 
3036 	return ret;
3037 }
3038 
3039 static int amdgpu_ras_mca2pa(struct amdgpu_device *adev,
3040 			struct eeprom_table_record *bps,
3041 			struct ras_err_data *err_data)
3042 {
3043 	struct ta_ras_query_address_input addr_in;
3044 	uint32_t die_id, socket = 0;
3045 
3046 	if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
3047 		socket = adev->smuio.funcs->get_socket_id(adev);
3048 
3049 	/* although the die id is obtained from the PA in nps1 mode, the id
3050 	 * is suitable for any nps mode
3051 	 */
3052 	if (adev->umc.ras && adev->umc.ras->get_die_id_from_pa)
3053 		die_id = adev->umc.ras->get_die_id_from_pa(adev, bps->address,
3054 					bps->retired_page << AMDGPU_GPU_PAGE_SHIFT);
3055 	else
3056 		return -EINVAL;
3057 
3058 	/* reinit err_data */
3059 	err_data->err_addr_cnt = 0;
3060 	err_data->err_addr_len = adev->umc.retire_unit;
3061 
3062 	memset(&addr_in, 0, sizeof(addr_in));
3063 	addr_in.ma.err_addr = bps->address;
3064 	addr_in.ma.ch_inst = bps->mem_channel;
3065 	addr_in.ma.umc_inst = bps->mcumc_id;
3066 	addr_in.ma.node_inst = die_id;
3067 	addr_in.ma.socket_id = socket;
3068 
3069 	if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
3070 		return adev->umc.ras->convert_ras_err_addr(adev, err_data,
3071 					&addr_in, NULL, false);
3072 	else
3073 		return -EINVAL;
3074 }
3075 
3076 static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev,
3077 					struct eeprom_table_record *bps, int count)
3078 {
3079 	int j;
3080 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3081 	struct ras_err_handler_data *data = con->eh_data;
3082 
3083 	for (j = 0; j < count; j++) {
3084 		if (!data->space_left &&
3085 		    amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
3086 			return -ENOMEM;
3087 		}
3088 
3089 		if (amdgpu_ras_check_bad_page_unlock(con,
3090 			bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT)) {
3091 			data->count++;
3092 			data->space_left--;
3093 			continue;
3094 		}
3095 
3096 		amdgpu_ras_reserve_page(adev, bps[j].retired_page);
3097 
3098 		memcpy(&data->bps[data->count], &(bps[j]),
3099 				sizeof(struct eeprom_table_record));
3100 		data->count++;
3101 		data->space_left--;
3102 		con->bad_page_num++;
3103 	}
3104 
3105 	return 0;
3106 }
3107 
3108 static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev,
3109 				struct eeprom_table_record *bps, struct ras_err_data *err_data,
3110 				enum amdgpu_memory_partition nps)
3111 {
3112 	int i = 0;
3113 	enum amdgpu_memory_partition save_nps;
3114 
3115 	save_nps = (bps[0].retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
3116 
3117 	/* old asics just have the pa in eeprom */
3118 	if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
3119 		memcpy(err_data->err_addr, bps,
3120 			sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
3121 		goto out;
3122 	}
3123 
3124 	for (i = 0; i < adev->umc.retire_unit; i++)
3125 		bps[i].retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
3126 
3127 	if (save_nps) {
3128 		if (save_nps == nps) {
3129 			if (amdgpu_umc_pages_in_a_row(adev, err_data,
3130 					bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT))
3131 				return -EINVAL;
3132 			for (i = 0; i < adev->umc.retire_unit; i++) {
3133 				err_data->err_addr[i].address = bps[0].address;
3134 				err_data->err_addr[i].mem_channel = bps[0].mem_channel;
3135 				err_data->err_addr[i].bank = bps[0].bank;
3136 				err_data->err_addr[i].err_type = bps[0].err_type;
3137 				err_data->err_addr[i].mcumc_id = bps[0].mcumc_id;
3138 			}
3139 		} else {
3140 			if (amdgpu_ras_mca2pa_by_idx(adev, &bps[0], err_data))
3141 				return -EINVAL;
3142 		}
3143 	} else {
3144 		if (bps[0].address == 0) {
3145 			/* for certain old eeprom data, the mca address is not
3146 			 * stored; calculate it from the pa
3147 			 */
3148 			if (amdgpu_umc_pa2mca(adev, bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT,
3149 				&(bps[0].address), AMDGPU_NPS1_PARTITION_MODE))
3150 				return -EINVAL;
3151 		}
3152 
3153 		if (amdgpu_ras_mca2pa(adev, &bps[0], err_data)) {
3154 			if (nps == AMDGPU_NPS1_PARTITION_MODE)
3155 				memcpy(err_data->err_addr, bps,
3156 					sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
3157 			else
3158 				return -EOPNOTSUPP;
3159 		}
3160 	}
3161 
3162 out:
3163 	return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr, adev->umc.retire_unit);
3164 }
3165 
3166 static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
3167 				struct eeprom_table_record *bps, struct ras_err_data *err_data,
3168 				enum amdgpu_memory_partition nps)
3169 {
3170 	int i = 0;
3171 	enum amdgpu_memory_partition save_nps;
3172 
3173 	if (!amdgpu_ras_smu_eeprom_supported(adev)) {
3174 		save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
3175 		bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
3176 	} else {
3177 		/* if pmfw manages the eeprom, save_nps is not stored on the
3178 		 * eeprom; we should always convert the mca address into a
3179 		 * physical address, so make save_nps different from nps
3180 		 */
3181 		save_nps = nps + 1;
3182 	}
3183 
3184 	if (save_nps == nps) {
3185 		if (amdgpu_umc_pages_in_a_row(adev, err_data,
3186 				bps->retired_page << AMDGPU_GPU_PAGE_SHIFT))
3187 			return -EINVAL;
3188 		for (i = 0; i < adev->umc.retire_unit; i++) {
3189 			err_data->err_addr[i].address = bps->address;
3190 			err_data->err_addr[i].mem_channel = bps->mem_channel;
3191 			err_data->err_addr[i].bank = bps->bank;
3192 			err_data->err_addr[i].err_type = bps->err_type;
3193 			err_data->err_addr[i].mcumc_id = bps->mcumc_id;
3194 		}
3195 	} else {
3196 		if (bps->address) {
3197 			if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
3198 				return -EINVAL;
3199 		} else {
3200 			/* for certain old eeprom data, the mca address is not
3201 			 * stored; calculate it from the pa
3202 			 */
3203 			if (amdgpu_umc_pa2mca(adev, bps->retired_page << AMDGPU_GPU_PAGE_SHIFT,
3204 				&(bps->address), AMDGPU_NPS1_PARTITION_MODE))
3205 				return -EINVAL;
3206 
3207 			if (amdgpu_ras_mca2pa(adev, bps, err_data))
3208 				return -EOPNOTSUPP;
3209 		}
3210 	}
3211 
3212 	return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr,
3213 									adev->umc.retire_unit);
3214 }
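
/*
 * The records handled above multiplex the nps mode into the high bits of
 * retired_page. A minimal sketch of the decode, using the
 * UMC_NPS_SHIFT/UMC_NPS_MASK pair referenced above (the matching encode
 * would presumably be the reverse shift/or on the save side):
 *
 *	save_nps = (rec->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
 *	rec->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
 */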
3215 
3216 /* it deals with vram only. */
3217 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
3218 		struct eeprom_table_record *bps, int pages, bool from_rom)
3219 {
3220 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3221 	struct ras_err_data err_data;
3222 	struct amdgpu_ras_eeprom_control *control =
3223 			&adev->psp.ras_context.ras->eeprom_control;
3224 	enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
3225 	int ret = 0;
3226 	uint32_t i = 0;
3227 
3228 	if (!con || !con->eh_data || !bps || pages <= 0)
3229 		return 0;
3230 
3231 	if (from_rom) {
3232 		err_data.err_addr =
3233 			kcalloc(adev->umc.retire_unit,
3234 				sizeof(struct eeprom_table_record), GFP_KERNEL);
3235 		if (!err_data.err_addr) {
3236 			dev_warn(adev->dev, "Failed to alloc UMC error address record in mca2pa conversion!\n");
3237 			return -ENOMEM;
3238 		}
3239 
3240 		if (adev->gmc.gmc_funcs->query_mem_partition_mode)
3241 			nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
3242 	}
3243 
3244 	mutex_lock(&con->recovery_lock);
3245 
3246 	if (from_rom) {
3247 		/* there are no pa recs in V3, so skip pa recs processing */
3248 		if ((control->tbl_hdr.version < RAS_TABLE_VER_V3) &&
3249 		    !amdgpu_ras_smu_eeprom_supported(adev)) {
3250 			for (i = 0; i < pages; i++) {
3251 				if (control->ras_num_recs - i >= adev->umc.retire_unit) {
3252 					if ((bps[i].address == bps[i + 1].address) &&
3253 						(bps[i].mem_channel == bps[i + 1].mem_channel)) {
3254 						/* deal with retire_unit records at a time */
3255 						ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
3256 										&bps[i], &err_data, nps);
3257 						i += (adev->umc.retire_unit - 1);
3258 					} else {
3259 						break;
3260 					}
3261 				} else {
3262 					break;
3263 				}
3264 			}
3265 		}
3266 		for (; i < pages; i++) {
3267 			ret = __amdgpu_ras_convert_rec_from_rom(adev,
3268 				&bps[i], &err_data, nps);
3269 		}
3270 
3271 		con->eh_data->count_saved = con->eh_data->count;
3272 	} else {
3273 		ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
3274 	}
3275 
3276 	if (from_rom)
3277 		kfree(err_data.err_addr);
3278 	mutex_unlock(&con->recovery_lock);
3279 
3280 	return ret;
3281 }
3282 
3283 /*
3284  * write error record array to eeprom, the function should be
3285  * protected by recovery_lock
3286  * new_cnt: newly added UE count, excluding reserved bad pages, can be NULL
3287  */
3288 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
3289 		unsigned long *new_cnt)
3290 {
3291 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3292 	struct ras_err_handler_data *data;
3293 	struct amdgpu_ras_eeprom_control *control;
3294 	int save_count, unit_num, i;
3295 
3296 	if (!con || !con->eh_data) {
3297 		if (new_cnt)
3298 			*new_cnt = 0;
3299 
3300 		return 0;
3301 	}
3302 
3303 	if (!con->eeprom_control.is_eeprom_valid) {
3304 		dev_warn(adev->dev,
3305 			"Failed to save EEPROM table data because of EEPROM data corruption!");
3306 		if (new_cnt)
3307 			*new_cnt = 0;
3308 
3309 		return 0;
3310 	}
3311 
3312 	mutex_lock(&con->recovery_lock);
3313 	control = &con->eeprom_control;
3314 	data = con->eh_data;
3315 	if (amdgpu_ras_smu_eeprom_supported(adev))
3316 		unit_num = control->ras_num_recs -
3317 			control->ras_num_recs_old;
3318 	else
3319 		unit_num = data->count / adev->umc.retire_unit -
3320 			control->ras_num_recs;
3321 
3322 	save_count = con->bad_page_num - control->ras_num_bad_pages;
3323 	mutex_unlock(&con->recovery_lock);
3324 
3325 	if (new_cnt)
3326 		*new_cnt = unit_num;
3327 
3328 	/* only new entries are saved */
3329 	if (unit_num && save_count) {
3330 		/* old asics only save the pa to eeprom, as before */
3331 		if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
3332 			if (amdgpu_ras_eeprom_append(control,
3333 					&data->bps[data->count_saved], unit_num)) {
3334 				dev_err(adev->dev, "Failed to save EEPROM table data!");
3335 				return -EIO;
3336 			}
3337 		} else {
3338 			for (i = 0; i < unit_num; i++) {
3339 				if (amdgpu_ras_eeprom_append(control,
3340 						&data->bps[data->count_saved +
3341 						i * adev->umc.retire_unit], 1)) {
3342 					dev_err(adev->dev, "Failed to save EEPROM table data!");
3343 					return -EIO;
3344 				}
3345 			}
3346 		}
3347 
3348 		dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
3349 		data->count_saved = data->count;
3350 	}
3351 
3352 	return 0;
3353 }
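
/*
 * Worked example for the bookkeeping above (illustrative numbers): on a
 * path where pmfw does not manage the eeprom, with retire_unit == 16,
 * data->count == 64 and control->ras_num_recs == 2, unit_num becomes
 * 64 / 16 - 2 == 2, so two new records are appended starting at
 * data->bps[data->count_saved].
 */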
3354 
3355 /*
3356  * read error record array in eeprom and reserve enough space for
3357  * storing new bad pages
3358  */
3359 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
3360 {
3361 	struct amdgpu_ras_eeprom_control *control =
3362 		&adev->psp.ras_context.ras->eeprom_control;
3363 	struct eeprom_table_record *bps;
3364 	int ret, i = 0;
3365 
3366 	/* no bad page record, skip eeprom access */
3367 	if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
3368 		return 0;
3369 
3370 	bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
3371 	if (!bps)
3372 		return -ENOMEM;
3373 
3374 	ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
3375 	if (ret) {
3376 		dev_err(adev->dev, "Failed to load EEPROM table records!");
3377 	} else {
3378 		if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
3379 			/* In V3 there are no pa recs, and in some cases (when address == 0)
3380 			 * records may be parsed as pa recs, so add a version check to avoid that.
3381 			 */
3382 			if ((control->tbl_hdr.version < RAS_TABLE_VER_V3) &&
3383 			    !amdgpu_ras_smu_eeprom_supported(adev)) {
3384 				for (i = 0; i < control->ras_num_recs; i++) {
3385 					if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
3386 						if ((bps[i].address == bps[i + 1].address) &&
3387 							(bps[i].mem_channel == bps[i + 1].mem_channel)) {
3388 							control->ras_num_pa_recs += adev->umc.retire_unit;
3389 							i += (adev->umc.retire_unit - 1);
3390 						} else {
3391 							control->ras_num_mca_recs +=
3392 										(control->ras_num_recs - i);
3393 							break;
3394 						}
3395 					} else {
3396 						control->ras_num_mca_recs += (control->ras_num_recs - i);
3397 						break;
3398 					}
3399 				}
3400 			} else {
3401 				control->ras_num_mca_recs = control->ras_num_recs;
3402 			}
3403 		}
3404 
3405 		ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
3406 		if (ret)
3407 			goto out;
3408 
3409 		ret = amdgpu_ras_eeprom_check(control);
3410 		if (ret)
3411 			goto out;
3412 
3413 		/* HW not usable */
3414 		if (amdgpu_ras_is_rma(adev))
3415 			ret = -EHWPOISON;
3416 	}
3417 
3418 out:
3419 	kfree(bps);
3420 	return ret;
3421 }
3422 
3423 static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
3424 				uint64_t addr)
3425 {
3426 	struct ras_err_handler_data *data = con->eh_data;
3427 	struct amdgpu_device *adev = con->adev;
3428 	int i;
3429 
3430 	if ((addr >= adev->gmc.mc_vram_size &&
3431 	    adev->gmc.mc_vram_size) ||
3432 	    (addr >= RAS_UMC_INJECT_ADDR_LIMIT))
3433 		return -EINVAL;
3434 
3435 	addr >>= AMDGPU_GPU_PAGE_SHIFT;
3436 	for (i = 0; i < data->count; i++)
3437 		if (addr == data->bps[i].retired_page)
3438 			return 1;
3439 
3440 	return 0;
3441 }
3442 
3443 /*
3444  * check if an address belongs to a bad page
3445  *
3446  * Note: this check is only for umc block
3447  */
3448 static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
3449 				uint64_t addr)
3450 {
3451 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3452 	int ret = 0;
3453 
3454 	if (!con || !con->eh_data)
3455 		return ret;
3456 
3457 	mutex_lock(&con->recovery_lock);
3458 	ret = amdgpu_ras_check_bad_page_unlock(con, addr);
3459 	mutex_unlock(&con->recovery_lock);
3460 	return ret;
3461 }
3462 
3463 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
3464 					  uint32_t max_count)
3465 {
3466 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3467 
3468 	/*
3469 	 * amdgpu_bad_page_threshold is used to config
3470 	 * the threshold for the number of bad pages.
3471 	 * -1:  Threshold is set to a default value.
3472 	 *      Driver will issue a warning when it is reached and
3473 	 *      continue runtime services.
3474 	 * 0:   Disable bad page retirement.
3475 	 *      Driver will not retire bad pages, which is intended
3476 	 *      for debugging purposes.
3477 	 * -2:  Threshold is determined by a formula that assumes
3478 	 *      1 bad page per 100M of local memory.
3479 	 *      Driver will continue runtime services when it is reached.
3480 	 * 0 < threshold < max number of bad page records in EEPROM:
3481 	 *      A user-defined threshold is set.
3482 	 *      Driver will halt runtime services when it is reached.
3483 	 */
3484 	if (amdgpu_bad_page_threshold == -2) {
3485 		u64 val = adev->gmc.mc_vram_size;
3486 
3487 		do_div(val, RAS_BAD_PAGE_COVER);
3488 		con->bad_page_cnt_threshold = min(lower_32_bits(val),
3489 						  max_count);
3490 	} else if (amdgpu_bad_page_threshold == -1) {
3491 		con->bad_page_cnt_threshold = ((con->reserved_pages_in_bytes) >> 21) << 4;
3492 	} else {
3493 		con->bad_page_cnt_threshold = min_t(int, max_count,
3494 						    amdgpu_bad_page_threshold);
3495 	}
3496 }
3497 
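/*
 * Queue a poison consumption message for the page retirement thread.
 * Returns 0 on success, or -ENOSPC if the poison fifo is full.
 *
 * A hypothetical caller sketch (names illustrative only):
 *
 *	if (!amdgpu_ras_put_poison_req(adev, AMDGPU_RAS_BLOCK__GFX, pasid,
 *				       NULL, NULL,
 *				       AMDGPU_RAS_GPU_RESET_MODE2_RESET))
 *		atomic_inc(&con->page_retirement_req_cnt);
 */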
3498 int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
3499 		enum amdgpu_ras_block block, uint16_t pasid,
3500 		pasid_notify pasid_fn, void *data, uint32_t reset)
3501 {
3502 	int ret = 0;
3503 	struct ras_poison_msg poison_msg;
3504 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3505 
3506 	memset(&poison_msg, 0, sizeof(poison_msg));
3507 	poison_msg.block = block;
3508 	poison_msg.pasid = pasid;
3509 	poison_msg.reset = reset;
3510 	poison_msg.pasid_fn = pasid_fn;
3511 	poison_msg.data = data;
3512 
3513 	ret = kfifo_put(&con->poison_fifo, poison_msg);
3514 	if (!ret) {
3515 		dev_err(adev->dev, "Poison message fifo is full!\n");
3516 		return -ENOSPC;
3517 	}
3518 
3519 	return 0;
3520 }
3521 
3522 static int amdgpu_ras_get_poison_req(struct amdgpu_device *adev,
3523 		struct ras_poison_msg *poison_msg)
3524 {
3525 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3526 
3527 	return kfifo_get(&con->poison_fifo, poison_msg);
3528 }
3529 
3530 static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
3531 {
3532 	mutex_init(&ecc_log->lock);
3533 
3534 	INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
3535 	ecc_log->de_queried_count = 0;
3536 	ecc_log->consumption_q_count = 0;
3537 }
3538 
3539 static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
3540 {
3541 	struct radix_tree_iter iter;
3542 	void __rcu **slot;
3543 	struct ras_ecc_err *ecc_err;
3544 
3545 	mutex_lock(&ecc_log->lock);
3546 	radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) {
3547 		ecc_err = radix_tree_deref_slot(slot);
3548 		kfree(ecc_err->err_pages.pfn);
3549 		kfree(ecc_err);
3550 		radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot);
3551 	}
3552 	mutex_unlock(&ecc_log->lock);
3553 
3554 	mutex_destroy(&ecc_log->lock);
3555 	ecc_log->de_queried_count = 0;
3556 	ecc_log->consumption_q_count = 0;
3557 }
3558 
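/*
 * Reschedule the page retirement delayed work after @delayed_ms if any
 * entry in the de_page_tree is still tagged UMC_ECC_NEW_DETECTED_TAG.
 * Returns true if the work was rescheduled.
 */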
3559 static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con,
3560 				uint32_t delayed_ms)
3561 {
3562 	int ret;
3563 
3564 	mutex_lock(&con->umc_ecc_log.lock);
3565 	ret = radix_tree_tagged(&con->umc_ecc_log.de_page_tree,
3566 			UMC_ECC_NEW_DETECTED_TAG);
3567 	mutex_unlock(&con->umc_ecc_log.lock);
3568 
3569 	if (ret)
3570 		schedule_delayed_work(&con->page_retirement_dwork,
3571 			msecs_to_jiffies(delayed_ms));
3572 
3573 	return ret ? true : false;
3574 }
3575 
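/*
 * Delayed work handler that retires bad pages reported by the UMC
 * block, then reschedules itself while tagged ECC records remain.
 */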
3576 static void amdgpu_ras_do_page_retirement(struct work_struct *work)
3577 {
3578 	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
3579 					      page_retirement_dwork.work);
3580 	struct amdgpu_device *adev = con->adev;
3581 	struct ras_err_data err_data;
3582 
3583 	/* If gpu reset is ongoing, delay retiring the bad pages */
3584 	if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
3585 		amdgpu_ras_schedule_retirement_dwork(con,
3586 				AMDGPU_RAS_RETIRE_PAGE_INTERVAL * 3);
3587 		return;
3588 	}
3589 
3590 	amdgpu_ras_error_data_init(&err_data);
3591 
3592 	amdgpu_umc_handle_bad_pages(adev, &err_data);
3593 
3594 	amdgpu_ras_error_data_fini(&err_data);
3595 
3596 	amdgpu_ras_schedule_retirement_dwork(con,
3597 			AMDGPU_RAS_RETIRE_PAGE_INTERVAL);
3598 }
3599 
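/*
 * Handle poison creation interrupts: poll the UMC error status until
 * both newly detected ECC records and consumption-queue entries are
 * observed (or the polling timeout expires), then kick the page
 * retirement work. If the device has reached RMA status, trigger a
 * single GPU reset.
 */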
3600 static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
3601 				uint32_t poison_creation_count)
3602 {
3603 	int ret = 0;
3604 	struct ras_ecc_log_info *ecc_log;
3605 	struct ras_query_if info;
3606 	u32 timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
3607 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3608 	u64 de_queried_count;
3609 	u64 consumption_q_count;
3610 	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
3611 
3612 	memset(&info, 0, sizeof(info));
3613 	info.head.block = AMDGPU_RAS_BLOCK__UMC;
3614 
3615 	ecc_log = &ras->umc_ecc_log;
3616 	ecc_log->de_queried_count = 0;
3617 	ecc_log->consumption_q_count = 0;
3618 
3619 	do {
3620 		ret = amdgpu_ras_query_error_status_with_event(adev, &info, type);
3621 		if (ret)
3622 			return ret;
3623 
3624 		de_queried_count = ecc_log->de_queried_count;
3625 		consumption_q_count = ecc_log->consumption_q_count;
3626 
3627 		if (de_queried_count && consumption_q_count)
3628 			break;
3629 
3630 		msleep(100);
3631 	} while (--timeout);
3632 
3633 	if (de_queried_count)
3634 		schedule_delayed_work(&ras->page_retirement_dwork, 0);
3635 
3636 	if (amdgpu_ras_is_rma(adev) && atomic_cmpxchg(&ras->rma_in_recovery, 0, 1) == 0)
3637 		amdgpu_ras_reset_gpu(adev);
3638 
3639 	return 0;
3640 }
3641 
3642 static void amdgpu_ras_clear_poison_fifo(struct amdgpu_device *adev)
3643 {
3644 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3645 	struct ras_poison_msg msg;
3646 	int ret;
3647 
3648 	do {
3649 		ret = kfifo_get(&con->poison_fifo, &msg);
3650 	} while (ret);
3651 }
3652 
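/*
 * Drain up to @msg_count poison consumption messages from the fifo,
 * notify the affected pasids, and trigger the strongest GPU reset mode
 * requested by the messages (unless the device is in RMA status, where
 * the creation handler owns the reset). The selected reset mode is
 * returned through @gpu_reset.
 */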
3653 static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
3654 			uint32_t msg_count, uint32_t *gpu_reset)
3655 {
3656 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3657 	uint32_t reset_flags = 0, reset = 0;
3658 	struct ras_poison_msg msg;
3659 	int ret, i;
3660 
3661 	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
3662 
3663 	for (i = 0; i < msg_count; i++) {
3664 		ret = amdgpu_ras_get_poison_req(adev, &msg);
3665 		if (!ret)
3666 			continue;
3667 
3668 		if (msg.pasid_fn)
3669 			msg.pasid_fn(adev, msg.pasid, msg.data);
3670 
3671 		reset_flags |= msg.reset;
3672 	}
3673 
3674 	/*
3675 	 * Try to ensure the poison creation handler completes first,
3676 	 * so that RMA status is set if the bad page count exceeds the threshold.
3677 	 */
3678 	flush_delayed_work(&con->page_retirement_dwork);
3679 
3680 	/* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
3681 	if (reset_flags && !amdgpu_ras_is_rma(adev)) {
3682 		if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
3683 			reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
3684 		else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET)
3685 			reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
3686 		else
3687 			reset = reset_flags;
3688 
3689 		con->gpu_reset_flags |= reset;
3690 		amdgpu_ras_reset_gpu(adev);
3691 
3692 		*gpu_reset = reset;
3693 
3694 		/* Wait for gpu recovery to complete */
3695 		flush_work(&con->recovery_work);
3696 	}
3697 
3698 	return 0;
3699 }
3700 
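/*
 * Main worker thread for page retirement. It waits for poison
 * creation/consumption requests, dispatches them to the handlers
 * above, and cleans up any outstanding requests after a GPU reset.
 */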
3701 static int amdgpu_ras_page_retirement_thread(void *param)
3702 {
3703 	struct amdgpu_device *adev = (struct amdgpu_device *)param;
3704 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3705 	uint32_t poison_creation_count, msg_count;
3706 	uint32_t gpu_reset;
3707 	int ret;
3708 
3709 	while (!kthread_should_stop()) {
3710 
3711 		wait_event_interruptible(con->page_retirement_wq,
3712 				kthread_should_stop() ||
3713 				atomic_read(&con->page_retirement_req_cnt));
3714 
3715 		if (kthread_should_stop())
3716 			break;
3717 
3718 		mutex_lock(&con->poison_lock);
3719 		gpu_reset = 0;
3720 
3721 		do {
3722 			poison_creation_count = atomic_read(&con->poison_creation_count);
3723 			ret = amdgpu_ras_poison_creation_handler(adev, poison_creation_count);
3724 			if (ret == -EIO)
3725 				break;
3726 
3727 			if (poison_creation_count) {
3728 				atomic_sub(poison_creation_count, &con->poison_creation_count);
3729 				atomic_sub(poison_creation_count, &con->page_retirement_req_cnt);
3730 			}
3731 		} while (atomic_read(&con->poison_creation_count) &&
3732 			!atomic_read(&con->poison_consumption_count));
3733 
3734 		if (ret != -EIO) {
3735 			msg_count = kfifo_len(&con->poison_fifo);
3736 			if (msg_count) {
3737 				ret = amdgpu_ras_poison_consumption_handler(adev,
3738 						msg_count, &gpu_reset);
3739 				if ((ret != -EIO) &&
3740 				    (gpu_reset != AMDGPU_RAS_GPU_RESET_MODE1_RESET))
3741 					atomic_sub(msg_count, &con->page_retirement_req_cnt);
3742 			}
3743 		}
3744 
3745 		if ((ret == -EIO) || (gpu_reset == AMDGPU_RAS_GPU_RESET_MODE1_RESET)) {
3746 			/* A GPU mode-1 reset is ongoing, or a RAS mode-1 reset just completed */
3747 			/* Clear poison creation request */
3748 			atomic_set(&con->poison_creation_count, 0);
3749 			atomic_set(&con->poison_consumption_count, 0);
3750 
3751 			/* Clear poison fifo */
3752 			amdgpu_ras_clear_poison_fifo(adev);
3753 
3754 			/* Clear all poison requests */
3755 			atomic_set(&con->page_retirement_req_cnt, 0);
3756 
3757 			if (ret == -EIO) {
3758 				/* Wait for mode-1 reset to complete */
3759 				down_read(&adev->reset_domain->sem);
3760 				up_read(&adev->reset_domain->sem);
3761 			}
3762 
3763 			/* Wake up work to save bad pages to eeprom */
3764 			schedule_delayed_work(&con->page_retirement_dwork, 0);
3765 		} else if (gpu_reset) {
3766 			/* gpu just completed mode-2 reset or other reset */
3767 			/* Clear poison consumption messages cached in fifo */
3768 			msg_count = kfifo_len(&con->poison_fifo);
3769 			if (msg_count) {
3770 				amdgpu_ras_clear_poison_fifo(adev);
3771 				atomic_sub(msg_count, &con->page_retirement_req_cnt);
3772 			}
3773 
3774 			atomic_set(&con->poison_consumption_count, 0);
3775 
3776 			/* Wake up work to save bad pages to eeprom */
3777 			schedule_delayed_work(&con->page_retirement_dwork, 0);
3778 		}
3779 		mutex_unlock(&con->poison_lock);
3780 	}
3781 
3782 	return 0;
3783 }
3784 
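/*
 * Load the bad page records from the RAS EEPROM and publish the bad
 * page count and bad channel bitmap to the SMU. On newer ASICs the
 * EEPROM table is reformatted to the V3 layout when needed.
 */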
3785 int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
3786 {
3787 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3788 	struct amdgpu_ras_eeprom_control *control;
3789 	int ret;
3790 
3791 	if (!con || amdgpu_sriov_vf(adev))
3792 		return 0;
3793 
3794 	if (amdgpu_uniras_enabled(adev))
3795 		return 0;
3796 
3797 	control = &con->eeprom_control;
3798 	con->ras_smu_drv = amdgpu_dpm_get_ras_smu_driver(adev);
3799 
3800 	ret = amdgpu_ras_eeprom_init(control);
3801 	control->is_eeprom_valid = !ret;
3802 
3803 	if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
3804 		control->ras_num_pa_recs = control->ras_num_recs;
3805 
3806 	if (adev->umc.ras &&
3807 	    adev->umc.ras->get_retire_flip_bits)
3808 		adev->umc.ras->get_retire_flip_bits(adev);
3809 
3810 	if (control->ras_num_recs && control->is_eeprom_valid) {
3811 		ret = amdgpu_ras_load_bad_pages(adev);
3812 		if (ret) {
3813 			control->is_eeprom_valid = false;
3814 			return 0;
3815 		}
3816 
3817 		amdgpu_dpm_send_hbm_bad_pages_num(
3818 			adev, control->ras_num_bad_pages);
3819 
3820 		if (con->update_channel_flag == true) {
3821 			amdgpu_dpm_send_hbm_bad_channel_flag(
3822 				adev, control->bad_channel_bitmap);
3823 			con->update_channel_flag = false;
3824 		}
3825 
3826 		/* The format action is only applied to new ASICs */
3827 		if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) >= 12 &&
3828 		    control->tbl_hdr.version < RAS_TABLE_VER_V3)
3829 			if (!amdgpu_ras_eeprom_reset_table(control))
3830 				if (amdgpu_ras_save_bad_pages(adev, NULL))
3831 					dev_warn(adev->dev, "Failed to format RAS EEPROM data in V3 version!\n");
3832 	}
3833 
3834 	return 0;
3835 }
3836 
3837 int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
3838 {
3839 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3840 	struct ras_err_handler_data **data;
3841 	u32  max_eeprom_records_count = 0;
3842 	int ret;
3843 
3844 	if (!con || amdgpu_sriov_vf(adev))
3845 		return 0;
3846 
3847 	/* Allow access to RAS EEPROM via debugfs, when the ASIC
3848 	 * supports RAS and debugfs is enabled, even when
3849 	 * adev->ras_enabled is unset, i.e. when the "ras_enable"
3850 	 * module parameter is set to 0.
3851 	 */
3852 	con->adev = adev;
3853 
3854 	if (!adev->ras_enabled)
3855 		return 0;
3856 
3857 	data = &con->eh_data;
3858 	*data = kzalloc(sizeof(**data), GFP_KERNEL);
3859 	if (!*data) {
3860 		ret = -ENOMEM;
3861 		goto out;
3862 	}
3863 
3864 	mutex_init(&con->recovery_lock);
3865 	mutex_init(&con->poison_lock);
3866 	INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
3867 	atomic_set(&con->in_recovery, 0);
3868 	atomic_set(&con->rma_in_recovery, 0);
3869 	con->eeprom_control.bad_channel_bitmap = 0;
3870 
3871 	max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
3872 	amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
3873 
3874 	if (init_bp_info) {
3875 		ret = amdgpu_ras_init_badpage_info(adev);
3876 		if (ret)
3877 			goto free;
3878 	}
3879 
3880 	mutex_init(&con->page_rsv_lock);
3881 	INIT_KFIFO(con->poison_fifo);
3882 	mutex_init(&con->page_retirement_lock);
3883 	init_waitqueue_head(&con->page_retirement_wq);
3884 	atomic_set(&con->page_retirement_req_cnt, 0);
3885 	atomic_set(&con->poison_creation_count, 0);
3886 	atomic_set(&con->poison_consumption_count, 0);
3887 	con->page_retirement_thread =
3888 		kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
3889 	if (IS_ERR(con->page_retirement_thread)) {
3890 		con->page_retirement_thread = NULL;
3891 		dev_warn(adev->dev, "Failed to create umc_page_retirement thread!\n");
3892 	}
3893 
3894 	INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement);
3895 	amdgpu_ras_ecc_log_init(&con->umc_ecc_log);
3896 #ifdef CONFIG_X86_MCE_AMD
3897 	if ((adev->asic_type == CHIP_ALDEBARAN) &&
3898 	    (adev->gmc.xgmi.connected_to_cpu))
3899 		amdgpu_register_bad_pages_mca_notifier(adev);
3900 #endif
3901 	return 0;
3902 
3903 free:
3904 	kfree((*data)->bps);
3905 	kfree(*data);
3906 	con->eh_data = NULL;
3907 out:
3908 	dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
3909 
3910 	/*
3911 	 * Except for the error-threshold-exceeded case, other failures in this
3912 	 * function do not fail amdgpu driver init.
3913 	 */
3914 	if (!amdgpu_ras_is_rma(adev))
3915 		ret = 0;
3916 	else
3917 		ret = -EINVAL;
3918 
3919 	return ret;
3920 }
3921 
3922 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
3923 {
3924 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3925 	struct ras_err_handler_data *data = con->eh_data;
3926 	int max_flush_timeout = MAX_FLUSH_RETIRE_DWORK_TIMES;
3927 	bool ret;
3928 
3929 	/* recovery_init failed to initialize it, so fini is unnecessary */
3930 	if (!data)
3931 		return 0;
3932 
3933 	/* Save all cached bad pages to eeprom */
3934 	do {
3935 		flush_delayed_work(&con->page_retirement_dwork);
3936 		ret = amdgpu_ras_schedule_retirement_dwork(con, 0);
3937 	} while (ret && max_flush_timeout--);
3938 
3939 	if (con->page_retirement_thread)
3940 		kthread_stop(con->page_retirement_thread);
3941 
3942 	atomic_set(&con->page_retirement_req_cnt, 0);
3943 	atomic_set(&con->poison_creation_count, 0);
3944 
3945 	mutex_destroy(&con->page_rsv_lock);
3946 
3947 	cancel_work_sync(&con->recovery_work);
3948 
3949 	cancel_delayed_work_sync(&con->page_retirement_dwork);
3950 
3951 	amdgpu_ras_ecc_log_fini(&con->umc_ecc_log);
3952 
3953 	mutex_lock(&con->recovery_lock);
3954 	con->eh_data = NULL;
3955 	kfree(data->bps);
3956 	kfree(data);
3957 	mutex_unlock(&con->recovery_lock);
3958 
3959 	amdgpu_ras_critical_region_init(adev);
3960 #ifdef CONFIG_X86_MCE_AMD
3961 	amdgpu_unregister_bad_pages_mca_notifier(adev);
3962 #endif
3963 	return 0;
3964 }
3965 /* recovery end */
3966 
3967 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
3968 {
3969 	if (amdgpu_sriov_vf(adev)) {
3970 		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3971 		case IP_VERSION(13, 0, 2):
3972 		case IP_VERSION(13, 0, 6):
3973 		case IP_VERSION(13, 0, 12):
3974 		case IP_VERSION(13, 0, 14):
3975 			return true;
3976 		default:
3977 			return false;
3978 		}
3979 	}
3980 
3981 	if (adev->asic_type == CHIP_IP_DISCOVERY) {
3982 		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3983 		case IP_VERSION(13, 0, 0):
3984 		case IP_VERSION(13, 0, 6):
3985 		case IP_VERSION(13, 0, 10):
3986 		case IP_VERSION(13, 0, 12):
3987 		case IP_VERSION(13, 0, 14):
3988 		case IP_VERSION(14, 0, 3):
3989 			return true;
3990 		default:
3991 			return false;
3992 		}
3993 	}
3994 
3995 	return adev->asic_type == CHIP_VEGA10 ||
3996 		adev->asic_type == CHIP_VEGA20 ||
3997 		adev->asic_type == CHIP_ARCTURUS ||
3998 		adev->asic_type == CHIP_ALDEBARAN ||
3999 		adev->asic_type == CHIP_SIENNA_CICHLID;
4000 }
4001 
4002 /*
4003  * This is a workaround for the VEGA20 workstation SKU:
4004  * force enable GFX RAS and ignore the vbios GFX RAS flag,
4005  * since GC EDC cannot be written.
4006  */
4007 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
4008 {
4009 	struct atom_context *ctx = adev->mode_info.atom_context;
4010 
4011 	if (!ctx)
4012 		return;
4013 
4014 	if (strnstr(ctx->vbios_pn, "D16406",
4015 		    sizeof(ctx->vbios_pn)) ||
4016 		strnstr(ctx->vbios_pn, "D36002",
4017 			sizeof(ctx->vbios_pn)))
4018 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
4019 }
4020 
4021 /* Query ras capability via the atomfirmware interface */
4022 static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev)
4023 {
4024 	/* mem_ecc cap */
4025 	if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
4026 		dev_info(adev->dev, "MEM ECC is active.\n");
4027 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
4028 					 1 << AMDGPU_RAS_BLOCK__DF);
4029 	} else {
4030 		dev_info(adev->dev, "MEM ECC is not present.\n");
4031 	}
4032 
4033 	/* sram_ecc cap */
4034 	if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
4035 		dev_info(adev->dev, "SRAM ECC is active.\n");
4036 		if (!amdgpu_sriov_vf(adev))
4037 			adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
4038 						  1 << AMDGPU_RAS_BLOCK__DF);
4039 		else
4040 			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
4041 						 1 << AMDGPU_RAS_BLOCK__SDMA |
4042 						 1 << AMDGPU_RAS_BLOCK__GFX);
4043 
4044 		/*
4045 		 * VCN/JPEG RAS can be supported in both bare metal and
4046 		 * SRIOV environments
4047 		 */
4048 		if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
4049 		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
4050 		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3) ||
4051 		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(5, 0, 1))
4052 			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
4053 						 1 << AMDGPU_RAS_BLOCK__JPEG);
4054 		else
4055 			adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
4056 						  1 << AMDGPU_RAS_BLOCK__JPEG);
4057 
4058 		/*
4059 		 * XGMI RAS is not supported if xgmi num physical nodes
4060 		 * is zero
4061 		 */
4062 		if (!adev->gmc.xgmi.num_physical_nodes)
4063 			adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
4064 	} else {
4065 		dev_info(adev->dev, "SRAM ECC is not present.\n");
4066 	}
4067 }
4068 
4069 /* Query poison mode from umc/df IP callbacks */
4070 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
4071 {
4072 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4073 	bool df_poison, umc_poison;
4074 
4075 	/* poison setting is useless on SRIOV guest */
4076 	if (amdgpu_sriov_vf(adev) || !con)
4077 		return;
4078 
4079 	/* Init poison supported flag, the default value is false */
4080 	if (adev->gmc.xgmi.connected_to_cpu ||
4081 	    adev->gmc.is_app_apu) {
4082 		/* enabled by default when GPU is connected to CPU */
4083 		con->poison_supported = true;
4084 	} else if (adev->df.funcs &&
4085 	    adev->df.funcs->query_ras_poison_mode &&
4086 	    adev->umc.ras &&
4087 	    adev->umc.ras->query_ras_poison_mode) {
4088 		df_poison =
4089 			adev->df.funcs->query_ras_poison_mode(adev);
4090 		umc_poison =
4091 			adev->umc.ras->query_ras_poison_mode(adev);
4092 
4093 		/* Only if poison mode is set in both DF and UMC can we support it */
4094 		if (df_poison && umc_poison)
4095 			con->poison_supported = true;
4096 		else if (df_poison != umc_poison)
4097 			dev_warn(adev->dev,
4098 				"Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
4099 				df_poison, umc_poison);
4100 	}
4101 }
4102 
4103 /*
4104  * Check the hardware's RAS ability, which is saved in hw_supported.
4105  * If the hardware does not support RAS, we can skip some RAS initialization
4106  * and forbid some RAS operations from IPs.
4107  * If software itself, say a boot parameter, limits the RAS ability, we still
4108  * need to allow IPs to do some limited operations, like disable. In such a
4109  * case, we have to initialize RAS as normal, but need to check whether an
4110  * operation is allowed in each function.
4111  */
4112 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
4113 {
4114 	adev->ras_hw_enabled = adev->ras_enabled = 0;
4115 
4116 	if (!amdgpu_ras_asic_supported(adev))
4117 		return;
4118 
4119 	if (amdgpu_sriov_vf(adev)) {
4120 		if (amdgpu_virt_get_ras_capability(adev))
4121 			goto init_ras_enabled_flag;
4122 	}
4123 
4124 	/* query ras capability from psp */
4125 	if (amdgpu_psp_get_ras_capability(&adev->psp))
4126 		goto init_ras_enabled_flag;
4127 
4128 	/* query ras capability from vbios */
4129 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
4130 		amdgpu_ras_query_ras_capablity_from_vbios(adev);
4131 	} else {
4132 		/* The driver only manages the RAS feature of a few IP blocks
4133 		 * when the GPU is connected to the CPU through XGMI */
4134 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
4135 					   1 << AMDGPU_RAS_BLOCK__SDMA |
4136 					   1 << AMDGPU_RAS_BLOCK__MMHUB);
4137 	}
4138 
4139 	/* apply asic specific settings (vega20 only for now) */
4140 	amdgpu_ras_get_quirks(adev);
4141 
4142 	/* query poison mode from umc/df ip callback */
4143 	amdgpu_ras_query_poison_mode(adev);
4144 
4145 init_ras_enabled_flag:
4146 	/* hw_supported needs to be aligned with RAS block mask. */
4147 	adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
4148 
4149 	adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
4150 		adev->ras_hw_enabled & amdgpu_ras_mask;
4151 
4152 	/* aca is disabled by default except for psp v13_0_6/v13_0_12/v13_0_14 */
4153 	if (!amdgpu_sriov_vf(adev)) {
4154 		adev->aca.is_enabled =
4155 			(amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
4156 			amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
4157 			amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14));
4158 	}
4159 
4160 	/* the bad page feature is not applicable to this specific APU platform */
4161 	if (adev->gmc.is_app_apu &&
4162 	    amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(12, 0, 0))
4163 		amdgpu_bad_page_threshold = 0;
4164 }
4165 
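/*
 * Delayed work that refreshes the cached correctable/uncorrectable
 * error counts, holding a runtime PM reference while querying the
 * hardware.
 */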
4166 static void amdgpu_ras_counte_dw(struct work_struct *work)
4167 {
4168 	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
4169 					      ras_counte_delay_work.work);
4170 	struct amdgpu_device *adev = con->adev;
4171 	struct drm_device *dev = adev_to_drm(adev);
4172 	unsigned long ce_count, ue_count;
4173 	int res;
4174 
4175 	res = pm_runtime_get_sync(dev->dev);
4176 	if (res < 0)
4177 		goto Out;
4178 
4179 	/* Cache new values.
4180 	 */
4181 	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
4182 		atomic_set(&con->ras_ce_count, ce_count);
4183 		atomic_set(&con->ras_ue_count, ue_count);
4184 	}
4185 
4186 Out:
4187 	pm_runtime_put_autosuspend(dev->dev);
4188 }
4189 
4190 static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
4191 {
4192 	return  (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) |
4193 			AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
4194 			AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
4195 			AMDGPU_RAS_ERROR__PARITY;
4196 }
4197 
4198 static void ras_event_mgr_init(struct ras_event_manager *mgr)
4199 {
4200 	struct ras_event_state *event_state;
4201 	int i;
4202 
4203 	memset(mgr, 0, sizeof(*mgr));
4204 	atomic64_set(&mgr->seqno, 0);
4205 
4206 	for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) {
4207 		event_state = &mgr->event_state[i];
4208 		event_state->last_seqno = RAS_EVENT_INVALID_ID;
4209 		atomic64_set(&event_state->count, 0);
4210 	}
4211 }
4212 
4213 static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev)
4214 {
4215 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4216 	struct amdgpu_hive_info *hive;
4217 
4218 	if (!ras)
4219 		return;
4220 
4221 	hive = amdgpu_get_xgmi_hive(adev);
4222 	ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr;
4223 
4224 	/* init event manager with node 0 on xgmi system */
4225 	if (!amdgpu_reset_in_recovery(adev)) {
4226 		if (!hive || adev->gmc.xgmi.node_id == 0)
4227 			ras_event_mgr_init(ras->event_mgr);
4228 	}
4229 
4230 	if (hive)
4231 		amdgpu_put_xgmi_hive(hive);
4232 }
4233 
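/*
 * Set the amount of VRAM reserved for RAS bad page handling based on
 * the MP0 IP version; APUs do not reserve any.
 */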
4234 static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev)
4235 {
4236 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4237 
4238 	if (!con || (adev->flags & AMD_IS_APU))
4239 		return;
4240 
4241 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
4242 	case IP_VERSION(13, 0, 2):
4243 	case IP_VERSION(13, 0, 6):
4244 	case IP_VERSION(13, 0, 12):
4245 		con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT;
4246 		break;
4247 	case IP_VERSION(13, 0, 14):
4248 		con->reserved_pages_in_bytes = (AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT << 1);
4249 		break;
4250 	default:
4251 		break;
4252 	}
4253 }
4254 
4255 int amdgpu_ras_init(struct amdgpu_device *adev)
4256 {
4257 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4258 	int r;
4259 
4260 	if (con)
4261 		return 0;
4262 
4263 	con = kzalloc(sizeof(*con) +
4264 			sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
4265 			sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
4266 			GFP_KERNEL);
4267 	if (!con)
4268 		return -ENOMEM;
4269 
4270 	con->adev = adev;
4271 	INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
4272 	atomic_set(&con->ras_ce_count, 0);
4273 	atomic_set(&con->ras_ue_count, 0);
4274 
4275 	con->objs = (struct ras_manager *)(con + 1);
4276 
4277 	amdgpu_ras_set_context(adev, con);
4278 
4279 	amdgpu_ras_check_supported(adev);
4280 
4281 	if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
4282 		/* set gfx block ras context feature for VEGA20 Gaming
4283 		 * send ras disable cmd to ras ta during ras late init.
4284 		 */
4285 		if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
4286 			con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
4287 
4288 			return 0;
4289 		}
4290 
4291 		r = 0;
4292 		goto release_con;
4293 	}
4294 
4295 	con->update_channel_flag = false;
4296 	con->features = 0;
4297 	con->schema = 0;
4298 	INIT_LIST_HEAD(&con->head);
4299 	/* Might need to get this flag from vbios. */
4300 	con->flags = RAS_DEFAULT_FLAGS;
4301 
4302 	/* initialize nbio ras function ahead of any other
4303 	 * ras functions so hardware fatal error interrupt
4304 	 * can be enabled as early as possible */
4305 	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
4306 	case IP_VERSION(7, 4, 0):
4307 	case IP_VERSION(7, 4, 1):
4308 	case IP_VERSION(7, 4, 4):
4309 		if (!adev->gmc.xgmi.connected_to_cpu)
4310 			adev->nbio.ras = &nbio_v7_4_ras;
4311 		break;
4312 	case IP_VERSION(4, 3, 0):
4313 		if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
4314 			/* Unlike other generations of nbio ras,
4315 			 * nbio v4_3 only supports the fatal error interrupt,
4316 			 * which informs software that DF is frozen due to a
4317 			 * system fatal error event. The driver should not
4318 			 * enable nbio ras in such a case. Instead,
4319 			 * check DF RAS. */
4320 			adev->nbio.ras = &nbio_v4_3_ras;
4321 		break;
4322 	case IP_VERSION(6, 3, 1):
4323 		if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
4324 			/* Unlike other generations of nbio ras,
4325 			 * nbif v6_3_1 only supports the fatal error interrupt,
4326 			 * which informs software that DF is frozen due to a
4327 			 * system fatal error event. The driver should not
4328 			 * enable nbio ras in such a case. Instead,
4329 			 * check DF RAS.
4330 			 */
4331 			adev->nbio.ras = &nbif_v6_3_1_ras;
4332 		break;
4333 	case IP_VERSION(7, 9, 0):
4334 	case IP_VERSION(7, 9, 1):
4335 		if (!adev->gmc.is_app_apu)
4336 			adev->nbio.ras = &nbio_v7_9_ras;
4337 		break;
4338 	default:
4339 		/* nbio ras is not available */
4340 		break;
4341 	}
4342 
4343 	/* The nbio ras block needs to be enabled ahead of other ras blocks
4344 	 * to handle fatal errors */
4345 	r = amdgpu_nbio_ras_sw_init(adev);
4346 	if (r)
4347 		return r;
4348 
4349 	if (adev->nbio.ras &&
4350 	    adev->nbio.ras->init_ras_controller_interrupt) {
4351 		r = adev->nbio.ras->init_ras_controller_interrupt(adev);
4352 		if (r)
4353 			goto release_con;
4354 	}
4355 
4356 	if (adev->nbio.ras &&
4357 	    adev->nbio.ras->init_ras_err_event_athub_interrupt) {
4358 		r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
4359 		if (r)
4360 			goto release_con;
4361 	}
4362 
4363 	/* Pack socket_id into ras feature mask bits [31:29] */
4364 	if (adev->smuio.funcs &&
4365 	    adev->smuio.funcs->get_socket_id)
4366 		con->features |= ((adev->smuio.funcs->get_socket_id(adev)) <<
4367 					AMDGPU_RAS_FEATURES_SOCKETID_SHIFT);
4368 
4369 	/* Get RAS schema for particular SOC */
4370 	con->schema = amdgpu_get_ras_schema(adev);
4371 
4372 	amdgpu_ras_init_reserved_vram_size(adev);
4373 
4374 	if (amdgpu_ras_fs_init(adev)) {
4375 		r = -EINVAL;
4376 		goto release_con;
4377 	}
4378 
4379 	if (amdgpu_ras_aca_is_supported(adev)) {
4380 		if (amdgpu_aca_is_enabled(adev))
4381 			r = amdgpu_aca_init(adev);
4382 		else
4383 			r = amdgpu_mca_init(adev);
4384 		if (r)
4385 			goto release_con;
4386 	}
4387 
4388 	con->init_task_pid = task_pid_nr(current);
4389 	get_task_comm(con->init_task_comm, current);
4390 
4391 	mutex_init(&con->critical_region_lock);
4392 	INIT_LIST_HEAD(&con->critical_region_head);
4393 
4394 	dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
4395 		 "hardware ability[%x] ras_mask[%x]\n",
4396 		 adev->ras_hw_enabled, adev->ras_enabled);
4397 
4398 	return 0;
4399 release_con:
4400 	amdgpu_ras_set_context(adev, NULL);
4401 	kfree(con);
4402 
4403 	return r;
4404 }
4405 
4406 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
4407 {
4408 	if (adev->gmc.xgmi.connected_to_cpu ||
4409 	    adev->gmc.is_app_apu)
4410 		return 1;
4411 	return 0;
4412 }
4413 
4414 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
4415 					struct ras_common_if *ras_block)
4416 {
4417 	struct ras_query_if info = {
4418 		.head = *ras_block,
4419 	};
4420 
4421 	if (!amdgpu_persistent_edc_harvesting_supported(adev))
4422 		return 0;
4423 
4424 	if (amdgpu_ras_query_error_status(adev, &info) != 0)
4425 		drm_warn(adev_to_drm(adev), "RAS init query failure");
4426 
4427 	if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
4428 		drm_warn(adev_to_drm(adev), "RAS init harvest reset failure");
4429 
4430 	return 0;
4431 }
4432 
4433 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
4434 {
4435 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4436 
4437 	if (!con)
4438 		return false;
4439 
4440 	return con->poison_supported;
4441 }
4442 
4443 /* helper function to handle common stuff in ip late init phase */
4444 int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
4445 			 struct ras_common_if *ras_block)
4446 {
4447 	struct amdgpu_ras_block_object *ras_obj = NULL;
4448 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4449 	struct ras_query_if *query_info;
4450 	unsigned long ue_count, ce_count;
4451 	int r;
4452 
4453 	/* disable RAS feature per IP block if it is not supported */
4454 	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
4455 		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
4456 		return 0;
4457 	}
4458 
4459 	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
4460 	if (r) {
4461 		if (adev->in_suspend || amdgpu_reset_in_recovery(adev)) {
4462 			/* In the resume phase, if enabling ras fails,
4463 			 * clean up all ras fs nodes and disable ras */
4464 			goto cleanup;
4465 		} else
4466 			return r;
4467 	}
4468 
4469 	/* check for errors on warm reset for ASICs supporting persistent EDC harvesting */
4470 	amdgpu_persistent_edc_harvesting(adev, ras_block);
4471 
4472 	/* in resume phase, no need to create ras fs node */
4473 	if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
4474 		return 0;
4475 
4476 	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
4477 	if (ras_obj->ras_cb || (ras_obj->hw_ops &&
4478 	    (ras_obj->hw_ops->query_poison_status ||
4479 	    ras_obj->hw_ops->handle_poison_consumption))) {
4480 		r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
4481 		if (r)
4482 			goto cleanup;
4483 	}
4484 
4485 	if (ras_obj->hw_ops &&
4486 	    (ras_obj->hw_ops->query_ras_error_count ||
4487 	     ras_obj->hw_ops->query_ras_error_status)) {
4488 		r = amdgpu_ras_sysfs_create(adev, ras_block);
4489 		if (r)
4490 			goto interrupt;
4491 
4492 		/* Those are the cached values at init.
4493 		 */
4494 		query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
4495 		if (!query_info)
4496 			return -ENOMEM;
4497 		memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
4498 
4499 		if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
4500 			atomic_set(&con->ras_ce_count, ce_count);
4501 			atomic_set(&con->ras_ue_count, ue_count);
4502 		}
4503 
4504 		kfree(query_info);
4505 	}
4506 
4507 	return 0;
4508 
4509 interrupt:
4510 	if (ras_obj->ras_cb)
4511 		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
4512 cleanup:
4513 	amdgpu_ras_feature_enable(adev, ras_block, 0);
4514 	return r;
4515 }
4516 
4517 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
4518 			 struct ras_common_if *ras_block)
4519 {
4520 	return amdgpu_ras_block_late_init(adev, ras_block);
4521 }
4522 
4523 /* helper function to remove ras fs node and interrupt handler */
4524 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
4525 			  struct ras_common_if *ras_block)
4526 {
4527 	struct amdgpu_ras_block_object *ras_obj;
4528 	if (!ras_block)
4529 		return;
4530 
4531 	amdgpu_ras_sysfs_remove(adev, ras_block);
4532 
4533 	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
4534 	if (ras_obj->ras_cb)
4535 		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
4536 }
4537 
4538 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
4539 			  struct ras_common_if *ras_block)
4540 {
4541 	return amdgpu_ras_block_late_fini(adev, ras_block);
4542 }
4543 
4544 /* Do some init work after IP late init, as a dependency.
4545  * It runs in the resume/gpu reset/boot-up cases.
4546  */
4547 void amdgpu_ras_resume(struct amdgpu_device *adev)
4548 {
4549 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4550 	struct ras_manager *obj, *tmp;
4551 
4552 	if (!adev->ras_enabled || !con) {
4553 		/* clean ras context for VEGA20 Gaming after send ras disable cmd */
4554 		amdgpu_release_ras_context(adev);
4555 
4556 		return;
4557 	}
4558 
4559 	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
4560 		/* Set up all other IPs which are not implemented. One tricky
4561 		 * point: an IP's actual ras error type should be
4562 		 * MULTI_UNCORRECTABLE, but since the driver does not handle it,
4563 		 * ERROR_NONE makes sense anyway.
4564 		 */
4565 		amdgpu_ras_enable_all_features(adev, 1);
4566 
4567 		/* We enable ras on all hw_supported blocks, but the boot
4568 		 * parameter might disable some of them, and one or more IPs
4569 		 * may not be implemented yet. So we disable those on their behalf.
4570 		 */
4571 		list_for_each_entry_safe(obj, tmp, &con->head, node) {
4572 			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
4573 				amdgpu_ras_feature_enable(adev, &obj->head, 0);
4574 				/* there should not be any reference. */
4575 				WARN_ON(alive_obj(obj));
4576 			}
4577 		}
4578 	}
4579 }
4580 
4581 void amdgpu_ras_suspend(struct amdgpu_device *adev)
4582 {
4583 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4584 
4585 	if (!adev->ras_enabled || !con)
4586 		return;
4587 
4588 	amdgpu_ras_disable_all_features(adev, 0);
4589 	/* Make sure all ras objects are disabled. */
4590 	if (AMDGPU_RAS_GET_FEATURES(con->features))
4591 		amdgpu_ras_disable_all_features(adev, 1);
4592 }
4593 
4594 int amdgpu_ras_late_init(struct amdgpu_device *adev)
4595 {
4596 	struct amdgpu_ras_block_list *node, *tmp;
4597 	struct amdgpu_ras_block_object *obj;
4598 	int r;
4599 
4600 	amdgpu_ras_event_mgr_init(adev);
4601 
4602 	if (amdgpu_ras_aca_is_supported(adev)) {
4603 		if (amdgpu_reset_in_recovery(adev)) {
4604 			if (amdgpu_aca_is_enabled(adev))
4605 				r = amdgpu_aca_reset(adev);
4606 			else
4607 				r = amdgpu_mca_reset(adev);
4608 			if (r)
4609 				return r;
4610 		}
4611 
4612 		if (!amdgpu_sriov_vf(adev)) {
4613 			if (amdgpu_aca_is_enabled(adev))
4614 				amdgpu_ras_set_aca_debug_mode(adev, false);
4615 			else
4616 				amdgpu_ras_set_mca_debug_mode(adev, false);
4617 		}
4618 	}
4619 
4620 	/* The guest side doesn't need to init the ras feature */
4621 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_ras_telemetry_en(adev))
4622 		return 0;
4623 
4624 	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
4625 		obj = node->ras_obj;
4626 		if (!obj) {
4627 			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
4628 			continue;
4629 		}
4630 
4631 		if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block))
4632 			continue;
4633 
4634 		if (obj->ras_late_init) {
4635 			r = obj->ras_late_init(adev, &obj->ras_comm);
4636 			if (r) {
4637 				dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
4638 					obj->ras_comm.name, r);
4639 				return r;
4640 			}
4641 		} else
4642 			amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
4643 	}
4644 
4645 	return 0;
4646 }
4647 
4648 /* do some fini work before IP fini as dependence */
4649 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
4650 {
4651 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4652 
4653 	if (!adev->ras_enabled || !con)
4654 		return 0;
4655 
4656 
4657 	/* Need disable ras on all IPs here before ip [hw/sw]fini */
4658 	if (AMDGPU_RAS_GET_FEATURES(con->features))
4659 		amdgpu_ras_disable_all_features(adev, 0);
4660 	amdgpu_ras_recovery_fini(adev);
4661 	return 0;
4662 }
4663 
4664 int amdgpu_ras_fini(struct amdgpu_device *adev)
4665 {
4666 	struct amdgpu_ras_block_list *ras_node, *tmp;
4667 	struct amdgpu_ras_block_object *obj = NULL;
4668 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4669 
4670 	if (!adev->ras_enabled || !con)
4671 		return 0;
4672 
4673 	amdgpu_ras_critical_region_fini(adev);
4674 	mutex_destroy(&con->critical_region_lock);
4675 
4676 	list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
4677 		if (ras_node->ras_obj) {
4678 			obj = ras_node->ras_obj;
4679 			if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
4680 			    obj->ras_fini)
4681 				obj->ras_fini(adev, &obj->ras_comm);
4682 			else
4683 				amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
4684 		}
4685 
4686 		/* Clear ras blocks from ras_list and free ras block list node */
4687 		list_del(&ras_node->node);
4688 		kfree(ras_node);
4689 	}
4690 
4691 	amdgpu_ras_fs_fini(adev);
4692 	amdgpu_ras_interrupt_remove_all(adev);
4693 
4694 	if (amdgpu_ras_aca_is_supported(adev)) {
4695 		if (amdgpu_aca_is_enabled(adev))
4696 			amdgpu_aca_fini(adev);
4697 		else
4698 			amdgpu_mca_fini(adev);
4699 	}
4700 
4701 	WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");
4702 
4703 	if (AMDGPU_RAS_GET_FEATURES(con->features))
4704 		amdgpu_ras_disable_all_features(adev, 0);
4705 
4706 	cancel_delayed_work_sync(&con->ras_counte_delay_work);
4707 
4708 	amdgpu_ras_set_context(adev, NULL);
4709 	kfree(con);
4710 
4711 	return 0;
4712 }
4713 
4714 bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
4715 {
4716 	struct amdgpu_ras *ras;
4717 
4718 	ras = amdgpu_ras_get_context(adev);
4719 	if (!ras)
4720 		return false;
4721 
4722 	return test_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
4723 }
4724 
4725 void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
4726 {
4727 	struct amdgpu_ras *ras;
4728 
4729 	ras = amdgpu_ras_get_context(adev);
4730 	if (ras) {
4731 		if (status)
4732 			set_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
4733 		else
4734 			clear_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
4735 	}
4736 }
4737 
4738 void amdgpu_ras_clear_err_state(struct amdgpu_device *adev)
4739 {
4740 	struct amdgpu_ras *ras;
4741 
4742 	ras = amdgpu_ras_get_context(adev);
4743 	if (ras) {
4744 		ras->ras_err_state = 0;
4745 		ras->gpu_reset_flags = 0;
4746 	}
4747 }
4748 
4749 void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
4750 			       enum amdgpu_ras_block block)
4751 {
4752 	struct amdgpu_ras *ras;
4753 
4754 	ras = amdgpu_ras_get_context(adev);
4755 	if (ras)
4756 		set_bit(block, &ras->ras_err_state);
4757 }
4758 
4759 bool amdgpu_ras_is_err_state(struct amdgpu_device *adev, int block)
4760 {
4761 	struct amdgpu_ras *ras;
4762 
4763 	ras = amdgpu_ras_get_context(adev);
4764 	if (ras) {
4765 		if (block == AMDGPU_RAS_BLOCK__ANY)
4766 			return (ras->ras_err_state != 0);
4767 		else
4768 			return test_bit(block, &ras->ras_err_state) ||
4769 			       test_bit(AMDGPU_RAS_BLOCK__LAST,
4770 					&ras->ras_err_state);
4771 	}
4772 
4773 	return false;
4774 }
4775 
4776 static struct ras_event_manager *__get_ras_event_mgr(struct amdgpu_device *adev)
4777 {
4778 	struct amdgpu_ras *ras;
4779 
4780 	ras = amdgpu_ras_get_context(adev);
4781 	if (!ras)
4782 		return NULL;
4783 
4784 	return ras->event_mgr;
4785 }
4786 
4787 int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type,
4788 				     const void *caller)
4789 {
4790 	struct ras_event_manager *event_mgr;
4791 	struct ras_event_state *event_state;
4792 	int ret = 0;
4793 
4794 	if (amdgpu_uniras_enabled(adev))
4795 		return 0;
4796 
4797 	if (type >= RAS_EVENT_TYPE_COUNT) {
4798 		ret = -EINVAL;
4799 		goto out;
4800 	}
4801 
4802 	event_mgr = __get_ras_event_mgr(adev);
4803 	if (!event_mgr) {
4804 		ret = -EINVAL;
4805 		goto out;
4806 	}
4807 
4808 	event_state = &event_mgr->event_state[type];
4809 	event_state->last_seqno = atomic64_inc_return(&event_mgr->seqno);
4810 	atomic64_inc(&event_state->count);
4811 
4812 out:
4813 	if (ret && caller)
4814 		dev_warn(adev->dev, "failed to mark ras event (%d) in %ps, ret:%d\n",
4815 			 (int)type, caller, ret);
4816 
4817 	return ret;
4818 }
4819 
4820 u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type)
4821 {
4822 	struct ras_event_manager *event_mgr;
4823 	u64 id;
4824 
4825 	if (type >= RAS_EVENT_TYPE_COUNT)
4826 		return RAS_EVENT_INVALID_ID;
4827 
4828 	switch (type) {
4829 	case RAS_EVENT_TYPE_FATAL:
4830 	case RAS_EVENT_TYPE_POISON_CREATION:
4831 	case RAS_EVENT_TYPE_POISON_CONSUMPTION:
4832 		event_mgr = __get_ras_event_mgr(adev);
4833 		if (!event_mgr)
4834 			return RAS_EVENT_INVALID_ID;
4835 
4836 		id = event_mgr->event_state[type].last_seqno;
4837 		break;
4838 	case RAS_EVENT_TYPE_INVALID:
4839 	default:
4840 		id = RAS_EVENT_INVALID_ID;
4841 		break;
4842 	}
4843 
4844 	return id;
4845 }
4846 
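/*
 * Global ISR for hardware fatal errors. Only the first caller (guarded
 * by amdgpu_ras_in_intr) logs the fatal event, sets the FED flag and
 * schedules a mode-1 reset.
 */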
4847 int amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
4848 {
4849 	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
4850 		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4851 		enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
4852 		u64 event_id = RAS_EVENT_INVALID_ID;
4853 
4854 		if (amdgpu_uniras_enabled(adev))
4855 			return 0;
4856 
4857 		if (!amdgpu_ras_mark_ras_event(adev, type))
4858 			event_id = amdgpu_ras_acquire_event_id(adev, type);
4859 
4860 		RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error "
4861 			      "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
4862 
4863 		amdgpu_ras_set_fed(adev, true);
4864 		ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
4865 		amdgpu_ras_reset_gpu(adev);
4866 	}
4867 
4868 	return -EBUSY;
4869 }
4870 
4871 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
4872 {
4873 	if (adev->asic_type == CHIP_VEGA20 &&
4874 	    adev->pm.fw_version <= 0x283400) {
4875 		return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
4876 				amdgpu_ras_intr_triggered();
4877 	}
4878 
4879 	return false;
4880 }
4881 
4882 void amdgpu_release_ras_context(struct amdgpu_device *adev)
4883 {
4884 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4885 
4886 	if (!con)
4887 		return;
4888 
4889 	if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
4890 		con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
4891 		amdgpu_ras_set_context(adev, NULL);
4892 		kfree(con);
4893 	}
4894 }
4895 
4896 #ifdef CONFIG_X86_MCE_AMD
4897 static struct amdgpu_device *find_adev(uint32_t node_id)
4898 {
4899 	int i;
4900 	struct amdgpu_device *adev = NULL;
4901 
4902 	for (i = 0; i < mce_adev_list.num_gpu; i++) {
4903 		adev = mce_adev_list.devs[i];
4904 
4905 		if (adev && adev->gmc.xgmi.connected_to_cpu &&
4906 		    adev->gmc.xgmi.physical_node_id == node_id)
4907 			break;
4908 		adev = NULL;
4909 	}
4910 
4911 	return adev;
4912 }
4913 
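/*
 * Field decoders for the MCA IPID register: bits [47:44] hold the GPU
 * id (offset by GPU_ID_OFFSET), bits [23:21] the UMC instance, and the
 * channel index combines bits [13:12] with bit 20.
 */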
4914 #define GET_MCA_IPID_GPUID(m)	(((m) >> 44) & 0xF)
4915 #define GET_UMC_INST(m)		(((m) >> 21) & 0x7)
4916 #define GET_CHAN_INDEX(m)	((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
4917 #define GPU_ID_OFFSET		8
4918 
4919 static int amdgpu_bad_page_notifier(struct notifier_block *nb,
4920 				    unsigned long val, void *data)
4921 {
4922 	struct mce *m = (struct mce *)data;
4923 	struct amdgpu_device *adev = NULL;
4924 	uint32_t gpu_id = 0;
4925 	uint32_t umc_inst = 0, ch_inst = 0;
4926 
4927 	/*
4928 	 * Only process the error if it was generated in UMC_V2, which
4929 	 * belongs to GPU UMCs, and occurred in DramECC (extended error
4930 	 * code = 0); otherwise bail out.
4931 	 */
4932 	if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
4933 		    (XEC(m->status, 0x3f) == 0x0)))
4934 		return NOTIFY_DONE;
4935 
4936 	/*
4937 	 * If it is a correctable error, return.
4938 	 */
4939 	if (mce_is_correctable(m))
4940 		return NOTIFY_OK;
4941 
4942 	/*
4943 	 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
4944 	 */
4945 	gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
4946 
4947 	adev = find_adev(gpu_id);
4948 	if (!adev) {
4949 		DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
4950 								gpu_id);
4951 		return NOTIFY_DONE;
4952 	}
4953 
4954 	/*
4955 	 * If it is an uncorrectable error, then find out the UMC instance and
4956 	 * channel index.
4957 	 */
4958 	umc_inst = GET_UMC_INST(m->ipid);
4959 	ch_inst = GET_CHAN_INDEX(m->ipid);
4960 
4961 	dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
4962 			     umc_inst, ch_inst);
4963 
4964 	if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
4965 		return NOTIFY_OK;
4966 	else
4967 		return NOTIFY_DONE;
4968 }
4969 
4970 static struct notifier_block amdgpu_bad_page_nb = {
4971 	.notifier_call  = amdgpu_bad_page_notifier,
4972 	.priority       = MCE_PRIO_UC,
4973 };
4974 
4975 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
4976 {
4977 	/*
4978 	 * Add the adev to the mce_adev_list.
4979 	 * During mode2 reset, amdgpu device is temporarily
4980 	 * removed from the mgpu_info list which can cause
4981 	 * page retirement to fail.
4982 	 * Use this list instead of mgpu_info to find the amdgpu
4983 	 * device on which the UMC error was reported.
4984 	 */
4985 	mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
4986 
4987 	/*
4988 	 * Register the x86 notifier with the MCE subsystem
4989 	 * only once.
4990 	 */
4991 	if (notifier_registered == false) {
4992 		mce_register_decode_chain(&amdgpu_bad_page_nb);
4993 		notifier_registered = true;
4994 	}
4995 }
4996 static void amdgpu_unregister_bad_pages_mca_notifier(struct amdgpu_device *adev)
4997 {
4998 	int i, j;
4999 
5000 	if (!notifier_registered && !mce_adev_list.num_gpu)
5001 		return;
5002 	for (i = 0, j = 0; i < mce_adev_list.num_gpu; i++) {
5003 		if (mce_adev_list.devs[i] == adev)
5004 			mce_adev_list.devs[i] = NULL;
5005 		if (!mce_adev_list.devs[i])
5006 			++j;
5007 	}
5008 
5009 	if (j == mce_adev_list.num_gpu) {
5010 		mce_adev_list.num_gpu = 0;
5011 		/* Unregister x86 notifier with MCE subsystem. */
5012 		if (notifier_registered) {
5013 			mce_unregister_decode_chain(&amdgpu_bad_page_nb);
5014 			notifier_registered = false;
5015 		}
5016 	}
5017 }
5018 #endif
5019 
5020 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
5021 {
5022 	if (!adev)
5023 		return NULL;
5024 
5025 	return adev->psp.ras_context.ras;
5026 }
5027 
5028 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
5029 {
5030 	if (!adev)
5031 		return -EINVAL;
5032 
5033 	adev->psp.ras_context.ras = ras_con;
5034 	return 0;
5035 }
5036 
5037 /* check if ras is supported on block, say, sdma, gfx */
5038 int amdgpu_ras_is_supported(struct amdgpu_device *adev,
5039 		unsigned int block)
5040 {
5041 	int ret = 0;
5042 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5043 
5044 	if (block >= AMDGPU_RAS_BLOCK_COUNT)
5045 		return 0;
5046 
5047 	ret = ras && (adev->ras_enabled & (1 << block));
5048 
5049 	/* For the special asic with mem ecc enabled but sram ecc
5050 	 * not enabled, even if the ras block is not marked in
5051 	 * .ras_enabled, the block can be considered to support the
5052 	 * ras function as long as the asic supports poison mode and
5053 	 * the ras block has a ras configuration.
5054 	 */
5055 	if (!ret &&
5056 	    (block == AMDGPU_RAS_BLOCK__GFX ||
5057 	     block == AMDGPU_RAS_BLOCK__SDMA ||
5058 	     block == AMDGPU_RAS_BLOCK__VCN ||
5059 	     block == AMDGPU_RAS_BLOCK__JPEG) &&
5060 		(amdgpu_ras_mask & (1 << block)) &&
5061 	    amdgpu_ras_is_poison_mode_supported(adev) &&
5062 	    amdgpu_ras_get_ras_block(adev, block, 0))
5063 		ret = 1;
5064 
5065 	return ret;
5066 }
5067 
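/*
 * Schedule RAS recovery on the reset domain. In RMA status the reset
 * mode is forced to mode-1; on an XGMI hive only the first device to
 * enter recovery schedules the reset for the whole hive.
 */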
5068 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
5069 {
5070 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5071 
5072 	/* a mode-1 reset is the only option when the device is in RMA status */
5073 	if (amdgpu_ras_is_rma(adev)) {
5074 		ras->gpu_reset_flags = 0;
5075 		ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
5076 	}
5077 
5078 	if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) {
5079 		struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
5080 		int hive_ras_recovery = 0;
5081 
5082 		if (hive) {
5083 			hive_ras_recovery = atomic_read(&hive->ras_recovery);
5084 			amdgpu_put_xgmi_hive(hive);
5085 		}
5086 		/* In the case of multiple GPUs, after one GPU has started
5087 		 * resetting all GPUs on the hive, the other GPUs do not need
5088 		 * to trigger a GPU reset again.
5089 		 */
5090 		if (!hive_ras_recovery)
5091 			amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
5092 		else
5093 			atomic_set(&ras->in_recovery, 0);
5094 	} else {
5095 		flush_work(&ras->recovery_work);
5096 		amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
5097 	}
5098 
5099 	return 0;
5100 }
5101 
5102 int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
5103 {
5104 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5105 	int ret = 0;
5106 
5107 	if (con) {
5108 		ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
5109 		if (!ret)
5110 			con->is_aca_debug_mode = enable;
5111 	}
5112 
5113 	return ret;
5114 }
5115 
5116 int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable)
5117 {
5118 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5119 	int ret = 0;
5120 
5121 	if (con) {
5122 		if (amdgpu_aca_is_enabled(adev))
5123 			ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
5124 		else
5125 			ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
5126 		if (!ret)
5127 			con->is_aca_debug_mode = enable;
5128 	}
5129 
5130 	return ret;
5131 }
5132 
5133 bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev)
5134 {
5135 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5136 	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
5137 	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
5138 
5139 	if (!con)
5140 		return false;
5141 
5142 	if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) ||
5143 	    (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode))
5144 		return con->is_aca_debug_mode;
5145 	else
5146 		return true;
5147 }
5148 
5149 bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
5150 				     unsigned int *error_query_mode)
5151 {
5152 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5153 	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
5154 	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
5155 
5156 	if (!con) {
5157 		*error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
5158 		return false;
5159 	}
5160 
5161 	if (amdgpu_sriov_vf(adev)) {
5162 		*error_query_mode = AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY;
5163 	} else if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) {
5164 		*error_query_mode =
5165 			(con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
5166 	} else {
5167 		*error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
5168 	}
5169 
5170 	return true;
5171 }
5172 
5173 /* Register each ip ras block into amdgpu ras */
5174 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
5175 		struct amdgpu_ras_block_object *ras_block_obj)
5176 {
5177 	struct amdgpu_ras_block_list *ras_node;
5178 	if (!adev || !ras_block_obj)
5179 		return -EINVAL;
5180 
5181 	ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
5182 	if (!ras_node)
5183 		return -ENOMEM;
5184 
5185 	INIT_LIST_HEAD(&ras_node->node);
5186 	ras_node->ras_obj = ras_block_obj;
5187 	list_add_tail(&ras_node->node, &adev->ras_list);
5188 
5189 	return 0;
5190 }
5191 
5192 void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
5193 {
5194 	if (!err_type_name)
5195 		return;
5196 
5197 	switch (err_type) {
5198 	case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
5199 		sprintf(err_type_name, "correctable");
5200 		break;
5201 	case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
5202 		sprintf(err_type_name, "uncorrectable");
5203 		break;
5204 	default:
5205 		sprintf(err_type_name, "unknown");
5206 		break;
5207 	}
5208 }
5209 
5210 bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
5211 					 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
5212 					 uint32_t instance,
5213 					 uint32_t *memory_id)
5214 {
5215 	uint32_t err_status_lo_data, err_status_lo_offset;
5216 
5217 	if (!reg_entry)
5218 		return false;
5219 
5220 	err_status_lo_offset =
5221 		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
5222 					    reg_entry->seg_lo, reg_entry->reg_lo);
5223 	err_status_lo_data = RREG32(err_status_lo_offset);
5224 
5225 	if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
5226 	    !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
5227 		return false;
5228 
5229 	*memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);
5230 
5231 	return true;
5232 }
5233 
5234 bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
5235 				       const struct amdgpu_ras_err_status_reg_entry *reg_entry,
5236 				       uint32_t instance,
5237 				       unsigned long *err_cnt)
5238 {
5239 	uint32_t err_status_hi_data, err_status_hi_offset;
5240 
5241 	if (!reg_entry)
5242 		return false;
5243 
5244 	err_status_hi_offset =
5245 		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
5246 					    reg_entry->seg_hi, reg_entry->reg_hi);
5247 	err_status_hi_data = RREG32(err_status_hi_offset);
5248 
5249 	if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
5250 	    !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
5251 		/* keep the check here in case we need to refer to the result later */
5252 		dev_dbg(adev->dev, "Invalid err_info field\n");
5253 
5254 	/* read err count */
5255 	*err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);
5256 
5257 	return true;
5258 }
5259 
5260 void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
5261 					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
5262 					   uint32_t reg_list_size,
5263 					   const struct amdgpu_ras_memory_id_entry *mem_list,
5264 					   uint32_t mem_list_size,
5265 					   uint32_t instance,
5266 					   uint32_t err_type,
5267 					   unsigned long *err_count)
5268 {
5269 	uint32_t memory_id;
5270 	unsigned long err_cnt;
5271 	char err_type_name[16];
5272 	uint32_t i, j;
5273 
5274 	for (i = 0; i < reg_list_size; i++) {
5275 		/* query memory_id from err_status_lo */
5276 		if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
5277 							 instance, &memory_id))
5278 			continue;
5279 
5280 		/* query err_cnt from err_status_hi */
5281 		if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
5282 						       instance, &err_cnt) ||
5283 		    !err_cnt)
5284 			continue;
5285 
5286 		*err_count += err_cnt;
5287 
5288 		/* log the errors */
5289 		amdgpu_ras_get_error_type_name(err_type, err_type_name);
5290 		if (!mem_list) {
			/* no memory_id table for this block, log the raw memory_id */
5292 			dev_info(adev->dev,
5293 				 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
5294 				 err_cnt, err_type_name,
5295 				 reg_list[i].block_name,
5296 				 instance, memory_id);
5297 		} else {
5298 			for (j = 0; j < mem_list_size; j++) {
5299 				if (memory_id == mem_list[j].memory_id) {
5300 					dev_info(adev->dev,
5301 						 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
5302 						 err_cnt, err_type_name,
5303 						 reg_list[i].block_name,
5304 						 instance, mem_list[j].name);
5305 					break;
5306 				}
5307 			}
5308 		}
5309 	}
5310 }
5311 
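/**
 * amdgpu_ras_inst_reset_ras_error_count - clear err_status registers
 * @adev: amdgpu device pointer
 * @reg_list: err_status register entries to clear
 * @reg_list_size: number of entries in @reg_list
 * @instance: hardware instance to reset
 *
 * Writes zero to the lo and hi err_status registers of every entry.
 */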
5312 void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
5313 					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
5314 					   uint32_t reg_list_size,
5315 					   uint32_t instance)
5316 {
5317 	uint32_t err_status_lo_offset, err_status_hi_offset;
5318 	uint32_t i;
5319 
5320 	for (i = 0; i < reg_list_size; i++) {
5321 		err_status_lo_offset =
5322 			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
5323 						    reg_list[i].seg_lo, reg_list[i].reg_lo);
5324 		err_status_hi_offset =
5325 			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
5326 						    reg_list[i].seg_hi, reg_list[i].reg_hi);
5327 		WREG32(err_status_lo_offset, 0);
5328 		WREG32(err_status_hi_offset, 0);
5329 	}
5330 }
5331 
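/**
 * amdgpu_ras_error_data_init - prepare a ras_err_data for use
 * @err_data: error data to initialize
 *
 * Zeroes all counters and initializes the per-(socket, die) error node
 * list. Must be paired with amdgpu_ras_error_data_fini(), which releases
 * every node added to the list.
 */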
5332 int amdgpu_ras_error_data_init(struct ras_err_data *err_data)
5333 {
5334 	memset(err_data, 0, sizeof(*err_data));
5335 
5336 	INIT_LIST_HEAD(&err_data->err_node_list);
5337 
5338 	return 0;
5339 }
5340 
5341 static void amdgpu_ras_error_node_release(struct ras_err_node *err_node)
5342 {
5343 	if (!err_node)
5344 		return;
5345 
5346 	list_del(&err_node->node);
5347 	kvfree(err_node);
5348 }
5349 
5350 void amdgpu_ras_error_data_fini(struct ras_err_data *err_data)
5351 {
5352 	struct ras_err_node *err_node, *tmp;
5353 
5354 	list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
5355 		amdgpu_ras_error_node_release(err_node);
5356 }
5357 
5358 static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data,
5359 							     struct amdgpu_smuio_mcm_config_info *mcm_info)
5360 {
5361 	struct ras_err_node *err_node;
5362 	struct amdgpu_smuio_mcm_config_info *ref_id;
5363 
5364 	if (!err_data || !mcm_info)
5365 		return NULL;
5366 
5367 	for_each_ras_error(err_node, err_data) {
5368 		ref_id = &err_node->err_info.mcm_info;
5369 
5370 		if (mcm_info->socket_id == ref_id->socket_id &&
5371 		    mcm_info->die_id == ref_id->die_id)
5372 			return err_node;
5373 	}
5374 
5375 	return NULL;
5376 }
5377 
5378 static struct ras_err_node *amdgpu_ras_error_node_new(void)
5379 {
5380 	struct ras_err_node *err_node;
5381 
5382 	err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL);
5383 	if (!err_node)
5384 		return NULL;
5385 
5386 	INIT_LIST_HEAD(&err_node->node);
5387 
5388 	return err_node;
5389 }
5390 
5391 static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
5392 {
5393 	struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
5394 	struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
5395 	struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
5396 	struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;
5397 
	if (unlikely(infoa->socket_id != infob->socket_id))
		return infoa->socket_id - infob->socket_id;

	/* same socket: order by die id */
	return infoa->die_id - infob->die_id;
}
5405 
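/* Find the error info matching the (socket_id, die_id) in @mcm_info; when
 * no node exists yet, allocate one and keep the list sorted by socket id,
 * then die id. Returns NULL on allocation failure.
 */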
5406 static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
5407 				struct amdgpu_smuio_mcm_config_info *mcm_info)
5408 {
5409 	struct ras_err_node *err_node;
5410 
5411 	err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info);
5412 	if (err_node)
5413 		return &err_node->err_info;
5414 
5415 	err_node = amdgpu_ras_error_node_new();
5416 	if (!err_node)
5417 		return NULL;
5418 
5419 	memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
5420 
5421 	err_data->err_list_count++;
5422 	list_add_tail(&err_node->node, &err_data->err_node_list);
5423 	list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
5424 
5425 	return &err_node->err_info;
5426 }
5427 
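/**
 * amdgpu_ras_error_statistic_ue_count - account uncorrectable errors
 * @err_data: error data the count is accumulated into
 * @mcm_info: socket/die the errors were reported for
 * @count: number of new uncorrectable errors
 *
 * Adds @count to both the per-(socket, die) and the overall UE counters.
 * The ce/de variants below follow the same pattern for correctable and
 * deferred errors.
 */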
5428 int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
5429 					struct amdgpu_smuio_mcm_config_info *mcm_info,
5430 					u64 count)
5431 {
5432 	struct ras_err_info *err_info;
5433 
5434 	if (!err_data || !mcm_info)
5435 		return -EINVAL;
5436 
5437 	if (!count)
5438 		return 0;
5439 
5440 	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
5441 	if (!err_info)
5442 		return -EINVAL;
5443 
5444 	err_info->ue_count += count;
5445 	err_data->ue_count += count;
5446 
5447 	return 0;
5448 }
5449 
5450 int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
5451 					struct amdgpu_smuio_mcm_config_info *mcm_info,
5452 					u64 count)
5453 {
5454 	struct ras_err_info *err_info;
5455 
5456 	if (!err_data || !mcm_info)
5457 		return -EINVAL;
5458 
5459 	if (!count)
5460 		return 0;
5461 
5462 	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
5463 	if (!err_info)
5464 		return -EINVAL;
5465 
5466 	err_info->ce_count += count;
5467 	err_data->ce_count += count;
5468 
5469 	return 0;
5470 }
5471 
5472 int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
5473 					struct amdgpu_smuio_mcm_config_info *mcm_info,
5474 					u64 count)
5475 {
5476 	struct ras_err_info *err_info;
5477 
5478 	if (!err_data || !mcm_info)
5479 		return -EINVAL;
5480 
5481 	if (!count)
5482 		return 0;
5483 
5484 	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
5485 	if (!err_info)
5486 		return -EINVAL;
5487 
5488 	err_info->de_count += count;
5489 	err_data->de_count += count;
5490 
5491 	return 0;
5492 }
5493 
5494 #define mmMP0_SMN_C2PMSG_92	0x1609C
5495 #define mmMP0_SMN_C2PMSG_126	0x160BE
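/* Decode the boot error register of one instance and log every failure
 * it reports: memory training, firmware load, link training, HBM tests,
 * data abort, or a generic boot controller error.
 */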
5496 static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
5497 						 u32 instance)
5498 {
5499 	u32 socket_id, aid_id, hbm_id;
5500 	u32 fw_status;
5501 	u32 boot_error;
5502 	u64 reg_addr;
5503 
	/* The SMN addressing pattern on other SOCs may differ from the one
	 * used by aqua_vanjaram. Revisit this code if the pattern changes;
	 * in that case, replace the aqua_vanjaram implementation with a
	 * more generic helper.
	 */
5508 	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
5509 		   aqua_vanjaram_encode_ext_smn_addressing(instance);
5510 	fw_status = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
5511 
5512 	reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
5513 		   aqua_vanjaram_encode_ext_smn_addressing(instance);
5514 	boot_error = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
5515 
5516 	socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
5517 	aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error);
	hbm_id = (AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error) == 1) ? 0 : 1;
5519 
5520 	if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
5521 		dev_info(adev->dev,
5522 			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, memory training failed\n",
5523 			 socket_id, aid_id, hbm_id, fw_status);
5524 
5525 	if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
5526 		dev_info(adev->dev,
5527 			 "socket: %d, aid: %d, fw_status: 0x%x, firmware load failed at boot time\n",
5528 			 socket_id, aid_id, fw_status);
5529 
5530 	if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
5531 		dev_info(adev->dev,
5532 			 "socket: %d, aid: %d, fw_status: 0x%x, wafl link training failed\n",
5533 			 socket_id, aid_id, fw_status);
5534 
5535 	if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
5536 		dev_info(adev->dev,
5537 			 "socket: %d, aid: %d, fw_status: 0x%x, xgmi link training failed\n",
5538 			 socket_id, aid_id, fw_status);
5539 
5540 	if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
5541 		dev_info(adev->dev,
5542 			 "socket: %d, aid: %d, fw_status: 0x%x, usr cp link training failed\n",
5543 			 socket_id, aid_id, fw_status);
5544 
5545 	if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
5546 		dev_info(adev->dev,
5547 			 "socket: %d, aid: %d, fw_status: 0x%x, usr dp link training failed\n",
5548 			 socket_id, aid_id, fw_status);
5549 
5550 	if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
5551 		dev_info(adev->dev,
5552 			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm memory test failed\n",
5553 			 socket_id, aid_id, hbm_id, fw_status);
5554 
5555 	if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
5556 		dev_info(adev->dev,
5557 			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n",
5558 			 socket_id, aid_id, hbm_id, fw_status);
5559 
5560 	if (AMDGPU_RAS_GPU_ERR_DATA_ABORT(boot_error))
5561 		dev_info(adev->dev,
5562 			 "socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
5563 			 socket_id, aid_id, fw_status);
5564 
5565 	if (AMDGPU_RAS_GPU_ERR_GENERIC(boot_error))
5566 		dev_info(adev->dev,
5567 			 "socket: %d, aid: %d, fw_status: 0x%x, Boot Controller Generic Error\n",
5568 			 socket_id, aid_id, fw_status);
5569 }
5570 
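/* Poll the firmware status register of one instance; returns true when
 * it never reaches the steady state within the polling limit, i.e. a
 * boot error is likely present.
 */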
5571 static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev,
5572 					   u32 instance)
5573 {
5574 	u64 reg_addr;
5575 	u32 reg_data;
5576 	int retry_loop;
5577 
5578 	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
5579 		   aqua_vanjaram_encode_ext_smn_addressing(instance);
5580 
	for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
		reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
		if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS)
			return false;
		msleep(1);
	}
5588 
5589 	return true;
5590 }
5591 
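/* Scan all instances and report decoded boot errors for every instance
 * whose firmware status never settled.
 */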
5592 void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
5593 {
5594 	u32 i;
5595 
5596 	for (i = 0; i < num_instances; i++) {
5597 		if (amdgpu_ras_boot_error_detected(adev, i))
5598 			amdgpu_ras_boot_time_error_reporting(adev, i);
5599 	}
5600 }
5601 
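/**
 * amdgpu_ras_reserve_page - reserve a bad VRAM page
 * @adev: amdgpu device pointer
 * @pfn: GPU page frame number of the bad page
 *
 * Reserves the page in the VRAM manager so it can no longer be handed
 * out, unless it lies inside a RAS critical region or has already been
 * reserved.
 */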
5602 int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
5603 {
5604 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5605 	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
5606 	uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
5607 	int ret = 0;
5608 
5609 	if (amdgpu_ras_check_critical_address(adev, start))
5610 		return 0;
5611 
5612 	mutex_lock(&con->page_rsv_lock);
5613 	ret = amdgpu_vram_mgr_query_page_status(mgr, start);
5614 	if (ret == -ENOENT)
5615 		ret = amdgpu_vram_mgr_reserve_range(mgr, start, AMDGPU_GPU_PAGE_SIZE);
5616 	mutex_unlock(&con->page_rsv_lock);
5617 
5618 	return ret;
5619 }
5620 
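/**
 * amdgpu_ras_event_log_print - log a RAS message tagged with an event id
 * @adev: amdgpu device pointer
 * @event_id: RAS event id, prepended as "{id}" when valid
 * @fmt: printf-style format string
 *
 * A hypothetical call site, purely for illustration:
 *
 *	amdgpu_ras_event_log_print(adev, event_id,
 *				   "%lu new correctable errors\n", count);
 */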
5621 void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
5622 				const char *fmt, ...)
5623 {
5624 	struct va_format vaf;
5625 	va_list args;
5626 
5627 	va_start(args, fmt);
5628 	vaf.fmt = fmt;
5629 	vaf.va = &args;
5630 
5631 	if (RAS_EVENT_ID_IS_VALID(event_id))
5632 		dev_printk(KERN_INFO, adev->dev, "{%llu}%pV", event_id, &vaf);
5633 	else
5634 		dev_printk(KERN_INFO, adev->dev, "%pV", &vaf);
5635 
5636 	va_end(args);
5637 }
5638 
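/* Return whether the device has been flagged for RMA (e.g. after the bad
 * page threshold has been exceeded). With unified RAS the state is owned
 * by the RAS manager; otherwise it lives in the RAS context.
 */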
5639 bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
5640 {
5641 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5642 
5643 	if (amdgpu_uniras_enabled(adev))
5644 		return amdgpu_ras_mgr_is_rma(adev);
5645 
5646 	if (!con)
5647 		return false;
5648 
5649 	return con->is_rma;
5650 }
5651 
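/**
 * amdgpu_ras_add_critical_region - track a bo whose VRAM must stay usable
 * @adev: amdgpu device pointer
 * @bo: buffer object whose VRAM blocks are recorded
 *
 * Records one region per drm_buddy block backing @bo; pages inside these
 * regions are skipped by amdgpu_ras_reserve_page(). Returns 0 on success
 * or when @bo was recorded before, -EINVAL for an invalid bo, -ENOMEM on
 * allocation failure.
 */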
5652 int amdgpu_ras_add_critical_region(struct amdgpu_device *adev,
5653 			struct amdgpu_bo *bo)
5654 {
5655 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5656 	struct amdgpu_vram_mgr_resource *vres;
5657 	struct ras_critical_region *region;
5658 	struct drm_buddy_block *block;
5659 	int ret = 0;
5660 
5661 	if (!bo || !bo->tbo.resource)
5662 		return -EINVAL;
5663 
5664 	vres = to_amdgpu_vram_mgr_resource(bo->tbo.resource);
5665 
5666 	mutex_lock(&con->critical_region_lock);
5667 
	/* Check whether the bo has already been recorded */
5669 	list_for_each_entry(region, &con->critical_region_head, node)
5670 		if (region->bo == bo)
5671 			goto out;
5672 
	/* Record one region per VRAM block backing the new critical bo */
5674 	list_for_each_entry(block, &vres->blocks, link) {
5675 		region = kzalloc(sizeof(*region), GFP_KERNEL);
5676 		if (!region) {
5677 			ret = -ENOMEM;
5678 			goto out;
5679 		}
5680 		region->bo = bo;
5681 		region->start = amdgpu_vram_mgr_block_start(block);
5682 		region->size = amdgpu_vram_mgr_block_size(block);
5683 		list_add_tail(&region->node, &con->critical_region_head);
5684 	}
5685 
5686 out:
5687 	mutex_unlock(&con->critical_region_lock);
5688 
5689 	return ret;
5690 }
5691 
5692 static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev)
5693 {
5694 	amdgpu_ras_add_critical_region(adev, adev->mman.fw_reserved_memory);
5695 }
5696 
5697 static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev)
5698 {
5699 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5700 	struct ras_critical_region *region, *tmp;
5701 
5702 	mutex_lock(&con->critical_region_lock);
5703 	list_for_each_entry_safe(region, tmp, &con->critical_region_head, node) {
5704 		list_del(&region->node);
5705 		kfree(region);
5706 	}
5707 	mutex_unlock(&con->critical_region_lock);
5708 }
5709 
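/**
 * amdgpu_ras_check_critical_address - test an address against critical regions
 * @adev: amdgpu device pointer
 * @addr: VRAM byte offset to test
 *
 * Returns true when @addr falls inside any recorded critical region.
 */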
5710 bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr)
5711 {
5712 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5713 	struct ras_critical_region *region;
5714 	bool ret = false;
5715 
5716 	mutex_lock(&con->critical_region_lock);
5717 	list_for_each_entry(region, &con->critical_region_head, node) {
5718 		if ((region->start <= addr) &&
5719 		    (addr < (region->start + region->size))) {
5720 			ret = true;
5721 			break;
5722 		}
5723 	}
5724 	mutex_unlock(&con->critical_region_lock);
5725 
5726 	return ret;
5727 }
5728 
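/* Give the unified RAS manager a pre-/post-reset hook for every device
 * on the reset list; both helpers are no-ops unless uniras is enabled.
 */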
void amdgpu_ras_pre_reset(struct amdgpu_device *adev,
			  struct list_head *device_list)
5731 {
5732 	struct amdgpu_device *tmp_adev = NULL;
5733 
5734 	list_for_each_entry(tmp_adev, device_list, reset_list) {
5735 		if (amdgpu_uniras_enabled(tmp_adev))
5736 			amdgpu_ras_mgr_pre_reset(tmp_adev);
5737 	}
5738 }
5739 
void amdgpu_ras_post_reset(struct amdgpu_device *adev,
			   struct list_head *device_list)
5742 {
5743 	struct amdgpu_device *tmp_adev = NULL;
5744 
5745 	list_for_each_entry(tmp_adev, device_list, reset_list) {
5746 		if (amdgpu_uniras_enabled(tmp_adev))
5747 			amdgpu_ras_mgr_post_reset(tmp_adev);
5748 	}
5749 }
5750