xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c (revision 69f22c5b454f7a3d77f323ed96b4ad6ac7bbe378)
1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  *
23  */
24 #include <linux/debugfs.h>
25 #include <linux/list.h>
26 #include <linux/module.h>
27 #include <linux/uaccess.h>
28 #include <linux/reboot.h>
29 #include <linux/syscalls.h>
30 #include <linux/pm_runtime.h>
31 #include <linux/list_sort.h>
32 
33 #include "amdgpu.h"
34 #include "amdgpu_ras.h"
35 #include "amdgpu_atomfirmware.h"
36 #include "amdgpu_xgmi.h"
37 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
38 #include "nbio_v4_3.h"
39 #include "nbio_v7_9.h"
40 #include "atom.h"
41 #include "amdgpu_reset.h"
42 #include "amdgpu_psp.h"
43 
44 #ifdef CONFIG_X86_MCE_AMD
45 #include <asm/mce.h>
46 
47 static bool notifier_registered;
48 #endif
49 static const char *RAS_FS_NAME = "ras";
50 
51 const char *ras_error_string[] = {
52 	"none",
53 	"parity",
54 	"single_correctable",
55 	"multi_uncorrectable",
56 	"poison",
57 };
58 
59 const char *ras_block_string[] = {
60 	"umc",
61 	"sdma",
62 	"gfx",
63 	"mmhub",
64 	"athub",
65 	"pcie_bif",
66 	"hdp",
67 	"xgmi_wafl",
68 	"df",
69 	"smn",
70 	"sem",
71 	"mp0",
72 	"mp1",
73 	"fuse",
74 	"mca",
75 	"vcn",
76 	"jpeg",
77 	"ih",
78 	"mpio",
79 };
80 
81 const char *ras_mca_block_string[] = {
82 	"mca_mp0",
83 	"mca_mp1",
84 	"mca_mpio",
85 	"mca_iohc",
86 };
87 
88 struct amdgpu_ras_block_list {
89 	/* ras block link */
90 	struct list_head node;
91 
92 	struct amdgpu_ras_block_object *ras_obj;
93 };
94 
95 const char *get_ras_block_str(struct ras_common_if *ras_block)
96 {
97 	if (!ras_block)
98 		return "NULL";
99 
100 	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
101 	    ras_block->block >= ARRAY_SIZE(ras_block_string))
102 		return "OUT OF RANGE";
103 
104 	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
105 		return ras_mca_block_string[ras_block->sub_block_index];
106 
107 	return ras_block_string[ras_block->block];
108 }
109 
110 #define ras_block_str(_BLOCK_) \
111 	(((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")
112 
113 #define ras_err_str(i) (ras_error_string[ffs(i)])
114 
115 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
116 
117 /* inject address is 52 bits */
118 #define	RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)
119 
120 /* typical ECC bad page rate is 1 bad page per 100MB VRAM */
121 #define RAS_BAD_PAGE_COVER              (100 * 1024 * 1024ULL)
122 
123 #define MAX_UMC_POISON_POLLING_TIME_ASYNC  300  //ms
124 
125 #define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100  //ms
126 
127 #define MAX_FLUSH_RETIRE_DWORK_TIMES  100
128 
129 enum amdgpu_ras_retire_page_reservation {
130 	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
131 	AMDGPU_RAS_RETIRE_PAGE_PENDING,
132 	AMDGPU_RAS_RETIRE_PAGE_FAULT,
133 };
134 
135 atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
136 
137 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
138 				uint64_t addr);
139 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
140 				uint64_t addr);
141 #ifdef CONFIG_X86_MCE_AMD
142 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
143 struct mce_notifier_adev_list {
144 	struct amdgpu_device *devs[MAX_GPU_INSTANCE];
145 	int num_gpu;
146 };
147 static struct mce_notifier_adev_list mce_adev_list;
148 #endif
149 
150 void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
151 {
152 	if (adev && amdgpu_ras_get_context(adev))
153 		amdgpu_ras_get_context(adev)->error_query_ready = ready;
154 }
155 
156 static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
157 {
158 	if (adev && amdgpu_ras_get_context(adev))
159 		return amdgpu_ras_get_context(adev)->error_query_ready;
160 
161 	return false;
162 }
163 
164 static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
165 {
166 	struct ras_err_data err_data;
167 	struct eeprom_table_record err_rec;
168 	int ret;
169 
170 	if ((address >= adev->gmc.mc_vram_size) ||
171 	    (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
172 		dev_warn(adev->dev,
173 		         "RAS WARN: input address 0x%llx is invalid.\n",
174 		         address);
175 		return -EINVAL;
176 	}
177 
178 	if (amdgpu_ras_check_bad_page(adev, address)) {
179 		dev_warn(adev->dev,
180 			 "RAS WARN: 0x%llx has already been marked as bad page!\n",
181 			 address);
182 		return 0;
183 	}
184 
185 	ret = amdgpu_ras_error_data_init(&err_data);
186 	if (ret)
187 		return ret;
188 
189 	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
190 	err_data.err_addr = &err_rec;
191 	amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);
192 
193 	if (amdgpu_bad_page_threshold != 0) {
194 		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
195 					 err_data.err_addr_cnt);
196 		amdgpu_ras_save_bad_pages(adev, NULL);
197 	}
198 
199 	amdgpu_ras_error_data_fini(&err_data);
200 
201 	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
202 	dev_warn(adev->dev, "Clear EEPROM:\n");
203 	dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
204 
205 	return 0;
206 }
207 
208 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
209 					size_t size, loff_t *pos)
210 {
211 	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
212 	struct ras_query_if info = {
213 		.head = obj->head,
214 	};
215 	ssize_t s;
216 	char val[128];
217 
218 	if (amdgpu_ras_query_error_status(obj->adev, &info))
219 		return -EINVAL;
220 
221 	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
222 	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
223 	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
224 		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
225 			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
226 	}
227 
228 	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
229 			"ue", info.ue_count,
230 			"ce", info.ce_count);
231 	if (*pos >= s)
232 		return 0;
233 
234 	s -= *pos;
235 	s = min_t(u64, s, size);
236 
237 
238 	if (copy_to_user(buf, &val[*pos], s))
239 		return -EINVAL;
240 
241 	*pos += s;
242 
243 	return s;
244 }
245 
246 static const struct file_operations amdgpu_ras_debugfs_ops = {
247 	.owner = THIS_MODULE,
248 	.read = amdgpu_ras_debugfs_read,
249 	.write = NULL,
250 	.llseek = default_llseek
251 };
252 
253 static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
254 {
255 	int i;
256 
257 	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
258 		*block_id = i;
259 		if (strcmp(name, ras_block_string[i]) == 0)
260 			return 0;
261 	}
262 	return -EINVAL;
263 }
264 
265 static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
266 		const char __user *buf, size_t size,
267 		loff_t *pos, struct ras_debug_if *data)
268 {
269 	ssize_t s = min_t(u64, 64, size);
270 	char str[65];
271 	char block_name[33];
272 	char err[9] = "ue";
273 	int op = -1;
274 	int block_id;
275 	uint32_t sub_block;
276 	u64 address, value;
277 	/* default value is 0 if the mask is not set by user */
278 	u32 instance_mask = 0;
279 
280 	if (*pos)
281 		return -EINVAL;
282 	*pos = size;
283 
284 	memset(str, 0, sizeof(str));
285 	memset(data, 0, sizeof(*data));
286 
287 	if (copy_from_user(str, buf, s))
288 		return -EINVAL;
289 
290 	if (sscanf(str, "disable %32s", block_name) == 1)
291 		op = 0;
292 	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
293 		op = 1;
294 	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
295 		op = 2;
296 	else if (strstr(str, "retire_page") != NULL)
297 		op = 3;
298 	else if (str[0] && str[1] && str[2] && str[3])
299 		/* ascii string, but commands are not matched. */
300 		return -EINVAL;
301 
302 	if (op != -1) {
303 		if (op == 3) {
304 			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
305 			    sscanf(str, "%*s %llu", &address) != 1)
306 				return -EINVAL;
307 
308 			data->op = op;
309 			data->inject.address = address;
310 
311 			return 0;
312 		}
313 
314 		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
315 			return -EINVAL;
316 
317 		data->head.block = block_id;
318 		/* only ue, ce and poison errors are supported */
319 		if (!memcmp("ue", err, 2))
320 			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
321 		else if (!memcmp("ce", err, 2))
322 			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
323 		else if (!memcmp("poison", err, 6))
324 			data->head.type = AMDGPU_RAS_ERROR__POISON;
325 		else
326 			return -EINVAL;
327 
328 		data->op = op;
329 
330 		if (op == 2) {
331 			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
332 				   &sub_block, &address, &value, &instance_mask) != 4 &&
333 			    sscanf(str, "%*s %*s %*s %u %llu %llu %u",
334 				   &sub_block, &address, &value, &instance_mask) != 4 &&
335 				sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
336 				   &sub_block, &address, &value) != 3 &&
337 			    sscanf(str, "%*s %*s %*s %u %llu %llu",
338 				   &sub_block, &address, &value) != 3)
339 				return -EINVAL;
340 			data->head.sub_block_index = sub_block;
341 			data->inject.address = address;
342 			data->inject.value = value;
343 			data->inject.instance_mask = instance_mask;
344 		}
345 	} else {
346 		if (size < sizeof(*data))
347 			return -EINVAL;
348 
349 		if (copy_from_user(data, buf, sizeof(*data)))
350 			return -EINVAL;
351 	}
352 
353 	return 0;
354 }
355 
356 static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
357 				struct ras_debug_if *data)
358 {
359 	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
360 	uint32_t mask, inst_mask = data->inject.instance_mask;
361 
362 	/* no need to set instance mask if there is only one instance */
363 	if (num_xcc <= 1 && inst_mask) {
364 		data->inject.instance_mask = 0;
365 		dev_dbg(adev->dev,
366 			"RAS inject mask(0x%x) isn't supported; forcing it to 0.\n",
367 			inst_mask);
368 
369 		return;
370 	}
371 
372 	switch (data->head.block) {
373 	case AMDGPU_RAS_BLOCK__GFX:
374 		mask = GENMASK(num_xcc - 1, 0);
375 		break;
376 	case AMDGPU_RAS_BLOCK__SDMA:
377 		mask = GENMASK(adev->sdma.num_instances - 1, 0);
378 		break;
379 	case AMDGPU_RAS_BLOCK__VCN:
380 	case AMDGPU_RAS_BLOCK__JPEG:
381 		mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
382 		break;
383 	default:
384 		mask = inst_mask;
385 		break;
386 	}
387 
388 	/* remove invalid bits in instance mask */
389 	data->inject.instance_mask &= mask;
390 	if (inst_mask != data->inject.instance_mask)
391 		dev_dbg(adev->dev,
392 			"Adjust RAS inject mask 0x%x to 0x%x\n",
393 			inst_mask, data->inject.instance_mask);
394 }
395 
396 /**
397  * DOC: AMDGPU RAS debugfs control interface
398  *
399  * The control interface accepts struct ras_debug_if which has two members.
400  *
401  * First member: ras_debug_if::head or ras_debug_if::inject.
402  *
403  * head is used to indicate which IP block will be under control.
404  *
405  * head has four members, they are block, type, sub_block_index, name.
406  * block: which IP will be under control.
407  * type: what kind of error will be enabled/disabled/injected.
408  * sub_block_index: some IPs have subcomponents, e.g., GFX, SDMA.
409  * name: the name of IP.
410  *
411  * inject has three more members than head: address, value, and mask.
412  * As their names indicate, the inject operation will write the
413  * value to the address.
414  *
415  * The second member: struct ras_debug_if::op.
416  * It has three kinds of operations.
417  *
418  * - 0: disable RAS on the block. Take ::head as its data.
419  * - 1: enable RAS on the block. Take ::head as its data.
420  * - 2: inject errors on the block. Take ::inject as its data.
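 * - 3: reserve a bad page (retire_page). Take the address in ::inject as its data.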
421  *
422  * How to use the interface?
423  *
424  * In a program
425  *
426  * Copy the struct ras_debug_if in your code and initialize it.
427  * Write the struct to the control interface.
428  *
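 * For example, a minimal sketch (illustrative only: error handling is
 * elided, and the definition of struct ras_debug_if must be copied from
 * the driver) that enables ue error reporting on the umc block:
 *
 * .. code-block:: c
 *
 *	struct ras_debug_if data = { 0 };
 *	int fd;
 *
 *	data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *	data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *	data.op = 1;
 *
 *	fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *	write(fd, &data, sizeof(data));
 *	close(fd);
 *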
429  * From shell
430  *
431  * .. code-block:: bash
432  *
433  *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
434  *	echo "enable  <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
435  *	echo "inject  <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
436  *
437  * Where N is the card you want to affect.
438  *
439  * "disable" requires only the block.
440  * "enable" requires the block and error type.
441  * "inject" requires the block, error type, address, and value.
442  *
443  * The block is one of: umc, sdma, gfx, etc.
444  *	see ras_block_string[] for details
445  *
446  * The error type is one of: ue, ce and poison where,
447  *	ue is multi-uncorrectable
448  *	ce is single-correctable
449  *	poison is poison
450  *
451  * The sub-block is the sub-block index; pass 0 if there is no sub-block.
452  * The address and value are hexadecimal numbers; the leading 0x is optional.
453  * The mask is the instance mask; it is optional, and its default value is 0x1.
454  *
455  * For instance,
456  *
457  * .. code-block:: bash
458  *
459  *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
460  *	echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
461  *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
462  *
463  * How to check the result of the operation?
464  *
465  * To check disable/enable, see "ras" features at,
466  * /sys/class/drm/card[0/1/2...]/device/ras/features
467  *
468  * To check inject, see the corresponding error count at,
469  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
470  *
471  * .. note::
472  *	Operations are only allowed on blocks which are supported.
473  *	Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
474  *	to see which blocks support RAS on a particular asic.
475  *
476  */
477 static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
478 					     const char __user *buf,
479 					     size_t size, loff_t *pos)
480 {
481 	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
482 	struct ras_debug_if data;
483 	int ret = 0;
484 
485 	if (!amdgpu_ras_get_error_query_ready(adev)) {
486 		dev_warn(adev->dev, "RAS WARN: error injection "
487 				"currently inaccessible\n");
488 		return size;
489 	}
490 
491 	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
492 	if (ret)
493 		return ret;
494 
495 	if (data.op == 3) {
496 		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
497 		if (!ret)
498 			return size;
499 		else
500 			return ret;
501 	}
502 
503 	if (!amdgpu_ras_is_supported(adev, data.head.block))
504 		return -EINVAL;
505 
506 	switch (data.op) {
507 	case 0:
508 		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
509 		break;
510 	case 1:
511 		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
512 		break;
513 	case 2:
514 		if ((data.inject.address >= adev->gmc.mc_vram_size &&
515 		    adev->gmc.mc_vram_size) ||
516 		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
517 			dev_warn(adev->dev, "RAS WARN: input address "
518 					"0x%llx is invalid.",
519 					data.inject.address);
520 			ret = -EINVAL;
521 			break;
522 		}
523 
524 		/* umc ce/ue error injection for a bad page is not allowed */
525 		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
526 		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
527 			dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
528 				 "already been marked as bad!\n",
529 				 data.inject.address);
530 			break;
531 		}
532 
533 		amdgpu_ras_instance_mask_check(adev, &data);
534 
535 		/* data.inject.address is offset instead of absolute gpu address */
536 		ret = amdgpu_ras_error_inject(adev, &data.inject);
537 		break;
538 	default:
539 		ret = -EINVAL;
540 		break;
541 	}
542 
543 	if (ret)
544 		return ret;
545 
546 	return size;
547 }
548 
549 /**
550  * DOC: AMDGPU RAS debugfs EEPROM table reset interface
551  *
552  * Some boards contain an EEPROM which is used to persistently store a list of
553  * bad pages which experienced ECC errors in vram.  This interface provides
554  * a way to reset the EEPROM, e.g., after testing error injection.
555  *
556  * Usage:
557  *
558  * .. code-block:: bash
559  *
560  *	echo 1 > ../ras/ras_eeprom_reset
561  *
562  * will reset EEPROM table to 0 entries.
563  *
564  */
565 static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
566 					       const char __user *buf,
567 					       size_t size, loff_t *pos)
568 {
569 	struct amdgpu_device *adev =
570 		(struct amdgpu_device *)file_inode(f)->i_private;
571 	int ret;
572 
573 	ret = amdgpu_ras_eeprom_reset_table(
574 		&(amdgpu_ras_get_context(adev)->eeprom_control));
575 
576 	if (!ret) {
577 		/* Something was written to EEPROM.
578 		 */
579 		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
580 		return size;
581 	} else {
582 		return ret;
583 	}
584 }
585 
586 static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
587 	.owner = THIS_MODULE,
588 	.read = NULL,
589 	.write = amdgpu_ras_debugfs_ctrl_write,
590 	.llseek = default_llseek
591 };
592 
593 static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
594 	.owner = THIS_MODULE,
595 	.read = NULL,
596 	.write = amdgpu_ras_debugfs_eeprom_write,
597 	.llseek = default_llseek
598 };
599 
600 /**
601  * DOC: AMDGPU RAS sysfs Error Count Interface
602  *
603  * It allows the user to read the error count for each IP block on the gpu through
604  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
605  *
606  * It outputs multiple lines which report the uncorrected (ue) and corrected
607  * (ce) error counts.
608  *
609  * The format of one line is below,
610  *
611  * [ce|ue]: count
612  *
613  * Example:
614  *
615  * .. code-block:: bash
616  *
617  *	ue: 0
618  *	ce: 1
619  *
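 * The count for a block can be read with, for example (assuming card0 and
 * a umc block that supports RAS):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/umc_err_count
 *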
620  */
621 static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
622 		struct device_attribute *attr, char *buf)
623 {
624 	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
625 	struct ras_query_if info = {
626 		.head = obj->head,
627 	};
628 
629 	if (!amdgpu_ras_get_error_query_ready(obj->adev))
630 		return sysfs_emit(buf, "Query currently inaccessible\n");
631 
632 	if (amdgpu_ras_query_error_status(obj->adev, &info))
633 		return -EINVAL;
634 
635 	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
636 	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
637 		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
638 			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
639 	}
640 
641 	if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
642 		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
643 				"ce", info.ce_count, "de", info.de_count);
644 	else
645 		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
646 				"ce", info.ce_count);
647 }
648 
649 /* obj begin */
650 
651 #define get_obj(obj) do { (obj)->use++; } while (0)
652 #define alive_obj(obj) ((obj)->use)
653 
654 static inline void put_obj(struct ras_manager *obj)
655 {
656 	if (obj && (--obj->use == 0)) {
657 		list_del(&obj->node);
658 		amdgpu_ras_error_data_fini(&obj->err_data);
659 	}
660 
661 	if (obj && (obj->use < 0))
662 		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
663 }
664 
665 /* make one obj and return it. */
666 static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
667 		struct ras_common_if *head)
668 {
669 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
670 	struct ras_manager *obj;
671 
672 	if (!adev->ras_enabled || !con)
673 		return NULL;
674 
675 	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
676 		return NULL;
677 
678 	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
679 		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
680 			return NULL;
681 
682 		obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
683 	} else
684 		obj = &con->objs[head->block];
685 
686 	/* already exists. return obj? */
687 	if (alive_obj(obj))
688 		return NULL;
689 
690 	if (amdgpu_ras_error_data_init(&obj->err_data))
691 		return NULL;
692 
693 	obj->head = *head;
694 	obj->adev = adev;
695 	list_add(&obj->node, &con->head);
696 	get_obj(obj);
697 
698 	return obj;
699 }
700 
701 /* return an obj equal to head, or the first when head is NULL */
702 struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
703 		struct ras_common_if *head)
704 {
705 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
706 	struct ras_manager *obj;
707 	int i;
708 
709 	if (!adev->ras_enabled || !con)
710 		return NULL;
711 
712 	if (head) {
713 		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
714 			return NULL;
715 
716 		if (head->block == AMDGPU_RAS_BLOCK__MCA) {
717 			if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
718 				return NULL;
719 
720 			obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
721 		} else
722 			obj = &con->objs[head->block];
723 
724 		if (alive_obj(obj))
725 			return obj;
726 	} else {
727 		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
728 			obj = &con->objs[i];
729 			if (alive_obj(obj))
730 				return obj;
731 		}
732 	}
733 
734 	return NULL;
735 }
736 /* obj end */
737 
738 /* feature ctl begin */
739 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
740 					 struct ras_common_if *head)
741 {
742 	return adev->ras_hw_enabled & BIT(head->block);
743 }
744 
745 static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
746 		struct ras_common_if *head)
747 {
748 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
749 
750 	return con->features & BIT(head->block);
751 }
752 
753 /*
754  * if obj is not created, then create one.
755  * set feature enable flag.
756  */
757 static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
758 		struct ras_common_if *head, int enable)
759 {
760 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
761 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
762 
763 	/* If hardware does not support ras, then do not create obj.
764 	 * But if hardware supports ras, we can create the obj.
765 	 * The ras framework checks con->hw_supported to see if it needs to do
766 	 * the corresponding initialization.
767 	 * Each IP checks con->support to see if it needs to disable ras.
768 	 */
769 	if (!amdgpu_ras_is_feature_allowed(adev, head))
770 		return 0;
771 
772 	if (enable) {
773 		if (!obj) {
774 			obj = amdgpu_ras_create_obj(adev, head);
775 			if (!obj)
776 				return -EINVAL;
777 		} else {
778 			/* In case we create obj somewhere else */
779 			get_obj(obj);
780 		}
781 		con->features |= BIT(head->block);
782 	} else {
783 		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
784 			con->features &= ~BIT(head->block);
785 			put_obj(obj);
786 		}
787 	}
788 
789 	return 0;
790 }
791 
792 /* wrapper of psp_ras_enable_features */
793 int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
794 		struct ras_common_if *head, bool enable)
795 {
796 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
797 	union ta_ras_cmd_input *info;
798 	int ret;
799 
800 	if (!con)
801 		return -EINVAL;
802 
803 	/* For non-gfx ip, do not enable ras feature if it is not allowed.
804 	 * For gfx ip, regardless of feature support status, force issue
805 	 * enable or disable ras feature commands. */
806 	if (head->block != AMDGPU_RAS_BLOCK__GFX &&
807 	    !amdgpu_ras_is_feature_allowed(adev, head))
808 		return 0;
809 
810 	/* Only enable gfx ras feature from host side */
811 	if (head->block == AMDGPU_RAS_BLOCK__GFX &&
812 	    !amdgpu_sriov_vf(adev) &&
813 	    !amdgpu_ras_intr_triggered()) {
814 		info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
815 		if (!info)
816 			return -ENOMEM;
817 
818 		if (!enable) {
819 			info->disable_features = (struct ta_ras_disable_features_input) {
820 				.block_id =  amdgpu_ras_block_to_ta(head->block),
821 				.error_type = amdgpu_ras_error_to_ta(head->type),
822 			};
823 		} else {
824 			info->enable_features = (struct ta_ras_enable_features_input) {
825 				.block_id =  amdgpu_ras_block_to_ta(head->block),
826 				.error_type = amdgpu_ras_error_to_ta(head->type),
827 			};
828 		}
829 
830 		ret = psp_ras_enable_features(&adev->psp, info, enable);
831 		if (ret) {
832 			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
833 				enable ? "enable":"disable",
834 				get_ras_block_str(head),
835 				amdgpu_ras_is_poison_mode_supported(adev), ret);
836 			kfree(info);
837 			return ret;
838 		}
839 
840 		kfree(info);
841 	}
842 
843 	/* setup the obj */
844 	__amdgpu_ras_feature_enable(adev, head, enable);
845 
846 	return 0;
847 }
848 
849 /* Only used in device probe stage and called only once. */
850 int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
851 		struct ras_common_if *head, bool enable)
852 {
853 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
854 	int ret;
855 
856 	if (!con)
857 		return -EINVAL;
858 
859 	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
860 		if (enable) {
861 			/* There is no harm in issuing a ras TA cmd regardless of
862 			 * the current ras state.
863 			 * If current state == target state, it will do nothing.
864 			 * But sometimes it requests the driver to reset and repost
865 			 * with error code -EAGAIN.
866 			 */
867 			ret = amdgpu_ras_feature_enable(adev, head, 1);
868 			/* With old ras TA, we might fail to enable ras.
869 			 * Log it and just set up the object.
870 			 * TODO: remove this WA in the future.
871 			 */
872 			if (ret == -EINVAL) {
873 				ret = __amdgpu_ras_feature_enable(adev, head, 1);
874 				if (!ret)
875 					dev_info(adev->dev,
876 						"RAS INFO: %s setup object\n",
877 						get_ras_block_str(head));
878 			}
879 		} else {
880 			/* setup the object then issue a ras TA disable cmd.*/
881 			ret = __amdgpu_ras_feature_enable(adev, head, 1);
882 			if (ret)
883 				return ret;
884 
885 			/* gfx block ras disable cmd must be sent to ras-ta */
886 			if (head->block == AMDGPU_RAS_BLOCK__GFX)
887 				con->features |= BIT(head->block);
888 
889 			ret = amdgpu_ras_feature_enable(adev, head, 0);
890 
891 			/* clean gfx block ras features flag */
892 			if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
893 				con->features &= ~BIT(head->block);
894 		}
895 	} else
896 		ret = amdgpu_ras_feature_enable(adev, head, enable);
897 
898 	return ret;
899 }
900 
901 static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
902 		bool bypass)
903 {
904 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
905 	struct ras_manager *obj, *tmp;
906 
907 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
908 		/* bypass psp.
909 		 * aka just release the obj and corresponding flags
910 		 */
911 		if (bypass) {
912 			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
913 				break;
914 		} else {
915 			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
916 				break;
917 		}
918 	}
919 
920 	return con->features;
921 }
922 
923 static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
924 		bool bypass)
925 {
926 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
927 	int i;
928 	const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;
929 
930 	for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
931 		struct ras_common_if head = {
932 			.block = i,
933 			.type = default_ras_type,
934 			.sub_block_index = 0,
935 		};
936 
937 		if (i == AMDGPU_RAS_BLOCK__MCA)
938 			continue;
939 
940 		if (bypass) {
941 			/*
942 			 * bypass psp. vbios enables ras for us,
943 			 * so just create the obj.
944 			 */
945 			if (__amdgpu_ras_feature_enable(adev, &head, 1))
946 				break;
947 		} else {
948 			if (amdgpu_ras_feature_enable(adev, &head, 1))
949 				break;
950 		}
951 	}
952 
953 	for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
954 		struct ras_common_if head = {
955 			.block = AMDGPU_RAS_BLOCK__MCA,
956 			.type = default_ras_type,
957 			.sub_block_index = i,
958 		};
959 
960 		if (bypass) {
961 			/*
962 			/*
963 			 * bypass psp. vbios enables ras for us,
964 			 */
965 			if (__amdgpu_ras_feature_enable(adev, &head, 1))
966 				break;
967 		} else {
968 			if (amdgpu_ras_feature_enable(adev, &head, 1))
969 				break;
970 		}
971 	}
972 
973 	return con->features;
974 }
975 /* feature ctl end */
976 
977 static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
978 		enum amdgpu_ras_block block)
979 {
980 	if (!block_obj)
981 		return -EINVAL;
982 
983 	if (block_obj->ras_comm.block == block)
984 		return 0;
985 
986 	return -EINVAL;
987 }
988 
989 static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
990 					enum amdgpu_ras_block block, uint32_t sub_block_index)
991 {
992 	struct amdgpu_ras_block_list *node, *tmp;
993 	struct amdgpu_ras_block_object *obj;
994 
995 	if (block >= AMDGPU_RAS_BLOCK__LAST)
996 		return NULL;
997 
998 	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
999 		if (!node->ras_obj) {
1000 			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
1001 			continue;
1002 		}
1003 
1004 		obj = node->ras_obj;
1005 		if (obj->ras_block_match) {
1006 			if (obj->ras_block_match(obj, block, sub_block_index) == 0)
1007 				return obj;
1008 		} else {
1009 			if (amdgpu_ras_block_match_default(obj, block) == 0)
1010 				return obj;
1011 		}
1012 	}
1013 
1014 	return NULL;
1015 }
1016 
1017 static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
1018 {
1019 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
1020 	int ret = 0;
1021 
1022 	/*
1023 	 * choose the right query method according to
1024 	 * whether the smu supports querying error information
1025 	 */
1026 	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
1027 	if (ret == -EOPNOTSUPP) {
1028 		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1029 			adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
1030 			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
1031 
1032 		/* umc query_ras_error_address is also responsible for clearing
1033 		 * error status
1034 		 */
1035 		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1036 		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
1037 			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
1038 	} else if (!ret) {
1039 		if (adev->umc.ras &&
1040 			adev->umc.ras->ecc_info_query_ras_error_count)
1041 			adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);
1042 
1043 		if (adev->umc.ras &&
1044 			adev->umc.ras->ecc_info_query_ras_error_address)
1045 			adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
1046 	}
1047 }
1048 
1049 static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
1050 					      struct ras_manager *ras_mgr,
1051 					      struct ras_err_data *err_data,
1052 					      struct ras_query_context *qctx,
1053 					      const char *blk_name,
1054 					      bool is_ue,
1055 					      bool is_de)
1056 {
1057 	struct amdgpu_smuio_mcm_config_info *mcm_info;
1058 	struct ras_err_node *err_node;
1059 	struct ras_err_info *err_info;
1060 	u64 event_id = qctx->evid.event_id;
1061 
1062 	if (is_ue) {
1063 		for_each_ras_error(err_node, err_data) {
1064 			err_info = &err_node->err_info;
1065 			mcm_info = &err_info->mcm_info;
1066 			if (err_info->ue_count) {
1067 				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1068 					      "%lld new uncorrectable hardware errors detected in %s block\n",
1069 					      mcm_info->socket_id,
1070 					      mcm_info->die_id,
1071 					      err_info->ue_count,
1072 					      blk_name);
1073 			}
1074 		}
1075 
1076 		for_each_ras_error(err_node, &ras_mgr->err_data) {
1077 			err_info = &err_node->err_info;
1078 			mcm_info = &err_info->mcm_info;
1079 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1080 				      "%lld uncorrectable hardware errors detected in total in %s block\n",
1081 				      mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
1082 		}
1083 
1084 	} else {
1085 		if (is_de) {
1086 			for_each_ras_error(err_node, err_data) {
1087 				err_info = &err_node->err_info;
1088 				mcm_info = &err_info->mcm_info;
1089 				if (err_info->de_count) {
1090 					RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1091 						      "%lld new deferred hardware errors detected in %s block\n",
1092 						      mcm_info->socket_id,
1093 						      mcm_info->die_id,
1094 						      err_info->de_count,
1095 						      blk_name);
1096 				}
1097 			}
1098 
1099 			for_each_ras_error(err_node, &ras_mgr->err_data) {
1100 				err_info = &err_node->err_info;
1101 				mcm_info = &err_info->mcm_info;
1102 				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1103 					      "%lld deferred hardware errors detected in total in %s block\n",
1104 					      mcm_info->socket_id, mcm_info->die_id,
1105 					      err_info->de_count, blk_name);
1106 			}
1107 		} else {
1108 			for_each_ras_error(err_node, err_data) {
1109 				err_info = &err_node->err_info;
1110 				mcm_info = &err_info->mcm_info;
1111 				if (err_info->ce_count) {
1112 					RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1113 						      "%lld new correctable hardware errors detected in %s block\n",
1114 						      mcm_info->socket_id,
1115 						      mcm_info->die_id,
1116 						      err_info->ce_count,
1117 						      blk_name);
1118 				}
1119 			}
1120 
1121 			for_each_ras_error(err_node, &ras_mgr->err_data) {
1122 				err_info = &err_node->err_info;
1123 				mcm_info = &err_info->mcm_info;
1124 				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1125 					      "%lld correctable hardware errors detected in total in %s block\n",
1126 					      mcm_info->socket_id, mcm_info->die_id,
1127 					      err_info->ce_count, blk_name);
1128 			}
1129 		}
1130 	}
1131 }
1132 
1133 static inline bool err_data_has_source_info(struct ras_err_data *data)
1134 {
1135 	return !list_empty(&data->err_node_list);
1136 }
1137 
1138 static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
1139 					     struct ras_query_if *query_if,
1140 					     struct ras_err_data *err_data,
1141 					     struct ras_query_context *qctx)
1142 {
1143 	struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
1144 	const char *blk_name = get_ras_block_str(&query_if->head);
1145 	u64 event_id = qctx->evid.event_id;
1146 
1147 	if (err_data->ce_count) {
1148 		if (err_data_has_source_info(err_data)) {
1149 			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1150 							  blk_name, false, false);
1151 		} else if (!adev->aid_mask &&
1152 			   adev->smuio.funcs &&
1153 			   adev->smuio.funcs->get_socket_id &&
1154 			   adev->smuio.funcs->get_die_id) {
1155 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1156 				      "%ld correctable hardware errors "
1157 				      "detected in %s block\n",
1158 				      adev->smuio.funcs->get_socket_id(adev),
1159 				      adev->smuio.funcs->get_die_id(adev),
1160 				      ras_mgr->err_data.ce_count,
1161 				      blk_name);
1162 		} else {
1163 			RAS_EVENT_LOG(adev, event_id, "%ld correctable hardware errors "
1164 				      "detected in %s block\n",
1165 				      ras_mgr->err_data.ce_count,
1166 				      blk_name);
1167 		}
1168 	}
1169 
1170 	if (err_data->ue_count) {
1171 		if (err_data_has_source_info(err_data)) {
1172 			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1173 							  blk_name, true, false);
1174 		} else if (!adev->aid_mask &&
1175 			   adev->smuio.funcs &&
1176 			   adev->smuio.funcs->get_socket_id &&
1177 			   adev->smuio.funcs->get_die_id) {
1178 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1179 				      "%ld uncorrectable hardware errors "
1180 				      "detected in %s block\n",
1181 				      adev->smuio.funcs->get_socket_id(adev),
1182 				      adev->smuio.funcs->get_die_id(adev),
1183 				      ras_mgr->err_data.ue_count,
1184 				      blk_name);
1185 		} else {
1186 			RAS_EVENT_LOG(adev, event_id, "%ld uncorrectable hardware errors "
1187 				      "detected in %s block\n",
1188 				      ras_mgr->err_data.ue_count,
1189 				      blk_name);
1190 		}
1191 	}
1192 
1193 	if (err_data->de_count) {
1194 		if (err_data_has_source_info(err_data)) {
1195 			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1196 							  blk_name, false, true);
1197 		} else if (!adev->aid_mask &&
1198 			   adev->smuio.funcs &&
1199 			   adev->smuio.funcs->get_socket_id &&
1200 			   adev->smuio.funcs->get_die_id) {
1201 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1202 				      "%ld deferred hardware errors "
1203 				      "detected in %s block\n",
1204 				      adev->smuio.funcs->get_socket_id(adev),
1205 				      adev->smuio.funcs->get_die_id(adev),
1206 				      ras_mgr->err_data.de_count,
1207 				      blk_name);
1208 		} else {
1209 			RAS_EVENT_LOG(adev, event_id, "%ld deferred hardware errors "
1210 				      "detected in %s block\n",
1211 				      ras_mgr->err_data.de_count,
1212 				      blk_name);
1213 		}
1214 	}
1215 }
1216 
1217 static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
1218 {
1219 	struct ras_err_node *err_node;
1220 	struct ras_err_info *err_info;
1221 
1222 	if (err_data_has_source_info(err_data)) {
1223 		for_each_ras_error(err_node, err_data) {
1224 			err_info = &err_node->err_info;
1225 			amdgpu_ras_error_statistic_de_count(&obj->err_data,
1226 					&err_info->mcm_info, err_info->de_count);
1227 			amdgpu_ras_error_statistic_ce_count(&obj->err_data,
1228 					&err_info->mcm_info, err_info->ce_count);
1229 			amdgpu_ras_error_statistic_ue_count(&obj->err_data,
1230 					&err_info->mcm_info, err_info->ue_count);
1231 		}
1232 	} else {
1233 		/* for legacy asic path which doesn't have error source info */
1234 		obj->err_data.ue_count += err_data->ue_count;
1235 		obj->err_data.ce_count += err_data->ce_count;
1236 		obj->err_data.de_count += err_data->de_count;
1237 	}
1238 }
1239 
1240 static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
1241 {
1242 	struct ras_common_if head;
1243 
1244 	memset(&head, 0, sizeof(head));
1245 	head.block = blk;
1246 
1247 	return amdgpu_ras_find_obj(adev, &head);
1248 }
1249 
1250 int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
1251 			const struct aca_info *aca_info, void *data)
1252 {
1253 	struct ras_manager *obj;
1254 
1255 	/* in resume phase, no need to create aca fs node */
1256 	if (adev->in_suspend || amdgpu_in_reset(adev))
1257 		return 0;
1258 
1259 	obj = get_ras_manager(adev, blk);
1260 	if (!obj)
1261 		return -EINVAL;
1262 
1263 	return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
1264 }
1265 
1266 int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
1267 {
1268 	struct ras_manager *obj;
1269 
1270 	obj = get_ras_manager(adev, blk);
1271 	if (!obj)
1272 		return -EINVAL;
1273 
1274 	amdgpu_aca_remove_handle(&obj->aca_handle);
1275 
1276 	return 0;
1277 }
1278 
1279 static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
1280 					 enum aca_error_type type, struct ras_err_data *err_data,
1281 					 struct ras_query_context *qctx)
1282 {
1283 	struct ras_manager *obj;
1284 
1285 	obj = get_ras_manager(adev, blk);
1286 	if (!obj)
1287 		return -EINVAL;
1288 
1289 	return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data, qctx);
1290 }
1291 
1292 ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
1293 				  struct aca_handle *handle, char *buf, void *data)
1294 {
1295 	struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
1296 	struct ras_query_if info = {
1297 		.head = obj->head,
1298 	};
1299 
1300 	if (!amdgpu_ras_get_error_query_ready(obj->adev))
1301 		return sysfs_emit(buf, "Query currently inaccessible\n");
1302 
1303 	if (amdgpu_ras_query_error_status(obj->adev, &info))
1304 		return -EINVAL;
1305 
1306 	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
1307 			  "ce", info.ce_count, "de", info.de_count);
1308 }
1309 
1310 static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
1311 						struct ras_query_if *info,
1312 						struct ras_err_data *err_data,
1313 						struct ras_query_context *qctx,
1314 						unsigned int error_query_mode)
1315 {
1316 	enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
1317 	struct amdgpu_ras_block_object *block_obj = NULL;
1318 	int ret;
1319 
1320 	if (blk == AMDGPU_RAS_BLOCK_COUNT)
1321 		return -EINVAL;
1322 
1323 	if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
1324 		return -EINVAL;
1325 
1326 	if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
1327 		if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
1328 			amdgpu_ras_get_ecc_info(adev, err_data);
1329 		} else {
1330 			block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
1331 			if (!block_obj || !block_obj->hw_ops) {
1332 				dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1333 					     get_ras_block_str(&info->head));
1334 				return -EINVAL;
1335 			}
1336 
1337 			if (block_obj->hw_ops->query_ras_error_count)
1338 				block_obj->hw_ops->query_ras_error_count(adev, err_data);
1339 
1340 			if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
1341 			    (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
1342 			    (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
1343 				if (block_obj->hw_ops->query_ras_error_status)
1344 					block_obj->hw_ops->query_ras_error_status(adev);
1345 			}
1346 		}
1347 	} else {
1348 		if (amdgpu_aca_is_enabled(adev)) {
1349 			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data, qctx);
1350 			if (ret)
1351 				return ret;
1352 
1353 			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data, qctx);
1354 			if (ret)
1355 				return ret;
1356 
1357 			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data, qctx);
1358 			if (ret)
1359 				return ret;
1360 		} else {
1361 			/* FIXME: add code to check return value later */
1362 			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx);
1363 			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data, qctx);
1364 		}
1365 	}
1366 
1367 	return 0;
1368 }
1369 
1370 /* query/inject/cure begin */
1371 static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev,
1372 						    struct ras_query_if *info,
1373 						    enum ras_event_type type)
1374 {
1375 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1376 	struct ras_err_data err_data;
1377 	struct ras_query_context qctx;
1378 	unsigned int error_query_mode;
1379 	int ret;
1380 
1381 	if (!obj)
1382 		return -EINVAL;
1383 
1384 	ret = amdgpu_ras_error_data_init(&err_data);
1385 	if (ret)
1386 		return ret;
1387 
1388 	if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
1389 		return -EINVAL;
1390 
1391 	memset(&qctx, 0, sizeof(qctx));
1392 	qctx.evid.type = type;
1393 	qctx.evid.event_id = amdgpu_ras_acquire_event_id(adev, type);
1394 
1395 	if (!down_read_trylock(&adev->reset_domain->sem)) {
1396 		ret = -EIO;
1397 		goto out_fini_err_data;
1398 	}
1399 
1400 	ret = amdgpu_ras_query_error_status_helper(adev, info,
1401 						   &err_data,
1402 						   &qctx,
1403 						   error_query_mode);
1404 	up_read(&adev->reset_domain->sem);
1405 	if (ret)
1406 		goto out_fini_err_data;
1407 
1408 	amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
1409 
1410 	info->ue_count = obj->err_data.ue_count;
1411 	info->ce_count = obj->err_data.ce_count;
1412 	info->de_count = obj->err_data.de_count;
1413 
1414 	amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
1415 
1416 out_fini_err_data:
1417 	amdgpu_ras_error_data_fini(&err_data);
1418 
1419 	return ret;
1420 }
1421 
1422 int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
1423 {
1424 	return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
1425 }
1426 
1427 int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
1428 		enum amdgpu_ras_block block)
1429 {
1430 	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1431 	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
1432 	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
1433 
1434 	if (!block_obj || !block_obj->hw_ops) {
1435 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1436 				ras_block_str(block));
1437 		return -EOPNOTSUPP;
1438 	}
1439 
1440 	if (!amdgpu_ras_is_supported(adev, block) ||
1441 	    !amdgpu_ras_get_aca_debug_mode(adev))
1442 		return -EOPNOTSUPP;
1443 
1444 	/* skip ras error reset in gpu reset */
1445 	if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) &&
1446 	    ((smu_funcs && smu_funcs->set_debug_mode) ||
1447 	     (mca_funcs && mca_funcs->mca_set_debug_mode)))
1448 		return -EOPNOTSUPP;
1449 
1450 	if (block_obj->hw_ops->reset_ras_error_count)
1451 		block_obj->hw_ops->reset_ras_error_count(adev);
1452 
1453 	return 0;
1454 }
1455 
1456 int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
1457 		enum amdgpu_ras_block block)
1458 {
1459 	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1460 
1461 	if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
1462 		return 0;
1463 
1464 	if ((block == AMDGPU_RAS_BLOCK__GFX) ||
1465 	    (block == AMDGPU_RAS_BLOCK__MMHUB)) {
1466 		if (block_obj->hw_ops->reset_ras_error_status)
1467 			block_obj->hw_ops->reset_ras_error_status(adev);
1468 	}
1469 
1470 	return 0;
1471 }
1472 
1473 /* wrapper of psp_ras_trigger_error */
1474 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
1475 		struct ras_inject_if *info)
1476 {
1477 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1478 	struct ta_ras_trigger_error_input block_info = {
1479 		.block_id =  amdgpu_ras_block_to_ta(info->head.block),
1480 		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
1481 		.sub_block_index = info->head.sub_block_index,
1482 		.address = info->address,
1483 		.value = info->value,
1484 	};
1485 	int ret = -EINVAL;
1486 	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
1487 							info->head.block,
1488 							info->head.sub_block_index);
1489 
1490 	/* inject on guest isn't allowed, return success directly */
1491 	if (amdgpu_sriov_vf(adev))
1492 		return 0;
1493 
1494 	if (!obj)
1495 		return -EINVAL;
1496 
1497 	if (!block_obj || !block_obj->hw_ops)	{
1498 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1499 			     get_ras_block_str(&info->head));
1500 		return -EINVAL;
1501 	}
1502 
1503 	/* Calculate XGMI relative offset */
1504 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
1505 	    info->head.block != AMDGPU_RAS_BLOCK__GFX) {
1506 		block_info.address =
1507 			amdgpu_xgmi_get_relative_phy_addr(adev,
1508 							  block_info.address);
1509 	}
1510 
1511 	if (block_obj->hw_ops->ras_error_inject) {
1512 		if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
1513 			ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
1514 		else /* Special ras_error_inject is defined (e.g: xgmi) */
1515 			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
1516 						info->instance_mask);
1517 	} else {
1518 		/* default path */
1519 		ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
1520 	}
1521 
1522 	if (ret)
1523 		dev_err(adev->dev, "ras inject %s failed %d\n",
1524 			get_ras_block_str(&info->head), ret);
1525 
1526 	return ret;
1527 }
1528 
1529 /**
1530  * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
1531  * @adev: pointer to AMD GPU device
1532  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1533  * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
1534  * @query_info: pointer to ras_query_if
1535  *
1536  * Return 0 on query success, or when there is nothing to do; otherwise,
1537  * return an error on failure.
1538  */
1539 static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
1540 					       unsigned long *ce_count,
1541 					       unsigned long *ue_count,
1542 					       struct ras_query_if *query_info)
1543 {
1544 	int ret;
1545 
1546 	if (!query_info)
1547 		/* do nothing if query_info is not specified */
1548 		return 0;
1549 
1550 	ret = amdgpu_ras_query_error_status(adev, query_info);
1551 	if (ret)
1552 		return ret;
1553 
1554 	*ce_count += query_info->ce_count;
1555 	*ue_count += query_info->ue_count;
1556 
1557 	/* some hardware/IPs support read-to-clear;
1558 	 * no need to explicitly reset the err status after the query call */
1559 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
1560 	    amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
1561 		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
1562 			dev_warn(adev->dev,
1563 				 "Failed to reset error counter and error status\n");
1564 	}
1565 
1566 	return 0;
1567 }
1568 
1569 /**
1570  * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
1571  * @adev: pointer to AMD GPU device
1572  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1573  * @ue_count: pointer to an integer to be set to the count of uncorrectable
1574  * errors.
1575  * @query_info: pointer to ras_query_if if the query request is only for a
1576  * specific ip block; if info is NULL, then the query request is for
1577  * all the ip blocks that support querying ras error counters/status
1578  *
1579  * If set, @ce_count or @ue_count, count and return the corresponding
1580  * error counts in those integer pointers. Return 0 if the device
1581  * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
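 *
 * A minimal caller sketch (illustrative only), querying the totals across
 * all ip blocks that support ras:
 *
 * .. code-block:: c
 *
 *	unsigned long ce_count = 0, ue_count = 0;
 *
 *	if (!amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL))
 *		dev_info(adev->dev, "ce:%lu ue:%lu\n", ce_count, ue_count);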
1582  */
1583 int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
1584 				 unsigned long *ce_count,
1585 				 unsigned long *ue_count,
1586 				 struct ras_query_if *query_info)
1587 {
1588 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1589 	struct ras_manager *obj;
1590 	unsigned long ce, ue;
1591 	int ret;
1592 
1593 	if (!adev->ras_enabled || !con)
1594 		return -EOPNOTSUPP;
1595 
1596 	/* Don't count since there is nothing to report.
1597 	 */
1598 	if (!ce_count && !ue_count)
1599 		return 0;
1600 
1601 	ce = 0;
1602 	ue = 0;
1603 	if (!query_info) {
1604 		/* query all the ip blocks that support ras query interface */
1605 		list_for_each_entry(obj, &con->head, node) {
1606 			struct ras_query_if info = {
1607 				.head = obj->head,
1608 			};
1609 
1610 			ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
1611 		}
1612 	} else {
1613 		/* query specific ip block */
1614 		ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
1615 	}
1616 
1617 	if (ret)
1618 		return ret;
1619 
1620 	if (ce_count)
1621 		*ce_count = ce;
1622 
1623 	if (ue_count)
1624 		*ue_count = ue;
1625 
1626 	return 0;
1627 }
1628 /* query/inject/cure end */
1629 
1630 
1631 /* sysfs begin */
1632 
1633 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1634 		struct ras_badpage **bps, unsigned int *count);
1635 
1636 static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
1637 {
1638 	switch (flags) {
1639 	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
1640 		return "R";
1641 	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
1642 		return "P";
1643 	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
1644 	default:
1645 		return "F";
1646 	}
1647 }
1648 
1649 /**
1650  * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
1651  *
1652  * It allows the user to read the bad pages of vram on the gpu through
1653  * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
1654  *
1655  * It outputs multiple lines, and each line stands for one gpu page.
1656  *
1657  * The format of one line is below,
1658  * gpu pfn : gpu page size : flags
1659  *
1660  * gpu pfn and gpu page size are printed in hex format.
1661  * flags can be one of the characters below:
1662  *
1663  * R: reserved, this gpu page is reserved and not able to be used.
1664  *
1665  * P: pending for reserve, this gpu page is marked as bad and will be
1666  * reserved in the next window of page_reserve.
1667  *
1668  * F: unable to reserve. this gpu page can't be reserved for some reason.
1669  *
1670  * Examples:
1671  *
1672  * .. code-block:: bash
1673  *
1674  *	0x00000001 : 0x00001000 : R
1675  *	0x00000002 : 0x00001000 : P
1676  *
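 * The list can be read with, for example (assuming card0):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/gpu_vram_bad_pages
 *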
1677  */
1678 
1679 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
1680 		struct kobject *kobj, struct bin_attribute *attr,
1681 		char *buf, loff_t ppos, size_t count)
1682 {
1683 	struct amdgpu_ras *con =
1684 		container_of(attr, struct amdgpu_ras, badpages_attr);
1685 	struct amdgpu_device *adev = con->adev;
1686 	const unsigned int element_size =
1687 		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
1688 	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
1689 	unsigned int end = div64_ul(ppos + count - 1, element_size);
1690 	ssize_t s = 0;
1691 	struct ras_badpage *bps = NULL;
1692 	unsigned int bps_count = 0;
1693 
1694 	memset(buf, 0, count);
1695 
1696 	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
1697 		return 0;
1698 
1699 	for (; start < end && start < bps_count; start++)
1700 		s += scnprintf(&buf[s], element_size + 1,
1701 				"0x%08x : 0x%08x : %1s\n",
1702 				bps[start].bp,
1703 				bps[start].size,
1704 				amdgpu_ras_badpage_flags_str(bps[start].flags));
1705 
1706 	kfree(bps);
1707 
1708 	return s;
1709 }
1710 
1711 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
1712 		struct device_attribute *attr, char *buf)
1713 {
1714 	struct amdgpu_ras *con =
1715 		container_of(attr, struct amdgpu_ras, features_attr);
1716 
1717 	return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
1718 }
1719 
1720 static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
1721 		struct device_attribute *attr, char *buf)
1722 {
1723 	struct amdgpu_ras *con =
1724 		container_of(attr, struct amdgpu_ras, version_attr);
1725 	return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
1726 }
1727 
1728 static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
1729 		struct device_attribute *attr, char *buf)
1730 {
1731 	struct amdgpu_ras *con =
1732 		container_of(attr, struct amdgpu_ras, schema_attr);
1733 	return sysfs_emit(buf, "schema: 0x%x\n", con->schema);
1734 }
1735 
1736 static struct {
1737 	enum ras_event_type type;
1738 	const char *name;
1739 } dump_event[] = {
1740 	{RAS_EVENT_TYPE_FATAL, "Fatal Error"},
1741 	{RAS_EVENT_TYPE_POISON_CREATION, "Poison Creation"},
1742 	{RAS_EVENT_TYPE_POISON_CONSUMPTION, "Poison Consumption"},
1743 };
1744 
1745 static ssize_t amdgpu_ras_sysfs_event_state_show(struct device *dev,
1746 						 struct device_attribute *attr, char *buf)
1747 {
1748 	struct amdgpu_ras *con =
1749 		container_of(attr, struct amdgpu_ras, event_state_attr);
1750 	struct ras_event_manager *event_mgr = con->event_mgr;
1751 	struct ras_event_state *event_state;
1752 	int i, size = 0;
1753 
1754 	if (!event_mgr)
1755 		return -EINVAL;
1756 
1757 	size += sysfs_emit_at(buf, size, "current seqno: %llu\n", atomic64_read(&event_mgr->seqno));
1758 	for (i = 0; i < ARRAY_SIZE(dump_event); i++) {
1759 		event_state = &event_mgr->event_state[dump_event[i].type];
1760 		size += sysfs_emit_at(buf, size, "%s: count:%llu, last_seqno:%llu\n",
1761 				      dump_event[i].name,
1762 				      atomic64_read(&event_state->count),
1763 				      event_state->last_seqno);
1764 	}
1765 
1766 	return (ssize_t)size;
1767 }
1768 
1769 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
1770 {
1771 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1772 
1773 	if (adev->dev->kobj.sd)
1774 		sysfs_remove_file_from_group(&adev->dev->kobj,
1775 				&con->badpages_attr.attr,
1776 				RAS_FS_NAME);
1777 }
1778 
1779 static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev)
1780 {
1781 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1782 	struct attribute *attrs[] = {
1783 		&con->features_attr.attr,
1784 		&con->version_attr.attr,
1785 		&con->schema_attr.attr,
1786 		&con->event_state_attr.attr,
1787 		NULL
1788 	};
1789 	struct attribute_group group = {
1790 		.name = RAS_FS_NAME,
1791 		.attrs = attrs,
1792 	};
1793 
1794 	if (adev->dev->kobj.sd)
1795 		sysfs_remove_group(&adev->dev->kobj, &group);
1796 
1797 	return 0;
1798 }
1799 
1800 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
1801 		struct ras_common_if *head)
1802 {
1803 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1804 
1805 	if (amdgpu_aca_is_enabled(adev))
1806 		return 0;
1807 
1808 	if (!obj || obj->attr_inuse)
1809 		return -EINVAL;
1810 
1811 	get_obj(obj);
1812 
1813 	snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
1814 		"%s_err_count", head->name);
1815 
1816 	obj->sysfs_attr = (struct device_attribute){
1817 		.attr = {
1818 			.name = obj->fs_data.sysfs_name,
1819 			.mode = S_IRUGO,
1820 		},
1821 			.show = amdgpu_ras_sysfs_read,
1822 	};
1823 	sysfs_attr_init(&obj->sysfs_attr.attr);
1824 
1825 	if (sysfs_add_file_to_group(&adev->dev->kobj,
1826 				&obj->sysfs_attr.attr,
1827 				RAS_FS_NAME)) {
1828 		put_obj(obj);
1829 		return -EINVAL;
1830 	}
1831 
1832 	obj->attr_inuse = 1;
1833 
1834 	return 0;
1835 }
1836 
1837 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
1838 		struct ras_common_if *head)
1839 {
1840 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1841 
1842 	if (amdgpu_aca_is_enabled(adev))
1843 		return 0;
1844 
1845 	if (!obj || !obj->attr_inuse)
1846 		return -EINVAL;
1847 
1848 	if (adev->dev->kobj.sd)
1849 		sysfs_remove_file_from_group(&adev->dev->kobj,
1850 				&obj->sysfs_attr.attr,
1851 				RAS_FS_NAME);
1852 	obj->attr_inuse = 0;
1853 	put_obj(obj);
1854 
1855 	return 0;
1856 }
1857 
1858 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
1859 {
1860 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1861 	struct ras_manager *obj, *tmp;
1862 
1863 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
1864 		amdgpu_ras_sysfs_remove(adev, &obj->head);
1865 	}
1866 
1867 	if (amdgpu_bad_page_threshold != 0)
1868 		amdgpu_ras_sysfs_remove_bad_page_node(adev);
1869 
1870 	amdgpu_ras_sysfs_remove_dev_attr_node(adev);
1871 
1872 	return 0;
1873 }
1874 /* sysfs end */
1875 
1876 /**
1877  * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
1878  *
1879  * Normally when there is an uncorrectable error, the driver will reset
1880  * the GPU to recover.  However, in the event of an unrecoverable error,
1881  * the driver provides an interface to reboot the system automatically
1882  * in that event.
1883  *
1884  * The following file in debugfs provides that interface:
1885  * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
1886  *
1887  * Usage:
1888  *
1889  * .. code-block:: bash
1890  *
1891  *	echo true > .../ras/auto_reboot
1892  *
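 * The current value can be read back from the same file, e.g.:
 *
 * .. code-block:: bash
 *
 *	cat .../ras/auto_reboot
 *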
1893  */
1894 /* debugfs begin */
1895 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
1896 {
1897 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1898 	struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
1899 	struct drm_minor  *minor = adev_to_drm(adev)->primary;
1900 	struct dentry     *dir;
1901 
1902 	dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
1903 	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
1904 			    &amdgpu_ras_debugfs_ctrl_ops);
1905 	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
1906 			    &amdgpu_ras_debugfs_eeprom_ops);
1907 	debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
1908 			   &con->bad_page_cnt_threshold);
1909 	debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
1910 	debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
1911 	debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
1912 	debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
1913 			    &amdgpu_ras_debugfs_eeprom_size_ops);
1914 	con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
1915 						       S_IRUGO, dir, adev,
1916 						       &amdgpu_ras_debugfs_eeprom_table_ops);
1917 	amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
1918 
	/*
	 * After an uncorrectable error happens, GPU recovery is usually
	 * scheduled. But GPU recovery is known to sometimes fail to bring
	 * the GPU back, so the interface below provides a direct way for
	 * the user to have the system rebooted automatically when an
	 * ERREVENT_ATHUB_INTERRUPT is generated. In that case the normal
	 * GPU recovery routine will never be called.
	 */
1927 	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
1928 
1929 	/*
	 * The user can set this so that the hardware error count registers
	 * of the RAS IPs are not cleaned up during ras recovery.
1932 	 */
1933 	debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
1934 			    &con->disable_ras_err_cnt_harvest);
1935 	return dir;
1936 }
1937 
1938 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
1939 				      struct ras_fs_if *head,
1940 				      struct dentry *dir)
1941 {
1942 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
1943 
1944 	if (!obj || !dir)
1945 		return;
1946 
1947 	get_obj(obj);
1948 
1949 	memcpy(obj->fs_data.debugfs_name,
1950 			head->debugfs_name,
1951 			sizeof(obj->fs_data.debugfs_name));
1952 
1953 	debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
1954 			    obj, &amdgpu_ras_debugfs_ops);
1955 }
1956 
1957 static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev)
1958 {
1959 	bool ret;
1960 
1961 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
1962 	case IP_VERSION(13, 0, 6):
1963 	case IP_VERSION(13, 0, 14):
1964 		ret = true;
1965 		break;
1966 	default:
1967 		ret = false;
1968 		break;
1969 	}
1970 
1971 	return ret;
1972 }
1973 
1974 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
1975 {
1976 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1977 	struct dentry *dir;
1978 	struct ras_manager *obj;
1979 	struct ras_fs_if fs_info;
1980 
1981 	/*
	 * This won't be called in the resume path, so there is no need to
	 * check the suspend and gpu reset status.
1984 	 */
1985 	if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
1986 		return;
1987 
1988 	dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
1989 
1990 	list_for_each_entry(obj, &con->head, node) {
1991 		if (amdgpu_ras_is_supported(adev, obj->head.block) &&
1992 			(obj->attr_inuse == 1)) {
1993 			sprintf(fs_info.debugfs_name, "%s_err_inject",
1994 					get_ras_block_str(&obj->head));
1995 			fs_info.head = obj->head;
1996 			amdgpu_ras_debugfs_create(adev, &fs_info, dir);
1997 		}
1998 	}
1999 
2000 	if (amdgpu_ras_aca_is_supported(adev)) {
2001 		if (amdgpu_aca_is_enabled(adev))
2002 			amdgpu_aca_smu_debugfs_init(adev, dir);
2003 		else
2004 			amdgpu_mca_smu_debugfs_init(adev, dir);
2005 	}
2006 }
2007 
2008 /* debugfs end */
2009 
2010 /* ras fs */
2011 static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
2012 		amdgpu_ras_sysfs_badpages_read, NULL, 0);
2013 static DEVICE_ATTR(features, S_IRUGO,
2014 		amdgpu_ras_sysfs_features_read, NULL);
2015 static DEVICE_ATTR(version, 0444,
2016 		amdgpu_ras_sysfs_version_show, NULL);
2017 static DEVICE_ATTR(schema, 0444,
2018 		amdgpu_ras_sysfs_schema_show, NULL);
2019 static DEVICE_ATTR(event_state, 0444,
2020 		   amdgpu_ras_sysfs_event_state_show, NULL);
2021 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
2022 {
2023 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2024 	struct attribute_group group = {
2025 		.name = RAS_FS_NAME,
2026 	};
2027 	struct attribute *attrs[] = {
2028 		&con->features_attr.attr,
2029 		&con->version_attr.attr,
2030 		&con->schema_attr.attr,
2031 		&con->event_state_attr.attr,
2032 		NULL
2033 	};
2034 	struct bin_attribute *bin_attrs[] = {
2035 		NULL,
2036 		NULL,
2037 	};
2038 	int r;
2039 
2040 	group.attrs = attrs;
2041 
2042 	/* add features entry */
2043 	con->features_attr = dev_attr_features;
2044 	sysfs_attr_init(attrs[0]);
2045 
2046 	/* add version entry */
2047 	con->version_attr = dev_attr_version;
2048 	sysfs_attr_init(attrs[1]);
2049 
2050 	/* add schema entry */
2051 	con->schema_attr = dev_attr_schema;
2052 	sysfs_attr_init(attrs[2]);
2053 
2054 	/* add event_state entry */
2055 	con->event_state_attr = dev_attr_event_state;
2056 	sysfs_attr_init(attrs[3]);
2057 
2058 	if (amdgpu_bad_page_threshold != 0) {
		/* add gpu_vram_bad_pages entry */
2060 		bin_attr_gpu_vram_bad_pages.private = NULL;
2061 		con->badpages_attr = bin_attr_gpu_vram_bad_pages;
2062 		bin_attrs[0] = &con->badpages_attr;
2063 		group.bin_attrs = bin_attrs;
2064 		sysfs_bin_attr_init(bin_attrs[0]);
2065 	}
2066 
2067 	r = sysfs_create_group(&adev->dev->kobj, &group);
2068 	if (r)
2069 		dev_err(adev->dev, "Failed to create RAS sysfs group!");
2070 
2071 	return 0;
2072 }
2073 
2074 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
2075 {
2076 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2077 	struct ras_manager *con_obj, *ip_obj, *tmp;
2078 
2079 	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2080 		list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
2081 			ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
2082 			if (ip_obj)
2083 				put_obj(ip_obj);
2084 		}
2085 	}
2086 
2087 	amdgpu_ras_sysfs_remove_all(adev);
2088 	return 0;
2089 }
2090 /* ras fs end */
2091 
2092 /* ih begin */
2093 
/* For hardware that cannot enable the bif ring for both the ras_controller_irq
 * and the ras_err_event_athub_irq ih cookies, the driver has to poll the
 * status register to check whether the interrupt has been triggered, and
 * properly ack the interrupt if it is there.
2098  */
2099 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
2100 {
2101 	/* Fatal error events are handled on host side */
2102 	if (amdgpu_sriov_vf(adev))
2103 		return;
2104 
2105 	if (adev->nbio.ras &&
2106 	    adev->nbio.ras->handle_ras_controller_intr_no_bifring)
2107 		adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
2108 
2109 	if (adev->nbio.ras &&
2110 	    adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
2111 		adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
2112 }
2113 
2114 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
2115 				struct amdgpu_iv_entry *entry)
2116 {
2117 	bool poison_stat = false;
2118 	struct amdgpu_device *adev = obj->adev;
2119 	struct amdgpu_ras_block_object *block_obj =
2120 		amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
2121 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2122 	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
2123 	u64 event_id;
2124 	int ret;
2125 
2126 	if (!block_obj || !con)
2127 		return;
2128 
2129 	ret = amdgpu_ras_mark_ras_event(adev, type);
2130 	if (ret)
2131 		return;
2132 
2133 	/* both query_poison_status and handle_poison_consumption are optional,
	 * but at least one of them should be implemented if we need a poison
	 * consumption handler
2136 	 */
2137 	if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
2138 		poison_stat = block_obj->hw_ops->query_poison_status(adev);
2139 		if (!poison_stat) {
2140 			/* Not poison consumption interrupt, no need to handle it */
2141 			dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
2142 					block_obj->ras_comm.name);
2143 
2144 			return;
2145 		}
2146 	}
2147 
2148 	amdgpu_umc_poison_handler(adev, obj->head.block, 0);
2149 
2150 	if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
2151 		poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
2152 
	/* GPU reset is the fallback for the failed and default cases.
	 * For the RMA case, amdgpu_umc_poison_handler will handle the gpu reset.
2155 	 */
2156 	if (poison_stat && !amdgpu_ras_is_rma(adev)) {
2157 		event_id = amdgpu_ras_acquire_event_id(adev, type);
2158 		RAS_EVENT_LOG(adev, event_id,
2159 			      "GPU reset for %s RAS poison consumption is issued!\n",
2160 			      block_obj->ras_comm.name);
2161 		amdgpu_ras_reset_gpu(adev);
2162 	}
2163 
2164 	if (!poison_stat)
2165 		amdgpu_gfx_poison_consumption_handler(adev, entry);
2166 }
2167 
2168 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
2169 				struct amdgpu_iv_entry *entry)
2170 {
2171 	struct amdgpu_device *adev = obj->adev;
2172 	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
2173 	u64 event_id;
2174 	int ret;
2175 
2176 	ret = amdgpu_ras_mark_ras_event(adev, type);
2177 	if (ret)
2178 		return;
2179 
2180 	event_id = amdgpu_ras_acquire_event_id(adev, type);
2181 	RAS_EVENT_LOG(adev, event_id, "Poison is created\n");
2182 
2183 	if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) {
2184 		struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev);
2185 
2186 		atomic_inc(&con->page_retirement_req_cnt);
2187 		atomic_inc(&con->poison_creation_count);
2188 
2189 		wake_up(&con->page_retirement_wq);
2190 	}
2191 }
2192 
2193 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
2194 				struct amdgpu_iv_entry *entry)
2195 {
2196 	struct ras_ih_data *data = &obj->ih_data;
2197 	struct ras_err_data err_data;
2198 	int ret;
2199 
2200 	if (!data->cb)
2201 		return;
2202 
2203 	ret = amdgpu_ras_error_data_init(&err_data);
2204 	if (ret)
2205 		return;
2206 
	/* Let the IP handle its data; maybe we need to get the output
	 * from the callback to update the error type/count, etc.
2209 	 */
2210 	amdgpu_ras_set_fed(obj->adev, true);
2211 	ret = data->cb(obj->adev, &err_data, entry);
	/* A UE will trigger an interrupt, and in that case
	 * we need to do a reset to recover the whole system.
	 * But leave it to the IP to do that recovery; here we
	 * just dispatch the error.
2216 	 */
2217 	if (ret == AMDGPU_RAS_SUCCESS) {
2218 		/* these counts could be left as 0 if
		 * some blocks do not count error numbers
2220 		 */
2221 		obj->err_data.ue_count += err_data.ue_count;
2222 		obj->err_data.ce_count += err_data.ce_count;
2223 		obj->err_data.de_count += err_data.de_count;
2224 	}
2225 
2226 	amdgpu_ras_error_data_fini(&err_data);
2227 }
2228 
2229 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
2230 {
2231 	struct ras_ih_data *data = &obj->ih_data;
2232 	struct amdgpu_iv_entry entry;
2233 
2234 	while (data->rptr != data->wptr) {
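		/* Make the producer's entry visible before copying it out;
		 * pairs with the wmb() in amdgpu_ras_interrupt_dispatch().
		 */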
2235 		rmb();
2236 		memcpy(&entry, &data->ring[data->rptr],
2237 				data->element_size);
2238 
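		/* Ensure the copy-out has completed before advancing rptr
		 * and handing the slot back to the producer.
		 */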
2239 		wmb();
2240 		data->rptr = (data->aligned_element_size +
2241 				data->rptr) % data->ring_size;
2242 
2243 		if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
2244 			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2245 				amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
2246 			else
2247 				amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
2248 		} else {
2249 			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2250 				amdgpu_ras_interrupt_umc_handler(obj, &entry);
2251 			else
2252 				dev_warn(obj->adev->dev,
2253 					"No RAS interrupt handler for non-UMC block with poison disabled.\n");
2254 		}
2255 	}
2256 }
2257 
2258 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
2259 {
2260 	struct ras_ih_data *data =
2261 		container_of(work, struct ras_ih_data, ih_work);
2262 	struct ras_manager *obj =
2263 		container_of(data, struct ras_manager, ih_data);
2264 
2265 	amdgpu_ras_interrupt_handler(obj);
2266 }
2267 
2268 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
2269 		struct ras_dispatch_if *info)
2270 {
2271 	struct ras_manager *obj;
2272 	struct ras_ih_data *data;
2273 
2274 	obj = amdgpu_ras_find_obj(adev, &info->head);
2275 	if (!obj)
2276 		return -EINVAL;
2277 
2278 	data = &obj->ih_data;
2279 
2280 	if (data->inuse == 0)
2281 		return 0;
2282 
	/* Note: the ring may overflow; there is no overflow check here. */
2284 	memcpy(&data->ring[data->wptr], info->entry,
2285 			data->element_size);
2286 
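	/* Ensure the entry is fully written before the wptr update makes
	 * it visible to the consumer; pairs with the rmb() in
	 * amdgpu_ras_interrupt_handler().
	 */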
2287 	wmb();
2288 	data->wptr = (data->aligned_element_size +
2289 			data->wptr) % data->ring_size;
2290 
2291 	schedule_work(&data->ih_work);
2292 
2293 	return 0;
2294 }
2295 
2296 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
2297 		struct ras_common_if *head)
2298 {
2299 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2300 	struct ras_ih_data *data;
2301 
2302 	if (!obj)
2303 		return -EINVAL;
2304 
2305 	data = &obj->ih_data;
2306 	if (data->inuse == 0)
2307 		return 0;
2308 
2309 	cancel_work_sync(&data->ih_work);
2310 
2311 	kfree(data->ring);
2312 	memset(data, 0, sizeof(*data));
2313 	put_obj(obj);
2314 
2315 	return 0;
2316 }
2317 
2318 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
2319 		struct ras_common_if *head)
2320 {
2321 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2322 	struct ras_ih_data *data;
2323 	struct amdgpu_ras_block_object *ras_obj;
2324 
2325 	if (!obj) {
		/* in case we register the IH before enabling the ras feature */
2327 		obj = amdgpu_ras_create_obj(adev, head);
2328 		if (!obj)
2329 			return -EINVAL;
2330 	} else
2331 		get_obj(obj);
2332 
2333 	ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
2334 
2335 	data = &obj->ih_data;
	/* add the callback etc. */
2337 	*data = (struct ras_ih_data) {
2338 		.inuse = 0,
2339 		.cb = ras_obj->ras_cb,
2340 		.element_size = sizeof(struct amdgpu_iv_entry),
2341 		.rptr = 0,
2342 		.wptr = 0,
2343 	};
2344 
2345 	INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
2346 
2347 	data->aligned_element_size = ALIGN(data->element_size, 8);
2348 	/* the ring can store 64 iv entries. */
2349 	data->ring_size = 64 * data->aligned_element_size;
2350 	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
2351 	if (!data->ring) {
2352 		put_obj(obj);
2353 		return -ENOMEM;
2354 	}
2355 
2356 	/* IH is ready */
2357 	data->inuse = 1;
2358 
2359 	return 0;
2360 }
2361 
2362 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
2363 {
2364 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2365 	struct ras_manager *obj, *tmp;
2366 
2367 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
2368 		amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
2369 	}
2370 
2371 	return 0;
2372 }
2373 /* ih end */
2374 
/* traverse all IPs except NBIO to query error counters */
2376 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev, enum ras_event_type type)
2377 {
2378 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2379 	struct ras_manager *obj;
2380 
2381 	if (!adev->ras_enabled || !con)
2382 		return;
2383 
2384 	list_for_each_entry(obj, &con->head, node) {
2385 		struct ras_query_if info = {
2386 			.head = obj->head,
2387 		};
2388 
2389 		/*
		 * The PCIE_BIF IP has a separate isr for the ras controller
		 * interrupt, and the specific ras counter query will be
		 * done in that isr. So skip such blocks in the common
		 * sync flood interrupt isr path.
2394 		 */
2395 		if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
2396 			continue;
2397 
2398 		/*
		 * This is a workaround for aldebaran: skip sending the
		 * message to smu to get the ecc_info table, because smu
		 * currently fails to handle that request.
		 * It should be removed once smu fixes the ecc_info table
		 * handling.
2403 		 */
2404 		if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
2405 		    (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2406 		     IP_VERSION(13, 0, 2)))
2407 			continue;
2408 
2409 		amdgpu_ras_query_error_status_with_event(adev, &info, type);
2410 
2411 		if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2412 			    IP_VERSION(11, 0, 2) &&
2413 		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2414 			    IP_VERSION(11, 0, 4) &&
2415 		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2416 			    IP_VERSION(13, 0, 0)) {
2417 			if (amdgpu_ras_reset_error_status(adev, info.head.block))
2418 				dev_warn(adev->dev, "Failed to reset error counter and error status");
2419 		}
2420 	}
2421 }
2422 
2423 /* Parse RdRspStatus and WrRspStatus */
2424 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
2425 					  struct ras_query_if *info)
2426 {
2427 	struct amdgpu_ras_block_object *block_obj;
2428 	/*
	 * Only two blocks need to query the read/write
	 * RspStatus at the current state
2431 	 */
2432 	if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
2433 		(info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
2434 		return;
2435 
2436 	block_obj = amdgpu_ras_get_ras_block(adev,
2437 					info->head.block,
2438 					info->head.sub_block_index);
2439 
2440 	if (!block_obj || !block_obj->hw_ops) {
2441 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
2442 			     get_ras_block_str(&info->head));
2443 		return;
2444 	}
2445 
2446 	if (block_obj->hw_ops->query_ras_error_status)
2447 		block_obj->hw_ops->query_ras_error_status(adev);
2448 
2449 }
2450 
2451 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
2452 {
2453 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2454 	struct ras_manager *obj;
2455 
2456 	if (!adev->ras_enabled || !con)
2457 		return;
2458 
2459 	list_for_each_entry(obj, &con->head, node) {
2460 		struct ras_query_if info = {
2461 			.head = obj->head,
2462 		};
2463 
2464 		amdgpu_ras_error_status_query(adev, &info);
2465 	}
2466 }
2467 
2468 /* recovery begin */
2469 
/* Return 0 on success.
 * The caller needs to free bps.
2472  */
2473 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
2474 		struct ras_badpage **bps, unsigned int *count)
2475 {
2476 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2477 	struct ras_err_handler_data *data;
2478 	int i = 0;
2479 	int ret = 0, status;
2480 
2481 	if (!con || !con->eh_data || !bps || !count)
2482 		return -EINVAL;
2483 
2484 	mutex_lock(&con->recovery_lock);
2485 	data = con->eh_data;
2486 	if (!data || data->count == 0) {
2487 		*bps = NULL;
2488 		ret = -EINVAL;
2489 		goto out;
2490 	}
2491 
2492 	*bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
2493 	if (!*bps) {
2494 		ret = -ENOMEM;
2495 		goto out;
2496 	}
2497 
2498 	for (; i < data->count; i++) {
2499 		(*bps)[i] = (struct ras_badpage){
2500 			.bp = data->bps[i].retired_page,
2501 			.size = AMDGPU_GPU_PAGE_SIZE,
2502 			.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
2503 		};
2504 		status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
2505 				data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);
2506 		if (status == -EBUSY)
2507 			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
2508 		else if (status == -ENOENT)
2509 			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
2510 	}
2511 
2512 	*count = data->count;
2513 out:
2514 	mutex_unlock(&con->recovery_lock);
2515 	return ret;
2516 }
2517 
2518 static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
2519 				   struct amdgpu_hive_info *hive, bool status)
2520 {
2521 	struct amdgpu_device *tmp_adev;
2522 
2523 	if (hive) {
2524 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
2525 			amdgpu_ras_set_fed(tmp_adev, status);
2526 	} else {
2527 		amdgpu_ras_set_fed(adev, status);
2528 	}
2529 }
2530 
2531 bool amdgpu_ras_in_recovery(struct amdgpu_device *adev)
2532 {
2533 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2534 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2535 	int hive_ras_recovery = 0;
2536 
2537 	if (hive) {
2538 		hive_ras_recovery = atomic_read(&hive->ras_recovery);
2539 		amdgpu_put_xgmi_hive(hive);
2540 	}
2541 
2542 	if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
2543 		return true;
2544 
2545 	return false;
2546 }
2547 
2548 static enum ras_event_type amdgpu_ras_get_fatal_error_event(struct amdgpu_device *adev)
2549 {
2550 	if (amdgpu_ras_intr_triggered())
2551 		return RAS_EVENT_TYPE_FATAL;
2552 	else
2553 		return RAS_EVENT_TYPE_POISON_CONSUMPTION;
2554 }
2555 
2556 static void amdgpu_ras_do_recovery(struct work_struct *work)
2557 {
2558 	struct amdgpu_ras *ras =
2559 		container_of(work, struct amdgpu_ras, recovery_work);
2560 	struct amdgpu_device *remote_adev = NULL;
2561 	struct amdgpu_device *adev = ras->adev;
2562 	struct list_head device_list, *device_list_handle =  NULL;
2563 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2564 	enum ras_event_type type;
2565 
2566 	if (hive) {
2567 		atomic_set(&hive->ras_recovery, 1);
2568 
		/* If any device which is part of the hive received a RAS fatal
		 * error interrupt, set the fatal error status on all. This
		 * condition requires a recovery, and the flag will be cleared
		 * as part of that recovery.
2573 		 */
2574 		list_for_each_entry(remote_adev, &hive->device_list,
2575 				    gmc.xgmi.head)
2576 			if (amdgpu_ras_get_fed_status(remote_adev)) {
2577 				amdgpu_ras_set_fed_all(adev, hive, true);
2578 				break;
2579 			}
2580 	}
2581 	if (!ras->disable_ras_err_cnt_harvest) {
2582 
2583 		/* Build list of devices to query RAS related errors */
2584 		if  (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
2585 			device_list_handle = &hive->device_list;
2586 		} else {
2587 			INIT_LIST_HEAD(&device_list);
2588 			list_add_tail(&adev->gmc.xgmi.head, &device_list);
2589 			device_list_handle = &device_list;
2590 		}
2591 
2592 		type = amdgpu_ras_get_fatal_error_event(adev);
2593 		list_for_each_entry(remote_adev,
2594 				device_list_handle, gmc.xgmi.head) {
2595 			amdgpu_ras_query_err_status(remote_adev);
2596 			amdgpu_ras_log_on_err_counter(remote_adev, type);
2597 		}
2598 
2599 	}
2600 
2601 	if (amdgpu_device_should_recover_gpu(ras->adev)) {
2602 		struct amdgpu_reset_context reset_context;
2603 		memset(&reset_context, 0, sizeof(reset_context));
2604 
2605 		reset_context.method = AMD_RESET_METHOD_NONE;
2606 		reset_context.reset_req_dev = adev;
2607 		reset_context.src = AMDGPU_RESET_SRC_RAS;
2608 
2609 		/* Perform full reset in fatal error mode */
2610 		if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
2611 			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2612 		else {
2613 			clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2614 
2615 			if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
2616 				ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
2617 				reset_context.method = AMD_RESET_METHOD_MODE2;
2618 			}
2619 
			/* A fatal error occurred in poison mode; mode1 reset is used to
			 * recover the gpu.
2622 			 */
2623 			if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
2624 				ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2625 				set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2626 
2627 				psp_fatal_error_recovery_quirk(&adev->psp);
2628 			}
2629 		}
2630 
2631 		amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2632 	}
2633 	atomic_set(&ras->in_recovery, 0);
2634 	if (hive) {
2635 		atomic_set(&hive->ras_recovery, 0);
2636 		amdgpu_put_xgmi_hive(hive);
2637 	}
2638 }
2639 
2640 /* alloc/realloc bps array */
2641 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2642 		struct ras_err_handler_data *data, int pages)
2643 {
2644 	unsigned int old_space = data->count + data->space_left;
2645 	unsigned int new_space = old_space + pages;
2646 	unsigned int align_space = ALIGN(new_space, 512);
2647 	void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
2648 
2649 	if (!bps) {
2650 		return -ENOMEM;
2651 	}
2652 
2653 	if (data->bps) {
2654 		memcpy(bps, data->bps,
2655 				data->count * sizeof(*data->bps));
2656 		kfree(data->bps);
2657 	}
2658 
2659 	data->bps = bps;
2660 	data->space_left += align_space - old_space;
2661 	return 0;
2662 }
2663 
/* It deals with vram only. */
2665 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
2666 		struct eeprom_table_record *bps, int pages)
2667 {
2668 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2669 	struct ras_err_handler_data *data;
2670 	int ret = 0;
2671 	uint32_t i;
2672 
2673 	if (!con || !con->eh_data || !bps || pages <= 0)
2674 		return 0;
2675 
2676 	mutex_lock(&con->recovery_lock);
2677 	data = con->eh_data;
2678 	if (!data)
2679 		goto out;
2680 
2681 	for (i = 0; i < pages; i++) {
2682 		if (amdgpu_ras_check_bad_page_unlock(con,
2683 			bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2684 			continue;
2685 
2686 		if (!data->space_left &&
2687 			amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2688 			ret = -ENOMEM;
2689 			goto out;
2690 		}
2691 
2692 		amdgpu_ras_reserve_page(adev, bps[i].retired_page);
2693 
2694 		memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
2695 		data->count++;
2696 		data->space_left--;
2697 	}
2698 out:
2699 	mutex_unlock(&con->recovery_lock);
2700 
2701 	return ret;
2702 }
2703 
2704 /*
 * Write the error record array to eeprom; the function should be
 * protected by recovery_lock.
 * new_cnt: newly added UE count, excluding reserved bad pages; can be NULL
2708  */
2709 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
2710 		unsigned long *new_cnt)
2711 {
2712 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2713 	struct ras_err_handler_data *data;
2714 	struct amdgpu_ras_eeprom_control *control;
2715 	int save_count;
2716 
2717 	if (!con || !con->eh_data) {
2718 		if (new_cnt)
2719 			*new_cnt = 0;
2720 
2721 		return 0;
2722 	}
2723 
2724 	mutex_lock(&con->recovery_lock);
2725 	control = &con->eeprom_control;
2726 	data = con->eh_data;
2727 	save_count = data->count - control->ras_num_recs;
2728 	mutex_unlock(&con->recovery_lock);
2729 
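	/* Report in units of retire_unit, i.e. the number of pages the umc
	 * retires per uncorrectable error.
	 */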
2730 	if (new_cnt)
2731 		*new_cnt = save_count / adev->umc.retire_unit;
2732 
2733 	/* only new entries are saved */
2734 	if (save_count > 0) {
2735 		if (amdgpu_ras_eeprom_append(control,
2736 					     &data->bps[control->ras_num_recs],
2737 					     save_count)) {
2738 			dev_err(adev->dev, "Failed to save EEPROM table data!");
2739 			return -EIO;
2740 		}
2741 
2742 		dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
2743 	}
2744 
2745 	return 0;
2746 }
2747 
2748 /*
 * Read the error record array in eeprom and reserve enough space for
 * storing new bad pages.
2751  */
2752 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
2753 {
2754 	struct amdgpu_ras_eeprom_control *control =
2755 		&adev->psp.ras_context.ras->eeprom_control;
2756 	struct eeprom_table_record *bps;
2757 	int ret;
2758 
2759 	/* no bad page record, skip eeprom access */
2760 	if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
2761 		return 0;
2762 
2763 	bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
2764 	if (!bps)
2765 		return -ENOMEM;
2766 
2767 	ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
2768 	if (ret)
2769 		dev_err(adev->dev, "Failed to load EEPROM table records!");
2770 	else
2771 		ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
2772 
2773 	kfree(bps);
2774 	return ret;
2775 }
2776 
2777 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
2778 				uint64_t addr)
2779 {
2780 	struct ras_err_handler_data *data = con->eh_data;
2781 	int i;
2782 
2783 	addr >>= AMDGPU_GPU_PAGE_SHIFT;
2784 	for (i = 0; i < data->count; i++)
2785 		if (addr == data->bps[i].retired_page)
2786 			return true;
2787 
2788 	return false;
2789 }
2790 
2791 /*
 * Check if an address belongs to a bad page.
2793  *
2794  * Note: this check is only for umc block
2795  */
2796 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
2797 				uint64_t addr)
2798 {
2799 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2800 	bool ret = false;
2801 
2802 	if (!con || !con->eh_data)
2803 		return ret;
2804 
2805 	mutex_lock(&con->recovery_lock);
2806 	ret = amdgpu_ras_check_bad_page_unlock(con, addr);
2807 	mutex_unlock(&con->recovery_lock);
2808 	return ret;
2809 }
2810 
2811 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
2812 					  uint32_t max_count)
2813 {
2814 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2815 
2816 	/*
	 * Justification of the bad_page_cnt_threshold value in the ras
	 * structure.
	 *
	 * Generally, 0 <= amdgpu_bad_page_threshold <= max record length
	 * in eeprom, or amdgpu_bad_page_threshold == -2; these introduce
	 * two scenarios accordingly.
	 *
	 * Bad page retirement enablement:
	 *    - If amdgpu_bad_page_threshold = -2,
	 *      bad_page_cnt_threshold = typical value by formula.
	 *
	 *    - When the value from the user satisfies
	 *      0 < amdgpu_bad_page_threshold < max record length in eeprom,
	 *      use it directly.
	 *
	 * Bad page retirement disablement:
	 *    - If amdgpu_bad_page_threshold = 0, bad page retirement
	 *      functionality is disabled, and bad_page_cnt_threshold will
	 *      have no effect.
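	 *
	 * As a worked example of the formula (values illustrative): with
	 * 16GB of VRAM, 16 * 1024MB / 100MB = 163 (truncated), so
	 * bad_page_cnt_threshold = min(163, max_count).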
2834 	 */
2835 
2836 	if (amdgpu_bad_page_threshold < 0) {
2837 		u64 val = adev->gmc.mc_vram_size;
2838 
2839 		do_div(val, RAS_BAD_PAGE_COVER);
2840 		con->bad_page_cnt_threshold = min(lower_32_bits(val),
2841 						  max_count);
2842 	} else {
2843 		con->bad_page_cnt_threshold = min_t(int, max_count,
2844 						    amdgpu_bad_page_threshold);
2845 	}
2846 }
2847 
2848 int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
2849 		enum amdgpu_ras_block block, uint16_t pasid,
2850 		pasid_notify pasid_fn, void *data, uint32_t reset)
2851 {
2852 	int ret = 0;
2853 	struct ras_poison_msg poison_msg;
2854 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2855 
2856 	memset(&poison_msg, 0, sizeof(poison_msg));
2857 	poison_msg.block = block;
2858 	poison_msg.pasid = pasid;
2859 	poison_msg.reset = reset;
2860 	poison_msg.pasid_fn = pasid_fn;
2861 	poison_msg.data = data;
2862 
2863 	ret = kfifo_put(&con->poison_fifo, poison_msg);
2864 	if (!ret) {
2865 		dev_err(adev->dev, "Poison message fifo is full!\n");
2866 		return -ENOSPC;
2867 	}
2868 
2869 	return 0;
2870 }
2871 
2872 static int amdgpu_ras_get_poison_req(struct amdgpu_device *adev,
2873 		struct ras_poison_msg *poison_msg)
2874 {
2875 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2876 
2877 	return kfifo_get(&con->poison_fifo, poison_msg);
2878 }
2879 
2880 static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
2881 {
2882 	mutex_init(&ecc_log->lock);
2883 
2884 	INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
2885 	ecc_log->de_queried_count = 0;
2886 	ecc_log->prev_de_queried_count = 0;
2887 }
2888 
2889 static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
2890 {
2891 	struct radix_tree_iter iter;
2892 	void __rcu **slot;
2893 	struct ras_ecc_err *ecc_err;
2894 
2895 	mutex_lock(&ecc_log->lock);
2896 	radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) {
2897 		ecc_err = radix_tree_deref_slot(slot);
2898 		kfree(ecc_err->err_pages.pfn);
2899 		kfree(ecc_err);
2900 		radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot);
2901 	}
2902 	mutex_unlock(&ecc_log->lock);
2903 
2904 	mutex_destroy(&ecc_log->lock);
2905 	ecc_log->de_queried_count = 0;
2906 	ecc_log->prev_de_queried_count = 0;
2907 }
2908 
2909 static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con,
2910 				uint32_t delayed_ms)
2911 {
2912 	int ret;
2913 
2914 	mutex_lock(&con->umc_ecc_log.lock);
2915 	ret = radix_tree_tagged(&con->umc_ecc_log.de_page_tree,
2916 			UMC_ECC_NEW_DETECTED_TAG);
2917 	mutex_unlock(&con->umc_ecc_log.lock);
2918 
2919 	if (ret)
2920 		schedule_delayed_work(&con->page_retirement_dwork,
2921 			msecs_to_jiffies(delayed_ms));
2922 
2923 	return ret ? true : false;
2924 }
2925 
2926 static void amdgpu_ras_do_page_retirement(struct work_struct *work)
2927 {
2928 	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
2929 					      page_retirement_dwork.work);
2930 	struct amdgpu_device *adev = con->adev;
2931 	struct ras_err_data err_data;
2932 	unsigned long err_cnt;
2933 
2934 	/* If gpu reset is ongoing, delay retiring the bad pages */
2935 	if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
2936 		amdgpu_ras_schedule_retirement_dwork(con,
2937 				AMDGPU_RAS_RETIRE_PAGE_INTERVAL * 3);
2938 		return;
2939 	}
2940 
2941 	amdgpu_ras_error_data_init(&err_data);
2942 
2943 	amdgpu_umc_handle_bad_pages(adev, &err_data);
2944 	err_cnt = err_data.err_addr_cnt;
2945 
2946 	amdgpu_ras_error_data_fini(&err_data);
2947 
2948 	if (err_cnt && amdgpu_ras_is_rma(adev))
2949 		amdgpu_ras_reset_gpu(adev);
2950 
2951 	amdgpu_ras_schedule_retirement_dwork(con,
2952 			AMDGPU_RAS_RETIRE_PAGE_INTERVAL);
2953 }
2954 
2955 static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
2956 				uint32_t poison_creation_count)
2957 {
2958 	int ret = 0;
2959 	struct ras_ecc_log_info *ecc_log;
2960 	struct ras_query_if info;
2961 	uint32_t timeout = 0;
2962 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2963 	uint64_t de_queried_count;
2964 	uint32_t new_detect_count, total_detect_count;
2965 	uint32_t need_query_count = poison_creation_count;
2966 	bool query_data_timeout = false;
2967 	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
2968 
2969 	memset(&info, 0, sizeof(info));
2970 	info.head.block = AMDGPU_RAS_BLOCK__UMC;
2971 
2972 	ecc_log = &ras->umc_ecc_log;
2973 	total_detect_count = 0;
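	/* Poll until all expected deferred errors have been queried; if no
	 * new errors show up within MAX_UMC_POISON_POLLING_TIME_ASYNC
	 * consecutive 1ms polls, give up and report a timeout.
	 */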
2974 	do {
2975 		ret = amdgpu_ras_query_error_status_with_event(adev, &info, type);
2976 		if (ret)
2977 			return ret;
2978 
2979 		de_queried_count = ecc_log->de_queried_count;
2980 		if (de_queried_count > ecc_log->prev_de_queried_count) {
2981 			new_detect_count = de_queried_count - ecc_log->prev_de_queried_count;
2982 			ecc_log->prev_de_queried_count = de_queried_count;
2983 			timeout = 0;
2984 		} else {
2985 			new_detect_count = 0;
2986 		}
2987 
2988 		if (new_detect_count) {
2989 			total_detect_count += new_detect_count;
2990 		} else {
2991 			if (!timeout && need_query_count)
2992 				timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
2993 
2994 			if (timeout) {
2995 				if (!--timeout) {
2996 					query_data_timeout = true;
2997 					break;
2998 				}
2999 				msleep(1);
3000 			}
3001 		}
3002 	} while (total_detect_count < need_query_count);
3003 
3004 	if (query_data_timeout) {
3005 		dev_warn(adev->dev, "Can't find deferred error! count: %u\n",
3006 			(need_query_count - total_detect_count));
3007 		return -ENOENT;
3008 	}
3009 
3010 	if (total_detect_count)
3011 		schedule_delayed_work(&ras->page_retirement_dwork, 0);
3012 
3013 	return 0;
3014 }
3015 
3016 static void amdgpu_ras_clear_poison_fifo(struct amdgpu_device *adev)
3017 {
3018 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3019 	struct ras_poison_msg msg;
3020 	int ret;
3021 
3022 	do {
3023 		ret = kfifo_get(&con->poison_fifo, &msg);
3024 	} while (ret);
3025 }
3026 
3027 static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
3028 			uint32_t msg_count, uint32_t *gpu_reset)
3029 {
3030 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3031 	uint32_t reset_flags = 0, reset = 0;
3032 	struct ras_poison_msg msg;
3033 	int ret, i;
3034 
3035 	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
3036 
3037 	for (i = 0; i < msg_count; i++) {
3038 		ret = amdgpu_ras_get_poison_req(adev, &msg);
3039 		if (!ret)
3040 			continue;
3041 
3042 		if (msg.pasid_fn)
3043 			msg.pasid_fn(adev, msg.pasid, msg.data);
3044 
3045 		reset_flags |= msg.reset;
3046 	}
3047 
3048 	/* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
3049 	if (reset_flags && !amdgpu_ras_is_rma(adev)) {
3050 		if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
3051 			reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
3052 		else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET)
3053 			reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
3054 		else
3055 			reset = reset_flags;
3056 
3057 		flush_delayed_work(&con->page_retirement_dwork);
3058 
3059 		con->gpu_reset_flags |= reset;
3060 		amdgpu_ras_reset_gpu(adev);
3061 
3062 		*gpu_reset = reset;
3063 
3064 		/* Wait for gpu recovery to complete */
3065 		flush_work(&con->recovery_work);
3066 	}
3067 
3068 	return 0;
3069 }
3070 
3071 static int amdgpu_ras_page_retirement_thread(void *param)
3072 {
3073 	struct amdgpu_device *adev = (struct amdgpu_device *)param;
3074 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3075 	uint32_t poison_creation_count, msg_count;
3076 	uint32_t gpu_reset;
3077 	int ret;
3078 
3079 	while (!kthread_should_stop()) {
3080 
3081 		wait_event_interruptible(con->page_retirement_wq,
3082 				kthread_should_stop() ||
3083 				atomic_read(&con->page_retirement_req_cnt));
3084 
3085 		if (kthread_should_stop())
3086 			break;
3087 
3088 		gpu_reset = 0;
3089 
3090 		do {
3091 			poison_creation_count = atomic_read(&con->poison_creation_count);
3092 			ret = amdgpu_ras_poison_creation_handler(adev, poison_creation_count);
3093 			if (ret == -EIO)
3094 				break;
3095 
3096 			if (poison_creation_count) {
3097 				atomic_sub(poison_creation_count, &con->poison_creation_count);
3098 				atomic_sub(poison_creation_count, &con->page_retirement_req_cnt);
3099 			}
3100 		} while (atomic_read(&con->poison_creation_count));
3101 
3102 		if (ret != -EIO) {
3103 			msg_count = kfifo_len(&con->poison_fifo);
3104 			if (msg_count) {
3105 				ret = amdgpu_ras_poison_consumption_handler(adev,
3106 						msg_count, &gpu_reset);
3107 				if ((ret != -EIO) &&
3108 				    (gpu_reset != AMDGPU_RAS_GPU_RESET_MODE1_RESET))
3109 					atomic_sub(msg_count, &con->page_retirement_req_cnt);
3110 			}
3111 		}
3112 
3113 		if ((ret == -EIO) || (gpu_reset == AMDGPU_RAS_GPU_RESET_MODE1_RESET)) {
			/* a gpu mode-1 reset is ongoing, or a ras mode-1 reset has just completed */
3115 			/* Clear poison creation request */
3116 			atomic_set(&con->poison_creation_count, 0);
3117 
3118 			/* Clear poison fifo */
3119 			amdgpu_ras_clear_poison_fifo(adev);
3120 
3121 			/* Clear all poison requests */
3122 			atomic_set(&con->page_retirement_req_cnt, 0);
3123 
3124 			if (ret == -EIO) {
3125 				/* Wait for mode-1 reset to complete */
3126 				down_read(&adev->reset_domain->sem);
3127 				up_read(&adev->reset_domain->sem);
3128 			}
3129 
3130 			/* Wake up work to save bad pages to eeprom */
3131 			schedule_delayed_work(&con->page_retirement_dwork, 0);
3132 		} else if (gpu_reset) {
3133 			/* gpu just completed mode-2 reset or other reset */
3134 			/* Clear poison consumption messages cached in fifo */
3135 			msg_count = kfifo_len(&con->poison_fifo);
3136 			if (msg_count) {
3137 				amdgpu_ras_clear_poison_fifo(adev);
3138 				atomic_sub(msg_count, &con->page_retirement_req_cnt);
3139 			}
3140 
3141 			/* Wake up work to save bad pages to eeprom */
3142 			schedule_delayed_work(&con->page_retirement_dwork, 0);
3143 		}
3144 	}
3145 
3146 	return 0;
3147 }
3148 
3149 int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
3150 {
3151 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3152 	int ret;
3153 
3154 	if (!con || amdgpu_sriov_vf(adev))
3155 		return 0;
3156 
3157 	ret = amdgpu_ras_eeprom_init(&con->eeprom_control);
3158 
3159 	if (ret)
3160 		return ret;
3161 
3162 	/* HW not usable */
3163 	if (amdgpu_ras_is_rma(adev))
3164 		return -EHWPOISON;
3165 
3166 	if (con->eeprom_control.ras_num_recs) {
3167 		ret = amdgpu_ras_load_bad_pages(adev);
3168 		if (ret)
3169 			return ret;
3170 
3171 		amdgpu_dpm_send_hbm_bad_pages_num(
3172 			adev, con->eeprom_control.ras_num_recs);
3173 
3174 		if (con->update_channel_flag == true) {
3175 			amdgpu_dpm_send_hbm_bad_channel_flag(
3176 				adev, con->eeprom_control.bad_channel_bitmap);
3177 			con->update_channel_flag = false;
3178 		}
3179 	}
3180 
3181 	return ret;
3182 }
3183 
3184 int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
3185 {
3186 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3187 	struct ras_err_handler_data **data;
3188 	u32  max_eeprom_records_count = 0;
3189 	int ret;
3190 
3191 	if (!con || amdgpu_sriov_vf(adev))
3192 		return 0;
3193 
3194 	/* Allow access to RAS EEPROM via debugfs, when the ASIC
3195 	 * supports RAS and debugfs is enabled, but when
3196 	 * adev->ras_enabled is unset, i.e. when "ras_enable"
3197 	 * module parameter is set to 0.
3198 	 */
3199 	con->adev = adev;
3200 
3201 	if (!adev->ras_enabled)
3202 		return 0;
3203 
3204 	data = &con->eh_data;
3205 	*data = kzalloc(sizeof(**data), GFP_KERNEL);
3206 	if (!*data) {
3207 		ret = -ENOMEM;
3208 		goto out;
3209 	}
3210 
3211 	mutex_init(&con->recovery_lock);
3212 	INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
3213 	atomic_set(&con->in_recovery, 0);
3214 	con->eeprom_control.bad_channel_bitmap = 0;
3215 
3216 	max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
3217 	amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
3218 
3219 	if (init_bp_info) {
3220 		ret = amdgpu_ras_init_badpage_info(adev);
3221 		if (ret)
3222 			goto free;
3223 	}
3224 
3225 	mutex_init(&con->page_rsv_lock);
3226 	INIT_KFIFO(con->poison_fifo);
3227 	mutex_init(&con->page_retirement_lock);
3228 	init_waitqueue_head(&con->page_retirement_wq);
3229 	atomic_set(&con->page_retirement_req_cnt, 0);
3230 	atomic_set(&con->poison_creation_count, 0);
3231 	con->page_retirement_thread =
3232 		kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
3233 	if (IS_ERR(con->page_retirement_thread)) {
3234 		con->page_retirement_thread = NULL;
3235 		dev_warn(adev->dev, "Failed to create umc_page_retirement thread!!!\n");
3236 	}
3237 
3238 	INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement);
3239 	amdgpu_ras_ecc_log_init(&con->umc_ecc_log);
3240 #ifdef CONFIG_X86_MCE_AMD
3241 	if ((adev->asic_type == CHIP_ALDEBARAN) &&
3242 	    (adev->gmc.xgmi.connected_to_cpu))
3243 		amdgpu_register_bad_pages_mca_notifier(adev);
3244 #endif
3245 	return 0;
3246 
3247 free:
3248 	kfree((*data)->bps);
3249 	kfree(*data);
3250 	con->eh_data = NULL;
3251 out:
3252 	dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
3253 
3254 	/*
3255 	 * Except error threshold exceeding case, other failure cases in this
3256 	 * function would not fail amdgpu driver init.
3257 	 */
3258 	if (!amdgpu_ras_is_rma(adev))
3259 		ret = 0;
3260 	else
3261 		ret = -EINVAL;
3262 
3263 	return ret;
3264 }
3265 
3266 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
3267 {
3268 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3269 	struct ras_err_handler_data *data = con->eh_data;
3270 	int max_flush_timeout = MAX_FLUSH_RETIRE_DWORK_TIMES;
3271 	bool ret;
3272 
	/* If recovery_init failed to init it, fini is useless. */
3274 	if (!data)
3275 		return 0;
3276 
3277 	/* Save all cached bad pages to eeprom */
3278 	do {
3279 		flush_delayed_work(&con->page_retirement_dwork);
3280 		ret = amdgpu_ras_schedule_retirement_dwork(con, 0);
3281 	} while (ret && max_flush_timeout--);
3282 
3283 	if (con->page_retirement_thread)
3284 		kthread_stop(con->page_retirement_thread);
3285 
3286 	atomic_set(&con->page_retirement_req_cnt, 0);
3287 	atomic_set(&con->poison_creation_count, 0);
3288 
3289 	mutex_destroy(&con->page_rsv_lock);
3290 
3291 	cancel_work_sync(&con->recovery_work);
3292 
3293 	cancel_delayed_work_sync(&con->page_retirement_dwork);
3294 
3295 	amdgpu_ras_ecc_log_fini(&con->umc_ecc_log);
3296 
3297 	mutex_lock(&con->recovery_lock);
3298 	con->eh_data = NULL;
3299 	kfree(data->bps);
3300 	kfree(data);
3301 	mutex_unlock(&con->recovery_lock);
3302 
3303 	return 0;
3304 }
3305 /* recovery end */
3306 
3307 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
3308 {
3309 	if (amdgpu_sriov_vf(adev)) {
3310 		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3311 		case IP_VERSION(13, 0, 2):
3312 		case IP_VERSION(13, 0, 6):
3313 		case IP_VERSION(13, 0, 14):
3314 			return true;
3315 		default:
3316 			return false;
3317 		}
3318 	}
3319 
3320 	if (adev->asic_type == CHIP_IP_DISCOVERY) {
3321 		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3322 		case IP_VERSION(13, 0, 0):
3323 		case IP_VERSION(13, 0, 6):
3324 		case IP_VERSION(13, 0, 10):
3325 		case IP_VERSION(13, 0, 14):
3326 			return true;
3327 		default:
3328 			return false;
3329 		}
3330 	}
3331 
3332 	return adev->asic_type == CHIP_VEGA10 ||
3333 		adev->asic_type == CHIP_VEGA20 ||
3334 		adev->asic_type == CHIP_ARCTURUS ||
3335 		adev->asic_type == CHIP_ALDEBARAN ||
3336 		adev->asic_type == CHIP_SIENNA_CICHLID;
3337 }
3338 
3339 /*
 * This is a workaround for the vega20 workstation sku:
 * force enable gfx ras and ignore the vbios gfx ras flag,
 * because GC EDC can not be written.
3343  */
3344 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
3345 {
3346 	struct atom_context *ctx = adev->mode_info.atom_context;
3347 
3348 	if (!ctx)
3349 		return;
3350 
3351 	if (strnstr(ctx->vbios_pn, "D16406",
3352 		    sizeof(ctx->vbios_pn)) ||
3353 		strnstr(ctx->vbios_pn, "D36002",
3354 			sizeof(ctx->vbios_pn)))
3355 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
3356 }
3357 
/* Query ras capability via atomfirmware interface */
3359 static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev)
3360 {
3361 	/* mem_ecc cap */
3362 	if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
3363 		dev_info(adev->dev, "MEM ECC is active.\n");
3364 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
3365 					 1 << AMDGPU_RAS_BLOCK__DF);
3366 	} else {
3367 		dev_info(adev->dev, "MEM ECC is not presented.\n");
3368 	}
3369 
3370 	/* sram_ecc cap */
3371 	if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
3372 		dev_info(adev->dev, "SRAM ECC is active.\n");
3373 		if (!amdgpu_sriov_vf(adev))
3374 			adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
3375 						  1 << AMDGPU_RAS_BLOCK__DF);
3376 		else
3377 			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
3378 						 1 << AMDGPU_RAS_BLOCK__SDMA |
3379 						 1 << AMDGPU_RAS_BLOCK__GFX);
3380 
3381 		/*
		 * VCN/JPEG RAS can be supported in both bare metal and
		 * SRIOV environments
3384 		 */
3385 		if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
3386 		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
3387 		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
3388 			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
3389 						 1 << AMDGPU_RAS_BLOCK__JPEG);
3390 		else
3391 			adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
3392 						  1 << AMDGPU_RAS_BLOCK__JPEG);
3393 
3394 		/*
3395 		 * XGMI RAS is not supported if xgmi num physical nodes
3396 		 * is zero
3397 		 */
3398 		if (!adev->gmc.xgmi.num_physical_nodes)
3399 			adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
3400 	} else {
3401 		dev_info(adev->dev, "SRAM ECC is not presented.\n");
3402 	}
3403 }
3404 
3405 /* Query poison mode from umc/df IP callbacks */
3406 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
3407 {
3408 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3409 	bool df_poison, umc_poison;
3410 
3411 	/* poison setting is useless on SRIOV guest */
3412 	if (amdgpu_sriov_vf(adev) || !con)
3413 		return;
3414 
3415 	/* Init poison supported flag, the default value is false */
3416 	if (adev->gmc.xgmi.connected_to_cpu ||
3417 	    adev->gmc.is_app_apu) {
3418 		/* enabled by default when GPU is connected to CPU */
3419 		con->poison_supported = true;
3420 	} else if (adev->df.funcs &&
3421 	    adev->df.funcs->query_ras_poison_mode &&
3422 	    adev->umc.ras &&
3423 	    adev->umc.ras->query_ras_poison_mode) {
3424 		df_poison =
3425 			adev->df.funcs->query_ras_poison_mode(adev);
3426 		umc_poison =
3427 			adev->umc.ras->query_ras_poison_mode(adev);
3428 
		/* Only if poison is set in both DF and UMC can we support it */
3430 		if (df_poison && umc_poison)
3431 			con->poison_supported = true;
3432 		else if (df_poison != umc_poison)
3433 			dev_warn(adev->dev,
3434 				"Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
3435 				df_poison, umc_poison);
3436 	}
3437 }
3438 
3439 /*
 * Check the hardware's ras ability, which will be saved in hw_supported.
 * If the hardware does not support ras, we can skip some ras initialization
 * and forbid some ras operations from the IPs.
 * If the software itself, say a boot parameter, limits the ras ability, we
 * still need to allow the IPs to do some limited operations, like disable.
 * In such a case we have to initialize ras as normal, but need to check
 * whether the operation is allowed or not in each function.
3447  */
3448 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
3449 {
3450 	adev->ras_hw_enabled = adev->ras_enabled = 0;
3451 
3452 	if (!amdgpu_ras_asic_supported(adev))
3453 		return;
3454 
3455 	/* query ras capability from psp */
3456 	if (amdgpu_psp_get_ras_capability(&adev->psp))
3457 		goto init_ras_enabled_flag;
3458 
	/* query ras capability from bios */
3460 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
3461 		amdgpu_ras_query_ras_capablity_from_vbios(adev);
3462 	} else {
		/* the driver only manages a few IP blocks' RAS features
		 * when the GPU is connected to the CPU through XGMI */
3465 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
3466 					   1 << AMDGPU_RAS_BLOCK__SDMA |
3467 					   1 << AMDGPU_RAS_BLOCK__MMHUB);
3468 	}
3469 
3470 	/* apply asic specific settings (vega20 only for now) */
3471 	amdgpu_ras_get_quirks(adev);
3472 
3473 	/* query poison mode from umc/df ip callback */
3474 	amdgpu_ras_query_poison_mode(adev);
3475 
3476 init_ras_enabled_flag:
3477 	/* hw_supported needs to be aligned with RAS block mask. */
3478 	adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
3479 
3480 	adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
3481 		adev->ras_hw_enabled & amdgpu_ras_mask;
3482 
3483 	/* aca is disabled by default */
3484 	adev->aca.is_enabled = false;
3485 
	/* the bad page feature is not applicable to the specific app platform */
3487 	if (adev->gmc.is_app_apu &&
3488 	    amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(12, 0, 0))
3489 		amdgpu_bad_page_threshold = 0;
3490 }
3491 
3492 static void amdgpu_ras_counte_dw(struct work_struct *work)
3493 {
3494 	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
3495 					      ras_counte_delay_work.work);
3496 	struct amdgpu_device *adev = con->adev;
3497 	struct drm_device *dev = adev_to_drm(adev);
3498 	unsigned long ce_count, ue_count;
3499 	int res;
3500 
3501 	res = pm_runtime_get_sync(dev->dev);
3502 	if (res < 0)
3503 		goto Out;
3504 
	/* Cache new values. */
3507 	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
3508 		atomic_set(&con->ras_ce_count, ce_count);
3509 		atomic_set(&con->ras_ue_count, ue_count);
3510 	}
3511 
3512 	pm_runtime_mark_last_busy(dev->dev);
3513 Out:
3514 	pm_runtime_put_autosuspend(dev->dev);
3515 }
3516 
3517 static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
3518 {
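	/* Poison support is conditional; the other error types are always
	 * part of the schema, hence the explicit grouping in the return
	 * expression.
	 */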
	return (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) |
			AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
			AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
			AMDGPU_RAS_ERROR__PARITY;
3523 }
3524 
3525 static void ras_event_mgr_init(struct ras_event_manager *mgr)
3526 {
3527 	struct ras_event_state *event_state;
3528 	int i;
3529 
3530 	memset(mgr, 0, sizeof(*mgr));
3531 	atomic64_set(&mgr->seqno, 0);
3532 
3533 	for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) {
3534 		event_state = &mgr->event_state[i];
3535 		event_state->last_seqno = RAS_EVENT_INVALID_ID;
3536 		atomic64_set(&event_state->count, 0);
3537 	}
3538 }
3539 
3540 static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev)
3541 {
3542 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3543 	struct amdgpu_hive_info *hive;
3544 
3545 	if (!ras)
3546 		return;
3547 
3548 	hive = amdgpu_get_xgmi_hive(adev);
3549 	ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr;
3550 
3551 	/* init event manager with node 0 on xgmi system */
3552 	if (!amdgpu_in_reset(adev)) {
3553 		if (!hive || adev->gmc.xgmi.node_id == 0)
3554 			ras_event_mgr_init(ras->event_mgr);
3555 	}
3556 
3557 	if (hive)
3558 		amdgpu_put_xgmi_hive(hive);
3559 }
3560 
3561 static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev)
3562 {
3563 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3564 
3565 	if (!con || (adev->flags & AMD_IS_APU))
3566 		return;
3567 
3568 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3569 	case IP_VERSION(13, 0, 2):
3570 	case IP_VERSION(13, 0, 6):
3571 	case IP_VERSION(13, 0, 14):
3572 		con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE;
3573 		break;
3574 	default:
3575 		break;
3576 	}
3577 }
3578 
3579 int amdgpu_ras_init(struct amdgpu_device *adev)
3580 {
3581 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3582 	int r;
3583 
3584 	if (con)
3585 		return 0;
3586 
3587 	con = kzalloc(sizeof(*con) +
3588 			sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
3589 			sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
3590 			GFP_KERNEL);
3591 	if (!con)
3592 		return -ENOMEM;
3593 
3594 	con->adev = adev;
3595 	INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
3596 	atomic_set(&con->ras_ce_count, 0);
3597 	atomic_set(&con->ras_ue_count, 0);
3598 
3599 	con->objs = (struct ras_manager *)(con + 1);
3600 
3601 	amdgpu_ras_set_context(adev, con);
3602 
3603 	amdgpu_ras_check_supported(adev);
3604 
3605 	if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
3606 		/* set the gfx block ras context feature for VEGA20 Gaming, so
3607 		 * a ras disable cmd is sent to the ras TA during ras late init.
3608 		 */
3609 		if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
3610 			con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
3611 
3612 			return 0;
3613 		}
3614 
3615 		r = 0;
3616 		goto release_con;
3617 	}
3618 
3619 	con->update_channel_flag = false;
3620 	con->features = 0;
3621 	con->schema = 0;
3622 	INIT_LIST_HEAD(&con->head);
3623 	/* Might need to get this flag from vbios. */
3624 	con->flags = RAS_DEFAULT_FLAGS;
3625 
3626 	/* initialize the nbio ras function ahead of any other
3627 	 * ras functions so the hardware fatal error interrupt
3628 	 * can be enabled as early as possible */
3629 	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
3630 	case IP_VERSION(7, 4, 0):
3631 	case IP_VERSION(7, 4, 1):
3632 	case IP_VERSION(7, 4, 4):
3633 		if (!adev->gmc.xgmi.connected_to_cpu)
3634 			adev->nbio.ras = &nbio_v7_4_ras;
3635 		break;
3636 	case IP_VERSION(4, 3, 0):
3637 		if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
3638 			/* unlike other generations of nbio ras,
3639 			 * nbio v4_3 only supports the fatal error interrupt,
3640 			 * which informs software that DF is frozen due to a
3641 			 * system fatal error event. The driver should not
3642 			 * enable nbio ras in such a case. Instead,
3643 			 * check DF RAS */
3644 			adev->nbio.ras = &nbio_v4_3_ras;
3645 		break;
3646 	case IP_VERSION(7, 9, 0):
3647 		if (!adev->gmc.is_app_apu)
3648 			adev->nbio.ras = &nbio_v7_9_ras;
3649 		break;
3650 	default:
3651 		/* nbio ras is not available */
3652 		break;
3653 	}
3654 
3655 	/* the nbio ras block needs to be enabled ahead of other ras blocks
3656 	 * to handle fatal errors */
3657 	r = amdgpu_nbio_ras_sw_init(adev);
3658 	if (r)
3659 		goto release_con;
3660 
3661 	if (adev->nbio.ras &&
3662 	    adev->nbio.ras->init_ras_controller_interrupt) {
3663 		r = adev->nbio.ras->init_ras_controller_interrupt(adev);
3664 		if (r)
3665 			goto release_con;
3666 	}
3667 
3668 	if (adev->nbio.ras &&
3669 	    adev->nbio.ras->init_ras_err_event_athub_interrupt) {
3670 		r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
3671 		if (r)
3672 			goto release_con;
3673 	}
3674 
3675 	/* Pack socket_id into ras feature mask bits[31:29] */
3676 	if (adev->smuio.funcs &&
3677 	    adev->smuio.funcs->get_socket_id)
3678 		con->features |= ((adev->smuio.funcs->get_socket_id(adev)) <<
3679 					AMDGPU_RAS_FEATURES_SOCKETID_SHIFT);
3680 
3681 	/* Get RAS schema for particular SOC */
3682 	con->schema = amdgpu_get_ras_schema(adev);
3683 
3684 	amdgpu_ras_init_reserved_vram_size(adev);
3685 
3686 	if (amdgpu_ras_fs_init(adev)) {
3687 		r = -EINVAL;
3688 		goto release_con;
3689 	}
3690 
3691 	if (amdgpu_ras_aca_is_supported(adev)) {
3692 		if (amdgpu_aca_is_enabled(adev))
3693 			r = amdgpu_aca_init(adev);
3694 		else
3695 			r = amdgpu_mca_init(adev);
3696 		if (r)
3697 			goto release_con;
3698 	}
3699 
3700 	dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
3701 		 "hardware ability[%x] ras_mask[%x]\n",
3702 		 adev->ras_hw_enabled, adev->ras_enabled);
3703 
3704 	return 0;
3705 release_con:
3706 	amdgpu_ras_set_context(adev, NULL);
3707 	kfree(con);
3708 
3709 	return r;
3710 }
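
/*
 * Illustrative call flow (a sketch; error handling and surrounding device
 * init elided): the common device init path is expected to call this pair
 * around IP init, with amdgpu_ras_fini() unwinding on teardown.
 *
 *	r = amdgpu_ras_init(adev);	(before IP blocks initialize)
 *	...
 *	r = amdgpu_ras_late_init(adev);	(after IP late init)
 */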
3711 
3712 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
3713 {
3714 	if (adev->gmc.xgmi.connected_to_cpu ||
3715 	    adev->gmc.is_app_apu)
3716 		return 1;
3717 	return 0;
3718 }
3719 
3720 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
3721 					struct ras_common_if *ras_block)
3722 {
3723 	struct ras_query_if info = {
3724 		.head = *ras_block,
3725 	};
3726 
3727 	if (!amdgpu_persistent_edc_harvesting_supported(adev))
3728 		return 0;
3729 
3730 	if (amdgpu_ras_query_error_status(adev, &info) != 0)
3731 		DRM_WARN("RAS init harvest failure\n");
3732 
3733 	if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
3734 		DRM_WARN("RAS init harvest reset failure\n");
3735 
3736 	return 0;
3737 }
3738 
3739 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
3740 {
3741 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3742 
3743 	if (!con)
3744 		return false;
3745 
3746 	return con->poison_supported;
3747 }
3748 
3749 /* helper function to handle common stuff in ip late init phase */
3750 int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
3751 			 struct ras_common_if *ras_block)
3752 {
3753 	struct amdgpu_ras_block_object *ras_obj = NULL;
3754 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3755 	struct ras_query_if *query_info;
3756 	unsigned long ue_count, ce_count;
3757 	int r;
3758 
3759 	/* disable RAS feature per IP block if it is not supported */
3760 	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
3761 		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
3762 		return 0;
3763 	}
3764 
3765 	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
3766 	if (r) {
3767 		if (adev->in_suspend || amdgpu_in_reset(adev)) {
3768 			/* in the resume phase, if enabling ras fails,
3769 			 * clean up all ras fs nodes and disable ras */
3770 			goto cleanup;
3771 		} else
3772 			return r;
3773 	}
3774 
3775 	/* check for errors on ASICs that support persistent EDC harvesting across a warm reset */
3776 	amdgpu_persistent_edc_harvesting(adev, ras_block);
3777 
3778 	/* in resume phase, no need to create ras fs node */
3779 	if (adev->in_suspend || amdgpu_in_reset(adev))
3780 		return 0;
3781 
3782 	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
3783 	if (ras_obj->ras_cb || (ras_obj->hw_ops &&
3784 	    (ras_obj->hw_ops->query_poison_status ||
3785 	    ras_obj->hw_ops->handle_poison_consumption))) {
3786 		r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
3787 		if (r)
3788 			goto cleanup;
3789 	}
3790 
3791 	if (ras_obj->hw_ops &&
3792 	    (ras_obj->hw_ops->query_ras_error_count ||
3793 	     ras_obj->hw_ops->query_ras_error_status)) {
3794 		r = amdgpu_ras_sysfs_create(adev, ras_block);
3795 		if (r)
3796 			goto interrupt;
3797 
3798 		/* Those are the cached values at init.
3799 		 */
3800 		query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
3801 		if (!query_info)
3802 			return -ENOMEM;
3803 		memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
3804 
3805 		if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
3806 			atomic_set(&con->ras_ce_count, ce_count);
3807 			atomic_set(&con->ras_ue_count, ue_count);
3808 		}
3809 
3810 		kfree(query_info);
3811 	}
3812 
3813 	return 0;
3814 
3815 interrupt:
3816 	if (ras_obj->ras_cb)
3817 		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
3818 cleanup:
3819 	amdgpu_ras_feature_enable(adev, ras_block, 0);
3820 	return r;
3821 }
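
/*
 * Usage sketch for an IP block without special late-init needs (the
 * example_* name is hypothetical): the block simply forwards to the common
 * helper above, which enables the feature, registers the poison/interrupt
 * handler when applicable and creates the sysfs node.
 *
 *	static int example_ip_ras_late_init(struct amdgpu_device *adev,
 *					    struct ras_common_if *ras_block)
 *	{
 *		return amdgpu_ras_block_late_init(adev, ras_block);
 *	}
 */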
3822 
3823 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
3824 			 struct ras_common_if *ras_block)
3825 {
3826 	return amdgpu_ras_block_late_init(adev, ras_block);
3827 }
3828 
3829 /* helper function to remove ras fs node and interrupt handler */
3830 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
3831 			  struct ras_common_if *ras_block)
3832 {
3833 	struct amdgpu_ras_block_object *ras_obj;
3834 	if (!ras_block)
3835 		return;
3836 
3837 	amdgpu_ras_sysfs_remove(adev, ras_block);
3838 
3839 	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
3840 	if (ras_obj->ras_cb)
3841 		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
3842 }
3843 
3844 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
3845 			  struct ras_common_if *ras_block)
3846 {
3847 	return amdgpu_ras_block_late_fini(adev, ras_block);
3848 }
3849 
3850 /* do some init work after IP late init, as a dependency.
3851  * It runs in the resume/gpu reset/boot-up cases.
3852  */
3853 void amdgpu_ras_resume(struct amdgpu_device *adev)
3854 {
3855 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3856 	struct ras_manager *obj, *tmp;
3857 
3858 	if (!adev->ras_enabled || !con) {
3859 		/* clean ras context for VEGA20 Gaming after send ras disable cmd */
3860 		amdgpu_release_ras_context(adev);
3861 
3862 		return;
3863 	}
3864 
3865 	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
3866 		/* Set up all other IPs which are not implemented. There is a
3867 		 * subtlety here: each IP's actual ras error type should be
3868 		 * MULTI_UNCORRECTABLE, but since the driver does not handle
3869 		 * the error itself, ERROR_NONE makes sense anyway.
3870 		 */
3871 		amdgpu_ras_enable_all_features(adev, 1);
3872 
3873 		/* We enable ras on all hw_supported blocks, but the boot
3874 		 * parameter might have disabled some of them, and one or more
3875 		 * IPs may not be implemented yet. So we disable them on their behalf.
3876 		 */
3877 		list_for_each_entry_safe(obj, tmp, &con->head, node) {
3878 			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
3879 				amdgpu_ras_feature_enable(adev, &obj->head, 0);
3880 				/* there should not be any reference. */
3881 				WARN_ON(alive_obj(obj));
3882 			}
3883 		}
3884 	}
3885 }
3886 
3887 void amdgpu_ras_suspend(struct amdgpu_device *adev)
3888 {
3889 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3890 
3891 	if (!adev->ras_enabled || !con)
3892 		return;
3893 
3894 	amdgpu_ras_disable_all_features(adev, 0);
3895 	/* Make sure all ras objects are disabled. */
3896 	if (AMDGPU_RAS_GET_FEATURES(con->features))
3897 		amdgpu_ras_disable_all_features(adev, 1);
3898 }
3899 
3900 int amdgpu_ras_late_init(struct amdgpu_device *adev)
3901 {
3902 	struct amdgpu_ras_block_list *node, *tmp;
3903 	struct amdgpu_ras_block_object *obj;
3904 	int r;
3905 
3906 	amdgpu_ras_event_mgr_init(adev);
3907 
3908 	if (amdgpu_ras_aca_is_supported(adev)) {
3909 		if (amdgpu_in_reset(adev)) {
3910 			if (amdgpu_aca_is_enabled(adev))
3911 				r = amdgpu_aca_reset(adev);
3912 			else
3913 				r = amdgpu_mca_reset(adev);
3914 			if (r)
3915 				return r;
3916 		}
3917 
3918 		if (!amdgpu_sriov_vf(adev)) {
3919 			if (amdgpu_aca_is_enabled(adev))
3920 				amdgpu_ras_set_aca_debug_mode(adev, false);
3921 			else
3922 				amdgpu_ras_set_mca_debug_mode(adev, false);
3923 		}
3924 	}
3925 
3926 	/* Guest side doesn't need to init the ras feature */
3927 	if (amdgpu_sriov_vf(adev))
3928 		return 0;
3929 
3930 	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
3931 		obj = node->ras_obj;
3932 		if (!obj) {
3933 			dev_warn(adev->dev, "abnormal ras list node\n");
3934 			continue;
3935 		}
3936 
3937 		if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block))
3938 			continue;
3939 
3940 		if (obj->ras_late_init) {
3941 			r = obj->ras_late_init(adev, &obj->ras_comm);
3942 			if (r) {
3943 				dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
3944 					obj->ras_comm.name, r);
3945 				return r;
3946 			}
3947 		} else
3948 			amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
3949 	}
3950 
3951 	return 0;
3952 }
3953 
3954 /* do some fini work before IP fini as dependence */
3955 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
3956 {
3957 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3958 
3959 	if (!adev->ras_enabled || !con)
3960 		return 0;
3961 
3962 
3963 	/* Need to disable ras on all IPs here before ip [hw/sw] fini */
3964 	if (AMDGPU_RAS_GET_FEATURES(con->features))
3965 		amdgpu_ras_disable_all_features(adev, 0);
3966 	amdgpu_ras_recovery_fini(adev);
3967 	return 0;
3968 }
3969 
3970 int amdgpu_ras_fini(struct amdgpu_device *adev)
3971 {
3972 	struct amdgpu_ras_block_list *ras_node, *tmp;
3973 	struct amdgpu_ras_block_object *obj = NULL;
3974 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3975 
3976 	if (!adev->ras_enabled || !con)
3977 		return 0;
3978 
3979 	list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
3980 		if (ras_node->ras_obj) {
3981 			obj = ras_node->ras_obj;
3982 			if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
3983 			    obj->ras_fini)
3984 				obj->ras_fini(adev, &obj->ras_comm);
3985 			else
3986 				amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
3987 		}
3988 
3989 		/* Clear ras blocks from ras_list and free ras block list node */
3990 		list_del(&ras_node->node);
3991 		kfree(ras_node);
3992 	}
3993 
3994 	amdgpu_ras_fs_fini(adev);
3995 	amdgpu_ras_interrupt_remove_all(adev);
3996 
3997 	if (amdgpu_ras_aca_is_supported(adev)) {
3998 		if (amdgpu_aca_is_enabled(adev))
3999 			amdgpu_aca_fini(adev);
4000 		else
4001 			amdgpu_mca_fini(adev);
4002 	}
4003 
4004 	WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");
4005 
4006 	if (AMDGPU_RAS_GET_FEATURES(con->features))
4007 		amdgpu_ras_disable_all_features(adev, 0);
4008 
4009 	cancel_delayed_work_sync(&con->ras_counte_delay_work);
4010 
4011 	amdgpu_ras_set_context(adev, NULL);
4012 	kfree(con);
4013 
4014 	return 0;
4015 }
4016 
4017 bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
4018 {
4019 	struct amdgpu_ras *ras;
4020 
4021 	ras = amdgpu_ras_get_context(adev);
4022 	if (!ras)
4023 		return false;
4024 
4025 	return atomic_read(&ras->fed);
4026 }
4027 
4028 void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
4029 {
4030 	struct amdgpu_ras *ras;
4031 
4032 	ras = amdgpu_ras_get_context(adev);
4033 	if (ras)
4034 		atomic_set(&ras->fed, !!status);
4035 }
4036 
4037 static struct ras_event_manager *__get_ras_event_mgr(struct amdgpu_device *adev)
4038 {
4039 	struct amdgpu_ras *ras;
4040 
4041 	ras = amdgpu_ras_get_context(adev);
4042 	if (!ras)
4043 		return NULL;
4044 
4045 	return ras->event_mgr;
4046 }
4047 
4048 int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type,
4049 				     const void *caller)
4050 {
4051 	struct ras_event_manager *event_mgr;
4052 	struct ras_event_state *event_state;
4053 	int ret = 0;
4054 
4055 	if (type >= RAS_EVENT_TYPE_COUNT) {
4056 		ret = -EINVAL;
4057 		goto out;
4058 	}
4059 
4060 	event_mgr = __get_ras_event_mgr(adev);
4061 	if (!event_mgr) {
4062 		ret = -EINVAL;
4063 		goto out;
4064 	}
4065 
4066 	event_state = &event_mgr->event_state[type];
4067 	event_state->last_seqno = atomic64_inc_return(&event_mgr->seqno);
4068 	atomic64_inc(&event_state->count);
4069 
4070 out:
4071 	if (ret && caller)
4072 		dev_warn(adev->dev, "failed to mark ras event (%d) in %ps, ret:%d\n",
4073 			 (int)type, caller, ret);
4074 
4075 	return ret;
4076 }
4077 
4078 u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type)
4079 {
4080 	struct ras_event_manager *event_mgr;
4081 	u64 id;
4082 
4083 	if (type >= RAS_EVENT_TYPE_COUNT)
4084 		return RAS_EVENT_INVALID_ID;
4085 
4086 	switch (type) {
4087 	case RAS_EVENT_TYPE_FATAL:
4088 	case RAS_EVENT_TYPE_POISON_CREATION:
4089 	case RAS_EVENT_TYPE_POISON_CONSUMPTION:
4090 		event_mgr = __get_ras_event_mgr(adev);
4091 		if (!event_mgr)
4092 			return RAS_EVENT_INVALID_ID;
4093 
4094 		id = event_mgr->event_state[type].last_seqno;
4095 		break;
4096 	case RAS_EVENT_TYPE_INVALID:
4097 	default:
4098 		id = RAS_EVENT_INVALID_ID;
4099 		break;
4100 	}
4101 
4102 	return id;
4103 }
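
/*
 * Intended sequencing, as demonstrated by amdgpu_ras_global_ras_isr()
 * below: the caller first marks the event, which allocates a fresh seqno,
 * and then acquires the id for logging.
 *
 *	if (!amdgpu_ras_mark_ras_event(adev, RAS_EVENT_TYPE_FATAL)) {
 *		event_id = amdgpu_ras_acquire_event_id(adev, RAS_EVENT_TYPE_FATAL);
 *		RAS_EVENT_LOG(adev, event_id, "example message\n");
 *	}
 */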
4104 
4105 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
4106 {
4107 	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
4108 		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4109 		enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
4110 		u64 event_id;
4111 
4112 		if (amdgpu_ras_mark_ras_event(adev, type))
4113 			return;
4114 
4115 		event_id = amdgpu_ras_acquire_event_id(adev, type);
4116 
4117 		RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error "
4118 			      "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
4119 
4120 		amdgpu_ras_set_fed(adev, true);
4121 		ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
4122 		amdgpu_ras_reset_gpu(adev);
4123 	}
4124 }
4125 
4126 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
4127 {
4128 	if (adev->asic_type == CHIP_VEGA20 &&
4129 	    adev->pm.fw_version <= 0x283400) {
4130 		return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
4131 				amdgpu_ras_intr_triggered();
4132 	}
4133 
4134 	return false;
4135 }
4136 
4137 void amdgpu_release_ras_context(struct amdgpu_device *adev)
4138 {
4139 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4140 
4141 	if (!con)
4142 		return;
4143 
4144 	if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
4145 		con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
4146 		amdgpu_ras_set_context(adev, NULL);
4147 		kfree(con);
4148 	}
4149 }
4150 
4151 #ifdef CONFIG_X86_MCE_AMD
4152 static struct amdgpu_device *find_adev(uint32_t node_id)
4153 {
4154 	int i;
4155 	struct amdgpu_device *adev = NULL;
4156 
4157 	for (i = 0; i < mce_adev_list.num_gpu; i++) {
4158 		adev = mce_adev_list.devs[i];
4159 
4160 		if (adev && adev->gmc.xgmi.connected_to_cpu &&
4161 		    adev->gmc.xgmi.physical_node_id == node_id)
4162 			break;
4163 		adev = NULL;
4164 	}
4165 
4166 	return adev;
4167 }
4168 
4169 #define GET_MCA_IPID_GPUID(m)	(((m) >> 44) & 0xF)
4170 #define GET_UMC_INST(m)		(((m) >> 21) & 0x7)
4171 #define GET_CHAN_INDEX(m)	((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
4172 #define GPU_ID_OFFSET		8
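
/*
 * Worked decode of the helpers above for a made-up MCA_IPID value,
 * m->ipid = 0x900000702000:
 *
 *	GET_MCA_IPID_GPUID(0x900000702000) = 9, gpu_id = 9 - GPU_ID_OFFSET = 1
 *	GET_UMC_INST(0x900000702000)       = 3
 *	GET_CHAN_INDEX(0x900000702000)     = 0x2 | 0x4 = 6
 */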
4173 
4174 static int amdgpu_bad_page_notifier(struct notifier_block *nb,
4175 				    unsigned long val, void *data)
4176 {
4177 	struct mce *m = (struct mce *)data;
4178 	struct amdgpu_device *adev = NULL;
4179 	uint32_t gpu_id = 0;
4180 	uint32_t umc_inst = 0, ch_inst = 0;
4181 
4182 	/*
4183 	 * Only process the error if it was generated in UMC_V2, which belongs
4184 	 * to GPU UMCs, and occurred in DramECC (Extended error code = 0);
4185 	 * otherwise bail out.
4186 	 */
4187 	if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
4188 		    (XEC(m->status, 0x3f) == 0x0)))
4189 		return NOTIFY_DONE;
4190 
4191 	/*
4192 	 * If it is correctable error, return.
4193 	 */
4194 	if (mce_is_correctable(m))
4195 		return NOTIFY_OK;
4196 
4197 	/*
4198 	 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
4199 	 */
4200 	gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
4201 
4202 	adev = find_adev(gpu_id);
4203 	if (!adev) {
4204 		DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
4205 								gpu_id);
4206 		return NOTIFY_DONE;
4207 	}
4208 
4209 	/*
4210 	 * If it is uncorrectable error, then find out UMC instance and
4211 	 * channel index.
4212 	 */
4213 	umc_inst = GET_UMC_INST(m->ipid);
4214 	ch_inst = GET_CHAN_INDEX(m->ipid);
4215 
4216 	dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
4217 			     umc_inst, ch_inst);
4218 
4219 	if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
4220 		return NOTIFY_OK;
4221 	else
4222 		return NOTIFY_DONE;
4223 }
4224 
4225 static struct notifier_block amdgpu_bad_page_nb = {
4226 	.notifier_call  = amdgpu_bad_page_notifier,
4227 	.priority       = MCE_PRIO_UC,
4228 };
4229 
4230 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
4231 {
4232 	/*
4233 	 * Add the adev to the mce_adev_list.
4234 	 * During mode2 reset, amdgpu device is temporarily
4235 	 * removed from the mgpu_info list which can cause
4236 	 * page retirement to fail.
4237 	 * Use this list instead of mgpu_info to find the amdgpu
4238 	 * device on which the UMC error was reported.
4239 	 */
4240 	mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
4241 
4242 	/*
4243 	 * Register the x86 notifier only once
4244 	 * with MCE subsystem.
4245 	 */
4246 	if (!notifier_registered) {
4247 		mce_register_decode_chain(&amdgpu_bad_page_nb);
4248 		notifier_registered = true;
4249 	}
4250 }
4251 #endif
4252 
4253 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
4254 {
4255 	if (!adev)
4256 		return NULL;
4257 
4258 	return adev->psp.ras_context.ras;
4259 }
4260 
4261 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
4262 {
4263 	if (!adev)
4264 		return -EINVAL;
4265 
4266 	adev->psp.ras_context.ras = ras_con;
4267 	return 0;
4268 }
4269 
4270 /* check if ras is supported on block, say, sdma, gfx */
4271 int amdgpu_ras_is_supported(struct amdgpu_device *adev,
4272 		unsigned int block)
4273 {
4274 	int ret = 0;
4275 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4276 
4277 	if (block >= AMDGPU_RAS_BLOCK_COUNT)
4278 		return 0;
4279 
4280 	ret = ras && (adev->ras_enabled & (1 << block));
4281 
4282 	/* For special asics with mem ecc enabled but sram ecc not
4283 	 * enabled, even though the ras block is not marked supported in
4284 	 * .ras_enabled, the block can still be considered to support the
4285 	 * ras function if the asic supports poison mode and the ras
4286 	 * block has a ras configuration.
4287 	 */
4288 	if (!ret &&
4289 	    (block == AMDGPU_RAS_BLOCK__GFX ||
4290 	     block == AMDGPU_RAS_BLOCK__SDMA ||
4291 	     block == AMDGPU_RAS_BLOCK__VCN ||
4292 	     block == AMDGPU_RAS_BLOCK__JPEG) &&
4293 		(amdgpu_ras_mask & (1 << block)) &&
4294 	    amdgpu_ras_is_poison_mode_supported(adev) &&
4295 	    amdgpu_ras_get_ras_block(adev, block, 0))
4296 		ret = 1;
4297 
4298 	return ret;
4299 }
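
/*
 * Typical guard in IP code (a sketch): RAS-only paths bail out early when
 * the block is not supported.
 *
 *	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
 *		return 0;
 */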
4300 
4301 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
4302 {
4303 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4304 
4305 	/* mode1 is the only selection for RMA status */
4306 	if (amdgpu_ras_is_rma(adev)) {
4307 		ras->gpu_reset_flags = 0;
4308 		ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
4309 	}
4310 
4311 	if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) {
4312 		struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
4313 		int hive_ras_recovery = 0;
4314 
4315 		if (hive) {
4316 			hive_ras_recovery = atomic_read(&hive->ras_recovery);
4317 			amdgpu_put_xgmi_hive(hive);
4318 		}
4319 		/* In the case of multiple GPUs, after a GPU has started
4320 		 * resetting all GPUs on hive, other GPUs do not need to
4321 		 * trigger GPU reset again.
4322 		 */
4323 		if (!hive_ras_recovery)
4324 			amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
4325 		else
4326 			atomic_set(&ras->in_recovery, 0);
4327 	} else {
4328 		flush_work(&ras->recovery_work);
4329 		amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
4330 	}
4331 
4332 	return 0;
4333 }
4334 
4335 int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
4336 {
4337 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4338 	int ret = 0;
4339 
4340 	if (con) {
4341 		ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
4342 		if (!ret)
4343 			con->is_aca_debug_mode = enable;
4344 	}
4345 
4346 	return ret;
4347 }
4348 
4349 int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable)
4350 {
4351 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4352 	int ret = 0;
4353 
4354 	if (con) {
4355 		if (amdgpu_aca_is_enabled(adev))
4356 			ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
4357 		else
4358 			ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
4359 		if (!ret)
4360 			con->is_aca_debug_mode = enable;
4361 	}
4362 
4363 	return ret;
4364 }
4365 
4366 bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev)
4367 {
4368 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4369 	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
4370 	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
4371 
4372 	if (!con)
4373 		return false;
4374 
4375 	if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) ||
4376 	    (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode))
4377 		return con->is_aca_debug_mode;
4378 	else
4379 		return true;
4380 }
4381 
4382 bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
4383 				     unsigned int *error_query_mode)
4384 {
4385 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4386 	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
4387 	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
4388 
4389 	if (!con) {
4390 		*error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
4391 		return false;
4392 	}
4393 
4394 	if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode))
4395 		*error_query_mode =
4396 			(con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
4397 	else
4398 		*error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
4399 
4400 	return true;
4401 }
4402 
4403 /* Register each ip ras block into amdgpu ras */
4404 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
4405 		struct amdgpu_ras_block_object *ras_block_obj)
4406 {
4407 	struct amdgpu_ras_block_list *ras_node;
4408 	if (!adev || !ras_block_obj)
4409 		return -EINVAL;
4410 
4411 	ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
4412 	if (!ras_node)
4413 		return -ENOMEM;
4414 
4415 	INIT_LIST_HEAD(&ras_node->node);
4416 	ras_node->ras_obj = ras_block_obj;
4417 	list_add_tail(&ras_node->node, &adev->ras_list);
4418 
4419 	return 0;
4420 }
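
/*
 * Registration sketch (the example_* object is hypothetical): an IP block
 * registers its ras object during sw init; amdgpu_ras_late_init() then
 * walks adev->ras_list and runs each block's late-init hook.
 *
 *	r = amdgpu_ras_register_ras_block(adev, &example_ip_ras_block);
 *	if (r)
 *		dev_err(adev->dev, "failed to register ras block!\n");
 */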
4421 
4422 void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
4423 {
4424 	if (!err_type_name)
4425 		return;
4426 
4427 	switch (err_type) {
4428 	case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
4429 		sprintf(err_type_name, "correctable");
4430 		break;
4431 	case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
4432 		sprintf(err_type_name, "uncorrectable");
4433 		break;
4434 	default:
4435 		sprintf(err_type_name, "unknown");
4436 		break;
4437 	}
4438 }
4439 
4440 bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
4441 					 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
4442 					 uint32_t instance,
4443 					 uint32_t *memory_id)
4444 {
4445 	uint32_t err_status_lo_data, err_status_lo_offset;
4446 
4447 	if (!reg_entry)
4448 		return false;
4449 
4450 	err_status_lo_offset =
4451 		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
4452 					    reg_entry->seg_lo, reg_entry->reg_lo);
4453 	err_status_lo_data = RREG32(err_status_lo_offset);
4454 
4455 	if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
4456 	    !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
4457 		return false;
4458 
4459 	*memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);
4460 
4461 	return true;
4462 }
4463 
4464 bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
4465 				       const struct amdgpu_ras_err_status_reg_entry *reg_entry,
4466 				       uint32_t instance,
4467 				       unsigned long *err_cnt)
4468 {
4469 	uint32_t err_status_hi_data, err_status_hi_offset;
4470 
4471 	if (!reg_entry)
4472 		return false;
4473 
4474 	err_status_hi_offset =
4475 		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
4476 					    reg_entry->seg_hi, reg_entry->reg_hi);
4477 	err_status_hi_data = RREG32(err_status_hi_offset);
4478 
4479 	if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
4480 	    !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
4481 		/* keep the check here in case we need to refer to the result later */
4482 		dev_dbg(adev->dev, "Invalid err_info field\n");
4483 
4484 	/* read err count */
4485 	*err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);
4486 
4487 	return true;
4488 }
4489 
4490 void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
4491 					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
4492 					   uint32_t reg_list_size,
4493 					   const struct amdgpu_ras_memory_id_entry *mem_list,
4494 					   uint32_t mem_list_size,
4495 					   uint32_t instance,
4496 					   uint32_t err_type,
4497 					   unsigned long *err_count)
4498 {
4499 	uint32_t memory_id;
4500 	unsigned long err_cnt;
4501 	char err_type_name[16];
4502 	uint32_t i, j;
4503 
4504 	for (i = 0; i < reg_list_size; i++) {
4505 		/* query memory_id from err_status_lo */
4506 		if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
4507 							 instance, &memory_id))
4508 			continue;
4509 
4510 		/* query err_cnt from err_status_hi */
4511 		if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
4512 						       instance, &err_cnt) ||
4513 		    !err_cnt)
4514 			continue;
4515 
4516 		*err_count += err_cnt;
4517 
4518 		/* log the errors */
4519 		amdgpu_ras_get_error_type_name(err_type, err_type_name);
4520 		if (!mem_list) {
4521 			/* memory_list is not supported */
4522 			dev_info(adev->dev,
4523 				 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
4524 				 err_cnt, err_type_name,
4525 				 reg_list[i].block_name,
4526 				 instance, memory_id);
4527 		} else {
4528 			for (j = 0; j < mem_list_size; j++) {
4529 				if (memory_id == mem_list[j].memory_id) {
4530 					dev_info(adev->dev,
4531 						 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
4532 						 err_cnt, err_type_name,
4533 						 reg_list[i].block_name,
4534 						 instance, mem_list[j].name);
4535 					break;
4536 				}
4537 			}
4538 		}
4539 	}
4540 }
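
/*
 * Sketch of a register-list entry consumed by the query helper above (all
 * values hypothetical; only fields referenced in this file are shown):
 *
 *	static const struct amdgpu_ras_err_status_reg_entry example_reg_list[] = {
 *		{ .hwip = ..., .reg_lo = ..., .seg_lo = ...,
 *		  .reg_hi = ..., .seg_hi = ...,
 *		  .flags = AMDGPU_RAS_ERR_INFO_VALID,
 *		  .block_name = "EXAMPLE" },
 *	};
 */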
4541 
4542 void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
4543 					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
4544 					   uint32_t reg_list_size,
4545 					   uint32_t instance)
4546 {
4547 	uint32_t err_status_lo_offset, err_status_hi_offset;
4548 	uint32_t i;
4549 
4550 	for (i = 0; i < reg_list_size; i++) {
4551 		err_status_lo_offset =
4552 			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
4553 						    reg_list[i].seg_lo, reg_list[i].reg_lo);
4554 		err_status_hi_offset =
4555 			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
4556 						    reg_list[i].seg_hi, reg_list[i].reg_hi);
4557 		WREG32(err_status_lo_offset, 0);
4558 		WREG32(err_status_hi_offset, 0);
4559 	}
4560 }
4561 
4562 int amdgpu_ras_error_data_init(struct ras_err_data *err_data)
4563 {
4564 	memset(err_data, 0, sizeof(*err_data));
4565 
4566 	INIT_LIST_HEAD(&err_data->err_node_list);
4567 
4568 	return 0;
4569 }
4570 
4571 static void amdgpu_ras_error_node_release(struct ras_err_node *err_node)
4572 {
4573 	if (!err_node)
4574 		return;
4575 
4576 	list_del(&err_node->node);
4577 	kvfree(err_node);
4578 }
4579 
4580 void amdgpu_ras_error_data_fini(struct ras_err_data *err_data)
4581 {
4582 	struct ras_err_node *err_node, *tmp;
4583 
4584 	list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
4585 		amdgpu_ras_error_node_release(err_node);
4586 }
4587 
4588 static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data,
4589 							     struct amdgpu_smuio_mcm_config_info *mcm_info)
4590 {
4591 	struct ras_err_node *err_node;
4592 	struct amdgpu_smuio_mcm_config_info *ref_id;
4593 
4594 	if (!err_data || !mcm_info)
4595 		return NULL;
4596 
4597 	for_each_ras_error(err_node, err_data) {
4598 		ref_id = &err_node->err_info.mcm_info;
4599 
4600 		if (mcm_info->socket_id == ref_id->socket_id &&
4601 		    mcm_info->die_id == ref_id->die_id)
4602 			return err_node;
4603 	}
4604 
4605 	return NULL;
4606 }
4607 
4608 static struct ras_err_node *amdgpu_ras_error_node_new(void)
4609 {
4610 	struct ras_err_node *err_node;
4611 
4612 	err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL);
4613 	if (!err_node)
4614 		return NULL;
4615 
4616 	INIT_LIST_HEAD(&err_node->node);
4617 
4618 	return err_node;
4619 }
4620 
4621 static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
4622 {
4623 	struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
4624 	struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
4625 	struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
4626 	struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;
4627 
4628 	if (unlikely(infoa->socket_id != infob->socket_id))
4629 		return infoa->socket_id - infob->socket_id;
4630 
4631 	/* same socket: order nodes by die id */
4632 	return infoa->die_id - infob->die_id;
4633 
4634 }
4635 
4636 static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
4637 				struct amdgpu_smuio_mcm_config_info *mcm_info)
4638 {
4639 	struct ras_err_node *err_node;
4640 
4641 	err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info);
4642 	if (err_node)
4643 		return &err_node->err_info;
4644 
4645 	err_node = amdgpu_ras_error_node_new();
4646 	if (!err_node)
4647 		return NULL;
4648 
4649 	memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
4650 
4651 	err_data->err_list_count++;
4652 	list_add_tail(&err_node->node, &err_data->err_node_list);
4653 	list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
4654 
4655 	return &err_node->err_info;
4656 }
4657 
4658 int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
4659 					struct amdgpu_smuio_mcm_config_info *mcm_info,
4660 					u64 count)
4661 {
4662 	struct ras_err_info *err_info;
4663 
4664 	if (!err_data || !mcm_info)
4665 		return -EINVAL;
4666 
4667 	if (!count)
4668 		return 0;
4669 
4670 	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4671 	if (!err_info)
4672 		return -EINVAL;
4673 
4674 	err_info->ue_count += count;
4675 	err_data->ue_count += count;
4676 
4677 	return 0;
4678 }
4679 
4680 int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
4681 					struct amdgpu_smuio_mcm_config_info *mcm_info,
4682 					u64 count)
4683 {
4684 	struct ras_err_info *err_info;
4685 
4686 	if (!err_data || !mcm_info)
4687 		return -EINVAL;
4688 
4689 	if (!count)
4690 		return 0;
4691 
4692 	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4693 	if (!err_info)
4694 		return -EINVAL;
4695 
4696 	err_info->ce_count += count;
4697 	err_data->ce_count += count;
4698 
4699 	return 0;
4700 }
4701 
4702 int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
4703 					struct amdgpu_smuio_mcm_config_info *mcm_info,
4704 					u64 count)
4705 {
4706 	struct ras_err_info *err_info;
4707 
4708 	if (!err_data || !mcm_info)
4709 		return -EINVAL;
4710 
4711 	if (!count)
4712 		return 0;
4713 
4714 	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4715 	if (!err_info)
4716 		return -EINVAL;
4717 
4718 	err_info->de_count += count;
4719 	err_data->de_count += count;
4720 
4721 	return 0;
4722 }
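
/*
 * Lifecycle sketch for the per-query error bookkeeping above (the mcm
 * values are hypothetical): callers initialize the node list, attribute
 * counts per socket/die, and release the nodes when done.
 *
 *	struct ras_err_data err_data;
 *	struct amdgpu_smuio_mcm_config_info mcm = { .socket_id = 0, .die_id = 1 };
 *
 *	amdgpu_ras_error_data_init(&err_data);
 *	amdgpu_ras_error_statistic_ue_count(&err_data, &mcm, 2);
 *	amdgpu_ras_error_statistic_ce_count(&err_data, &mcm, 5);
 *	amdgpu_ras_error_data_fini(&err_data);
 */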
4723 
4724 #define mmMP0_SMN_C2PMSG_92	0x1609C
4725 #define mmMP0_SMN_C2PMSG_126	0x160BE
4726 static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
4727 						 u32 instance)
4728 {
4729 	u32 socket_id, aid_id, hbm_id;
4730 	u32 fw_status;
4731 	u32 boot_error;
4732 	u64 reg_addr;
4733 
4734 	/* The pattern for smn addressing in other SOCs could differ from
4735 	 * the one for aqua_vanjaram. We should revisit the code if the pattern
4736 	 * changes; in that case, replace the aqua_vanjaram implementation
4737 	 * with a more common helper */
4738 	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
4739 		   aqua_vanjaram_encode_ext_smn_addressing(instance);
4740 	fw_status = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4741 
4742 	reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
4743 		   aqua_vanjaram_encode_ext_smn_addressing(instance);
4744 	boot_error = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4745 
4746 	socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
4747 	aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error);
4748 	hbm_id = ((AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error) == 1) ? 0 : 1);
4749 
4750 	if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
4751 		dev_info(adev->dev,
4752 			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, memory training failed\n",
4753 			 socket_id, aid_id, hbm_id, fw_status);
4754 
4755 	if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
4756 		dev_info(adev->dev,
4757 			 "socket: %d, aid: %d, fw_status: 0x%x, firmware load failed at boot time\n",
4758 			 socket_id, aid_id, fw_status);
4759 
4760 	if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
4761 		dev_info(adev->dev,
4762 			 "socket: %d, aid: %d, fw_status: 0x%x, wafl link training failed\n",
4763 			 socket_id, aid_id, fw_status);
4764 
4765 	if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
4766 		dev_info(adev->dev,
4767 			 "socket: %d, aid: %d, fw_status: 0x%x, xgmi link training failed\n",
4768 			 socket_id, aid_id, fw_status);
4769 
4770 	if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
4771 		dev_info(adev->dev,
4772 			 "socket: %d, aid: %d, fw_status: 0x%x, usr cp link training failed\n",
4773 			 socket_id, aid_id, fw_status);
4774 
4775 	if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
4776 		dev_info(adev->dev,
4777 			 "socket: %d, aid: %d, fw_status: 0x%x, usr dp link training failed\n",
4778 			 socket_id, aid_id, fw_status);
4779 
4780 	if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
4781 		dev_info(adev->dev,
4782 			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm memory test failed\n",
4783 			 socket_id, aid_id, hbm_id, fw_status);
4784 
4785 	if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
4786 		dev_info(adev->dev,
4787 			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n",
4788 			 socket_id, aid_id, hbm_id, fw_status);
4789 
4790 	if (AMDGPU_RAS_GPU_ERR_DATA_ABORT(boot_error))
4791 		dev_info(adev->dev,
4792 			 "socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
4793 			 socket_id, aid_id, fw_status);
4794 
4795 	if (AMDGPU_RAS_GPU_ERR_UNKNOWN(boot_error))
4796 		dev_info(adev->dev,
4797 			 "socket: %d, aid: %d, fw_status: 0x%x, unknown boot time errors\n",
4798 			 socket_id, aid_id, fw_status);
4799 }
4800 
4801 static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev,
4802 					   u32 instance)
4803 {
4804 	u64 reg_addr;
4805 	u32 reg_data;
4806 	int retry_loop;
4807 
4808 	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
4809 		   aqua_vanjaram_encode_ext_smn_addressing(instance);
4810 
4811 	for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
4812 		reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4813 		if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS)
4814 			return false;
4815 		else
4816 			msleep(1);
4817 	}
4818 
4819 	return true;
4820 }
4821 
4822 void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
4823 {
4824 	u32 i;
4825 
4826 	for (i = 0; i < num_instances; i++) {
4827 		if (amdgpu_ras_boot_error_detected(adev, i))
4828 			amdgpu_ras_boot_time_error_reporting(adev, i);
4829 	}
4830 }
4831 
4832 int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
4833 {
4834 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4835 	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
4836 	uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
4837 	int ret = 0;
4838 
4839 	mutex_lock(&con->page_rsv_lock);
4840 	ret = amdgpu_vram_mgr_query_page_status(mgr, start);
4841 	if (ret == -ENOENT)
4842 		ret = amdgpu_vram_mgr_reserve_range(mgr, start, AMDGPU_GPU_PAGE_SIZE);
4843 	mutex_unlock(&con->page_rsv_lock);
4844 
4845 	return ret;
4846 }
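
/*
 * Usage sketch (bad_pfn is hypothetical): a caller that has translated a
 * retired page to a GPU pfn reserves it so the VRAM manager never hands
 * the page out again; -ENOENT from the status query means "not yet
 * reserved", which the helper above converts into a reservation.
 *
 *	r = amdgpu_ras_reserve_page(adev, bad_pfn);
 *	if (r)
 *		dev_warn(adev->dev, "failed to reserve bad page\n");
 */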
4847 
4848 void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
4849 				const char *fmt, ...)
4850 {
4851 	struct va_format vaf;
4852 	va_list args;
4853 
4854 	va_start(args, fmt);
4855 	vaf.fmt = fmt;
4856 	vaf.va = &args;
4857 
4858 	if (RAS_EVENT_ID_IS_VALID(event_id))
4859 		dev_printk(KERN_INFO, adev->dev, "{%llu}%pV", event_id, &vaf);
4860 	else
4861 		dev_printk(KERN_INFO, adev->dev, "%pV", &vaf);
4862 
4863 	va_end(args);
4864 }
4865 
4866 bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
4867 {
4868 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4869 
4870 	if (!con)
4871 		return false;
4872 
4873 	return con->is_rma;
4874 }
4875