1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  *
23  */
24 #include <linux/debugfs.h>
25 #include <linux/list.h>
26 #include <linux/module.h>
27 #include <linux/uaccess.h>
28 #include <linux/reboot.h>
29 #include <linux/syscalls.h>
30 #include <linux/pm_runtime.h>
31 #include <linux/list_sort.h>
32 
33 #include "amdgpu.h"
34 #include "amdgpu_ras.h"
35 #include "amdgpu_atomfirmware.h"
36 #include "amdgpu_xgmi.h"
37 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
38 #include "nbio_v4_3.h"
39 #include "nbio_v7_9.h"
40 #include "atom.h"
41 #include "amdgpu_reset.h"
42 #include "amdgpu_psp.h"
43 
44 #ifdef CONFIG_X86_MCE_AMD
45 #include <asm/mce.h>
46 
47 static bool notifier_registered;
48 #endif
49 static const char *RAS_FS_NAME = "ras";
50 
51 const char *ras_error_string[] = {
52 	"none",
53 	"parity",
54 	"single_correctable",
55 	"multi_uncorrectable",
56 	"poison",
57 };
58 
59 const char *ras_block_string[] = {
60 	"umc",
61 	"sdma",
62 	"gfx",
63 	"mmhub",
64 	"athub",
65 	"pcie_bif",
66 	"hdp",
67 	"xgmi_wafl",
68 	"df",
69 	"smn",
70 	"sem",
71 	"mp0",
72 	"mp1",
73 	"fuse",
74 	"mca",
75 	"vcn",
76 	"jpeg",
77 	"ih",
78 	"mpio",
79 };
80 
81 const char *ras_mca_block_string[] = {
82 	"mca_mp0",
83 	"mca_mp1",
84 	"mca_mpio",
85 	"mca_iohc",
86 };
87 
88 struct amdgpu_ras_block_list {
89 	/* ras block link */
90 	struct list_head node;
91 
92 	struct amdgpu_ras_block_object *ras_obj;
93 };
94 
95 const char *get_ras_block_str(struct ras_common_if *ras_block)
96 {
97 	if (!ras_block)
98 		return "NULL";
99 
100 	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
101 	    ras_block->block >= ARRAY_SIZE(ras_block_string))
102 		return "OUT OF RANGE";
103 
104 	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
105 		return ras_mca_block_string[ras_block->sub_block_index];
106 
107 	return ras_block_string[ras_block->block];
108 }
109 
110 #define ras_block_str(_BLOCK_) \
111 	(((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")
112 
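/* RAS error types are bit flags, so ffs() maps an error mask to its name */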
113 #define ras_err_str(i) (ras_error_string[ffs(i)])
114 
115 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
116 
117 /* inject address is 52 bits */
118 #define	RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)
119 
120 /* typical ECC bad page rate is 1 bad page per 100MB VRAM */
121 #define RAS_BAD_PAGE_COVER              (100 * 1024 * 1024ULL)
122 
123 #define MAX_UMC_POISON_POLLING_TIME_ASYNC  100  //ms
124 
125 #define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100  //ms
126 
127 enum amdgpu_ras_retire_page_reservation {
128 	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
129 	AMDGPU_RAS_RETIRE_PAGE_PENDING,
130 	AMDGPU_RAS_RETIRE_PAGE_FAULT,
131 };
132 
133 atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
134 
135 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
136 				uint64_t addr);
137 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
138 				uint64_t addr);
139 #ifdef CONFIG_X86_MCE_AMD
140 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
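/* Bookkeeping for the amdgpu devices that registered with the x86 MCE
 * notifier, so the notifier callback can map an MCE back to its adev.
 */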
141 struct mce_notifier_adev_list {
142 	struct amdgpu_device *devs[MAX_GPU_INSTANCE];
143 	int num_gpu;
144 };
145 static struct mce_notifier_adev_list mce_adev_list;
146 #endif
147 
148 void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
149 {
150 	if (adev && amdgpu_ras_get_context(adev))
151 		amdgpu_ras_get_context(adev)->error_query_ready = ready;
152 }
153 
154 static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
155 {
156 	if (adev && amdgpu_ras_get_context(adev))
157 		return amdgpu_ras_get_context(adev)->error_query_ready;
158 
159 	return false;
160 }
161 
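/* Debug helper behind the "retire_page" ras_ctrl command: record @address
 * as a bad page and, when a bad page threshold is configured, persist the
 * new record to the RAS EEPROM.
 */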
162 static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
163 {
164 	struct ras_err_data err_data;
165 	struct eeprom_table_record err_rec;
166 	int ret;
167 
168 	if ((address >= adev->gmc.mc_vram_size) ||
169 	    (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
170 		dev_warn(adev->dev,
171 		         "RAS WARN: input address 0x%llx is invalid.\n",
172 		         address);
173 		return -EINVAL;
174 	}
175 
176 	if (amdgpu_ras_check_bad_page(adev, address)) {
177 		dev_warn(adev->dev,
178 			 "RAS WARN: 0x%llx has already been marked as bad page!\n",
179 			 address);
180 		return 0;
181 	}
182 
183 	ret = amdgpu_ras_error_data_init(&err_data);
184 	if (ret)
185 		return ret;
186 
187 	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
188 	err_data.err_addr = &err_rec;
189 	amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);
190 
191 	if (amdgpu_bad_page_threshold != 0) {
192 		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
193 					 err_data.err_addr_cnt);
194 		amdgpu_ras_save_bad_pages(adev, NULL);
195 	}
196 
197 	amdgpu_ras_error_data_fini(&err_data);
198 
199 	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
200 	dev_warn(adev->dev, "Clear EEPROM:\n");
201 	dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
202 
203 	return 0;
204 }
205 
206 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
207 					size_t size, loff_t *pos)
208 {
209 	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
210 	struct ras_query_if info = {
211 		.head = obj->head,
212 	};
213 	ssize_t s;
214 	char val[128];
215 
216 	if (amdgpu_ras_query_error_status(obj->adev, &info))
217 		return -EINVAL;
218 
219 	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
220 	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
221 	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
222 		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
223 			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
224 	}
225 
226 	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
227 			"ue", info.ue_count,
228 			"ce", info.ce_count);
229 	if (*pos >= s)
230 		return 0;
231 
232 	s -= *pos;
233 	s = min_t(u64, s, size);
234 
236 	if (copy_to_user(buf, &val[*pos], s))
237 		return -EINVAL;
238 
239 	*pos += s;
240 
241 	return s;
242 }
243 
244 static const struct file_operations amdgpu_ras_debugfs_ops = {
245 	.owner = THIS_MODULE,
246 	.read = amdgpu_ras_debugfs_read,
247 	.write = NULL,
248 	.llseek = default_llseek
249 };
250 
251 static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
252 {
253 	int i;
254 
255 	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
256 		*block_id = i;
257 		if (strcmp(name, ras_block_string[i]) == 0)
258 			return 0;
259 	}
260 	return -EINVAL;
261 }
262 
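/* Parse a write to the ras_ctrl debugfs node: either one of the ASCII
 * commands described in the "AMDGPU RAS debugfs control interface"
 * section below, or a raw binary struct ras_debug_if as a fallback.
 */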
263 static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
264 		const char __user *buf, size_t size,
265 		loff_t *pos, struct ras_debug_if *data)
266 {
267 	ssize_t s = min_t(u64, 64, size);
268 	char str[65];
269 	char block_name[33];
270 	char err[9] = "ue";
271 	int op = -1;
272 	int block_id;
273 	uint32_t sub_block;
274 	u64 address, value;
275 	/* default value is 0 if the mask is not set by user */
276 	u32 instance_mask = 0;
277 
278 	if (*pos)
279 		return -EINVAL;
280 	*pos = size;
281 
282 	memset(str, 0, sizeof(str));
283 	memset(data, 0, sizeof(*data));
284 
285 	if (copy_from_user(str, buf, s))
286 		return -EINVAL;
287 
288 	if (sscanf(str, "disable %32s", block_name) == 1)
289 		op = 0;
290 	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
291 		op = 1;
292 	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
293 		op = 2;
294 	else if (strstr(str, "retire_page") != NULL)
295 		op = 3;
296 	else if (str[0] && str[1] && str[2] && str[3])
297 		/* ascii string, but commands are not matched. */
298 		return -EINVAL;
299 
300 	if (op != -1) {
301 		if (op == 3) {
302 			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
303 			    sscanf(str, "%*s %llu", &address) != 1)
304 				return -EINVAL;
305 
306 			data->op = op;
307 			data->inject.address = address;
308 
309 			return 0;
310 		}
311 
312 		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
313 			return -EINVAL;
314 
315 		data->head.block = block_id;
316 		/* only ue, ce and poison errors are supported */
317 		if (!memcmp("ue", err, 2))
318 			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
319 		else if (!memcmp("ce", err, 2))
320 			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
321 		else if (!memcmp("poison", err, 6))
322 			data->head.type = AMDGPU_RAS_ERROR__POISON;
323 		else
324 			return -EINVAL;
325 
326 		data->op = op;
327 
328 		if (op == 2) {
329 			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
330 				   &sub_block, &address, &value, &instance_mask) != 4 &&
331 			    sscanf(str, "%*s %*s %*s %u %llu %llu %u",
332 				   &sub_block, &address, &value, &instance_mask) != 4 &&
333 				sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
334 				   &sub_block, &address, &value) != 3 &&
335 			    sscanf(str, "%*s %*s %*s %u %llu %llu",
336 				   &sub_block, &address, &value) != 3)
337 				return -EINVAL;
338 			data->head.sub_block_index = sub_block;
339 			data->inject.address = address;
340 			data->inject.value = value;
341 			data->inject.instance_mask = instance_mask;
342 		}
343 	} else {
344 		if (size < sizeof(*data))
345 			return -EINVAL;
346 
347 		if (copy_from_user(data, buf, sizeof(*data)))
348 			return -EINVAL;
349 	}
350 
351 	return 0;
352 }
353 
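/* Clamp the user-supplied error injection instance mask to the instances
 * that actually exist for the target block (gfx XCCs, sdma engines,
 * vcn/jpeg instances); other blocks keep the mask as given.
 */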
354 static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
355 				struct ras_debug_if *data)
356 {
357 	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
358 	uint32_t mask, inst_mask = data->inject.instance_mask;
359 
360 	/* no need to set instance mask if there is only one instance */
361 	if (num_xcc <= 1 && inst_mask) {
362 		data->inject.instance_mask = 0;
363 		dev_dbg(adev->dev,
364 			"RAS inject mask(0x%x) isn't supported and force it to 0.\n",
365 			inst_mask);
366 
367 		return;
368 	}
369 
370 	switch (data->head.block) {
371 	case AMDGPU_RAS_BLOCK__GFX:
372 		mask = GENMASK(num_xcc - 1, 0);
373 		break;
374 	case AMDGPU_RAS_BLOCK__SDMA:
375 		mask = GENMASK(adev->sdma.num_instances - 1, 0);
376 		break;
377 	case AMDGPU_RAS_BLOCK__VCN:
378 	case AMDGPU_RAS_BLOCK__JPEG:
379 		mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
380 		break;
381 	default:
382 		mask = inst_mask;
383 		break;
384 	}
385 
386 	/* remove invalid bits in instance mask */
387 	data->inject.instance_mask &= mask;
388 	if (inst_mask != data->inject.instance_mask)
389 		dev_dbg(adev->dev,
390 			"Adjust RAS inject mask 0x%x to 0x%x\n",
391 			inst_mask, data->inject.instance_mask);
392 }
393 
394 /**
395  * DOC: AMDGPU RAS debugfs control interface
396  *
397  * The control interface accepts struct ras_debug_if which has two members.
398  *
399  * First member: ras_debug_if::head or ras_debug_if::inject.
400  *
401  * head is used to indicate which IP block will be under control.
402  *
403  * head has four members: block, type, sub_block_index, and name.
404  * block: which IP will be under control.
405  * type: what kind of error will be enabled/disabled/injected.
406  * sub_block_index: some IPs have subcomponents, e.g., GFX, SDMA.
407  * name: the name of the IP.
408  *
409  * inject has three more members than head, they are address, value and mask.
410  * As their names indicate, the inject operation will write the
411  * value to the address.
412  *
413  * The second member: struct ras_debug_if::op.
414  * It has three kinds of operations.
415  *
416  * - 0: disable RAS on the block. Take ::head as its data.
417  * - 1: enable RAS on the block. Take ::head as its data.
418  * - 2: inject errors on the block. Take ::inject as its data.
419  *
420  * How to use the interface?
421  *
422  * In a program
423  *
424  * Copy the struct ras_debug_if in your code and initialize it.
425  * Write the struct to the control interface.
426  *
427  * From shell
428  *
429  * .. code-block:: bash
430  *
431  *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
432  *	echo "enable  <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
433  *	echo "inject  <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
434  *
435  * Where N is the card which you want to affect.
436  *
437  * "disable" requires only the block.
438  * "enable" requires the block and error type.
439  * "inject" requires the block, error type, address, and value.
440  *
441  * The block is one of: umc, sdma, gfx, etc.
442  *	see ras_block_string[] for details
443  *
444  * The error type is one of: ue, ce and poison where,
445  *	ue is multi-uncorrectable
446  *	ce is single-correctable
447  *	poison is poison
448  *
449  * The sub-block is the sub-block index; pass 0 if there is no sub-block.
450  * The address and value are hexadecimal numbers; the leading 0x is optional.
451  * The mask is the instance mask; it is optional (default value is 0x1).
452  *
453  * For instance,
454  *
455  * .. code-block:: bash
456  *
457  *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
458  *	echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
459  *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
460  *
461  * How to check the result of the operation?
462  *
463  * To check disable/enable, see "ras" features at,
464  * /sys/class/drm/card[0/1/2...]/device/ras/features
465  *
466  * To check inject, see the corresponding error count at,
467  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
468  *
469  * .. note::
470  *	Operations are only allowed on blocks which are supported.
471  *	Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
472  *	to see which blocks support RAS on a particular asic.
473  *
474  */
475 static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
476 					     const char __user *buf,
477 					     size_t size, loff_t *pos)
478 {
479 	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
480 	struct ras_debug_if data;
481 	int ret = 0;
482 
483 	if (!amdgpu_ras_get_error_query_ready(adev)) {
484 		dev_warn(adev->dev, "RAS WARN: error injection "
485 				"currently inaccessible\n");
486 		return size;
487 	}
488 
489 	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
490 	if (ret)
491 		return ret;
492 
493 	if (data.op == 3) {
494 		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
495 		if (!ret)
496 			return size;
497 		else
498 			return ret;
499 	}
500 
501 	if (!amdgpu_ras_is_supported(adev, data.head.block))
502 		return -EINVAL;
503 
504 	switch (data.op) {
505 	case 0:
506 		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
507 		break;
508 	case 1:
509 		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
510 		break;
511 	case 2:
512 		if ((data.inject.address >= adev->gmc.mc_vram_size &&
513 		    adev->gmc.mc_vram_size) ||
514 		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
515 			dev_warn(adev->dev, "RAS WARN: input address "
516 					"0x%llx is invalid.",
517 					data.inject.address);
518 			ret = -EINVAL;
519 			break;
520 		}
521 
522 		/* umc ce/ue error injection for a bad page is not allowed */
523 		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
524 		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
525 			dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
526 				 "already been marked as bad!\n",
527 				 data.inject.address);
528 			break;
529 		}
530 
531 		amdgpu_ras_instance_mask_check(adev, &data);
532 
533 		/* data.inject.address is offset instead of absolute gpu address */
534 		ret = amdgpu_ras_error_inject(adev, &data.inject);
535 		break;
536 	default:
537 		ret = -EINVAL;
538 		break;
539 	}
540 
541 	if (ret)
542 		return ret;
543 
544 	return size;
545 }
546 
547 /**
548  * DOC: AMDGPU RAS debugfs EEPROM table reset interface
549  *
550  * Some boards contain an EEPROM which is used to persistently store a list of
551  * bad pages which experience ECC errors in vram.  This interface provides
552  * a way to reset the EEPROM, e.g., after testing error injection.
553  *
554  * Usage:
555  *
556  * .. code-block:: bash
557  *
558  *	echo 1 > ../ras/ras_eeprom_reset
559  *
560  * will reset the EEPROM table to 0 entries.
561  *
562  */
563 static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
564 					       const char __user *buf,
565 					       size_t size, loff_t *pos)
566 {
567 	struct amdgpu_device *adev =
568 		(struct amdgpu_device *)file_inode(f)->i_private;
569 	int ret;
570 
571 	ret = amdgpu_ras_eeprom_reset_table(
572 		&(amdgpu_ras_get_context(adev)->eeprom_control));
573 
574 	if (!ret) {
575 		/* Something was written to EEPROM.
576 		 */
577 		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
578 		return size;
579 	} else {
580 		return ret;
581 	}
582 }
583 
584 static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
585 	.owner = THIS_MODULE,
586 	.read = NULL,
587 	.write = amdgpu_ras_debugfs_ctrl_write,
588 	.llseek = default_llseek
589 };
590 
591 static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
592 	.owner = THIS_MODULE,
593 	.read = NULL,
594 	.write = amdgpu_ras_debugfs_eeprom_write,
595 	.llseek = default_llseek
596 };
597 
598 /**
599  * DOC: AMDGPU RAS sysfs Error Count Interface
600  *
601  * It allows the user to read the error count for each IP block on the gpu through
602  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
603  *
604  * It outputs multiple lines which report the uncorrected (ue) and corrected
605  * (ce) error counts.
606  *
607  * The format of one line is below,
608  *
609  * [ce|ue]: count
610  *
611  * Example:
612  *
613  * .. code-block:: bash
614  *
615  *	ue: 0
616  *	ce: 1
617  *
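 * The node can be read like any other sysfs attribute; a minimal C sketch
 * (hypothetical, error handling omitted):
 *
 * .. code-block:: c
 *
 *	char out[64] = { 0 };
 *	int fd = open("/sys/class/drm/card0/device/ras/umc_err_count",
 *		      O_RDONLY);
 *
 *	if (fd >= 0) {
 *		read(fd, out, sizeof(out) - 1);	// e.g. "ue: 0\nce: 1\n"
 *		close(fd);
 *	}
 *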
618  */
619 static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
620 		struct device_attribute *attr, char *buf)
621 {
622 	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
623 	struct ras_query_if info = {
624 		.head = obj->head,
625 	};
626 
627 	if (!amdgpu_ras_get_error_query_ready(obj->adev))
628 		return sysfs_emit(buf, "Query currently inaccessible\n");
629 
630 	if (amdgpu_ras_query_error_status(obj->adev, &info))
631 		return -EINVAL;
632 
633 	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
634 	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
635 		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
636 			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
637 	}
638 
639 	if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
640 		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
641 				"ce", info.ce_count, "de", info.de_count);
642 	else
643 		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
644 				"ce", info.ce_count);
645 }
646 
647 /* obj begin */
648 
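/* Manual reference counting for ras_manager objects: get_obj()/put_obj()
 * adjust ->use, and once the count drops to zero the object is unlinked
 * from con->head and its error data is freed.
 */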
649 #define get_obj(obj) do { (obj)->use++; } while (0)
650 #define alive_obj(obj) ((obj)->use)
651 
652 static inline void put_obj(struct ras_manager *obj)
653 {
654 	if (obj && (--obj->use == 0)) {
655 		list_del(&obj->node);
656 		amdgpu_ras_error_data_fini(&obj->err_data);
657 	}
658 
659 	if (obj && (obj->use < 0))
660 		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
661 }
662 
663 /* make one obj and return it. */
664 static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
665 		struct ras_common_if *head)
666 {
667 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
668 	struct ras_manager *obj;
669 
670 	if (!adev->ras_enabled || !con)
671 		return NULL;
672 
673 	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
674 		return NULL;
675 
676 	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
677 		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
678 			return NULL;
679 
680 		obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
681 	} else
682 		obj = &con->objs[head->block];
683 
684 	/* already exists. return obj? */
685 	if (alive_obj(obj))
686 		return NULL;
687 
688 	if (amdgpu_ras_error_data_init(&obj->err_data))
689 		return NULL;
690 
691 	obj->head = *head;
692 	obj->adev = adev;
693 	list_add(&obj->node, &con->head);
694 	get_obj(obj);
695 
696 	return obj;
697 }
698 
699 /* return an obj equal to head, or the first when head is NULL */
700 struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
701 		struct ras_common_if *head)
702 {
703 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
704 	struct ras_manager *obj;
705 	int i;
706 
707 	if (!adev->ras_enabled || !con)
708 		return NULL;
709 
710 	if (head) {
711 		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
712 			return NULL;
713 
714 		if (head->block == AMDGPU_RAS_BLOCK__MCA) {
715 			if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
716 				return NULL;
717 
718 			obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
719 		} else
720 			obj = &con->objs[head->block];
721 
722 		if (alive_obj(obj))
723 			return obj;
724 	} else {
725 		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
726 			obj = &con->objs[i];
727 			if (alive_obj(obj))
728 				return obj;
729 		}
730 	}
731 
732 	return NULL;
733 }
734 /* obj end */
735 
736 /* feature ctl begin */
737 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
738 					 struct ras_common_if *head)
739 {
740 	return adev->ras_hw_enabled & BIT(head->block);
741 }
742 
743 static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
744 		struct ras_common_if *head)
745 {
746 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
747 
748 	return con->features & BIT(head->block);
749 }
750 
751 /*
752  * if obj is not created, then create one.
753  * set feature enable flag.
754  */
755 static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
756 		struct ras_common_if *head, int enable)
757 {
758 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
759 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
760 
761 	/* If hardware does not support ras, then do not create obj.
762 	 * But if hardware supports ras, we can create the obj.
763 	 * The ras framework checks con->hw_supported to see if it needs to do
764 	 * corresponding initialization.
765 	 * The IP checks con->support to see if it needs to disable ras.
766 	 */
767 	if (!amdgpu_ras_is_feature_allowed(adev, head))
768 		return 0;
769 
770 	if (enable) {
771 		if (!obj) {
772 			obj = amdgpu_ras_create_obj(adev, head);
773 			if (!obj)
774 				return -EINVAL;
775 		} else {
776 			/* In case we create obj somewhere else */
777 			get_obj(obj);
778 		}
779 		con->features |= BIT(head->block);
780 	} else {
781 		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
782 			con->features &= ~BIT(head->block);
783 			put_obj(obj);
784 		}
785 	}
786 
787 	return 0;
788 }
789 
790 /* wrapper of psp_ras_enable_features */
791 int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
792 		struct ras_common_if *head, bool enable)
793 {
794 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
795 	union ta_ras_cmd_input *info;
796 	int ret;
797 
798 	if (!con)
799 		return -EINVAL;
800 
801 	/* For non-gfx ip, do not enable ras feature if it is not allowed.
802 	 * For gfx ip, regardless of feature support status, force issue
803 	 * enable or disable ras feature commands. */
804 	if (head->block != AMDGPU_RAS_BLOCK__GFX &&
805 	    !amdgpu_ras_is_feature_allowed(adev, head))
806 		return 0;
807 
808 	/* Only enable gfx ras feature from host side */
809 	if (head->block == AMDGPU_RAS_BLOCK__GFX &&
810 	    !amdgpu_sriov_vf(adev) &&
811 	    !amdgpu_ras_intr_triggered()) {
812 		info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
813 		if (!info)
814 			return -ENOMEM;
815 
816 		if (!enable) {
817 			info->disable_features = (struct ta_ras_disable_features_input) {
818 				.block_id =  amdgpu_ras_block_to_ta(head->block),
819 				.error_type = amdgpu_ras_error_to_ta(head->type),
820 			};
821 		} else {
822 			info->enable_features = (struct ta_ras_enable_features_input) {
823 				.block_id =  amdgpu_ras_block_to_ta(head->block),
824 				.error_type = amdgpu_ras_error_to_ta(head->type),
825 			};
826 		}
827 
828 		ret = psp_ras_enable_features(&adev->psp, info, enable);
829 		if (ret) {
830 			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
831 				enable ? "enable":"disable",
832 				get_ras_block_str(head),
833 				amdgpu_ras_is_poison_mode_supported(adev), ret);
834 			kfree(info);
835 			return ret;
836 		}
837 
838 		kfree(info);
839 	}
840 
841 	/* setup the obj */
842 	__amdgpu_ras_feature_enable(adev, head, enable);
843 
844 	return 0;
845 }
846 
847 /* Only used in device probe stage and called only once. */
848 int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
849 		struct ras_common_if *head, bool enable)
850 {
851 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
852 	int ret;
853 
854 	if (!con)
855 		return -EINVAL;
856 
857 	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
858 		if (enable) {
859 			/* There is no harm to issue a ras TA cmd regardless of
860 			 * the current ras state.
861 			 * If current state == target state, it will do nothing.
862 			 * But sometimes it requests the driver to reset and repost
863 			 * with error code -EAGAIN.
864 			 */
865 			ret = amdgpu_ras_feature_enable(adev, head, 1);
866 			/* With old ras TA, we might fail to enable ras.
867 			 * Log it and just set up the object.
868 			 * TODO: remove this WA in the future.
869 			 */
870 			if (ret == -EINVAL) {
871 				ret = __amdgpu_ras_feature_enable(adev, head, 1);
872 				if (!ret)
873 					dev_info(adev->dev,
874 						"RAS INFO: %s setup object\n",
875 						get_ras_block_str(head));
876 			}
877 		} else {
878 			/* set up the object, then issue a ras TA disable cmd. */
879 			ret = __amdgpu_ras_feature_enable(adev, head, 1);
880 			if (ret)
881 				return ret;
882 
883 			/* the gfx block ras disable cmd must be sent to the ras TA */
884 			if (head->block == AMDGPU_RAS_BLOCK__GFX)
885 				con->features |= BIT(head->block);
886 
887 			ret = amdgpu_ras_feature_enable(adev, head, 0);
888 
889 			/* clear the gfx block ras features flag */
890 			if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
891 				con->features &= ~BIT(head->block);
892 		}
893 	} else
894 		ret = amdgpu_ras_feature_enable(adev, head, enable);
895 
896 	return ret;
897 }
898 
899 static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
900 		bool bypass)
901 {
902 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
903 	struct ras_manager *obj, *tmp;
904 
905 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
906 		/* bypass psp.
907 		 * i.e., just release the obj and the corresponding flags
908 		 */
909 		if (bypass) {
910 			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
911 				break;
912 		} else {
913 			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
914 				break;
915 		}
916 	}
917 
918 	return con->features;
919 }
920 
921 static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
922 		bool bypass)
923 {
924 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
925 	int i;
926 	const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;
927 
928 	for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
929 		struct ras_common_if head = {
930 			.block = i,
931 			.type = default_ras_type,
932 			.sub_block_index = 0,
933 		};
934 
935 		if (i == AMDGPU_RAS_BLOCK__MCA)
936 			continue;
937 
938 		if (bypass) {
939 			/*
940 			 * bypass psp. vbios enables ras for us,
941 			 * so just create the obj.
942 			 */
943 			if (__amdgpu_ras_feature_enable(adev, &head, 1))
944 				break;
945 		} else {
946 			if (amdgpu_ras_feature_enable(adev, &head, 1))
947 				break;
948 		}
949 	}
950 
951 	for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
952 		struct ras_common_if head = {
953 			.block = AMDGPU_RAS_BLOCK__MCA,
954 			.type = default_ras_type,
955 			.sub_block_index = i,
956 		};
957 
958 		if (bypass) {
959 			/*
960 			 * bypass psp. vbios enables ras for us,
961 			 * so just create the obj.
962 			 */
963 			if (__amdgpu_ras_feature_enable(adev, &head, 1))
964 				break;
965 		} else {
966 			if (amdgpu_ras_feature_enable(adev, &head, 1))
967 				break;
968 		}
969 	}
970 
971 	return con->features;
972 }
973 /* feature ctl end */
974 
975 static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
976 		enum amdgpu_ras_block block)
977 {
978 	if (!block_obj)
979 		return -EINVAL;
980 
981 	if (block_obj->ras_comm.block == block)
982 		return 0;
983 
984 	return -EINVAL;
985 }
986 
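/* Walk adev->ras_list and return the registered block object that matches
 * (block, sub_block_index), preferring a block's own ras_block_match()
 * callback over the default block-id comparison.
 */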
987 static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
988 					enum amdgpu_ras_block block, uint32_t sub_block_index)
989 {
990 	struct amdgpu_ras_block_list *node, *tmp;
991 	struct amdgpu_ras_block_object *obj;
992 
993 	if (block >= AMDGPU_RAS_BLOCK__LAST)
994 		return NULL;
995 
996 	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
997 		if (!node->ras_obj) {
998 			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
999 			continue;
1000 		}
1001 
1002 		obj = node->ras_obj;
1003 		if (obj->ras_block_match) {
1004 			if (obj->ras_block_match(obj, block, sub_block_index) == 0)
1005 				return obj;
1006 		} else {
1007 			if (amdgpu_ras_block_match_default(obj, block) == 0)
1008 				return obj;
1009 		}
1010 	}
1011 
1012 	return NULL;
1013 }
1014 
1015 static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
1016 {
1017 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
1018 	int ret = 0;
1019 
1020 	/*
1021 	 * choose the right query method according to
1022 	 * whether the smu supports querying error information
1023 	 */
1024 	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
1025 	if (ret == -EOPNOTSUPP) {
1026 		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1027 			adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
1028 			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
1029 
1030 		/* umc query_ras_error_address is also responsible for clearing
1031 		 * error status
1032 		 */
1033 		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1034 		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
1035 			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
1036 	} else if (!ret) {
1037 		if (adev->umc.ras &&
1038 			adev->umc.ras->ecc_info_query_ras_error_count)
1039 			adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);
1040 
1041 		if (adev->umc.ras &&
1042 			adev->umc.ras->ecc_info_query_ras_error_address)
1043 			adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
1044 	}
1045 }
1046 
1047 static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
1048 					      struct ras_manager *ras_mgr,
1049 					      struct ras_err_data *err_data,
1050 					      struct ras_query_context *qctx,
1051 					      const char *blk_name,
1052 					      bool is_ue,
1053 					      bool is_de)
1054 {
1055 	struct amdgpu_smuio_mcm_config_info *mcm_info;
1056 	struct ras_err_node *err_node;
1057 	struct ras_err_info *err_info;
1058 	u64 event_id = qctx->event_id;
1059 
1060 	if (is_ue) {
1061 		for_each_ras_error(err_node, err_data) {
1062 			err_info = &err_node->err_info;
1063 			mcm_info = &err_info->mcm_info;
1064 			if (err_info->ue_count) {
1065 				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1066 					      "%lld new uncorrectable hardware errors detected in %s block\n",
1067 					      mcm_info->socket_id,
1068 					      mcm_info->die_id,
1069 					      err_info->ue_count,
1070 					      blk_name);
1071 			}
1072 		}
1073 
1074 		for_each_ras_error(err_node, &ras_mgr->err_data) {
1075 			err_info = &err_node->err_info;
1076 			mcm_info = &err_info->mcm_info;
1077 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1078 				      "%lld uncorrectable hardware errors detected in total in %s block\n",
1079 				      mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
1080 		}
1081 
1082 	} else {
1083 		if (is_de) {
1084 			for_each_ras_error(err_node, err_data) {
1085 				err_info = &err_node->err_info;
1086 				mcm_info = &err_info->mcm_info;
1087 				if (err_info->de_count) {
1088 					RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1089 						      "%lld new deferred hardware errors detected in %s block\n",
1090 						      mcm_info->socket_id,
1091 						      mcm_info->die_id,
1092 						      err_info->de_count,
1093 						      blk_name);
1094 				}
1095 			}
1096 
1097 			for_each_ras_error(err_node, &ras_mgr->err_data) {
1098 				err_info = &err_node->err_info;
1099 				mcm_info = &err_info->mcm_info;
1100 				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1101 					      "%lld deferred hardware errors detected in total in %s block\n",
1102 					      mcm_info->socket_id, mcm_info->die_id,
1103 					      err_info->de_count, blk_name);
1104 			}
1105 		} else {
1106 			for_each_ras_error(err_node, err_data) {
1107 				err_info = &err_node->err_info;
1108 				mcm_info = &err_info->mcm_info;
1109 				if (err_info->ce_count) {
1110 					RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1111 						      "%lld new correctable hardware errors detected in %s block\n",
1112 						      mcm_info->socket_id,
1113 						      mcm_info->die_id,
1114 						      err_info->ce_count,
1115 						      blk_name);
1116 				}
1117 			}
1118 
1119 			for_each_ras_error(err_node, &ras_mgr->err_data) {
1120 				err_info = &err_node->err_info;
1121 				mcm_info = &err_info->mcm_info;
1122 				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1123 					      "%lld correctable hardware errors detected in total in %s block\n",
1124 					      mcm_info->socket_id, mcm_info->die_id,
1125 					      err_info->ce_count, blk_name);
1126 			}
1127 		}
1128 	}
1129 }
1130 
1131 static inline bool err_data_has_source_info(struct ras_err_data *data)
1132 {
1133 	return !list_empty(&data->err_node_list);
1134 }
1135 
1136 static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
1137 					     struct ras_query_if *query_if,
1138 					     struct ras_err_data *err_data,
1139 					     struct ras_query_context *qctx)
1140 {
1141 	struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
1142 	const char *blk_name = get_ras_block_str(&query_if->head);
1143 	u64 event_id = qctx->event_id;
1144 
1145 	if (err_data->ce_count) {
1146 		if (err_data_has_source_info(err_data)) {
1147 			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1148 							  blk_name, false, false);
1149 		} else if (!adev->aid_mask &&
1150 			   adev->smuio.funcs &&
1151 			   adev->smuio.funcs->get_socket_id &&
1152 			   adev->smuio.funcs->get_die_id) {
1153 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1154 				      "%ld correctable hardware errors "
1155 				      "detected in %s block\n",
1156 				      adev->smuio.funcs->get_socket_id(adev),
1157 				      adev->smuio.funcs->get_die_id(adev),
1158 				      ras_mgr->err_data.ce_count,
1159 				      blk_name);
1160 		} else {
1161 			RAS_EVENT_LOG(adev, event_id, "%ld correctable hardware errors "
1162 				      "detected in %s block\n",
1163 				      ras_mgr->err_data.ce_count,
1164 				      blk_name);
1165 		}
1166 	}
1167 
1168 	if (err_data->ue_count) {
1169 		if (err_data_has_source_info(err_data)) {
1170 			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1171 							  blk_name, true, false);
1172 		} else if (!adev->aid_mask &&
1173 			   adev->smuio.funcs &&
1174 			   adev->smuio.funcs->get_socket_id &&
1175 			   adev->smuio.funcs->get_die_id) {
1176 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1177 				      "%ld uncorrectable hardware errors "
1178 				      "detected in %s block\n",
1179 				      adev->smuio.funcs->get_socket_id(adev),
1180 				      adev->smuio.funcs->get_die_id(adev),
1181 				      ras_mgr->err_data.ue_count,
1182 				      blk_name);
1183 		} else {
1184 			RAS_EVENT_LOG(adev, event_id, "%ld uncorrectable hardware errors "
1185 				      "detected in %s block\n",
1186 				      ras_mgr->err_data.ue_count,
1187 				      blk_name);
1188 		}
1189 	}
1190 
1191 	if (err_data->de_count) {
1192 		if (err_data_has_source_info(err_data)) {
1193 			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1194 							  blk_name, false, true);
1195 		} else if (!adev->aid_mask &&
1196 			   adev->smuio.funcs &&
1197 			   adev->smuio.funcs->get_socket_id &&
1198 			   adev->smuio.funcs->get_die_id) {
1199 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1200 				      "%ld deferred hardware errors "
1201 				      "detected in %s block\n",
1202 				      adev->smuio.funcs->get_socket_id(adev),
1203 				      adev->smuio.funcs->get_die_id(adev),
1204 				      ras_mgr->err_data.de_count,
1205 				      blk_name);
1206 		} else {
1207 			RAS_EVENT_LOG(adev, event_id, "%ld deferred hardware errors "
1208 				      "detected in %s block\n",
1209 				      ras_mgr->err_data.de_count,
1210 				      blk_name);
1211 		}
1212 	}
1213 }
1214 
1215 static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
1216 {
1217 	struct ras_err_node *err_node;
1218 	struct ras_err_info *err_info;
1219 
1220 	if (err_data_has_source_info(err_data)) {
1221 		for_each_ras_error(err_node, err_data) {
1222 			err_info = &err_node->err_info;
1223 			amdgpu_ras_error_statistic_de_count(&obj->err_data,
1224 					&err_info->mcm_info, NULL, err_info->de_count);
1225 			amdgpu_ras_error_statistic_ce_count(&obj->err_data,
1226 					&err_info->mcm_info, NULL, err_info->ce_count);
1227 			amdgpu_ras_error_statistic_ue_count(&obj->err_data,
1228 					&err_info->mcm_info, NULL, err_info->ue_count);
1229 		}
1230 	} else {
1231 		/* for the legacy asic path which doesn't have error source info */
1232 		obj->err_data.ue_count += err_data->ue_count;
1233 		obj->err_data.ce_count += err_data->ce_count;
1234 		obj->err_data.de_count += err_data->de_count;
1235 	}
1236 }
1237 
1238 static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
1239 {
1240 	struct ras_common_if head;
1241 
1242 	memset(&head, 0, sizeof(head));
1243 	head.block = blk;
1244 
1245 	return amdgpu_ras_find_obj(adev, &head);
1246 }
1247 
1248 int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
1249 			const struct aca_info *aca_info, void *data)
1250 {
1251 	struct ras_manager *obj;
1252 
1253 	/* in resume phase, no need to create aca fs node */
1254 	if (adev->in_suspend || amdgpu_in_reset(adev))
1255 		return 0;
1256 
1257 	obj = get_ras_manager(adev, blk);
1258 	if (!obj)
1259 		return -EINVAL;
1260 
1261 	return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
1262 }
1263 
1264 int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
1265 {
1266 	struct ras_manager *obj;
1267 
1268 	obj = get_ras_manager(adev, blk);
1269 	if (!obj)
1270 		return -EINVAL;
1271 
1272 	amdgpu_aca_remove_handle(&obj->aca_handle);
1273 
1274 	return 0;
1275 }
1276 
1277 static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
1278 					 enum aca_error_type type, struct ras_err_data *err_data,
1279 					 struct ras_query_context *qctx)
1280 {
1281 	struct ras_manager *obj;
1282 
1283 	obj = get_ras_manager(adev, blk);
1284 	if (!obj)
1285 		return -EINVAL;
1286 
1287 	return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data, qctx);
1288 }
1289 
1290 ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
1291 				  struct aca_handle *handle, char *buf, void *data)
1292 {
1293 	struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
1294 	struct ras_query_if info = {
1295 		.head = obj->head,
1296 	};
1297 
1298 	if (amdgpu_ras_query_error_status(obj->adev, &info))
1299 		return -EINVAL;
1300 
1301 	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
1302 			  "ce", info.ce_count, "de", info.de_count);
1303 }
1304 
1305 static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
1306 						struct ras_query_if *info,
1307 						struct ras_err_data *err_data,
1308 						struct ras_query_context *qctx,
1309 						unsigned int error_query_mode)
1310 {
1311 	enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
1312 	struct amdgpu_ras_block_object *block_obj = NULL;
1313 	int ret;
1314 
1315 	if (blk == AMDGPU_RAS_BLOCK_COUNT)
1316 		return -EINVAL;
1317 
1318 	if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
1319 		return -EINVAL;
1320 
1321 	if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
1322 		if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
1323 			amdgpu_ras_get_ecc_info(adev, err_data);
1324 		} else {
1325 			block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
1326 			if (!block_obj || !block_obj->hw_ops) {
1327 				dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1328 					     get_ras_block_str(&info->head));
1329 				return -EINVAL;
1330 			}
1331 
1332 			if (block_obj->hw_ops->query_ras_error_count)
1333 				block_obj->hw_ops->query_ras_error_count(adev, err_data);
1334 
1335 			if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
1336 			    (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
1337 			    (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
1338 				if (block_obj->hw_ops->query_ras_error_status)
1339 					block_obj->hw_ops->query_ras_error_status(adev);
1340 			}
1341 		}
1342 	} else {
1343 		if (amdgpu_aca_is_enabled(adev)) {
1344 			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data, qctx);
1345 			if (ret)
1346 				return ret;
1347 
1348 			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data, qctx);
1349 			if (ret)
1350 				return ret;
1351 
1352 			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data, qctx);
1353 			if (ret)
1354 				return ret;
1355 		} else {
1356 			/* FIXME: add code to check return value later */
1357 			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx);
1358 			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data, qctx);
1359 		}
1360 	}
1361 
1362 	return 0;
1363 }
1364 
1365 /* query/inject/cure begin */
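/* Query the current error counters for the block named in @info->head,
 * fold them into the running totals kept by the block's ras_manager, and
 * log any newly detected errors.
 */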
1366 int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
1367 {
1368 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1369 	struct ras_err_data err_data;
1370 	struct ras_query_context qctx;
1371 	unsigned int error_query_mode;
1372 	int ret;
1373 
1374 	if (!obj)
1375 		return -EINVAL;
1376 
1377 	if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
1378 		return -EINVAL;
1379 
1380 	ret = amdgpu_ras_error_data_init(&err_data);
1381 	if (ret)
1382 		return ret;
1383 
1384 	memset(&qctx, 0, sizeof(qctx));
1385 	qctx.event_id = amdgpu_ras_acquire_event_id(adev, amdgpu_ras_intr_triggered() ?
1386 						   RAS_EVENT_TYPE_ISR : RAS_EVENT_TYPE_INVALID);
1387 	ret = amdgpu_ras_query_error_status_helper(adev, info,
1388 						   &err_data,
1389 						   &qctx,
1390 						   error_query_mode);
1391 	if (ret)
1392 		goto out_fini_err_data;
1393 
1394 	amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
1395 
1396 	info->ue_count = obj->err_data.ue_count;
1397 	info->ce_count = obj->err_data.ce_count;
1398 	info->de_count = obj->err_data.de_count;
1399 
1400 	amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
1401 
1402 out_fini_err_data:
1403 	amdgpu_ras_error_data_fini(&err_data);
1404 
1405 	return ret;
1406 }
1407 
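/* Ask a block to clear its hardware error counters.  Returns -EOPNOTSUPP
 * when the counters cannot or should not be cleared here, e.g. no hw_ops,
 * RAS not supported, or a GPU reset/recovery is in progress.
 */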
1408 int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
1409 		enum amdgpu_ras_block block)
1410 {
1411 	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1412 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
1413 	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
1414 	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
1415 	struct amdgpu_hive_info *hive;
1416 	int hive_ras_recovery = 0;
1417 
1418 	if (!block_obj || !block_obj->hw_ops) {
1419 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1420 				ras_block_str(block));
1421 		return -EOPNOTSUPP;
1422 	}
1423 
1424 	if (!amdgpu_ras_is_supported(adev, block) ||
1425 	    !amdgpu_ras_get_aca_debug_mode(adev))
1426 		return -EOPNOTSUPP;
1427 
1428 	hive = amdgpu_get_xgmi_hive(adev);
1429 	if (hive) {
1430 		hive_ras_recovery = atomic_read(&hive->ras_recovery);
1431 		amdgpu_put_xgmi_hive(hive);
1432 	}
1433 
1434 	/* skip ras error reset in gpu reset */
1435 	if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery) ||
1436 	    hive_ras_recovery) &&
1437 	    ((smu_funcs && smu_funcs->set_debug_mode) ||
1438 	     (mca_funcs && mca_funcs->mca_set_debug_mode)))
1439 		return -EOPNOTSUPP;
1440 
1441 	if (block_obj->hw_ops->reset_ras_error_count)
1442 		block_obj->hw_ops->reset_ras_error_count(adev);
1443 
1444 	return 0;
1445 }
1446 
1447 int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
1448 		enum amdgpu_ras_block block)
1449 {
1450 	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1451 
1452 	if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
1453 		return 0;
1454 
1455 	if ((block == AMDGPU_RAS_BLOCK__GFX) ||
1456 	    (block == AMDGPU_RAS_BLOCK__MMHUB)) {
1457 		if (block_obj->hw_ops->reset_ras_error_status)
1458 			block_obj->hw_ops->reset_ras_error_status(adev);
1459 	}
1460 
1461 	return 0;
1462 }
1463 
1464 /* wrapper of psp_ras_trigger_error */
1465 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
1466 		struct ras_inject_if *info)
1467 {
1468 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1469 	struct ta_ras_trigger_error_input block_info = {
1470 		.block_id =  amdgpu_ras_block_to_ta(info->head.block),
1471 		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
1472 		.sub_block_index = info->head.sub_block_index,
1473 		.address = info->address,
1474 		.value = info->value,
1475 	};
1476 	int ret = -EINVAL;
1477 	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
1478 							info->head.block,
1479 							info->head.sub_block_index);
1480 
1481 	/* inject on guest isn't allowed, return success directly */
1482 	if (amdgpu_sriov_vf(adev))
1483 		return 0;
1484 
1485 	if (!obj)
1486 		return -EINVAL;
1487 
1488 	if (!block_obj || !block_obj->hw_ops)	{
1489 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1490 			     get_ras_block_str(&info->head));
1491 		return -EINVAL;
1492 	}
1493 
1494 	/* Calculate XGMI relative offset */
1495 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
1496 	    info->head.block != AMDGPU_RAS_BLOCK__GFX) {
1497 		block_info.address =
1498 			amdgpu_xgmi_get_relative_phy_addr(adev,
1499 							  block_info.address);
1500 	}
1501 
1502 	if (block_obj->hw_ops->ras_error_inject) {
1503 		if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
1504 			ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
1505 		else /* Special ras_error_inject is defined (e.g., xgmi) */
1506 			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
1507 						info->instance_mask);
1508 	} else {
1509 		/* default path */
1510 		ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
1511 	}
1512 
1513 	if (ret)
1514 		dev_err(adev->dev, "ras inject %s failed %d\n",
1515 			get_ras_block_str(&info->head), ret);
1516 
1517 	return ret;
1518 }
1519 
1520 /**
1521  * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
1522  * @adev: pointer to AMD GPU device
1523  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1524  * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
1525  * @query_info: pointer to ras_query_if
1526  *
1527  * Return 0 on query success or if there is nothing to do, otherwise return
1528  * an error on failure.
1529  */
1530 static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
1531 					       unsigned long *ce_count,
1532 					       unsigned long *ue_count,
1533 					       struct ras_query_if *query_info)
1534 {
1535 	int ret;
1536 
1537 	if (!query_info)
1538 		/* do nothing if query_info is not specified */
1539 		return 0;
1540 
1541 	ret = amdgpu_ras_query_error_status(adev, query_info);
1542 	if (ret)
1543 		return ret;
1544 
1545 	*ce_count += query_info->ce_count;
1546 	*ue_count += query_info->ue_count;
1547 
1548 	/* some hardware/IPs support read-to-clear, so there is no need to
1549 	 * explicitly reset the err status after the query call */
1550 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
1551 	    amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
1552 		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
1553 			dev_warn(adev->dev,
1554 				 "Failed to reset error counter and error status\n");
1555 	}
1556 
1557 	return 0;
1558 }
1559 
1560 /**
1561  * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
1562  * @adev: pointer to AMD GPU device
1563  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1564  * @ue_count: pointer to an integer to be set to the count of uncorrectable
1565  * errors.
1566  * @query_info: pointer to ras_query_if if the query request is only for
1567  * a specific ip block; if info is NULL, then the query request is for
1568  * all the ip blocks that support query ras error counters/status
1569  *
1570  * If set, @ce_count or @ue_count, count and return the corresponding
1571  * error counts in those integer pointers. Return 0 if the device
1572  * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
1573  */
1574 int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
1575 				 unsigned long *ce_count,
1576 				 unsigned long *ue_count,
1577 				 struct ras_query_if *query_info)
1578 {
1579 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1580 	struct ras_manager *obj;
1581 	unsigned long ce, ue;
1582 	int ret = 0;
1583 
1584 	if (!adev->ras_enabled || !con)
1585 		return -EOPNOTSUPP;
1586 
1587 	/* Don't count since no reporting.
1588 	 */
1589 	if (!ce_count && !ue_count)
1590 		return 0;
1591 
1592 	ce = 0;
1593 	ue = 0;
1594 	if (!query_info) {
1595 		/* query all the ip blocks that support ras query interface */
1596 		list_for_each_entry(obj, &con->head, node) {
1597 			struct ras_query_if info = {
1598 				.head = obj->head,
1599 			};
1600 
1601 			ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
1602 		}
1603 	} else {
1604 		/* query specific ip block */
1605 		ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
1606 	}
1607 
1608 	if (ret)
1609 		return ret;
1610 
1611 	if (ce_count)
1612 		*ce_count = ce;
1613 
1614 	if (ue_count)
1615 		*ue_count = ue;
1616 
1617 	return 0;
1618 }
1619 /* query/inject/cure end */
1620 
1621 
1622 /* sysfs begin */
1623 
1624 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1625 		struct ras_badpage **bps, unsigned int *count);
1626 
1627 static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
1628 {
1629 	switch (flags) {
1630 	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
1631 		return "R";
1632 	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
1633 		return "P";
1634 	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
1635 	default:
1636 		return "F";
1637 	}
1638 }
1639 
1640 /**
1641  * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
1642  *
1643  * It allows the user to read the bad pages of vram on the gpu through
1644  * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
1645  *
1646  * It outputs multiple lines, and each line stands for one gpu page.
1647  *
1648  * The format of one line is below,
1649  * gpu pfn : gpu page size : flags
1650  *
1651  * gpu pfn and gpu page size are printed in hex format.
1652  * flags can be one of the characters below:
1653  *
1654  * R: reserved, this gpu page is reserved and not available for use.
1655  *
1656  * P: pending for reserve, this gpu page is marked as bad and will be
1657  * reserved in the next window of page_reserve.
1658  *
1659  * F: unable to reserve. This gpu page can't be reserved for some reason.
1660  *
1661  * Examples:
1662  *
1663  * .. code-block:: bash
1664  *
1665  *	0x00000001 : 0x00001000 : R
1666  *	0x00000002 : 0x00001000 : P
1667  *
1668  */
1669 
1670 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
1671 		struct kobject *kobj, struct bin_attribute *attr,
1672 		char *buf, loff_t ppos, size_t count)
1673 {
1674 	struct amdgpu_ras *con =
1675 		container_of(attr, struct amdgpu_ras, badpages_attr);
1676 	struct amdgpu_device *adev = con->adev;
1677 	const unsigned int element_size =
1678 		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
1679 	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
1680 	unsigned int end = div64_ul(ppos + count - 1, element_size);
1681 	ssize_t s = 0;
1682 	struct ras_badpage *bps = NULL;
1683 	unsigned int bps_count = 0;
1684 
1685 	memset(buf, 0, count);
1686 
1687 	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
1688 		return 0;
1689 
1690 	for (; start < end && start < bps_count; start++)
1691 		s += scnprintf(&buf[s], element_size + 1,
1692 				"0x%08x : 0x%08x : %1s\n",
1693 				bps[start].bp,
1694 				bps[start].size,
1695 				amdgpu_ras_badpage_flags_str(bps[start].flags));
1696 
1697 	kfree(bps);
1698 
1699 	return s;
1700 }
1701 
1702 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
1703 		struct device_attribute *attr, char *buf)
1704 {
1705 	struct amdgpu_ras *con =
1706 		container_of(attr, struct amdgpu_ras, features_attr);
1707 
1708 	return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
1709 }
1710 
1711 static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
1712 		struct device_attribute *attr, char *buf)
1713 {
1714 	struct amdgpu_ras *con =
1715 		container_of(attr, struct amdgpu_ras, version_attr);
1716 	return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
1717 }
1718 
1719 static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
1720 		struct device_attribute *attr, char *buf)
1721 {
1722 	struct amdgpu_ras *con =
1723 		container_of(attr, struct amdgpu_ras, schema_attr);
1724 	return sysfs_emit(buf, "schema: 0x%x\n", con->schema);
1725 }
1726 
1727 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
1728 {
1729 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1730 
1731 	if (adev->dev->kobj.sd)
1732 		sysfs_remove_file_from_group(&adev->dev->kobj,
1733 				&con->badpages_attr.attr,
1734 				RAS_FS_NAME);
1735 }
1736 
1737 static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev)
1738 {
1739 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1740 	struct attribute *attrs[] = {
1741 		&con->features_attr.attr,
1742 		&con->version_attr.attr,
1743 		&con->schema_attr.attr,
1744 		NULL
1745 	};
1746 	struct attribute_group group = {
1747 		.name = RAS_FS_NAME,
1748 		.attrs = attrs,
1749 	};
1750 
1751 	if (adev->dev->kobj.sd)
1752 		sysfs_remove_group(&adev->dev->kobj, &group);
1753 
1754 	return 0;
1755 }
1756 
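/* Create the per-block <block>_err_count attribute in the "ras" sysfs
 * group.  Skipped when ACA is enabled, as the ACA path is expected to
 * manage its own sysfs nodes.
 */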
1757 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
1758 		struct ras_common_if *head)
1759 {
1760 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1761 
1762 	if (amdgpu_aca_is_enabled(adev))
1763 		return 0;
1764 
1765 	if (!obj || obj->attr_inuse)
1766 		return -EINVAL;
1767 
1768 	get_obj(obj);
1769 
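	/* e.g. this yields "umc_err_count" under the "ras" sysfs group */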
1770 	snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
1771 		"%s_err_count", head->name);
1772 
1773 	obj->sysfs_attr = (struct device_attribute){
1774 		.attr = {
1775 			.name = obj->fs_data.sysfs_name,
1776 			.mode = S_IRUGO,
1777 		},
1778 			.show = amdgpu_ras_sysfs_read,
1779 	};
1780 	sysfs_attr_init(&obj->sysfs_attr.attr);
1781 
1782 	if (sysfs_add_file_to_group(&adev->dev->kobj,
1783 				&obj->sysfs_attr.attr,
1784 				RAS_FS_NAME)) {
1785 		put_obj(obj);
1786 		return -EINVAL;
1787 	}
1788 
1789 	obj->attr_inuse = 1;
1790 
1791 	return 0;
1792 }
1793 
1794 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
1795 		struct ras_common_if *head)
1796 {
1797 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1798 
1799 	if (amdgpu_aca_is_enabled(adev))
1800 		return 0;
1801 
1802 	if (!obj || !obj->attr_inuse)
1803 		return -EINVAL;
1804 
1805 	if (adev->dev->kobj.sd)
1806 		sysfs_remove_file_from_group(&adev->dev->kobj,
1807 				&obj->sysfs_attr.attr,
1808 				RAS_FS_NAME);
1809 	obj->attr_inuse = 0;
1810 	put_obj(obj);
1811 
1812 	return 0;
1813 }
1814 
1815 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
1816 {
1817 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1818 	struct ras_manager *obj, *tmp;
1819 
1820 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
1821 		amdgpu_ras_sysfs_remove(adev, &obj->head);
1822 	}
1823 
1824 	if (amdgpu_bad_page_threshold != 0)
1825 		amdgpu_ras_sysfs_remove_bad_page_node(adev);
1826 
1827 	amdgpu_ras_sysfs_remove_dev_attr_node(adev);
1828 
1829 	return 0;
1830 }
1831 /* sysfs end */
1832 
1833 /**
1834  * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
1835  *
 * Normally when there is an uncorrectable error, the driver will reset
 * the GPU to recover.  However, in the event of an unrecoverable error,
 * the driver provides an interface to reboot the system automatically.
1840  *
1841  * The following file in debugfs provides that interface:
1842  * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
1843  *
1844  * Usage:
1845  *
1846  * .. code-block:: bash
1847  *
1848  *	echo true > .../ras/auto_reboot
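 *
 *	# the node is a boolean, so it can be switched off again with:
 *	echo 0 > .../ras/auto_reboot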
1849  *
1850  */
1851 /* debugfs begin */
1852 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
1853 {
1854 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1855 	struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
1856 	struct drm_minor  *minor = adev_to_drm(adev)->primary;
1857 	struct dentry     *dir;
1858 
1859 	dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
1860 	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
1861 			    &amdgpu_ras_debugfs_ctrl_ops);
1862 	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
1863 			    &amdgpu_ras_debugfs_eeprom_ops);
1864 	debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
1865 			   &con->bad_page_cnt_threshold);
1866 	debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
1867 	debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
1868 	debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
1869 	debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
1870 			    &amdgpu_ras_debugfs_eeprom_size_ops);
1871 	con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
1872 						       S_IRUGO, dir, adev,
1873 						       &amdgpu_ras_debugfs_eeprom_table_ops);
1874 	amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
1875 
	/*
	 * After an uncorrectable error occurs, GPU recovery is usually
	 * scheduled. But because GPU recovery is known to sometimes fail
	 * to bring the GPU back, the interface below gives the user a
	 * direct way to reboot the system automatically when an
	 * ERREVENT_ATHUB_INTERRUPT is generated. The normal GPU recovery
	 * routine is never called in that case.
	 */
1884 	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
1885 
	/*
	 * The user can set this to skip clearing the hardware error count
	 * registers of RAS IPs during ras recovery.
	 */
1890 	debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
1891 			    &con->disable_ras_err_cnt_harvest);
1892 	return dir;
1893 }
1894 
1895 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
1896 				      struct ras_fs_if *head,
1897 				      struct dentry *dir)
1898 {
1899 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
1900 
1901 	if (!obj || !dir)
1902 		return;
1903 
1904 	get_obj(obj);
1905 
1906 	memcpy(obj->fs_data.debugfs_name,
1907 			head->debugfs_name,
1908 			sizeof(obj->fs_data.debugfs_name));
1909 
1910 	debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
1911 			    obj, &amdgpu_ras_debugfs_ops);
1912 }
1913 
1914 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
1915 {
1916 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1917 	struct dentry *dir;
1918 	struct ras_manager *obj;
1919 	struct ras_fs_if fs_info;
1920 
	/*
	 * This is not called in the resume path, so there is no need to
	 * check suspend or gpu reset status.
	 */
1925 	if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
1926 		return;
1927 
1928 	dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
1929 
1930 	list_for_each_entry(obj, &con->head, node) {
1931 		if (amdgpu_ras_is_supported(adev, obj->head.block) &&
1932 			(obj->attr_inuse == 1)) {
1933 			sprintf(fs_info.debugfs_name, "%s_err_inject",
1934 					get_ras_block_str(&obj->head));
1935 			fs_info.head = obj->head;
1936 			amdgpu_ras_debugfs_create(adev, &fs_info, dir);
1937 		}
1938 	}
1939 
1940 	if (amdgpu_aca_is_enabled(adev))
1941 		amdgpu_aca_smu_debugfs_init(adev, dir);
1942 	else
1943 		amdgpu_mca_smu_debugfs_init(adev, dir);
1944 }
1945 
1946 /* debugfs end */
1947 
1948 /* ras fs */
1949 static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
1950 		amdgpu_ras_sysfs_badpages_read, NULL, 0);
1951 static DEVICE_ATTR(features, S_IRUGO,
1952 		amdgpu_ras_sysfs_features_read, NULL);
1953 static DEVICE_ATTR(version, 0444,
1954 		amdgpu_ras_sysfs_version_show, NULL);
1955 static DEVICE_ATTR(schema, 0444,
1956 		amdgpu_ras_sysfs_schema_show, NULL);
1957 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
1958 {
1959 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1960 	struct attribute_group group = {
1961 		.name = RAS_FS_NAME,
1962 	};
1963 	struct attribute *attrs[] = {
1964 		&con->features_attr.attr,
1965 		&con->version_attr.attr,
1966 		&con->schema_attr.attr,
1967 		NULL
1968 	};
1969 	struct bin_attribute *bin_attrs[] = {
1970 		NULL,
1971 		NULL,
1972 	};
1973 	int r;
1974 
1975 	group.attrs = attrs;
1976 
1977 	/* add features entry */
1978 	con->features_attr = dev_attr_features;
1979 	sysfs_attr_init(attrs[0]);
1980 
1981 	/* add version entry */
1982 	con->version_attr = dev_attr_version;
1983 	sysfs_attr_init(attrs[1]);
1984 
1985 	/* add schema entry */
1986 	con->schema_attr = dev_attr_schema;
1987 	sysfs_attr_init(attrs[2]);
1988 
1989 	if (amdgpu_bad_page_threshold != 0) {
1990 		/* add bad_page_features entry */
1991 		bin_attr_gpu_vram_bad_pages.private = NULL;
1992 		con->badpages_attr = bin_attr_gpu_vram_bad_pages;
1993 		bin_attrs[0] = &con->badpages_attr;
1994 		group.bin_attrs = bin_attrs;
1995 		sysfs_bin_attr_init(bin_attrs[0]);
1996 	}
1997 
1998 	r = sysfs_create_group(&adev->dev->kobj, &group);
1999 	if (r)
2000 		dev_err(adev->dev, "Failed to create RAS sysfs group!");
2001 
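	/* A sysfs group creation failure is logged but not treated as
	 * fatal, so driver init can continue without RAS fs nodes.
	 */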
2002 	return 0;
2003 }
2004 
2005 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
2006 {
2007 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2008 	struct ras_manager *con_obj, *ip_obj, *tmp;
2009 
2010 	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2011 		list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
2012 			ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
2013 			if (ip_obj)
2014 				put_obj(ip_obj);
2015 		}
2016 	}
2017 
2018 	amdgpu_ras_sysfs_remove_all(adev);
2019 	return 0;
2020 }
2021 /* ras fs end */
2022 
2023 /* ih begin */
2024 
/* For hardware that cannot enable the bif ring for both the
 * ras_controller_irq and ras_err_event_athub_irq ih cookies, the driver
 * has to poll the status register to check whether the interrupt has
 * triggered, and properly ack it if so.
 */
2030 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
2031 {
2032 	/* Fatal error events are handled on host side */
2033 	if (amdgpu_sriov_vf(adev))
2034 		return;
2035 
2036 	if (adev->nbio.ras &&
2037 	    adev->nbio.ras->handle_ras_controller_intr_no_bifring)
2038 		adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
2039 
2040 	if (adev->nbio.ras &&
2041 	    adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
2042 		adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
2043 }
2044 
2045 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
2046 				struct amdgpu_iv_entry *entry)
2047 {
2048 	bool poison_stat = false;
2049 	struct amdgpu_device *adev = obj->adev;
2050 	struct amdgpu_ras_block_object *block_obj =
2051 		amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
2052 
2053 	if (!block_obj)
2054 		return;
2055 
	/* Both query_poison_status and handle_poison_consumption are
	 * optional, but at least one of them should be implemented if a
	 * poison consumption handler is needed.
	 */
2060 	if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
2061 		poison_stat = block_obj->hw_ops->query_poison_status(adev);
2062 		if (!poison_stat) {
2063 			/* Not poison consumption interrupt, no need to handle it */
2064 			dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
2065 					block_obj->ras_comm.name);
2066 
2067 			return;
2068 		}
2069 	}
2070 
2071 	amdgpu_umc_poison_handler(adev, obj->head.block, 0);
2072 
2073 	if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
2074 		poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
2075 
2076 	/* gpu reset is fallback for failed and default cases */
2077 	if (poison_stat) {
2078 		dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
2079 				block_obj->ras_comm.name);
2080 		amdgpu_ras_reset_gpu(adev);
2081 	} else {
2082 		amdgpu_gfx_poison_consumption_handler(adev, entry);
2083 	}
2084 }
2085 
2086 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
2087 				struct amdgpu_iv_entry *entry)
2088 {
	dev_info(obj->adev->dev, "Poison is created\n");
2091 
2092 	if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) {
2093 		struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev);
2094 
2095 		amdgpu_ras_put_poison_req(obj->adev,
2096 			AMDGPU_RAS_BLOCK__UMC, 0, NULL, NULL, false);
2097 
2098 		atomic_inc(&con->page_retirement_req_cnt);
2099 
2100 		wake_up(&con->page_retirement_wq);
2101 	}
2102 }
2103 
2104 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
2105 				struct amdgpu_iv_entry *entry)
2106 {
2107 	struct ras_ih_data *data = &obj->ih_data;
2108 	struct ras_err_data err_data;
2109 	int ret;
2110 
2111 	if (!data->cb)
2112 		return;
2113 
2114 	ret = amdgpu_ras_error_data_init(&err_data);
2115 	if (ret)
2116 		return;
2117 
	/* Let the IP handle its data; we may need the output from the
	 * callback to update the error type/count, etc.
	 */
2121 	ret = data->cb(obj->adev, &err_data, entry);
	/* A UE will trigger an interrupt, and in that case we need a reset
	 * to recover the whole system. But leave that recovery to the IP;
	 * here we just dispatch the error.
	 */
2127 	if (ret == AMDGPU_RAS_SUCCESS) {
		/* these counts may stay 0 if a block does not
		 * report error counts
		 */
2131 		obj->err_data.ue_count += err_data.ue_count;
2132 		obj->err_data.ce_count += err_data.ce_count;
2133 		obj->err_data.de_count += err_data.de_count;
2134 	}
2135 
2136 	amdgpu_ras_error_data_fini(&err_data);
2137 }
2138 
2139 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
2140 {
2141 	struct ras_ih_data *data = &obj->ih_data;
2142 	struct amdgpu_iv_entry entry;
2143 
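	/* Single-producer/single-consumer ring: the dispatch side advances
	 * wptr, this worker advances rptr, and the barriers order the
	 * entry copies against the pointer updates.
	 */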
2144 	while (data->rptr != data->wptr) {
2145 		rmb();
2146 		memcpy(&entry, &data->ring[data->rptr],
2147 				data->element_size);
2148 
2149 		wmb();
2150 		data->rptr = (data->aligned_element_size +
2151 				data->rptr) % data->ring_size;
2152 
2153 		if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
2154 			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2155 				amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
2156 			else
2157 				amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
2158 		} else {
2159 			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2160 				amdgpu_ras_interrupt_umc_handler(obj, &entry);
2161 			else
2162 				dev_warn(obj->adev->dev,
2163 					"No RAS interrupt handler for non-UMC block with poison disabled.\n");
2164 		}
2165 	}
2166 }
2167 
2168 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
2169 {
2170 	struct ras_ih_data *data =
2171 		container_of(work, struct ras_ih_data, ih_work);
2172 	struct ras_manager *obj =
2173 		container_of(data, struct ras_manager, ih_data);
2174 
2175 	amdgpu_ras_interrupt_handler(obj);
2176 }
2177 
2178 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
2179 		struct ras_dispatch_if *info)
2180 {
2181 	struct ras_manager *obj;
2182 	struct ras_ih_data *data;
2183 
2184 	obj = amdgpu_ras_find_obj(adev, &info->head);
2185 	if (!obj)
2186 		return -EINVAL;
2187 
2188 	data = &obj->ih_data;
2189 
2190 	if (data->inuse == 0)
2191 		return 0;
2192 
	/* The ring may overflow; unconsumed entries can be overwritten. */
2194 	memcpy(&data->ring[data->wptr], info->entry,
2195 			data->element_size);
2196 
2197 	wmb();
2198 	data->wptr = (data->aligned_element_size +
2199 			data->wptr) % data->ring_size;
2200 
2201 	schedule_work(&data->ih_work);
2202 
2203 	return 0;
2204 }
2205 
2206 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
2207 		struct ras_common_if *head)
2208 {
2209 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2210 	struct ras_ih_data *data;
2211 
2212 	if (!obj)
2213 		return -EINVAL;
2214 
2215 	data = &obj->ih_data;
2216 	if (data->inuse == 0)
2217 		return 0;
2218 
2219 	cancel_work_sync(&data->ih_work);
2220 
2221 	kfree(data->ring);
2222 	memset(data, 0, sizeof(*data));
2223 	put_obj(obj);
2224 
2225 	return 0;
2226 }
2227 
2228 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
2229 		struct ras_common_if *head)
2230 {
2231 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2232 	struct ras_ih_data *data;
2233 	struct amdgpu_ras_block_object *ras_obj;
2234 
2235 	if (!obj) {
		/* in case we register the IH before enabling the ras feature */
2237 		obj = amdgpu_ras_create_obj(adev, head);
2238 		if (!obj)
2239 			return -EINVAL;
2240 	} else
2241 		get_obj(obj);
2242 
2243 	ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
2244 
2245 	data = &obj->ih_data;
	/* add the callback, etc. */
2247 	*data = (struct ras_ih_data) {
2248 		.inuse = 0,
2249 		.cb = ras_obj->ras_cb,
2250 		.element_size = sizeof(struct amdgpu_iv_entry),
2251 		.rptr = 0,
2252 		.wptr = 0,
2253 	};
2254 
2255 	INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
2256 
2257 	data->aligned_element_size = ALIGN(data->element_size, 8);
2258 	/* the ring can store 64 iv entries. */
2259 	data->ring_size = 64 * data->aligned_element_size;
2260 	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
2261 	if (!data->ring) {
2262 		put_obj(obj);
2263 		return -ENOMEM;
2264 	}
2265 
2266 	/* IH is ready */
2267 	data->inuse = 1;
2268 
2269 	return 0;
2270 }
2271 
2272 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
2273 {
2274 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2275 	struct ras_manager *obj, *tmp;
2276 
2277 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
2278 		amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
2279 	}
2280 
2281 	return 0;
2282 }
2283 /* ih end */
2284 
/* traverse all IPs except NBIO to query the error counter */
2286 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
2287 {
2288 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2289 	struct ras_manager *obj;
2290 
2291 	if (!adev->ras_enabled || !con)
2292 		return;
2293 
2294 	list_for_each_entry(obj, &con->head, node) {
2295 		struct ras_query_if info = {
2296 			.head = obj->head,
2297 		};
2298 
		/*
		 * The PCIE_BIF IP has a separate isr for the ras controller
		 * interrupt, and the specific ras counter query is done in
		 * that isr, so skip this block in the common sync flood
		 * interrupt isr.
		 */
2305 		if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
2306 			continue;
2307 
		/*
		 * Workaround for aldebaran: skip sending the message to the
		 * smu to get the ecc_info table, since the smu currently
		 * fails to handle that request. Remove this once the smu
		 * handles the ecc_info table correctly.
		 */
2314 		if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
2315 		    (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2316 		     IP_VERSION(13, 0, 2)))
2317 			continue;
2318 
2319 		amdgpu_ras_query_error_status(adev, &info);
2320 
2321 		if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2322 			    IP_VERSION(11, 0, 2) &&
2323 		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2324 			    IP_VERSION(11, 0, 4) &&
2325 		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2326 			    IP_VERSION(13, 0, 0)) {
2327 			if (amdgpu_ras_reset_error_status(adev, info.head.block))
2328 				dev_warn(adev->dev, "Failed to reset error counter and error status");
2329 		}
2330 	}
2331 }
2332 
2333 /* Parse RdRspStatus and WrRspStatus */
2334 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
2335 					  struct ras_query_if *info)
2336 {
2337 	struct amdgpu_ras_block_object *block_obj;
	/*
	 * Only two blocks need to query the read/write
	 * RspStatus at the current state
	 */
2342 	if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
2343 		(info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
2344 		return;
2345 
2346 	block_obj = amdgpu_ras_get_ras_block(adev,
2347 					info->head.block,
2348 					info->head.sub_block_index);
2349 
2350 	if (!block_obj || !block_obj->hw_ops) {
2351 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
2352 			     get_ras_block_str(&info->head));
2353 		return;
2354 	}
2355 
2356 	if (block_obj->hw_ops->query_ras_error_status)
2357 		block_obj->hw_ops->query_ras_error_status(adev);
2359 }
2360 
2361 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
2362 {
2363 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2364 	struct ras_manager *obj;
2365 
2366 	if (!adev->ras_enabled || !con)
2367 		return;
2368 
2369 	list_for_each_entry(obj, &con->head, node) {
2370 		struct ras_query_if info = {
2371 			.head = obj->head,
2372 		};
2373 
2374 		amdgpu_ras_error_status_query(adev, &info);
2375 	}
2376 }
2377 
2378 /* recovery begin */
2379 
/* Return 0 on success.
 * The caller must free bps.
 */
2383 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
2384 		struct ras_badpage **bps, unsigned int *count)
2385 {
2386 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2387 	struct ras_err_handler_data *data;
2388 	int i = 0;
2389 	int ret = 0, status;
2390 
2391 	if (!con || !con->eh_data || !bps || !count)
2392 		return -EINVAL;
2393 
2394 	mutex_lock(&con->recovery_lock);
2395 	data = con->eh_data;
2396 	if (!data || data->count == 0) {
2397 		*bps = NULL;
2398 		ret = -EINVAL;
2399 		goto out;
2400 	}
2401 
	*bps = kmalloc_array(data->count, sizeof(struct ras_badpage), GFP_KERNEL);
2403 	if (!*bps) {
2404 		ret = -ENOMEM;
2405 		goto out;
2406 	}
2407 
2408 	for (; i < data->count; i++) {
2409 		(*bps)[i] = (struct ras_badpage){
2410 			.bp = data->bps[i].retired_page,
2411 			.size = AMDGPU_GPU_PAGE_SIZE,
2412 			.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
2413 		};
2414 		status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
2415 				data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);
2416 		if (status == -EBUSY)
2417 			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
2418 		else if (status == -ENOENT)
2419 			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
2420 	}
2421 
2422 	*count = data->count;
2423 out:
2424 	mutex_unlock(&con->recovery_lock);
2425 	return ret;
2426 }
2427 
2428 static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
2429 				   struct amdgpu_hive_info *hive, bool status)
2430 {
2431 	struct amdgpu_device *tmp_adev;
2432 
2433 	if (hive) {
2434 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
2435 			amdgpu_ras_set_fed(tmp_adev, status);
2436 	} else {
2437 		amdgpu_ras_set_fed(adev, status);
2438 	}
2439 }
2440 
2441 static void amdgpu_ras_do_recovery(struct work_struct *work)
2442 {
2443 	struct amdgpu_ras *ras =
2444 		container_of(work, struct amdgpu_ras, recovery_work);
2445 	struct amdgpu_device *remote_adev = NULL;
2446 	struct amdgpu_device *adev = ras->adev;
2447 	struct list_head device_list, *device_list_handle =  NULL;
2448 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2449 
2450 	if (hive) {
2451 		atomic_set(&hive->ras_recovery, 1);
2452 
2453 		/* If any device which is part of the hive received RAS fatal
2454 		 * error interrupt, set fatal error status on all. This
2455 		 * condition will need a recovery, and flag will be cleared
2456 		 * as part of recovery.
2457 		 */
2458 		list_for_each_entry(remote_adev, &hive->device_list,
2459 				    gmc.xgmi.head)
2460 			if (amdgpu_ras_get_fed_status(remote_adev)) {
2461 				amdgpu_ras_set_fed_all(adev, hive, true);
2462 				break;
2463 			}
2464 	}
2465 	if (!ras->disable_ras_err_cnt_harvest) {
2466 
2467 		/* Build list of devices to query RAS related errors */
		if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
2469 			device_list_handle = &hive->device_list;
2470 		} else {
2471 			INIT_LIST_HEAD(&device_list);
2472 			list_add_tail(&adev->gmc.xgmi.head, &device_list);
2473 			device_list_handle = &device_list;
2474 		}
2475 
2476 		list_for_each_entry(remote_adev,
2477 				device_list_handle, gmc.xgmi.head) {
2478 			amdgpu_ras_query_err_status(remote_adev);
2479 			amdgpu_ras_log_on_err_counter(remote_adev);
2480 		}
2481 
2482 	}
2483 
2484 	if (amdgpu_device_should_recover_gpu(ras->adev)) {
		struct amdgpu_reset_context reset_context;

		memset(&reset_context, 0, sizeof(reset_context));
2487 
2488 		reset_context.method = AMD_RESET_METHOD_NONE;
2489 		reset_context.reset_req_dev = adev;
2490 
		/* Perform a full reset in fatal error mode */
		if (!amdgpu_ras_is_poison_mode_supported(ras->adev)) {
			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
		} else {
2495 			clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2496 
2497 			if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
2498 				ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
2499 				reset_context.method = AMD_RESET_METHOD_MODE2;
2500 			}
2501 
			/* A fatal error occurred in poison mode; a mode1
			 * reset is used to recover the gpu.
			 */
2505 			if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
2506 				ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2507 				set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2508 
2509 				psp_fatal_error_recovery_quirk(&adev->psp);
2510 			}
2511 		}
2512 
2513 		amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2514 	}
2515 	atomic_set(&ras->in_recovery, 0);
2516 	if (hive) {
2517 		atomic_set(&hive->ras_recovery, 0);
2518 		amdgpu_put_xgmi_hive(hive);
2519 	}
2520 }
2521 
2522 /* alloc/realloc bps array */
2523 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2524 		struct ras_err_handler_data *data, int pages)
2525 {
2526 	unsigned int old_space = data->count + data->space_left;
2527 	unsigned int new_space = old_space + pages;
2528 	unsigned int align_space = ALIGN(new_space, 512);
	void *bps = kmalloc_array(align_space, sizeof(*data->bps), GFP_KERNEL);

	if (!bps)
		return -ENOMEM;
2534 
2535 	if (data->bps) {
2536 		memcpy(bps, data->bps,
2537 				data->count * sizeof(*data->bps));
2538 		kfree(data->bps);
2539 	}
2540 
2541 	data->bps = bps;
2542 	data->space_left += align_space - old_space;
2543 	return 0;
2544 }
2545 
/* It deals with vram only. */
2547 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
2548 		struct eeprom_table_record *bps, int pages)
2549 {
2550 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2551 	struct ras_err_handler_data *data;
2552 	int ret = 0;
2553 	uint32_t i;
2554 
2555 	if (!con || !con->eh_data || !bps || pages <= 0)
2556 		return 0;
2557 
2558 	mutex_lock(&con->recovery_lock);
2559 	data = con->eh_data;
2560 	if (!data)
2561 		goto out;
2562 
2563 	for (i = 0; i < pages; i++) {
2564 		if (amdgpu_ras_check_bad_page_unlock(con,
2565 			bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2566 			continue;
2567 
2568 		if (!data->space_left &&
2569 			amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2570 			ret = -ENOMEM;
2571 			goto out;
2572 		}
2573 
2574 		amdgpu_ras_reserve_page(adev, bps[i].retired_page);
2575 
2576 		memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
2577 		data->count++;
2578 		data->space_left--;
2579 	}
2580 out:
2581 	mutex_unlock(&con->recovery_lock);
2582 
2583 	return ret;
2584 }
2585 
/*
 * Write the error record array to eeprom; the function should be
 * protected by recovery_lock.
 * new_cnt: newly added UE count, excluding reserved bad pages; can be NULL
 */
2591 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
2592 		unsigned long *new_cnt)
2593 {
2594 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2595 	struct ras_err_handler_data *data;
2596 	struct amdgpu_ras_eeprom_control *control;
2597 	int save_count;
2598 
2599 	if (!con || !con->eh_data) {
2600 		if (new_cnt)
2601 			*new_cnt = 0;
2602 
2603 		return 0;
2604 	}
2605 
2606 	mutex_lock(&con->recovery_lock);
2607 	control = &con->eeprom_control;
2608 	data = con->eh_data;
2609 	save_count = data->count - control->ras_num_recs;
2610 	mutex_unlock(&con->recovery_lock);
2611 
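	/* retire_unit pages are retired per UE, so dividing by it yields
	 * the number of newly added UEs
	 */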
2612 	if (new_cnt)
2613 		*new_cnt = save_count / adev->umc.retire_unit;
2614 
2615 	/* only new entries are saved */
2616 	if (save_count > 0) {
2617 		if (amdgpu_ras_eeprom_append(control,
2618 					     &data->bps[control->ras_num_recs],
2619 					     save_count)) {
2620 			dev_err(adev->dev, "Failed to save EEPROM table data!");
2621 			return -EIO;
2622 		}
2623 
2624 		dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
2625 	}
2626 
2627 	return 0;
2628 }
2629 
2630 /*
2631  * read error record array in eeprom and reserve enough space for
2632  * storing new bad pages
2633  */
2634 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
2635 {
2636 	struct amdgpu_ras_eeprom_control *control =
2637 		&adev->psp.ras_context.ras->eeprom_control;
2638 	struct eeprom_table_record *bps;
2639 	int ret;
2640 
2641 	/* no bad page record, skip eeprom access */
2642 	if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
2643 		return 0;
2644 
2645 	bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
2646 	if (!bps)
2647 		return -ENOMEM;
2648 
2649 	ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
2650 	if (ret)
2651 		dev_err(adev->dev, "Failed to load EEPROM table records!");
2652 	else
2653 		ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
2654 
2655 	kfree(bps);
2656 	return ret;
2657 }
2658 
2659 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
2660 				uint64_t addr)
2661 {
2662 	struct ras_err_handler_data *data = con->eh_data;
2663 	int i;
2664 
2665 	addr >>= AMDGPU_GPU_PAGE_SHIFT;
2666 	for (i = 0; i < data->count; i++)
2667 		if (addr == data->bps[i].retired_page)
2668 			return true;
2669 
2670 	return false;
2671 }
2672 
/*
 * Check whether an address belongs to a bad page
 *
 * Note: this check is only for the umc block
 */
2678 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
2679 				uint64_t addr)
2680 {
2681 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2682 	bool ret = false;
2683 
2684 	if (!con || !con->eh_data)
2685 		return ret;
2686 
2687 	mutex_lock(&con->recovery_lock);
2688 	ret = amdgpu_ras_check_bad_page_unlock(con, addr);
2689 	mutex_unlock(&con->recovery_lock);
2690 	return ret;
2691 }
2692 
2693 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
2694 					  uint32_t max_count)
2695 {
2696 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2697 
2698 	/*
2699 	 * Justification of value bad_page_cnt_threshold in ras structure
2700 	 *
	 * Generally, 0 <= amdgpu_bad_page_threshold <= max record length
	 * in eeprom, or amdgpu_bad_page_threshold == -2; two scenarios
	 * are handled accordingly.
2704 	 *
2705 	 * Bad page retirement enablement:
2706 	 *    - If amdgpu_bad_page_threshold = -2,
2707 	 *      bad_page_cnt_threshold = typical value by formula.
2708 	 *
2709 	 *    - When the value from user is 0 < amdgpu_bad_page_threshold <
2710 	 *      max record length in eeprom, use it directly.
2711 	 *
2712 	 * Bad page retirement disablement:
2713 	 *    - If amdgpu_bad_page_threshold = 0, bad page retirement
2714 	 *      functionality is disabled, and bad_page_cnt_threshold will
2715 	 *      take no effect.
2716 	 */
2717 
2718 	if (amdgpu_bad_page_threshold < 0) {
2719 		u64 val = adev->gmc.mc_vram_size;
2720 
2721 		do_div(val, RAS_BAD_PAGE_COVER);
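		/* e.g. 64 GiB of VRAM at one bad page per 100 MiB gives a
		 * threshold of 655 pages
		 */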
2722 		con->bad_page_cnt_threshold = min(lower_32_bits(val),
2723 						  max_count);
2724 	} else {
2725 		con->bad_page_cnt_threshold = min_t(int, max_count,
2726 						    amdgpu_bad_page_threshold);
2727 	}
2728 }
2729 
2730 int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
2731 		enum amdgpu_ras_block block, uint16_t pasid,
2732 		pasid_notify pasid_fn, void *data, uint32_t reset)
2733 {
2734 	int ret = 0;
2735 	struct ras_poison_msg poison_msg;
2736 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2737 
2738 	memset(&poison_msg, 0, sizeof(poison_msg));
2739 	poison_msg.block = block;
2740 	poison_msg.pasid = pasid;
2741 	poison_msg.reset = reset;
2742 	poison_msg.pasid_fn = pasid_fn;
2743 	poison_msg.data = data;
2744 
2745 	ret = kfifo_put(&con->poison_fifo, poison_msg);
2746 	if (!ret) {
2747 		dev_err(adev->dev, "Poison message fifo is full!\n");
2748 		return -ENOSPC;
2749 	}
2750 
2751 	return 0;
2752 }
2753 
2754 static int amdgpu_ras_get_poison_req(struct amdgpu_device *adev,
2755 		struct ras_poison_msg *poison_msg)
2756 {
2757 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2758 
2759 	return kfifo_get(&con->poison_fifo, poison_msg);
2760 }
2761 
2762 static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
2763 {
2764 	mutex_init(&ecc_log->lock);
2765 
2766 	/* Set any value as siphash key */
2767 	memset(&ecc_log->ecc_key, 0xad, sizeof(ecc_log->ecc_key));
2768 
2769 	INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
2770 	ecc_log->de_updated = false;
2771 }
2772 
2773 static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
2774 {
2775 	struct radix_tree_iter iter;
2776 	void __rcu **slot;
2777 	struct ras_ecc_err *ecc_err;
2778 
2779 	mutex_lock(&ecc_log->lock);
2780 	radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) {
2781 		ecc_err = radix_tree_deref_slot(slot);
2782 		kfree(ecc_err->err_pages.pfn);
2783 		kfree(ecc_err);
2784 		radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot);
2785 	}
2786 	mutex_unlock(&ecc_log->lock);
2787 
2788 	mutex_destroy(&ecc_log->lock);
2789 	ecc_log->de_updated = false;
2790 }
2791 
2792 static void amdgpu_ras_do_page_retirement(struct work_struct *work)
2793 {
2794 	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
2795 					      page_retirement_dwork.work);
2796 	struct amdgpu_device *adev = con->adev;
2797 	struct ras_err_data err_data;
2798 
2799 	if (amdgpu_in_reset(adev) || atomic_read(&con->in_recovery))
2800 		return;
2801 
2802 	amdgpu_ras_error_data_init(&err_data);
2803 
2804 	amdgpu_umc_handle_bad_pages(adev, &err_data);
2805 
2806 	amdgpu_ras_error_data_fini(&err_data);
2807 
2808 	mutex_lock(&con->umc_ecc_log.lock);
2809 	if (radix_tree_tagged(&con->umc_ecc_log.de_page_tree,
2810 				UMC_ECC_NEW_DETECTED_TAG))
2811 		schedule_delayed_work(&con->page_retirement_dwork,
2812 			msecs_to_jiffies(AMDGPU_RAS_RETIRE_PAGE_INTERVAL));
2813 	mutex_unlock(&con->umc_ecc_log.lock);
2814 }
2815 
2816 static void amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
2817 				uint32_t timeout_ms)
2818 {
2819 	int ret = 0;
2820 	struct ras_ecc_log_info *ecc_log;
2821 	struct ras_query_if info;
2822 	uint32_t timeout = timeout_ms;
2823 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2824 
2825 	memset(&info, 0, sizeof(info));
2826 	info.head.block = AMDGPU_RAS_BLOCK__UMC;
2827 
2828 	ecc_log = &ras->umc_ecc_log;
2829 	ecc_log->de_updated = false;
2830 	do {
2831 		ret = amdgpu_ras_query_error_status(adev, &info);
2832 		if (ret) {
2833 			dev_err(adev->dev, "Failed to query ras error! ret:%d\n", ret);
2834 			return;
2835 		}
2836 
2837 		if (timeout && !ecc_log->de_updated) {
2838 			msleep(1);
2839 			timeout--;
2840 		}
2841 	} while (timeout && !ecc_log->de_updated);
2842 
2843 	if (timeout_ms && !timeout) {
2844 		dev_warn(adev->dev, "Can't find deferred error\n");
2845 		return;
2846 	}
2847 
2848 	if (!ret)
2849 		schedule_delayed_work(&ras->page_retirement_dwork, 0);
2850 }
2851 
2852 static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
2853 			struct ras_poison_msg *poison_msg)
2854 {
2855 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2856 	uint32_t reset = poison_msg->reset;
2857 	uint16_t pasid = poison_msg->pasid;
2858 
2859 	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
2860 
2861 	if (poison_msg->pasid_fn)
2862 		poison_msg->pasid_fn(adev, pasid, poison_msg->data);
2863 
2864 	if (reset) {
2865 		flush_delayed_work(&con->page_retirement_dwork);
2866 
2867 		con->gpu_reset_flags |= reset;
2868 		amdgpu_ras_reset_gpu(adev);
2869 	}
2870 
2871 	return 0;
2872 }
2873 
2874 static int amdgpu_ras_page_retirement_thread(void *param)
2875 {
2876 	struct amdgpu_device *adev = (struct amdgpu_device *)param;
2877 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2878 	struct ras_poison_msg poison_msg;
2879 	enum amdgpu_ras_block ras_block;
2880 	bool poison_creation_is_handled = false;
2881 
2882 	while (!kthread_should_stop()) {
2883 
2884 		wait_event_interruptible(con->page_retirement_wq,
2885 				kthread_should_stop() ||
2886 				atomic_read(&con->page_retirement_req_cnt));
2887 
2888 		if (kthread_should_stop())
2889 			break;
2890 
2891 		atomic_dec(&con->page_retirement_req_cnt);
2892 
2893 		if (!amdgpu_ras_get_poison_req(adev, &poison_msg))
2894 			continue;
2895 
2896 		ras_block = poison_msg.block;
2897 
2898 		dev_dbg(adev->dev, "Start processing ras block %s(%d)\n",
2899 				ras_block_str(ras_block), ras_block);
2900 
2901 		if (ras_block == AMDGPU_RAS_BLOCK__UMC) {
2902 			amdgpu_ras_poison_creation_handler(adev,
2903 				MAX_UMC_POISON_POLLING_TIME_ASYNC);
2904 			poison_creation_is_handled = true;
2905 		} else {
			/* poison_creation_is_handled:
			 *   false: there was no poison creation interrupt,
			 *          only a poison consumption interrupt.
			 *   true:  a poison creation interrupt was handled
			 *          earlier, but none has arrived since.
			 */
2912 			amdgpu_ras_poison_creation_handler(adev,
2913 					poison_creation_is_handled ?
2914 					0 : MAX_UMC_POISON_POLLING_TIME_ASYNC);
2915 
2916 			amdgpu_ras_poison_consumption_handler(adev, &poison_msg);
2917 			poison_creation_is_handled = false;
2918 		}
2919 	}
2920 
2921 	return 0;
2922 }
2923 
2924 int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
2925 {
2926 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2927 	struct ras_err_handler_data **data;
2928 	u32  max_eeprom_records_count = 0;
2929 	bool exc_err_limit = false;
2930 	int ret;
2931 
2932 	if (!con || amdgpu_sriov_vf(adev))
2933 		return 0;
2934 
2935 	/* Allow access to RAS EEPROM via debugfs, when the ASIC
2936 	 * supports RAS and debugfs is enabled, but when
2937 	 * adev->ras_enabled is unset, i.e. when "ras_enable"
2938 	 * module parameter is set to 0.
2939 	 */
2940 	con->adev = adev;
2941 
2942 	if (!adev->ras_enabled)
2943 		return 0;
2944 
2945 	data = &con->eh_data;
2946 	*data = kzalloc(sizeof(**data), GFP_KERNEL);
2947 	if (!*data) {
2948 		ret = -ENOMEM;
2949 		goto out;
2950 	}
2951 
2952 	mutex_init(&con->recovery_lock);
2953 	INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
2954 	atomic_set(&con->in_recovery, 0);
2955 	con->eeprom_control.bad_channel_bitmap = 0;
2956 
2957 	max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
2958 	amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
2959 
	/* Todo: during testing the SMU might fail to read the eeprom through
	 * I2C when the GPU is pending an XGMI reset during probe time
	 * (mostly after a second bus reset); skip it for now
	 */
2964 	if (adev->gmc.xgmi.pending_reset)
2965 		return 0;
2966 	ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
	/*
	 * This call fails when exc_err_limit is true or
	 * ret != 0.
	 */
2971 	if (exc_err_limit || ret)
2972 		goto free;
2973 
2974 	if (con->eeprom_control.ras_num_recs) {
2975 		ret = amdgpu_ras_load_bad_pages(adev);
2976 		if (ret)
2977 			goto free;
2978 
2979 		amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
2980 
		if (con->update_channel_flag) {
2982 			amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
2983 			con->update_channel_flag = false;
2984 		}
2985 	}
2986 
2987 	mutex_init(&con->page_rsv_lock);
2988 	INIT_KFIFO(con->poison_fifo);
2989 	mutex_init(&con->page_retirement_lock);
2990 	init_waitqueue_head(&con->page_retirement_wq);
2991 	atomic_set(&con->page_retirement_req_cnt, 0);
2992 	con->page_retirement_thread =
2993 		kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
2994 	if (IS_ERR(con->page_retirement_thread)) {
2995 		con->page_retirement_thread = NULL;
2996 		dev_warn(adev->dev, "Failed to create umc_page_retirement thread!!!\n");
2997 	}
2998 
2999 	INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement);
3000 	amdgpu_ras_ecc_log_init(&con->umc_ecc_log);
3001 #ifdef CONFIG_X86_MCE_AMD
3002 	if ((adev->asic_type == CHIP_ALDEBARAN) &&
3003 	    (adev->gmc.xgmi.connected_to_cpu))
3004 		amdgpu_register_bad_pages_mca_notifier(adev);
3005 #endif
3006 	return 0;
3007 
3008 free:
3009 	kfree((*data)->bps);
3010 	kfree(*data);
3011 	con->eh_data = NULL;
3012 out:
3013 	dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
3014 
	/*
	 * Except for the error-threshold-exceeded case, other failures in
	 * this function do not fail amdgpu driver init.
	 */
3019 	if (!exc_err_limit)
3020 		ret = 0;
3021 	else
3022 		ret = -EINVAL;
3023 
3024 	return ret;
3025 }
3026 
3027 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
3028 {
3029 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3030 	struct ras_err_handler_data *data = con->eh_data;
3031 
	/* if recovery_init failed to init it, fini is useless */
3033 	if (!data)
3034 		return 0;
3035 
3036 	if (con->page_retirement_thread)
3037 		kthread_stop(con->page_retirement_thread);
3038 
3039 	atomic_set(&con->page_retirement_req_cnt, 0);
3040 
3041 	mutex_destroy(&con->page_rsv_lock);
3042 
3043 	cancel_work_sync(&con->recovery_work);
3044 
3045 	cancel_delayed_work_sync(&con->page_retirement_dwork);
3046 
3047 	amdgpu_ras_ecc_log_fini(&con->umc_ecc_log);
3048 
3049 	mutex_lock(&con->recovery_lock);
3050 	con->eh_data = NULL;
3051 	kfree(data->bps);
3052 	kfree(data);
3053 	mutex_unlock(&con->recovery_lock);
3054 
3055 	return 0;
3056 }
3057 /* recovery end */
3058 
3059 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
3060 {
3061 	if (amdgpu_sriov_vf(adev)) {
3062 		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3063 		case IP_VERSION(13, 0, 2):
3064 		case IP_VERSION(13, 0, 6):
3065 		case IP_VERSION(13, 0, 14):
3066 			return true;
3067 		default:
3068 			return false;
3069 		}
3070 	}
3071 
3072 	if (adev->asic_type == CHIP_IP_DISCOVERY) {
3073 		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3074 		case IP_VERSION(13, 0, 0):
3075 		case IP_VERSION(13, 0, 6):
3076 		case IP_VERSION(13, 0, 10):
3077 		case IP_VERSION(13, 0, 14):
3078 			return true;
3079 		default:
3080 			return false;
3081 		}
3082 	}
3083 
3084 	return adev->asic_type == CHIP_VEGA10 ||
3085 		adev->asic_type == CHIP_VEGA20 ||
3086 		adev->asic_type == CHIP_ARCTURUS ||
3087 		adev->asic_type == CHIP_ALDEBARAN ||
3088 		adev->asic_type == CHIP_SIENNA_CICHLID;
3089 }
3090 
/*
 * This is a workaround for the vega20 workstation sku:
 * force enable gfx ras and ignore the vbios gfx ras flag,
 * because GC EDC cannot be written
 */
3096 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
3097 {
3098 	struct atom_context *ctx = adev->mode_info.atom_context;
3099 
3100 	if (!ctx)
3101 		return;
3102 
3103 	if (strnstr(ctx->vbios_pn, "D16406",
3104 		    sizeof(ctx->vbios_pn)) ||
3105 		strnstr(ctx->vbios_pn, "D36002",
3106 			sizeof(ctx->vbios_pn)))
3107 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
3108 }
3109 
/* Query ras capability via the atomfirmware interface */
static void amdgpu_ras_query_ras_capability_from_vbios(struct amdgpu_device *adev)
3112 {
3113 	/* mem_ecc cap */
3114 	if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
3115 		dev_info(adev->dev, "MEM ECC is active.\n");
3116 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
3117 					 1 << AMDGPU_RAS_BLOCK__DF);
3118 	} else {
		dev_info(adev->dev, "MEM ECC is not present.\n");
3120 	}
3121 
3122 	/* sram_ecc cap */
3123 	if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
3124 		dev_info(adev->dev, "SRAM ECC is active.\n");
3125 		if (!amdgpu_sriov_vf(adev))
3126 			adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
3127 						  1 << AMDGPU_RAS_BLOCK__DF);
3128 		else
3129 			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
3130 						 1 << AMDGPU_RAS_BLOCK__SDMA |
3131 						 1 << AMDGPU_RAS_BLOCK__GFX);
3132 
3133 		/*
3134 		 * VCN/JPEG RAS can be supported on both bare metal and
3135 		 * SRIOV environment
3136 		 */
3137 		if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
3138 		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
3139 		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
3140 			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
3141 						 1 << AMDGPU_RAS_BLOCK__JPEG);
3142 		else
3143 			adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
3144 						  1 << AMDGPU_RAS_BLOCK__JPEG);
3145 
3146 		/*
3147 		 * XGMI RAS is not supported if xgmi num physical nodes
3148 		 * is zero
3149 		 */
3150 		if (!adev->gmc.xgmi.num_physical_nodes)
3151 			adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
3152 	} else {
		dev_info(adev->dev, "SRAM ECC is not present.\n");
3154 	}
3155 }
3156 
3157 /* Query poison mode from umc/df IP callbacks */
3158 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
3159 {
3160 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3161 	bool df_poison, umc_poison;
3162 
3163 	/* poison setting is useless on SRIOV guest */
3164 	if (amdgpu_sriov_vf(adev) || !con)
3165 		return;
3166 
3167 	/* Init poison supported flag, the default value is false */
3168 	if (adev->gmc.xgmi.connected_to_cpu ||
3169 	    adev->gmc.is_app_apu) {
3170 		/* enabled by default when GPU is connected to CPU */
3171 		con->poison_supported = true;
3172 	} else if (adev->df.funcs &&
3173 	    adev->df.funcs->query_ras_poison_mode &&
3174 	    adev->umc.ras &&
3175 	    adev->umc.ras->query_ras_poison_mode) {
3176 		df_poison =
3177 			adev->df.funcs->query_ras_poison_mode(adev);
3178 		umc_poison =
3179 			adev->umc.ras->query_ras_poison_mode(adev);
3180 
		/* Only if poison is set in both DF and UMC do we support it */
3182 		if (df_poison && umc_poison)
3183 			con->poison_supported = true;
3184 		else if (df_poison != umc_poison)
3185 			dev_warn(adev->dev,
3186 				"Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
3187 				df_poison, umc_poison);
3188 	}
3189 }
3190 
/*
 * Check the hardware's ras ability, which is saved in hw_supported.
 * If the hardware does not support ras, we can skip some ras
 * initialization and forbid some ras operations from IPs.
 * If software itself (say, a boot parameter) limits the ras ability, we
 * still need to allow IPs to do some limited operations, like disable.
 * In that case we have to initialize ras as normal, but must check in
 * each function whether the operation is allowed.
 */
3200 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
3201 {
3202 	adev->ras_hw_enabled = adev->ras_enabled = 0;
3203 
3204 	if (!amdgpu_ras_asic_supported(adev))
3205 		return;
3206 
3207 	/* query ras capability from psp */
3208 	if (amdgpu_psp_get_ras_capability(&adev->psp))
3209 		goto init_ras_enabled_flag;
3210 
	/* query ras capability from bios */
3212 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
		amdgpu_ras_query_ras_capability_from_vbios(adev);
3214 	} else {
		/* the driver only manages the RAS features of a few IP
		 * blocks when the GPU is connected to the cpu through XGMI */
3217 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
3218 					   1 << AMDGPU_RAS_BLOCK__SDMA |
3219 					   1 << AMDGPU_RAS_BLOCK__MMHUB);
3220 	}
3221 
3222 	/* apply asic specific settings (vega20 only for now) */
3223 	amdgpu_ras_get_quirks(adev);
3224 
3225 	/* query poison mode from umc/df ip callback */
3226 	amdgpu_ras_query_poison_mode(adev);
3227 
3228 init_ras_enabled_flag:
3229 	/* hw_supported needs to be aligned with RAS block mask. */
3230 	adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
3231 
3232 	adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
3233 		adev->ras_hw_enabled & amdgpu_ras_mask;
3234 
3235 	/* aca is disabled by default */
3236 	adev->aca.is_enabled = false;
3237 }
3238 
3239 static void amdgpu_ras_counte_dw(struct work_struct *work)
3240 {
3241 	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
3242 					      ras_counte_delay_work.work);
3243 	struct amdgpu_device *adev = con->adev;
3244 	struct drm_device *dev = adev_to_drm(adev);
3245 	unsigned long ce_count, ue_count;
3246 	int res;
3247 
3248 	res = pm_runtime_get_sync(dev->dev);
3249 	if (res < 0)
3250 		goto Out;
3251 
3252 	/* Cache new values.
3253 	 */
3254 	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
3255 		atomic_set(&con->ras_ce_count, ce_count);
3256 		atomic_set(&con->ras_ue_count, ue_count);
3257 	}
3258 
3259 	pm_runtime_mark_last_busy(dev->dev);
3260 Out:
3261 	pm_runtime_put_autosuspend(dev->dev);
3262 }
3263 
3264 static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
3265 {
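	/* The schema always advertises the parity/CE/UE error types; the
	 * POISON bit is added only when poison mode is supported.
	 */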
	return (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) |
			AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
			AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
			AMDGPU_RAS_ERROR__PARITY;
3270 }
3271 
3272 static void ras_event_mgr_init(struct ras_event_manager *mgr)
3273 {
3274 	int i;
3275 
3276 	for (i = 0; i < ARRAY_SIZE(mgr->seqnos); i++)
3277 		atomic64_set(&mgr->seqnos[i], 0);
3278 }
3279 
3280 static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev)
3281 {
3282 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3283 	struct amdgpu_hive_info *hive;
3284 
3285 	if (!ras)
3286 		return;
3287 
3288 	hive = amdgpu_get_xgmi_hive(adev);
3289 	ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr;
3290 
3291 	/* init event manager with node 0 on xgmi system */
3292 	if (!amdgpu_in_reset(adev)) {
3293 		if (!hive || adev->gmc.xgmi.node_id == 0)
3294 			ras_event_mgr_init(ras->event_mgr);
3295 	}
3296 
3297 	if (hive)
3298 		amdgpu_put_xgmi_hive(hive);
3299 }
3300 
3301 static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev)
3302 {
3303 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3304 
3305 	if (!con || (adev->flags & AMD_IS_APU))
3306 		return;
3307 
3308 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3309 	case IP_VERSION(13, 0, 2):
3310 	case IP_VERSION(13, 0, 6):
3311 	case IP_VERSION(13, 0, 14):
3312 		con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE;
3313 		break;
3314 	default:
3315 		break;
3316 	}
3317 }
3318 
3319 int amdgpu_ras_init(struct amdgpu_device *adev)
3320 {
3321 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3322 	int r;
3323 
3324 	if (con)
3325 		return 0;
3326 
3327 	con = kzalloc(sizeof(*con) +
3328 			sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
3329 			sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
3330 			GFP_KERNEL);
3331 	if (!con)
3332 		return -ENOMEM;
3333 
3334 	con->adev = adev;
3335 	INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
3336 	atomic_set(&con->ras_ce_count, 0);
3337 	atomic_set(&con->ras_ue_count, 0);
3338 
3339 	con->objs = (struct ras_manager *)(con + 1);
3340 
3341 	amdgpu_ras_set_context(adev, con);
3342 
3343 	amdgpu_ras_check_supported(adev);
3344 
3345 	if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
3346 		/* set gfx block ras context feature for VEGA20 Gaming
3347 		 * send ras disable cmd to ras ta during ras late init.
3348 		 */
3349 		if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
3350 			con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
3351 
3352 			return 0;
3353 		}
3354 
3355 		r = 0;
3356 		goto release_con;
3357 	}
3358 
3359 	con->update_channel_flag = false;
3360 	con->features = 0;
3361 	con->schema = 0;
3362 	INIT_LIST_HEAD(&con->head);
	/* Might need to get this flag from vbios. */
3364 	con->flags = RAS_DEFAULT_FLAGS;
3365 
3366 	/* initialize nbio ras function ahead of any other
3367 	 * ras functions so hardware fatal error interrupt
3368 	 * can be enabled as early as possible */
3369 	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
3370 	case IP_VERSION(7, 4, 0):
3371 	case IP_VERSION(7, 4, 1):
3372 	case IP_VERSION(7, 4, 4):
3373 		if (!adev->gmc.xgmi.connected_to_cpu)
3374 			adev->nbio.ras = &nbio_v7_4_ras;
3375 		break;
3376 	case IP_VERSION(4, 3, 0):
3377 		if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
			/* unlike other generations of nbio ras,
			 * nbio v4_3 only supports a fatal error interrupt
			 * to inform software that DF is frozen due to a
			 * system fatal error event. The driver should not
			 * enable nbio ras in such a case. Instead,
			 * check DF RAS */
3384 			adev->nbio.ras = &nbio_v4_3_ras;
3385 		break;
3386 	case IP_VERSION(7, 9, 0):
3387 		if (!adev->gmc.is_app_apu)
3388 			adev->nbio.ras = &nbio_v7_9_ras;
3389 		break;
3390 	default:
3391 		/* nbio ras is not available */
3392 		break;
3393 	}
3394 
3395 	/* nbio ras block needs to be enabled ahead of other ras blocks
3396 	 * to handle fatal error */
3397 	r = amdgpu_nbio_ras_sw_init(adev);
3398 	if (r)
3399 		return r;
3400 
3401 	if (adev->nbio.ras &&
3402 	    adev->nbio.ras->init_ras_controller_interrupt) {
3403 		r = adev->nbio.ras->init_ras_controller_interrupt(adev);
3404 		if (r)
3405 			goto release_con;
3406 	}
3407 
3408 	if (adev->nbio.ras &&
3409 	    adev->nbio.ras->init_ras_err_event_athub_interrupt) {
3410 		r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
3411 		if (r)
3412 			goto release_con;
3413 	}
3414 
	/* Pack the socket_id into ras feature mask bits [31:29] */
3416 	if (adev->smuio.funcs &&
3417 	    adev->smuio.funcs->get_socket_id)
3418 		con->features |= ((adev->smuio.funcs->get_socket_id(adev)) <<
3419 					AMDGPU_RAS_FEATURES_SOCKETID_SHIFT);
3420 
3421 	/* Get RAS schema for particular SOC */
3422 	con->schema = amdgpu_get_ras_schema(adev);
3423 
3424 	amdgpu_ras_init_reserved_vram_size(adev);
3425 
3426 	if (amdgpu_ras_fs_init(adev)) {
3427 		r = -EINVAL;
3428 		goto release_con;
3429 	}
3430 
3431 	dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
3432 		 "hardware ability[%x] ras_mask[%x]\n",
3433 		 adev->ras_hw_enabled, adev->ras_enabled);
3434 
3435 	return 0;
3436 release_con:
3437 	amdgpu_ras_set_context(adev, NULL);
3438 	kfree(con);
3439 
3440 	return r;
3441 }
3442 
3443 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
3444 {
3445 	if (adev->gmc.xgmi.connected_to_cpu ||
3446 	    adev->gmc.is_app_apu)
3447 		return 1;
3448 	return 0;
3449 }
3450 
3451 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
3452 					struct ras_common_if *ras_block)
3453 {
3454 	struct ras_query_if info = {
3455 		.head = *ras_block,
3456 	};
3457 
3458 	if (!amdgpu_persistent_edc_harvesting_supported(adev))
3459 		return 0;
3460 
3461 	if (amdgpu_ras_query_error_status(adev, &info) != 0)
3462 		DRM_WARN("RAS init harvest failure");
3463 
3464 	if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
3465 		DRM_WARN("RAS init harvest reset failure");
3466 
3467 	return 0;
3468 }
3469 
3470 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
3471 {
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return false;

	return con->poison_supported;
3478 }
3479 
3480 /* helper function to handle common stuff in ip late init phase */
3481 int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
3482 			 struct ras_common_if *ras_block)
3483 {
3484 	struct amdgpu_ras_block_object *ras_obj = NULL;
3485 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3486 	struct ras_query_if *query_info;
3487 	unsigned long ue_count, ce_count;
3488 	int r;
3489 
3490 	/* disable RAS feature per IP block if it is not supported */
3491 	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
3492 		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
3493 		return 0;
3494 	}
3495 
3496 	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
3497 	if (r) {
		if (adev->in_suspend || amdgpu_in_reset(adev)) {
			/* in the resume phase, if we fail to enable ras,
			 * clean up all ras fs nodes and disable ras */
			goto cleanup;
		} else {
			return r;
		}
3504 	}
3505 
	/* check for errors on warm reset on ASICs supporting persistent edc */
3507 	amdgpu_persistent_edc_harvesting(adev, ras_block);
3508 
3509 	/* in resume phase, no need to create ras fs node */
3510 	if (adev->in_suspend || amdgpu_in_reset(adev))
3511 		return 0;
3512 
3513 	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
3514 	if (ras_obj->ras_cb || (ras_obj->hw_ops &&
3515 	    (ras_obj->hw_ops->query_poison_status ||
3516 	    ras_obj->hw_ops->handle_poison_consumption))) {
3517 		r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
3518 		if (r)
3519 			goto cleanup;
3520 	}
3521 
3522 	if (ras_obj->hw_ops &&
3523 	    (ras_obj->hw_ops->query_ras_error_count ||
3524 	     ras_obj->hw_ops->query_ras_error_status)) {
3525 		r = amdgpu_ras_sysfs_create(adev, ras_block);
3526 		if (r)
3527 			goto interrupt;
3528 
		/* These are the cached values at init. */
3531 		query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
3532 		if (!query_info)
3533 			return -ENOMEM;
3534 		memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
3535 
3536 		if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
3537 			atomic_set(&con->ras_ce_count, ce_count);
3538 			atomic_set(&con->ras_ue_count, ue_count);
3539 		}
3540 
3541 		kfree(query_info);
3542 	}
3543 
3544 	return 0;
3545 
3546 interrupt:
3547 	if (ras_obj->ras_cb)
3548 		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
3549 cleanup:
3550 	amdgpu_ras_feature_enable(adev, ras_block, 0);
3551 	return r;
3552 }
3553 
3554 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
3555 			 struct ras_common_if *ras_block)
3556 {
3557 	return amdgpu_ras_block_late_init(adev, ras_block);
3558 }
3559 
3560 /* helper function to remove ras fs node and interrupt handler */
3561 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
3562 			  struct ras_common_if *ras_block)
3563 {
	struct amdgpu_ras_block_object *ras_obj;

3565 	if (!ras_block)
3566 		return;
3567 
3568 	amdgpu_ras_sysfs_remove(adev, ras_block);
3569 
3570 	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
3571 	if (ras_obj->ras_cb)
3572 		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
3573 }
3574 
3575 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
3576 			  struct ras_common_if *ras_block)
3577 {
3578 	return amdgpu_ras_block_late_fini(adev, ras_block);
3579 }
3580 
/* Do some init work after IP late init, as a dependency.
 * It runs in the resume/GPU reset/boot-up cases.
 */
3584 void amdgpu_ras_resume(struct amdgpu_device *adev)
3585 {
3586 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3587 	struct ras_manager *obj, *tmp;
3588 
3589 	if (!adev->ras_enabled || !con) {
		/* clean RAS context for VEGA20 Gaming after sending the RAS disable cmd */
3591 		amdgpu_release_ras_context(adev);
3592 
3593 		return;
3594 	}
3595 
3596 	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		/* Set up all other IPs which are not implemented. One tricky
		 * point: an IP's actual RAS error type should be
		 * MULTI_UNCORRECTABLE, but since the driver does not handle
		 * that, ERROR_NONE makes sense anyway.
		 */
3602 		amdgpu_ras_enable_all_features(adev, 1);
3603 
		/* RAS was enabled on all hw_supported blocks, but a boot
		 * parameter might disable some of them, and one or more IPs
		 * may not be implemented yet. Disable those here on their
		 * behalf.
		 */
3608 		list_for_each_entry_safe(obj, tmp, &con->head, node) {
3609 			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
3610 				amdgpu_ras_feature_enable(adev, &obj->head, 0);
				/* there should not be any references left */
3612 				WARN_ON(alive_obj(obj));
3613 			}
3614 		}
3615 	}
3616 }
3617 
3618 void amdgpu_ras_suspend(struct amdgpu_device *adev)
3619 {
3620 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3621 
3622 	if (!adev->ras_enabled || !con)
3623 		return;
3624 
3625 	amdgpu_ras_disable_all_features(adev, 0);
3626 	/* Make sure all ras objects are disabled. */
3627 	if (AMDGPU_RAS_GET_FEATURES(con->features))
3628 		amdgpu_ras_disable_all_features(adev, 1);
3629 }
3630 
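/*
 * Late init entry point for RAS as a whole: initialize the event manager,
 * bring up the ACA or MCA backend (resetting MCA instead on a GPU reset),
 * leave debug mode off by default on bare metal, and then run each
 * registered block's late init. SR-IOV guests skip per-block feature init.
 */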
3631 int amdgpu_ras_late_init(struct amdgpu_device *adev)
3632 {
3633 	struct amdgpu_ras_block_list *node, *tmp;
3634 	struct amdgpu_ras_block_object *obj;
3635 	int r;
3636 
3637 	amdgpu_ras_event_mgr_init(adev);
3638 
3639 	if (amdgpu_aca_is_enabled(adev)) {
3640 		if (!amdgpu_in_reset(adev)) {
3641 			r = amdgpu_aca_init(adev);
3642 			if (r)
3643 				return r;
3644 		}
3645 
3646 		if (!amdgpu_sriov_vf(adev))
3647 			amdgpu_ras_set_aca_debug_mode(adev, false);
3648 	} else {
3649 		if (amdgpu_in_reset(adev))
3650 			r = amdgpu_mca_reset(adev);
3651 		else
3652 			r = amdgpu_mca_init(adev);
3653 		if (r)
3654 			return r;
3655 
3656 		if (!amdgpu_sriov_vf(adev))
3657 			amdgpu_ras_set_mca_debug_mode(adev, false);
3658 	}
3659 
	/* the guest side does not need to init RAS features */
3661 	if (amdgpu_sriov_vf(adev))
3662 		return 0;
3663 
3664 	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
3665 		obj = node->ras_obj;
3666 		if (!obj) {
			dev_warn(adev->dev, "abnormal ras list node\n");
3668 			continue;
3669 		}
3670 
3671 		if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block))
3672 			continue;
3673 
3674 		if (obj->ras_late_init) {
3675 			r = obj->ras_late_init(adev, &obj->ras_comm);
3676 			if (r) {
3677 				dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
3678 					obj->ras_comm.name, r);
3679 				return r;
3680 			}
		} else {
			amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
		}
3683 	}
3684 
3685 	return 0;
3686 }
3687 
/* Do some fini work before IP fini, as a dependency */
3689 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
3690 {
3691 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3692 
3693 	if (!adev->ras_enabled || !con)
3694 		return 0;

	/* Need to disable RAS on all IPs here before IP [hw/sw]fini */
3698 	if (AMDGPU_RAS_GET_FEATURES(con->features))
3699 		amdgpu_ras_disable_all_features(adev, 0);
3700 	amdgpu_ras_recovery_fini(adev);
3701 	return 0;
3702 }
3703 
3704 int amdgpu_ras_fini(struct amdgpu_device *adev)
3705 {
3706 	struct amdgpu_ras_block_list *ras_node, *tmp;
3707 	struct amdgpu_ras_block_object *obj = NULL;
3708 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3709 
3710 	if (!adev->ras_enabled || !con)
3711 		return 0;
3712 
3713 	list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
3714 		if (ras_node->ras_obj) {
3715 			obj = ras_node->ras_obj;
3716 			if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
3717 			    obj->ras_fini)
3718 				obj->ras_fini(adev, &obj->ras_comm);
3719 			else
3720 				amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
3721 		}
3722 
3723 		/* Clear ras blocks from ras_list and free ras block list node */
3724 		list_del(&ras_node->node);
3725 		kfree(ras_node);
3726 	}
3727 
3728 	amdgpu_ras_fs_fini(adev);
3729 	amdgpu_ras_interrupt_remove_all(adev);
3730 
3731 	if (amdgpu_aca_is_enabled(adev))
3732 		amdgpu_aca_fini(adev);
3733 	else
3734 		amdgpu_mca_fini(adev);
3735 
3736 	WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");
3737 
3738 	if (AMDGPU_RAS_GET_FEATURES(con->features))
3739 		amdgpu_ras_disable_all_features(adev, 0);
3740 
3741 	cancel_delayed_work_sync(&con->ras_counte_delay_work);
3742 
3743 	amdgpu_ras_set_context(adev, NULL);
3744 	kfree(con);
3745 
3746 	return 0;
3747 }
3748 
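/*
 * "fed" is the fatal-error-detected flag; it is set from the uncorrectable
 * error interrupt path below and read by the reset/recovery paths.
 */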
3749 bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
3750 {
3751 	struct amdgpu_ras *ras;
3752 
3753 	ras = amdgpu_ras_get_context(adev);
3754 	if (!ras)
3755 		return false;
3756 
3757 	return atomic_read(&ras->fed);
3758 }
3759 
3760 void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
3761 {
3762 	struct amdgpu_ras *ras;
3763 
3764 	ras = amdgpu_ras_get_context(adev);
3765 	if (ras)
3766 		atomic_set(&ras->fed, !!status);
3767 }
3768 
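/*
 * RAS event ids: valid ids are sequence numbers taken from the event
 * manager; an id with bit 63 set (see RAS_EVENT_TYPE_INVALID handling
 * below) is treated as invalid and suppresses the {id} prefix in log
 * output.
 */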
3769 bool amdgpu_ras_event_id_is_valid(struct amdgpu_device *adev, u64 id)
3770 {
3771 	return !(id & BIT_ULL(63));
3772 }
3773 
3774 u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type)
3775 {
3776 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3777 	u64 id;
3778 
3779 	switch (type) {
3780 	case RAS_EVENT_TYPE_ISR:
3781 		id = (u64)atomic64_read(&ras->event_mgr->seqnos[type]);
3782 		break;
3783 	case RAS_EVENT_TYPE_INVALID:
3784 	default:
		id = BIT_ULL(63);
3786 		break;
3787 	}
3788 
3789 	return id;
3790 }
3791 
3792 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
3793 {
3794 	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
3795 		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3796 		u64 event_id = (u64)atomic64_inc_return(&ras->event_mgr->seqnos[RAS_EVENT_TYPE_ISR]);
3797 
		RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error "
			      "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
3800 
3801 		amdgpu_ras_set_fed(adev, true);
3802 		ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
3803 		amdgpu_ras_reset_gpu(adev);
3804 	}
3805 }
3806 
3807 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
3808 {
3809 	if (adev->asic_type == CHIP_VEGA20 &&
3810 	    adev->pm.fw_version <= 0x283400) {
3811 		return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
3812 				amdgpu_ras_intr_triggered();
3813 	}
3814 
3815 	return false;
3816 }
3817 
3818 void amdgpu_release_ras_context(struct amdgpu_device *adev)
3819 {
3820 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3821 
3822 	if (!con)
3823 		return;
3824 
3825 	if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
3826 		con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
3827 		amdgpu_ras_set_context(adev, NULL);
3828 		kfree(con);
3829 	}
3830 }
3831 
3832 #ifdef CONFIG_X86_MCE_AMD
3833 static struct amdgpu_device *find_adev(uint32_t node_id)
3834 {
3835 	int i;
3836 	struct amdgpu_device *adev = NULL;
3837 
3838 	for (i = 0; i < mce_adev_list.num_gpu; i++) {
3839 		adev = mce_adev_list.devs[i];
3840 
3841 		if (adev && adev->gmc.xgmi.connected_to_cpu &&
3842 		    adev->gmc.xgmi.physical_node_id == node_id)
3843 			break;
3844 		adev = NULL;
3845 	}
3846 
3847 	return adev;
3848 }
3849 
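/* helpers to pull the GPU id, UMC instance and channel index out of the
 * MCA_IPID register value carried in the MCE record
 */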
3850 #define GET_MCA_IPID_GPUID(m)	(((m) >> 44) & 0xF)
3851 #define GET_UMC_INST(m)		(((m) >> 21) & 0x7)
3852 #define GET_CHAN_INDEX(m)	((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
3853 #define GPU_ID_OFFSET		8
3854 
3855 static int amdgpu_bad_page_notifier(struct notifier_block *nb,
3856 				    unsigned long val, void *data)
3857 {
3858 	struct mce *m = (struct mce *)data;
3859 	struct amdgpu_device *adev = NULL;
3860 	uint32_t gpu_id = 0;
3861 	uint32_t umc_inst = 0, ch_inst = 0;
3862 
	/*
	 * Only process the error if it was generated in UMC_V2, which
	 * belongs to the GPU UMCs, and occurred in DramECC (extended
	 * error code = 0); otherwise bail out.
	 */
3868 	if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
3869 		    (XEC(m->status, 0x3f) == 0x0)))
3870 		return NOTIFY_DONE;
3871 
3872 	/*
3873 	 * If it is correctable error, return.
3874 	 */
3875 	if (mce_is_correctable(m))
3876 		return NOTIFY_OK;
3877 
3878 	/*
3879 	 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
3880 	 */
3881 	gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
3882 
3883 	adev = find_adev(gpu_id);
3884 	if (!adev) {
3885 		DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
3886 								gpu_id);
3887 		return NOTIFY_DONE;
3888 	}
3889 
3890 	/*
3891 	 * If it is uncorrectable error, then find out UMC instance and
3892 	 * channel index.
3893 	 */
3894 	umc_inst = GET_UMC_INST(m->ipid);
3895 	ch_inst = GET_CHAN_INDEX(m->ipid);
3896 
	dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d\n",
3898 			     umc_inst, ch_inst);
3899 
3900 	if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
3901 		return NOTIFY_OK;
3902 	else
3903 		return NOTIFY_DONE;
3904 }
3905 
3906 static struct notifier_block amdgpu_bad_page_nb = {
3907 	.notifier_call  = amdgpu_bad_page_notifier,
3908 	.priority       = MCE_PRIO_UC,
3909 };
3910 
3911 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
3912 {
3913 	/*
3914 	 * Add the adev to the mce_adev_list.
3915 	 * During mode2 reset, amdgpu device is temporarily
3916 	 * removed from the mgpu_info list which can cause
3917 	 * page retirement to fail.
3918 	 * Use this list instead of mgpu_info to find the amdgpu
3919 	 * device on which the UMC error was reported.
3920 	 */
3921 	mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
3922 
3923 	/*
3924 	 * Register the x86 notifier only once
3925 	 * with MCE subsystem.
3926 	 */
	if (!notifier_registered) {
3928 		mce_register_decode_chain(&amdgpu_bad_page_nb);
3929 		notifier_registered = true;
3930 	}
3931 }
3932 #endif
3933 
3934 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
3935 {
3936 	if (!adev)
3937 		return NULL;
3938 
3939 	return adev->psp.ras_context.ras;
3940 }
3941 
3942 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
3943 {
3944 	if (!adev)
3945 		return -EINVAL;
3946 
3947 	adev->psp.ras_context.ras = ras_con;
3948 	return 0;
3949 }
3950 
/* check if RAS is supported on a block, e.g. sdma, gfx */
3952 int amdgpu_ras_is_supported(struct amdgpu_device *adev,
3953 		unsigned int block)
3954 {
3955 	int ret = 0;
3956 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3957 
3958 	if (block >= AMDGPU_RAS_BLOCK_COUNT)
3959 		return 0;
3960 
3961 	ret = ras && (adev->ras_enabled & (1 << block));
3962 
	/* On the special ASICs with memory ECC enabled but SRAM ECC
	 * disabled, even if a RAS block is not marked supported in
	 * .ras_enabled, it can still be considered to support the RAS
	 * function as long as the ASIC supports poison mode and the
	 * block has a RAS configuration.
	 */
	if (!ret &&
	    (block == AMDGPU_RAS_BLOCK__GFX ||
	     block == AMDGPU_RAS_BLOCK__SDMA ||
	     block == AMDGPU_RAS_BLOCK__VCN ||
	     block == AMDGPU_RAS_BLOCK__JPEG) &&
	    (amdgpu_ras_mask & (1 << block)) &&
	    amdgpu_ras_is_poison_mode_supported(adev) &&
	    amdgpu_ras_get_ras_block(adev, block, 0))
		ret = 1;
3978 
3979 	return ret;
3980 }
3981 
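/*
 * Kick off GPU recovery at most once: the in_recovery cmpxchg turns
 * concurrent calls into no-ops while a recovery is already pending.
 */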
3982 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
3983 {
3984 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3985 
3986 	if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
3987 		amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
3988 	return 0;
3989 }
3990 
3991 int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
3992 {
3993 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3994 	int ret = 0;
3995 
3996 	if (con) {
3997 		ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
3998 		if (!ret)
3999 			con->is_aca_debug_mode = enable;
4000 	}
4001 
4002 	return ret;
4003 }
4004 
4005 int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable)
4006 {
4007 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4008 	int ret = 0;
4009 
4010 	if (con) {
4011 		if (amdgpu_aca_is_enabled(adev))
4012 			ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
4013 		else
4014 			ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
4015 		if (!ret)
4016 			con->is_aca_debug_mode = enable;
4017 	}
4018 
4019 	return ret;
4020 }
4021 
4022 bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev)
4023 {
4024 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4025 	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
4026 	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
4027 
4028 	if (!con)
4029 		return false;
4030 
4031 	if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) ||
4032 	    (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode))
4033 		return con->is_aca_debug_mode;
4034 	else
4035 		return true;
4036 }
4037 
4038 bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
4039 				     unsigned int *error_query_mode)
4040 {
4041 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4042 	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
4043 	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
4044 
4045 	if (!con) {
4046 		*error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
4047 		return false;
4048 	}
4049 
4050 	if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode))
4051 		*error_query_mode =
4052 			(con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
4053 	else
4054 		*error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
4055 
4056 	return true;
4057 }
4058 
4059 /* Register each ip ras block into amdgpu ras */
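/*
 * A minimal usage sketch (my_ip_ras_block is illustrative, not a name
 * from this driver):
 *
 *	static struct amdgpu_ras_block_object my_ip_ras_block = {
 *		.ras_comm = { .block = AMDGPU_RAS_BLOCK__GFX },
 *	};
 *
 *	amdgpu_ras_register_ras_block(adev, &my_ip_ras_block);
 */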
4060 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
4061 		struct amdgpu_ras_block_object *ras_block_obj)
4062 {
	struct amdgpu_ras_block_list *ras_node;

4064 	if (!adev || !ras_block_obj)
4065 		return -EINVAL;
4066 
4067 	ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
4068 	if (!ras_node)
4069 		return -ENOMEM;
4070 
4071 	INIT_LIST_HEAD(&ras_node->node);
4072 	ras_node->ras_obj = ras_block_obj;
4073 	list_add_tail(&ras_node->node, &adev->ras_list);
4074 
4075 	return 0;
4076 }
4077 
4078 void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
4079 {
4080 	if (!err_type_name)
4081 		return;
4082 
4083 	switch (err_type) {
4084 	case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
4085 		sprintf(err_type_name, "correctable");
4086 		break;
4087 	case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
4088 		sprintf(err_type_name, "uncorrectable");
4089 		break;
4090 	default:
4091 		sprintf(err_type_name, "unknown");
4092 		break;
4093 	}
4094 }
4095 
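/*
 * The per-instance error status is exposed as a lo/hi register pair:
 * err_status_lo carries a valid flag plus the memory_id, and
 * err_status_hi carries an info-valid flag plus the error count. The two
 * helpers below read one half each.
 */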
4096 bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
4097 					 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
4098 					 uint32_t instance,
4099 					 uint32_t *memory_id)
4100 {
4101 	uint32_t err_status_lo_data, err_status_lo_offset;
4102 
4103 	if (!reg_entry)
4104 		return false;
4105 
4106 	err_status_lo_offset =
4107 		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
4108 					    reg_entry->seg_lo, reg_entry->reg_lo);
4109 	err_status_lo_data = RREG32(err_status_lo_offset);
4110 
4111 	if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
4112 	    !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
4113 		return false;
4114 
4115 	*memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);
4116 
4117 	return true;
4118 }
4119 
4120 bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
4121 				       const struct amdgpu_ras_err_status_reg_entry *reg_entry,
4122 				       uint32_t instance,
4123 				       unsigned long *err_cnt)
4124 {
4125 	uint32_t err_status_hi_data, err_status_hi_offset;
4126 
4127 	if (!reg_entry)
4128 		return false;
4129 
4130 	err_status_hi_offset =
4131 		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
4132 					    reg_entry->seg_hi, reg_entry->reg_hi);
4133 	err_status_hi_data = RREG32(err_status_hi_offset);
4134 
4135 	if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
4136 	    !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
4137 		/* keep the check here in case we need to refer to the result later */
4138 		dev_dbg(adev->dev, "Invalid err_info field\n");
4139 
4140 	/* read err count */
4141 	*err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);
4142 
4143 	return true;
4144 }
4145 
4146 void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
4147 					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
4148 					   uint32_t reg_list_size,
4149 					   const struct amdgpu_ras_memory_id_entry *mem_list,
4150 					   uint32_t mem_list_size,
4151 					   uint32_t instance,
4152 					   uint32_t err_type,
4153 					   unsigned long *err_count)
4154 {
4155 	uint32_t memory_id;
4156 	unsigned long err_cnt;
4157 	char err_type_name[16];
4158 	uint32_t i, j;
4159 
4160 	for (i = 0; i < reg_list_size; i++) {
4161 		/* query memory_id from err_status_lo */
4162 		if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
4163 							 instance, &memory_id))
4164 			continue;
4165 
4166 		/* query err_cnt from err_status_hi */
4167 		if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
4168 						       instance, &err_cnt) ||
4169 		    !err_cnt)
4170 			continue;
4171 
4172 		*err_count += err_cnt;
4173 
4174 		/* log the errors */
4175 		amdgpu_ras_get_error_type_name(err_type, err_type_name);
4176 		if (!mem_list) {
4177 			/* memory_list is not supported */
4178 			dev_info(adev->dev,
4179 				 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
4180 				 err_cnt, err_type_name,
4181 				 reg_list[i].block_name,
4182 				 instance, memory_id);
4183 		} else {
4184 			for (j = 0; j < mem_list_size; j++) {
4185 				if (memory_id == mem_list[j].memory_id) {
4186 					dev_info(adev->dev,
4187 						 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
4188 						 err_cnt, err_type_name,
4189 						 reg_list[i].block_name,
4190 						 instance, mem_list[j].name);
4191 					break;
4192 				}
4193 			}
4194 		}
4195 	}
4196 }
4197 
4198 void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
4199 					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
4200 					   uint32_t reg_list_size,
4201 					   uint32_t instance)
4202 {
4203 	uint32_t err_status_lo_offset, err_status_hi_offset;
4204 	uint32_t i;
4205 
4206 	for (i = 0; i < reg_list_size; i++) {
4207 		err_status_lo_offset =
4208 			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
4209 						    reg_list[i].seg_lo, reg_list[i].reg_lo);
4210 		err_status_hi_offset =
4211 			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
4212 						    reg_list[i].seg_hi, reg_list[i].reg_hi);
4213 		WREG32(err_status_lo_offset, 0);
4214 		WREG32(err_status_hi_offset, 0);
4215 	}
4216 }
4217 
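/*
 * Typical lifecycle of a ras_err_data (a sketch; the mcm_info values are
 * illustrative):
 *
 *	struct ras_err_data err_data;
 *	struct amdgpu_smuio_mcm_config_info mcm_info = {
 *		.socket_id = 0,
 *		.die_id = 0,
 *	};
 *
 *	amdgpu_ras_error_data_init(&err_data);
 *	amdgpu_ras_error_statistic_ue_count(&err_data, &mcm_info, NULL, 1);
 *	amdgpu_ras_error_data_fini(&err_data);
 */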
4218 int amdgpu_ras_error_data_init(struct ras_err_data *err_data)
4219 {
4220 	memset(err_data, 0, sizeof(*err_data));
4221 
4222 	INIT_LIST_HEAD(&err_data->err_node_list);
4223 
4224 	return 0;
4225 }
4226 
4227 static void amdgpu_ras_error_node_release(struct ras_err_node *err_node)
4228 {
4229 	if (!err_node)
4230 		return;
4231 
4232 	list_del(&err_node->node);
4233 	kvfree(err_node);
4234 }
4235 
4236 void amdgpu_ras_error_data_fini(struct ras_err_data *err_data)
4237 {
4238 	struct ras_err_node *err_node, *tmp;
4239 
4240 	list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
4241 		amdgpu_ras_error_node_release(err_node);
4242 }
4243 
4244 static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data,
4245 							     struct amdgpu_smuio_mcm_config_info *mcm_info)
4246 {
4247 	struct ras_err_node *err_node;
4248 	struct amdgpu_smuio_mcm_config_info *ref_id;
4249 
4250 	if (!err_data || !mcm_info)
4251 		return NULL;
4252 
4253 	for_each_ras_error(err_node, err_data) {
4254 		ref_id = &err_node->err_info.mcm_info;
4255 
4256 		if (mcm_info->socket_id == ref_id->socket_id &&
4257 		    mcm_info->die_id == ref_id->die_id)
4258 			return err_node;
4259 	}
4260 
4261 	return NULL;
4262 }
4263 
4264 static struct ras_err_node *amdgpu_ras_error_node_new(void)
4265 {
4266 	struct ras_err_node *err_node;
4267 
4268 	err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL);
4269 	if (!err_node)
4270 		return NULL;
4271 
4272 	INIT_LIST_HEAD(&err_node->node);
4273 
4274 	return err_node;
4275 }
4276 
4277 static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
4278 {
4279 	struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
4280 	struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
4281 	struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
4282 	struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;
4283 
	if (unlikely(infoa->socket_id != infob->socket_id))
		return infoa->socket_id - infob->socket_id;

	return infoa->die_id - infob->die_id;
4290 }
4291 
4292 static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
4293 				struct amdgpu_smuio_mcm_config_info *mcm_info)
4294 {
4295 	struct ras_err_node *err_node;
4296 
4297 	err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info);
4298 	if (err_node)
4299 		return &err_node->err_info;
4300 
4301 	err_node = amdgpu_ras_error_node_new();
4302 	if (!err_node)
4303 		return NULL;
4304 
4305 	INIT_LIST_HEAD(&err_node->err_info.err_addr_list);
4306 
4307 	memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
4308 
4309 	err_data->err_list_count++;
4310 	list_add_tail(&err_node->node, &err_data->err_node_list);
4311 	list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
4312 
4313 	return &err_node->err_info;
4314 }
4315 
4316 void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *err_addr)
4317 {
	/* This function will be retired; it is kept as a no-op stub. */
4320 }
4321 
4322 void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *mca_err_addr)
4323 {
4324 	list_del(&mca_err_addr->node);
4325 	kfree(mca_err_addr);
4326 }
4327 
4328 int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
4329 		struct amdgpu_smuio_mcm_config_info *mcm_info,
4330 		struct ras_err_addr *err_addr, u64 count)
4331 {
4332 	struct ras_err_info *err_info;
4333 
4334 	if (!err_data || !mcm_info)
4335 		return -EINVAL;
4336 
4337 	if (!count)
4338 		return 0;
4339 
4340 	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4341 	if (!err_info)
4342 		return -EINVAL;
4343 
4344 	if (err_addr && err_addr->err_status)
4345 		amdgpu_ras_add_mca_err_addr(err_info, err_addr);
4346 
4347 	err_info->ue_count += count;
4348 	err_data->ue_count += count;
4349 
4350 	return 0;
4351 }
4352 
4353 int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
4354 		struct amdgpu_smuio_mcm_config_info *mcm_info,
4355 		struct ras_err_addr *err_addr, u64 count)
4356 {
4357 	struct ras_err_info *err_info;
4358 
4359 	if (!err_data || !mcm_info)
4360 		return -EINVAL;
4361 
4362 	if (!count)
4363 		return 0;
4364 
4365 	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4366 	if (!err_info)
4367 		return -EINVAL;
4368 
4369 	err_info->ce_count += count;
4370 	err_data->ce_count += count;
4371 
4372 	return 0;
4373 }
4374 
4375 int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
4376 		struct amdgpu_smuio_mcm_config_info *mcm_info,
4377 		struct ras_err_addr *err_addr, u64 count)
4378 {
4379 	struct ras_err_info *err_info;
4380 
4381 	if (!err_data || !mcm_info)
4382 		return -EINVAL;
4383 
4384 	if (!count)
4385 		return 0;
4386 
4387 	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4388 	if (!err_info)
4389 		return -EINVAL;
4390 
4391 	if (err_addr && err_addr->err_status)
4392 		amdgpu_ras_add_mca_err_addr(err_info, err_addr);
4393 
4394 	err_info->de_count += count;
4395 	err_data->de_count += count;
4396 
4397 	return 0;
4398 }
4399 
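/*
 * Boot-time errors are reported by firmware through MP0 C2PMSG scratch
 * registers read over SMN: C2PMSG_92 holds the boot/fw status word and
 * C2PMSG_126 holds the boot error word decoded below.
 */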
4400 #define mmMP0_SMN_C2PMSG_92	0x1609C
4401 #define mmMP0_SMN_C2PMSG_126	0x160BE
4402 static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
4403 						 u32 instance)
4404 {
4405 	u32 socket_id, aid_id, hbm_id;
4406 	u32 fw_status;
4407 	u32 boot_error;
4408 	u64 reg_addr;
4409 
	/* The pattern for SMN addressing in other SOCs could differ from the
	 * one for aqua_vanjaram. We should revisit the code if the pattern
	 * changes; in that case, replace the aqua_vanjaram implementation
	 * with a more common helper.
	 */
4414 	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
4415 		   aqua_vanjaram_encode_ext_smn_addressing(instance);
4416 	fw_status = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4417 
4418 	reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
4419 		   aqua_vanjaram_encode_ext_smn_addressing(instance);
4420 	boot_error = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4421 
4422 	socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
4423 	aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error);
4424 	hbm_id = AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error);
4425 
4426 	if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
4427 		dev_info(adev->dev,
4428 			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, memory training failed\n",
4429 			 socket_id, aid_id, hbm_id, fw_status);
4430 
4431 	if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
4432 		dev_info(adev->dev,
4433 			 "socket: %d, aid: %d, fw_status: 0x%x, firmware load failed at boot time\n",
4434 			 socket_id, aid_id, fw_status);
4435 
4436 	if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
4437 		dev_info(adev->dev,
4438 			 "socket: %d, aid: %d, fw_status: 0x%x, wafl link training failed\n",
4439 			 socket_id, aid_id, fw_status);
4440 
4441 	if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
4442 		dev_info(adev->dev,
4443 			 "socket: %d, aid: %d, fw_status: 0x%x, xgmi link training failed\n",
4444 			 socket_id, aid_id, fw_status);
4445 
4446 	if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
4447 		dev_info(adev->dev,
4448 			 "socket: %d, aid: %d, fw_status: 0x%x, usr cp link training failed\n",
4449 			 socket_id, aid_id, fw_status);
4450 
4451 	if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
4452 		dev_info(adev->dev,
4453 			 "socket: %d, aid: %d, fw_status: 0x%x, usr dp link training failed\n",
4454 			 socket_id, aid_id, fw_status);
4455 
4456 	if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
4457 		dev_info(adev->dev,
4458 			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm memory test failed\n",
4459 			 socket_id, aid_id, hbm_id, fw_status);
4460 
4461 	if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
4462 		dev_info(adev->dev,
4463 			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n",
4464 			 socket_id, aid_id, hbm_id, fw_status);
4465 }
4466 
4467 static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev,
4468 					   u32 instance)
4469 {
4470 	u64 reg_addr;
4471 	u32 reg_data;
4472 	int retry_loop;
4473 
4474 	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
4475 		   aqua_vanjaram_encode_ext_smn_addressing(instance);
4476 
4477 	for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
4478 		reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4479 		if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS)
		if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS)
			return false;

		msleep(1);
4484 
4485 	return true;
4486 }
4487 
4488 void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
4489 {
4490 	u32 i;
4491 
4492 	for (i = 0; i < num_instances; i++) {
4493 		if (amdgpu_ras_boot_error_detected(adev, i))
4494 			amdgpu_ras_boot_time_error_reporting(adev, i);
4495 	}
4496 }
4497 
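/*
 * Reserve a single bad VRAM page so the allocator can no longer hand it
 * out; page_rsv_lock serializes the query-then-reserve sequence, and a
 * page that is already reserved is left alone.
 */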
4498 int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
4499 {
4500 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4501 	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
4502 	uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
4503 	int ret = 0;
4504 
4505 	mutex_lock(&con->page_rsv_lock);
4506 	ret = amdgpu_vram_mgr_query_page_status(mgr, start);
4507 	if (ret == -ENOENT)
4508 		ret = amdgpu_vram_mgr_reserve_range(mgr, start, AMDGPU_GPU_PAGE_SIZE);
4509 	mutex_unlock(&con->page_rsv_lock);
4510 
4511 	return ret;
4512 }
4513 
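/*
 * printk-style logging helper that prefixes the message with "{event_id}"
 * when the id is valid, so that all messages belonging to one RAS event
 * can be correlated in the kernel log.
 */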
4514 void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
4515 				const char *fmt, ...)
4516 {
4517 	struct va_format vaf;
4518 	va_list args;
4519 
4520 	va_start(args, fmt);
4521 	vaf.fmt = fmt;
4522 	vaf.va = &args;
4523 
4524 	if (amdgpu_ras_event_id_is_valid(adev, event_id))
4525 		dev_printk(KERN_INFO, adev->dev, "{%llu}%pV", event_id, &vaf);
4526 	else
4527 		dev_printk(KERN_INFO, adev->dev, "%pV", &vaf);
4528 
4529 	va_end(args);
4530 }
4531