xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c (revision 727b77df826b44853476d6e8690fec4cf5515eca)
1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  *
23  */
24 #include <linux/debugfs.h>
25 #include <linux/list.h>
26 #include <linux/module.h>
27 #include <linux/uaccess.h>
28 #include <linux/reboot.h>
29 #include <linux/syscalls.h>
30 #include <linux/pm_runtime.h>
31 #include <linux/list_sort.h>
32 
33 #include "amdgpu.h"
34 #include "amdgpu_ras.h"
35 #include "amdgpu_atomfirmware.h"
36 #include "amdgpu_xgmi.h"
37 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
38 #include "nbio_v4_3.h"
39 #include "nbif_v6_3_1.h"
40 #include "nbio_v7_9.h"
41 #include "atom.h"
42 #include "amdgpu_reset.h"
43 #include "amdgpu_psp.h"
44 
45 #ifdef CONFIG_X86_MCE_AMD
46 #include <asm/mce.h>
47 
48 static bool notifier_registered;
49 #endif
50 static const char *RAS_FS_NAME = "ras";
51 
52 const char *ras_error_string[] = {
53 	"none",
54 	"parity",
55 	"single_correctable",
56 	"multi_uncorrectable",
57 	"poison",
58 };
59 
60 const char *ras_block_string[] = {
61 	"umc",
62 	"sdma",
63 	"gfx",
64 	"mmhub",
65 	"athub",
66 	"pcie_bif",
67 	"hdp",
68 	"xgmi_wafl",
69 	"df",
70 	"smn",
71 	"sem",
72 	"mp0",
73 	"mp1",
74 	"fuse",
75 	"mca",
76 	"vcn",
77 	"jpeg",
78 	"ih",
79 	"mpio",
80 	"mmsch",
81 };
82 
83 const char *ras_mca_block_string[] = {
84 	"mca_mp0",
85 	"mca_mp1",
86 	"mca_mpio",
87 	"mca_iohc",
88 };
89 
90 struct amdgpu_ras_block_list {
91 	/* ras block link */
92 	struct list_head node;
93 
94 	struct amdgpu_ras_block_object *ras_obj;
95 };
96 
97 const char *get_ras_block_str(struct ras_common_if *ras_block)
98 {
99 	if (!ras_block)
100 		return "NULL";
101 
102 	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
103 	    ras_block->block >= ARRAY_SIZE(ras_block_string))
104 		return "OUT OF RANGE";
105 
106 	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
107 		return ras_mca_block_string[ras_block->sub_block_index];
108 
109 	return ras_block_string[ras_block->block];
110 }
111 
112 #define ras_block_str(_BLOCK_) \
113 	(((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")
114 
115 #define ras_err_str(i) (ras_error_string[ffs(i)])
116 
117 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
118 
119 /* inject address is 52 bits */
120 #define	RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)
121 
122 /* typical ECC bad page rate is 1 bad page per 100MB VRAM */
123 #define RAS_BAD_PAGE_COVER              (100 * 1024 * 1024ULL)
124 
125 #define MAX_UMC_POISON_POLLING_TIME_ASYNC  300  //ms
126 
127 #define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100  //ms
128 
129 #define MAX_FLUSH_RETIRE_DWORK_TIMES  100
130 
131 enum amdgpu_ras_retire_page_reservation {
132 	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
133 	AMDGPU_RAS_RETIRE_PAGE_PENDING,
134 	AMDGPU_RAS_RETIRE_PAGE_FAULT,
135 };
136 
137 atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
138 
139 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
140 				uint64_t addr);
141 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
142 				uint64_t addr);
143 #ifdef CONFIG_X86_MCE_AMD
144 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
145 struct mce_notifier_adev_list {
146 	struct amdgpu_device *devs[MAX_GPU_INSTANCE];
147 	int num_gpu;
148 };
149 static struct mce_notifier_adev_list mce_adev_list;
150 #endif
151 
152 void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
153 {
154 	if (adev && amdgpu_ras_get_context(adev))
155 		amdgpu_ras_get_context(adev)->error_query_ready = ready;
156 }
157 
158 static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
159 {
160 	if (adev && amdgpu_ras_get_context(adev))
161 		return amdgpu_ras_get_context(adev)->error_query_ready;
162 
163 	return false;
164 }
165 
166 static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
167 {
168 	struct ras_err_data err_data;
169 	struct eeprom_table_record err_rec;
170 	int ret;
171 
172 	if ((address >= adev->gmc.mc_vram_size) ||
173 	    (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
174 		dev_warn(adev->dev,
175 		         "RAS WARN: input address 0x%llx is invalid.\n",
176 		         address);
177 		return -EINVAL;
178 	}
179 
180 	if (amdgpu_ras_check_bad_page(adev, address)) {
181 		dev_warn(adev->dev,
182 			 "RAS WARN: 0x%llx has already been marked as bad page!\n",
183 			 address);
184 		return 0;
185 	}
186 
187 	ret = amdgpu_ras_error_data_init(&err_data);
188 	if (ret)
189 		return ret;
190 
191 	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
192 	err_data.err_addr = &err_rec;
193 	amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);
194 
195 	if (amdgpu_bad_page_threshold != 0) {
196 		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
197 					 err_data.err_addr_cnt, false);
198 		amdgpu_ras_save_bad_pages(adev, NULL);
199 	}
200 
201 	amdgpu_ras_error_data_fini(&err_data);
202 
203 	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
204 	dev_warn(adev->dev, "Clear EEPROM:\n");
205 	dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
206 
207 	return 0;
208 }
209 
210 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
211 					size_t size, loff_t *pos)
212 {
213 	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
214 	struct ras_query_if info = {
215 		.head = obj->head,
216 	};
217 	ssize_t s;
218 	char val[128];
219 
220 	if (amdgpu_ras_query_error_status(obj->adev, &info))
221 		return -EINVAL;
222 
223 	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
224 	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
225 	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
226 		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
227 			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
228 	}
229 
230 	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
231 			"ue", info.ue_count,
232 			"ce", info.ce_count);
233 	if (*pos >= s)
234 		return 0;
235 
236 	s -= *pos;
237 	s = min_t(u64, s, size);
238 
239 
240 	if (copy_to_user(buf, &val[*pos], s))
241 		return -EINVAL;
242 
243 	*pos += s;
244 
245 	return s;
246 }
247 
248 static const struct file_operations amdgpu_ras_debugfs_ops = {
249 	.owner = THIS_MODULE,
250 	.read = amdgpu_ras_debugfs_read,
251 	.write = NULL,
252 	.llseek = default_llseek
253 };
254 
255 static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
256 {
257 	int i;
258 
259 	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
260 		*block_id = i;
261 		if (strcmp(name, ras_block_string[i]) == 0)
262 			return 0;
263 	}
264 	return -EINVAL;
265 }
266 
267 static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
268 		const char __user *buf, size_t size,
269 		loff_t *pos, struct ras_debug_if *data)
270 {
271 	ssize_t s = min_t(u64, 64, size);
272 	char str[65];
273 	char block_name[33];
274 	char err[9] = "ue";
275 	int op = -1;
276 	int block_id;
277 	uint32_t sub_block;
278 	u64 address, value;
279 	/* default value is 0 if the mask is not set by user */
280 	u32 instance_mask = 0;
281 
282 	if (*pos)
283 		return -EINVAL;
284 	*pos = size;
285 
286 	memset(str, 0, sizeof(str));
287 	memset(data, 0, sizeof(*data));
288 
289 	if (copy_from_user(str, buf, s))
290 		return -EINVAL;
291 
292 	if (sscanf(str, "disable %32s", block_name) == 1)
293 		op = 0;
294 	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
295 		op = 1;
296 	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
297 		op = 2;
298 	else if (strstr(str, "retire_page") != NULL)
299 		op = 3;
300 	else if (str[0] && str[1] && str[2] && str[3])
301 		/* ascii string, but commands are not matched. */
302 		return -EINVAL;
303 
304 	if (op != -1) {
305 		if (op == 3) {
306 			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
307 			    sscanf(str, "%*s %llu", &address) != 1)
308 				return -EINVAL;
309 
310 			data->op = op;
311 			data->inject.address = address;
312 
313 			return 0;
314 		}
315 
316 		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
317 			return -EINVAL;
318 
319 		data->head.block = block_id;
320 		/* only ue, ce and poison errors are supported */
321 		if (!memcmp("ue", err, 2))
322 			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
323 		else if (!memcmp("ce", err, 2))
324 			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
325 		else if (!memcmp("poison", err, 6))
326 			data->head.type = AMDGPU_RAS_ERROR__POISON;
327 		else
328 			return -EINVAL;
329 
330 		data->op = op;
331 
332 		if (op == 2) {
333 			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
334 				   &sub_block, &address, &value, &instance_mask) != 4 &&
335 			    sscanf(str, "%*s %*s %*s %u %llu %llu %u",
336 				   &sub_block, &address, &value, &instance_mask) != 4 &&
337 				sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
338 				   &sub_block, &address, &value) != 3 &&
339 			    sscanf(str, "%*s %*s %*s %u %llu %llu",
340 				   &sub_block, &address, &value) != 3)
341 				return -EINVAL;
342 			data->head.sub_block_index = sub_block;
343 			data->inject.address = address;
344 			data->inject.value = value;
345 			data->inject.instance_mask = instance_mask;
346 		}
347 	} else {
348 		if (size < sizeof(*data))
349 			return -EINVAL;
350 
351 		if (copy_from_user(data, buf, sizeof(*data)))
352 			return -EINVAL;
353 	}
354 
355 	return 0;
356 }
357 
358 static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
359 				struct ras_debug_if *data)
360 {
361 	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
362 	uint32_t mask, inst_mask = data->inject.instance_mask;
363 
364 	/* no need to set instance mask if there is only one instance */
365 	if (num_xcc <= 1 && inst_mask) {
366 		data->inject.instance_mask = 0;
367 		dev_dbg(adev->dev,
368 			"RAS inject mask(0x%x) isn't supported and force it to 0.\n",
369 			inst_mask);
370 
371 		return;
372 	}
373 
374 	switch (data->head.block) {
375 	case AMDGPU_RAS_BLOCK__GFX:
376 		mask = GENMASK(num_xcc - 1, 0);
377 		break;
378 	case AMDGPU_RAS_BLOCK__SDMA:
379 		mask = GENMASK(adev->sdma.num_instances - 1, 0);
380 		break;
381 	case AMDGPU_RAS_BLOCK__VCN:
382 	case AMDGPU_RAS_BLOCK__JPEG:
383 		mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
384 		break;
385 	default:
386 		mask = inst_mask;
387 		break;
388 	}
389 
390 	/* remove invalid bits in instance mask */
391 	data->inject.instance_mask &= mask;
392 	if (inst_mask != data->inject.instance_mask)
393 		dev_dbg(adev->dev,
394 			"Adjust RAS inject mask 0x%x to 0x%x\n",
395 			inst_mask, data->inject.instance_mask);
396 }
397 
398 /**
399  * DOC: AMDGPU RAS debugfs control interface
400  *
401  * The control interface accepts struct ras_debug_if which has two members.
402  *
403  * First member: ras_debug_if::head or ras_debug_if::inject.
404  *
405  * head is used to indicate which IP block will be under control.
406  *
407  * head has four members, they are block, type, sub_block_index, name.
408  * block: which IP will be under control.
409  * type: what kind of error will be enabled/disabled/injected.
410  * sub_block_index: some IPs have subcomponents, e.g. GFX, SDMA.
411  * name: the name of IP.
412  *
413  * inject has three more members than head: address, value and mask.
414  * As their names indicate, the inject operation will write the
415  * value to the address.
416  *
417  * The second member: struct ras_debug_if::op.
418  * It has three kinds of operations.
419  *
420  * - 0: disable RAS on the block. Take ::head as its data.
421  * - 1: enable RAS on the block. Take ::head as its data.
422  * - 2: inject errors on the block. Take ::inject as its data.
423  *
424  * How to use the interface?
425  *
426  * In a program
427  *
428  * Copy the struct ras_debug_if in your code and initialize it.
429  * Write the struct to the control interface.
430  *
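 * A minimal userspace sketch of the programmatic path (illustrative only;
 * it assumes struct ras_debug_if and the block/error enums have been copied
 * from the driver headers, card 0 is the target, and the usual fcntl.h and
 * unistd.h includes plus error handling are omitted):
 *
 * .. code-block:: c
 *
 *	struct ras_debug_if data = { 0 };
 *	int fd;
 *
 *	data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *	data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *	data.op = 1;
 *
 *	fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, &data, sizeof(data));
 *		close(fd);
 *	}
 *
 * Here op = 1 requests the enable operation, matching the list above.
 *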
431  * From shell
432  *
433  * .. code-block:: bash
434  *
435  *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
436  *	echo "enable  <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
437  *	echo "inject  <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
438  *
439  * Where N is the card which you want to affect.
440  *
441  * "disable" requires only the block.
442  * "enable" requires the block and error type.
443  * "inject" requires the block, error type, address, and value.
444  *
445  * The block is one of: umc, sdma, gfx, etc.
446  *	see ras_block_string[] for details
447  *
448  * The error type is one of: ue, ce and poison, where
449  *	ue is multi-uncorrectable
450  *	ce is single-correctable
451  *	poison is poison
452  *
453  * The sub-block is the sub-block index; pass 0 if there is no sub-block.
454  * The address and value are hexadecimal numbers; the leading 0x is optional.
455  * The mask is the instance mask; it is optional and its default value is 0x1.
456  *
457  * For instance,
458  *
459  * .. code-block:: bash
460  *
461  *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
462  *	echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
463  *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
464  *
465  * How to check the result of the operation?
466  *
467  * To check disable/enable, see "ras" features at,
468  * /sys/class/drm/card[0/1/2...]/device/ras/features
469  *
470  * To check inject, see the corresponding error count at,
471  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
472  *
473  * .. note::
474  *	Operations are only allowed on blocks which are supported.
475  *	Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
476  *	to see which blocks support RAS on a particular asic.
477  *
478  */
479 static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
480 					     const char __user *buf,
481 					     size_t size, loff_t *pos)
482 {
483 	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
484 	struct ras_debug_if data;
485 	int ret = 0;
486 
487 	if (!amdgpu_ras_get_error_query_ready(adev)) {
488 		dev_warn(adev->dev, "RAS WARN: error injection "
489 				"currently inaccessible\n");
490 		return size;
491 	}
492 
493 	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
494 	if (ret)
495 		return ret;
496 
497 	if (data.op == 3) {
498 		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
499 		if (!ret)
500 			return size;
501 		else
502 			return ret;
503 	}
504 
505 	if (!amdgpu_ras_is_supported(adev, data.head.block))
506 		return -EINVAL;
507 
508 	switch (data.op) {
509 	case 0:
510 		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
511 		break;
512 	case 1:
513 		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
514 		break;
515 	case 2:
516 		if ((data.inject.address >= adev->gmc.mc_vram_size &&
517 		    adev->gmc.mc_vram_size) ||
518 		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
519 			dev_warn(adev->dev, "RAS WARN: input address "
520 					"0x%llx is invalid.",
521 					data.inject.address);
522 			ret = -EINVAL;
523 			break;
524 		}
525 
526 		/* umc ce/ue error injection for a bad page is not allowed */
527 		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
528 		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
529 			dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
530 				 "already been marked as bad!\n",
531 				 data.inject.address);
532 			break;
533 		}
534 
535 		amdgpu_ras_instance_mask_check(adev, &data);
536 
537 		/* data.inject.address is offset instead of absolute gpu address */
538 		ret = amdgpu_ras_error_inject(adev, &data.inject);
539 		break;
540 	default:
541 		ret = -EINVAL;
542 		break;
543 	}
544 
545 	if (ret)
546 		return ret;
547 
548 	return size;
549 }
550 
551 /**
552  * DOC: AMDGPU RAS debugfs EEPROM table reset interface
553  *
554  * Some boards contain an EEPROM which is used to persistently store a list of
555  * bad pages which have experienced ECC errors in vram.  This interface provides
556  * a way to reset the EEPROM, e.g., after testing error injection.
557  *
558  * Usage:
559  *
560  * .. code-block:: bash
561  *
562  *	echo 1 > ../ras/ras_eeprom_reset
563  *
564  * will reset EEPROM table to 0 entries.
565  *
566  */
567 static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
568 					       const char __user *buf,
569 					       size_t size, loff_t *pos)
570 {
571 	struct amdgpu_device *adev =
572 		(struct amdgpu_device *)file_inode(f)->i_private;
573 	int ret;
574 
575 	ret = amdgpu_ras_eeprom_reset_table(
576 		&(amdgpu_ras_get_context(adev)->eeprom_control));
577 
578 	if (!ret) {
579 		/* Something was written to EEPROM.
580 		 */
581 		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
582 		return size;
583 	} else {
584 		return ret;
585 	}
586 }
587 
588 static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
589 	.owner = THIS_MODULE,
590 	.read = NULL,
591 	.write = amdgpu_ras_debugfs_ctrl_write,
592 	.llseek = default_llseek
593 };
594 
595 static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
596 	.owner = THIS_MODULE,
597 	.read = NULL,
598 	.write = amdgpu_ras_debugfs_eeprom_write,
599 	.llseek = default_llseek
600 };
601 
602 /**
603  * DOC: AMDGPU RAS sysfs Error Count Interface
604  *
605  * It allows the user to read the error count for each IP block on the gpu through
606  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
607  *
608  * It outputs multiple lines which report the uncorrected (ue) and corrected
609  * (ce) error counts.
610  *
611  * The format of one line is below,
612  *
613  * [ce|ue]: count
614  *
615  * Example:
616  *
617  * .. code-block:: bash
618  *
619  *	ue: 0
620  *	ce: 1
621  *
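 * For example, the counters can be read with a plain cat (a hedged
 * illustration; the exact files present depend on which blocks support
 * RAS on a particular asic):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/umc_err_count
 *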
622  */
623 static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
624 		struct device_attribute *attr, char *buf)
625 {
626 	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
627 	struct ras_query_if info = {
628 		.head = obj->head,
629 	};
630 
631 	if (!amdgpu_ras_get_error_query_ready(obj->adev))
632 		return sysfs_emit(buf, "Query currently inaccessible\n");
633 
634 	if (amdgpu_ras_query_error_status(obj->adev, &info))
635 		return -EINVAL;
636 
637 	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
638 	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
639 		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
640 			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
641 	}
642 
643 	if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
644 		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
645 				"ce", info.ce_count, "de", info.de_count);
646 	else
647 		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
648 				"ce", info.ce_count);
649 }
650 
651 /* obj begin */
652 
653 #define get_obj(obj) do { (obj)->use++; } while (0)
654 #define alive_obj(obj) ((obj)->use)
655 
656 static inline void put_obj(struct ras_manager *obj)
657 {
658 	if (obj && (--obj->use == 0)) {
659 		list_del(&obj->node);
660 		amdgpu_ras_error_data_fini(&obj->err_data);
661 	}
662 
663 	if (obj && (obj->use < 0))
664 		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
665 }
666 
667 /* make one obj and return it. */
668 static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
669 		struct ras_common_if *head)
670 {
671 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
672 	struct ras_manager *obj;
673 
674 	if (!adev->ras_enabled || !con)
675 		return NULL;
676 
677 	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
678 		return NULL;
679 
680 	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
681 		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
682 			return NULL;
683 
684 		obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
685 	} else
686 		obj = &con->objs[head->block];
687 
688 	/* already exists. return obj? */
689 	if (alive_obj(obj))
690 		return NULL;
691 
692 	if (amdgpu_ras_error_data_init(&obj->err_data))
693 		return NULL;
694 
695 	obj->head = *head;
696 	obj->adev = adev;
697 	list_add(&obj->node, &con->head);
698 	get_obj(obj);
699 
700 	return obj;
701 }
702 
703 /* return an obj equal to head, or the first when head is NULL */
704 struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
705 		struct ras_common_if *head)
706 {
707 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
708 	struct ras_manager *obj;
709 	int i;
710 
711 	if (!adev->ras_enabled || !con)
712 		return NULL;
713 
714 	if (head) {
715 		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
716 			return NULL;
717 
718 		if (head->block == AMDGPU_RAS_BLOCK__MCA) {
719 			if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
720 				return NULL;
721 
722 			obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
723 		} else
724 			obj = &con->objs[head->block];
725 
726 		if (alive_obj(obj))
727 			return obj;
728 	} else {
729 		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
730 			obj = &con->objs[i];
731 			if (alive_obj(obj))
732 				return obj;
733 		}
734 	}
735 
736 	return NULL;
737 }
738 /* obj end */
739 
740 /* feature ctl begin */
741 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
742 					 struct ras_common_if *head)
743 {
744 	return adev->ras_hw_enabled & BIT(head->block);
745 }
746 
747 static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
748 		struct ras_common_if *head)
749 {
750 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
751 
752 	return con->features & BIT(head->block);
753 }
754 
755 /*
756  * if obj is not created, then create one.
757  * set feature enable flag.
758  */
759 static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
760 		struct ras_common_if *head, int enable)
761 {
762 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
763 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
764 
765 	/* If hardware does not support ras, then do not create obj.
766 	 * But if hardware supports ras, we can create the obj.
767 	 * The ras framework checks con->hw_supported to see if it needs to do
768 	 * the corresponding initialization.
769 	 * The IP checks con->support to see if it needs to disable ras.
770 	 */
771 	if (!amdgpu_ras_is_feature_allowed(adev, head))
772 		return 0;
773 
774 	if (enable) {
775 		if (!obj) {
776 			obj = amdgpu_ras_create_obj(adev, head);
777 			if (!obj)
778 				return -EINVAL;
779 		} else {
780 			/* In case we create obj somewhere else */
781 			get_obj(obj);
782 		}
783 		con->features |= BIT(head->block);
784 	} else {
785 		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
786 			con->features &= ~BIT(head->block);
787 			put_obj(obj);
788 		}
789 	}
790 
791 	return 0;
792 }
793 
794 /* wrapper of psp_ras_enable_features */
795 int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
796 		struct ras_common_if *head, bool enable)
797 {
798 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
799 	union ta_ras_cmd_input *info;
800 	int ret;
801 
802 	if (!con)
803 		return -EINVAL;
804 
805 	/* For non-gfx ip, do not enable the ras feature if it is not allowed. */
806 	/* For gfx ip, regardless of feature support status, */
807 	/* force issuing the enable or disable ras feature commands. */
808 	if (head->block != AMDGPU_RAS_BLOCK__GFX &&
809 	    !amdgpu_ras_is_feature_allowed(adev, head))
810 		return 0;
811 
812 	/* Only enable gfx ras feature from host side */
813 	if (head->block == AMDGPU_RAS_BLOCK__GFX &&
814 	    !amdgpu_sriov_vf(adev) &&
815 	    !amdgpu_ras_intr_triggered()) {
816 		info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
817 		if (!info)
818 			return -ENOMEM;
819 
820 		if (!enable) {
821 			info->disable_features = (struct ta_ras_disable_features_input) {
822 				.block_id =  amdgpu_ras_block_to_ta(head->block),
823 				.error_type = amdgpu_ras_error_to_ta(head->type),
824 			};
825 		} else {
826 			info->enable_features = (struct ta_ras_enable_features_input) {
827 				.block_id =  amdgpu_ras_block_to_ta(head->block),
828 				.error_type = amdgpu_ras_error_to_ta(head->type),
829 			};
830 		}
831 
832 		ret = psp_ras_enable_features(&adev->psp, info, enable);
833 		if (ret) {
834 			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
835 				enable ? "enable":"disable",
836 				get_ras_block_str(head),
837 				amdgpu_ras_is_poison_mode_supported(adev), ret);
838 			kfree(info);
839 			return ret;
840 		}
841 
842 		kfree(info);
843 	}
844 
845 	/* setup the obj */
846 	__amdgpu_ras_feature_enable(adev, head, enable);
847 
848 	return 0;
849 }
850 
851 /* Only used in device probe stage and called only once. */
852 int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
853 		struct ras_common_if *head, bool enable)
854 {
855 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
856 	int ret;
857 
858 	if (!con)
859 		return -EINVAL;
860 
861 	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
862 		if (enable) {
863 			/* There is no harm in issuing a ras TA cmd regardless of
864 			 * the current ras state.
865 			 * If current state == target state, it will do nothing.
866 			 * But sometimes it requests the driver to reset and repost
867 			 * with error code -EAGAIN.
868 			 */
869 			ret = amdgpu_ras_feature_enable(adev, head, 1);
870 			/* With old ras TA, we might fail to enable ras.
871 			 * Log it and just set up the object.
872 			 * TODO: remove this WA in the future.
873 			 */
874 			if (ret == -EINVAL) {
875 				ret = __amdgpu_ras_feature_enable(adev, head, 1);
876 				if (!ret)
877 					dev_info(adev->dev,
878 						"RAS INFO: %s setup object\n",
879 						get_ras_block_str(head));
880 			}
881 		} else {
882 			/* setup the object then issue a ras TA disable cmd.*/
883 			ret = __amdgpu_ras_feature_enable(adev, head, 1);
884 			if (ret)
885 				return ret;
886 
887 			/* gfx block ras disable cmd must send to ras-ta */
888 			if (head->block == AMDGPU_RAS_BLOCK__GFX)
889 				con->features |= BIT(head->block);
890 
891 			ret = amdgpu_ras_feature_enable(adev, head, 0);
892 
893 			/* clean gfx block ras features flag */
894 			if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
895 				con->features &= ~BIT(head->block);
896 		}
897 	} else
898 		ret = amdgpu_ras_feature_enable(adev, head, enable);
899 
900 	return ret;
901 }
902 
903 static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
904 		bool bypass)
905 {
906 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
907 	struct ras_manager *obj, *tmp;
908 
909 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
910 		/* bypass psp.
911 		 * aka just release the obj and corresponding flags
912 		 */
913 		if (bypass) {
914 			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
915 				break;
916 		} else {
917 			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
918 				break;
919 		}
920 	}
921 
922 	return con->features;
923 }
924 
925 static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
926 		bool bypass)
927 {
928 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
929 	int i;
930 	const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;
931 
932 	for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
933 		struct ras_common_if head = {
934 			.block = i,
935 			.type = default_ras_type,
936 			.sub_block_index = 0,
937 		};
938 
939 		if (i == AMDGPU_RAS_BLOCK__MCA)
940 			continue;
941 
942 		if (bypass) {
943 			/*
944 			 * bypass psp. vbios enables ras for us,
945 			 * so just create the obj.
946 			 */
947 			if (__amdgpu_ras_feature_enable(adev, &head, 1))
948 				break;
949 		} else {
950 			if (amdgpu_ras_feature_enable(adev, &head, 1))
951 				break;
952 		}
953 	}
954 
955 	for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
956 		struct ras_common_if head = {
957 			.block = AMDGPU_RAS_BLOCK__MCA,
958 			.type = default_ras_type,
959 			.sub_block_index = i,
960 		};
961 
962 		if (bypass) {
963 			/*
964 			 * bypass psp. vbios enables ras for us,
965 			 * so just create the obj.
966 			 */
967 			if (__amdgpu_ras_feature_enable(adev, &head, 1))
968 				break;
969 		} else {
970 			if (amdgpu_ras_feature_enable(adev, &head, 1))
971 				break;
972 		}
973 	}
974 
975 	return con->features;
976 }
977 /* feature ctl end */
978 
979 static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
980 		enum amdgpu_ras_block block)
981 {
982 	if (!block_obj)
983 		return -EINVAL;
984 
985 	if (block_obj->ras_comm.block == block)
986 		return 0;
987 
988 	return -EINVAL;
989 }
990 
991 static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
992 					enum amdgpu_ras_block block, uint32_t sub_block_index)
993 {
994 	struct amdgpu_ras_block_list *node, *tmp;
995 	struct amdgpu_ras_block_object *obj;
996 
997 	if (block >= AMDGPU_RAS_BLOCK__LAST)
998 		return NULL;
999 
1000 	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
1001 		if (!node->ras_obj) {
1002 			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
1003 			continue;
1004 		}
1005 
1006 		obj = node->ras_obj;
1007 		if (obj->ras_block_match) {
1008 			if (obj->ras_block_match(obj, block, sub_block_index) == 0)
1009 				return obj;
1010 		} else {
1011 			if (amdgpu_ras_block_match_default(obj, block) == 0)
1012 				return obj;
1013 		}
1014 	}
1015 
1016 	return NULL;
1017 }
1018 
1019 static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
1020 {
1021 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
1022 	int ret = 0;
1023 
1024 	/*
1025 	 * choose the right query method according to
1026 	 * whether the smu supports querying error information
1027 	 */
1028 	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
1029 	if (ret == -EOPNOTSUPP) {
1030 		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1031 			adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
1032 			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
1033 
1034 		/* umc query_ras_error_address is also responsible for clearing
1035 		 * error status
1036 		 */
1037 		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1038 		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
1039 			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
1040 	} else if (!ret) {
1041 		if (adev->umc.ras &&
1042 			adev->umc.ras->ecc_info_query_ras_error_count)
1043 			adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);
1044 
1045 		if (adev->umc.ras &&
1046 			adev->umc.ras->ecc_info_query_ras_error_address)
1047 			adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
1048 	}
1049 }
1050 
1051 static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
1052 					      struct ras_manager *ras_mgr,
1053 					      struct ras_err_data *err_data,
1054 					      struct ras_query_context *qctx,
1055 					      const char *blk_name,
1056 					      bool is_ue,
1057 					      bool is_de)
1058 {
1059 	struct amdgpu_smuio_mcm_config_info *mcm_info;
1060 	struct ras_err_node *err_node;
1061 	struct ras_err_info *err_info;
1062 	u64 event_id = qctx->evid.event_id;
1063 
1064 	if (is_ue) {
1065 		for_each_ras_error(err_node, err_data) {
1066 			err_info = &err_node->err_info;
1067 			mcm_info = &err_info->mcm_info;
1068 			if (err_info->ue_count) {
1069 				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1070 					      "%lld new uncorrectable hardware errors detected in %s block\n",
1071 					      mcm_info->socket_id,
1072 					      mcm_info->die_id,
1073 					      err_info->ue_count,
1074 					      blk_name);
1075 			}
1076 		}
1077 
1078 		for_each_ras_error(err_node, &ras_mgr->err_data) {
1079 			err_info = &err_node->err_info;
1080 			mcm_info = &err_info->mcm_info;
1081 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1082 				      "%lld uncorrectable hardware errors detected in total in %s block\n",
1083 				      mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
1084 		}
1085 
1086 	} else {
1087 		if (is_de) {
1088 			for_each_ras_error(err_node, err_data) {
1089 				err_info = &err_node->err_info;
1090 				mcm_info = &err_info->mcm_info;
1091 				if (err_info->de_count) {
1092 					RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1093 						      "%lld new deferred hardware errors detected in %s block\n",
1094 						      mcm_info->socket_id,
1095 						      mcm_info->die_id,
1096 						      err_info->de_count,
1097 						      blk_name);
1098 				}
1099 			}
1100 
1101 			for_each_ras_error(err_node, &ras_mgr->err_data) {
1102 				err_info = &err_node->err_info;
1103 				mcm_info = &err_info->mcm_info;
1104 				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1105 					      "%lld deferred hardware errors detected in total in %s block\n",
1106 					      mcm_info->socket_id, mcm_info->die_id,
1107 					      err_info->de_count, blk_name);
1108 			}
1109 		} else {
1110 			for_each_ras_error(err_node, err_data) {
1111 				err_info = &err_node->err_info;
1112 				mcm_info = &err_info->mcm_info;
1113 				if (err_info->ce_count) {
1114 					RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1115 						      "%lld new correctable hardware errors detected in %s block\n",
1116 						      mcm_info->socket_id,
1117 						      mcm_info->die_id,
1118 						      err_info->ce_count,
1119 						      blk_name);
1120 				}
1121 			}
1122 
1123 			for_each_ras_error(err_node, &ras_mgr->err_data) {
1124 				err_info = &err_node->err_info;
1125 				mcm_info = &err_info->mcm_info;
1126 				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1127 					      "%lld correctable hardware errors detected in total in %s block\n",
1128 					      mcm_info->socket_id, mcm_info->die_id,
1129 					      err_info->ce_count, blk_name);
1130 			}
1131 		}
1132 	}
1133 }
1134 
1135 static inline bool err_data_has_source_info(struct ras_err_data *data)
1136 {
1137 	return !list_empty(&data->err_node_list);
1138 }
1139 
1140 static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
1141 					     struct ras_query_if *query_if,
1142 					     struct ras_err_data *err_data,
1143 					     struct ras_query_context *qctx)
1144 {
1145 	struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
1146 	const char *blk_name = get_ras_block_str(&query_if->head);
1147 	u64 event_id = qctx->evid.event_id;
1148 
1149 	if (err_data->ce_count) {
1150 		if (err_data_has_source_info(err_data)) {
1151 			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1152 							  blk_name, false, false);
1153 		} else if (!adev->aid_mask &&
1154 			   adev->smuio.funcs &&
1155 			   adev->smuio.funcs->get_socket_id &&
1156 			   adev->smuio.funcs->get_die_id) {
1157 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1158 				      "%ld correctable hardware errors "
1159 				      "detected in %s block\n",
1160 				      adev->smuio.funcs->get_socket_id(adev),
1161 				      adev->smuio.funcs->get_die_id(adev),
1162 				      ras_mgr->err_data.ce_count,
1163 				      blk_name);
1164 		} else {
1165 			RAS_EVENT_LOG(adev, event_id, "%ld correctable hardware errors "
1166 				      "detected in %s block\n",
1167 				      ras_mgr->err_data.ce_count,
1168 				      blk_name);
1169 		}
1170 	}
1171 
1172 	if (err_data->ue_count) {
1173 		if (err_data_has_source_info(err_data)) {
1174 			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1175 							  blk_name, true, false);
1176 		} else if (!adev->aid_mask &&
1177 			   adev->smuio.funcs &&
1178 			   adev->smuio.funcs->get_socket_id &&
1179 			   adev->smuio.funcs->get_die_id) {
1180 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1181 				      "%ld uncorrectable hardware errors "
1182 				      "detected in %s block\n",
1183 				      adev->smuio.funcs->get_socket_id(adev),
1184 				      adev->smuio.funcs->get_die_id(adev),
1185 				      ras_mgr->err_data.ue_count,
1186 				      blk_name);
1187 		} else {
1188 			RAS_EVENT_LOG(adev, event_id, "%ld uncorrectable hardware errors "
1189 				      "detected in %s block\n",
1190 				      ras_mgr->err_data.ue_count,
1191 				      blk_name);
1192 		}
1193 	}
1194 
1195 	if (err_data->de_count) {
1196 		if (err_data_has_source_info(err_data)) {
1197 			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1198 							  blk_name, false, true);
1199 		} else if (!adev->aid_mask &&
1200 			   adev->smuio.funcs &&
1201 			   adev->smuio.funcs->get_socket_id &&
1202 			   adev->smuio.funcs->get_die_id) {
1203 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1204 				      "%ld deferred hardware errors "
1205 				      "detected in %s block\n",
1206 				      adev->smuio.funcs->get_socket_id(adev),
1207 				      adev->smuio.funcs->get_die_id(adev),
1208 				      ras_mgr->err_data.de_count,
1209 				      blk_name);
1210 		} else {
1211 			RAS_EVENT_LOG(adev, event_id, "%ld deferred hardware errors "
1212 				      "detected in %s block\n",
1213 				      ras_mgr->err_data.de_count,
1214 				      blk_name);
1215 		}
1216 	}
1217 }
1218 
1219 static void amdgpu_ras_virt_error_generate_report(struct amdgpu_device *adev,
1220 						  struct ras_query_if *query_if,
1221 						  struct ras_err_data *err_data,
1222 						  struct ras_query_context *qctx)
1223 {
1224 	unsigned long new_ue, new_ce, new_de;
1225 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &query_if->head);
1226 	const char *blk_name = get_ras_block_str(&query_if->head);
1227 	u64 event_id = qctx->evid.event_id;
1228 
1229 	new_ce = err_data->ce_count - obj->err_data.ce_count;
1230 	new_ue = err_data->ue_count - obj->err_data.ue_count;
1231 	new_de = err_data->de_count - obj->err_data.de_count;
1232 
1233 	if (new_ce) {
1234 		RAS_EVENT_LOG(adev, event_id, "%lu correctable hardware errors "
1235 			      "detected in %s block\n",
1236 			      new_ce,
1237 			      blk_name);
1238 	}
1239 
1240 	if (new_ue) {
1241 		RAS_EVENT_LOG(adev, event_id, "%lu uncorrectable hardware errors "
1242 			      "detected in %s block\n",
1243 			      new_ue,
1244 			      blk_name);
1245 	}
1246 
1247 	if (new_de) {
1248 		RAS_EVENT_LOG(adev, event_id, "%lu deferred hardware errors "
1249 			      "detected in %s block\n",
1250 			      new_de,
1251 			      blk_name);
1252 	}
1253 }
1254 
1255 static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
1256 {
1257 	struct ras_err_node *err_node;
1258 	struct ras_err_info *err_info;
1259 
1260 	if (err_data_has_source_info(err_data)) {
1261 		for_each_ras_error(err_node, err_data) {
1262 			err_info = &err_node->err_info;
1263 			amdgpu_ras_error_statistic_de_count(&obj->err_data,
1264 					&err_info->mcm_info, err_info->de_count);
1265 			amdgpu_ras_error_statistic_ce_count(&obj->err_data,
1266 					&err_info->mcm_info, err_info->ce_count);
1267 			amdgpu_ras_error_statistic_ue_count(&obj->err_data,
1268 					&err_info->mcm_info, err_info->ue_count);
1269 		}
1270 	} else {
1271 		/* for the legacy asic path which doesn't have error source info */
1272 		obj->err_data.ue_count += err_data->ue_count;
1273 		obj->err_data.ce_count += err_data->ce_count;
1274 		obj->err_data.de_count += err_data->de_count;
1275 	}
1276 }
1277 
1278 static void amdgpu_ras_mgr_virt_error_data_statistics_update(struct ras_manager *obj,
1279 							     struct ras_err_data *err_data)
1280 {
1281 	/* Host reports absolute counts */
1282 	obj->err_data.ue_count = err_data->ue_count;
1283 	obj->err_data.ce_count = err_data->ce_count;
1284 	obj->err_data.de_count = err_data->de_count;
1285 }
1286 
1287 static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
1288 {
1289 	struct ras_common_if head;
1290 
1291 	memset(&head, 0, sizeof(head));
1292 	head.block = blk;
1293 
1294 	return amdgpu_ras_find_obj(adev, &head);
1295 }
1296 
1297 int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
1298 			const struct aca_info *aca_info, void *data)
1299 {
1300 	struct ras_manager *obj;
1301 
1302 	/* in resume phase, no need to create aca fs node */
1303 	if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
1304 		return 0;
1305 
1306 	obj = get_ras_manager(adev, blk);
1307 	if (!obj)
1308 		return -EINVAL;
1309 
1310 	return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
1311 }
1312 
1313 int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
1314 {
1315 	struct ras_manager *obj;
1316 
1317 	obj = get_ras_manager(adev, blk);
1318 	if (!obj)
1319 		return -EINVAL;
1320 
1321 	amdgpu_aca_remove_handle(&obj->aca_handle);
1322 
1323 	return 0;
1324 }
1325 
1326 static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
1327 					 enum aca_error_type type, struct ras_err_data *err_data,
1328 					 struct ras_query_context *qctx)
1329 {
1330 	struct ras_manager *obj;
1331 
1332 	obj = get_ras_manager(adev, blk);
1333 	if (!obj)
1334 		return -EINVAL;
1335 
1336 	return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data, qctx);
1337 }
1338 
1339 ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
1340 				  struct aca_handle *handle, char *buf, void *data)
1341 {
1342 	struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
1343 	struct ras_query_if info = {
1344 		.head = obj->head,
1345 	};
1346 
1347 	if (!amdgpu_ras_get_error_query_ready(obj->adev))
1348 		return sysfs_emit(buf, "Query currently inaccessible\n");
1349 
1350 	if (amdgpu_ras_query_error_status(obj->adev, &info))
1351 		return -EINVAL;
1352 
1353 	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
1354 			  "ce", info.ce_count, "de", info.de_count);
1355 }
1356 
1357 static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
1358 						struct ras_query_if *info,
1359 						struct ras_err_data *err_data,
1360 						struct ras_query_context *qctx,
1361 						unsigned int error_query_mode)
1362 {
1363 	enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
1364 	struct amdgpu_ras_block_object *block_obj = NULL;
1365 	int ret;
1366 
1367 	if (blk == AMDGPU_RAS_BLOCK_COUNT)
1368 		return -EINVAL;
1369 
1370 	if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
1371 		return -EINVAL;
1372 
1373 	if (error_query_mode == AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
1374 		return amdgpu_virt_req_ras_err_count(adev, blk, err_data);
1375 	} else if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
1376 		if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
1377 			amdgpu_ras_get_ecc_info(adev, err_data);
1378 		} else {
1379 			block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
1380 			if (!block_obj || !block_obj->hw_ops) {
1381 				dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1382 					     get_ras_block_str(&info->head));
1383 				return -EINVAL;
1384 			}
1385 
1386 			if (block_obj->hw_ops->query_ras_error_count)
1387 				block_obj->hw_ops->query_ras_error_count(adev, err_data);
1388 
1389 			if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
1390 			    (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
1391 			    (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
1392 				if (block_obj->hw_ops->query_ras_error_status)
1393 					block_obj->hw_ops->query_ras_error_status(adev);
1394 			}
1395 		}
1396 	} else {
1397 		if (amdgpu_aca_is_enabled(adev)) {
1398 			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data, qctx);
1399 			if (ret)
1400 				return ret;
1401 
1402 			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data, qctx);
1403 			if (ret)
1404 				return ret;
1405 
1406 			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data, qctx);
1407 			if (ret)
1408 				return ret;
1409 		} else {
1410 			/* FIXME: add code to check return value later */
1411 			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx);
1412 			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data, qctx);
1413 		}
1414 	}
1415 
1416 	return 0;
1417 }
1418 
1419 /* query/inject/cure begin */
1420 static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev,
1421 						    struct ras_query_if *info,
1422 						    enum ras_event_type type)
1423 {
1424 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1425 	struct ras_err_data err_data;
1426 	struct ras_query_context qctx;
1427 	unsigned int error_query_mode;
1428 	int ret;
1429 
1430 	if (!obj)
1431 		return -EINVAL;
1432 
1433 	ret = amdgpu_ras_error_data_init(&err_data);
1434 	if (ret)
1435 		return ret;
1436 
1437 	if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
1438 		return -EINVAL;
1439 
1440 	memset(&qctx, 0, sizeof(qctx));
1441 	qctx.evid.type = type;
1442 	qctx.evid.event_id = amdgpu_ras_acquire_event_id(adev, type);
1443 
1444 	if (!down_read_trylock(&adev->reset_domain->sem)) {
1445 		ret = -EIO;
1446 		goto out_fini_err_data;
1447 	}
1448 
1449 	ret = amdgpu_ras_query_error_status_helper(adev, info,
1450 						   &err_data,
1451 						   &qctx,
1452 						   error_query_mode);
1453 	up_read(&adev->reset_domain->sem);
1454 	if (ret)
1455 		goto out_fini_err_data;
1456 
1457 	if (error_query_mode != AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
1458 		amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
1459 		amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
1460 	} else {
1461 		/* Host provides absolute error counts. First generate the report
1462 		 * using the previous VF internal count against the new host count.
1463 		 * Then update the VF internal count.
1464 		 */
1465 		amdgpu_ras_virt_error_generate_report(adev, info, &err_data, &qctx);
1466 		amdgpu_ras_mgr_virt_error_data_statistics_update(obj, &err_data);
1467 	}
1468 
1469 	info->ue_count = obj->err_data.ue_count;
1470 	info->ce_count = obj->err_data.ce_count;
1471 	info->de_count = obj->err_data.de_count;
1472 
1473 out_fini_err_data:
1474 	amdgpu_ras_error_data_fini(&err_data);
1475 
1476 	return ret;
1477 }
1478 
1479 int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
1480 {
1481 	return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
1482 }
1483 
1484 int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
1485 		enum amdgpu_ras_block block)
1486 {
1487 	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1488 	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
1489 	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
1490 
1491 	if (!block_obj || !block_obj->hw_ops) {
1492 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1493 				ras_block_str(block));
1494 		return -EOPNOTSUPP;
1495 	}
1496 
1497 	if (!amdgpu_ras_is_supported(adev, block) ||
1498 	    !amdgpu_ras_get_aca_debug_mode(adev))
1499 		return -EOPNOTSUPP;
1500 
1501 	if (amdgpu_sriov_vf(adev))
1502 		return -EOPNOTSUPP;
1503 
1504 	/* skip ras error reset in gpu reset */
1505 	if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) &&
1506 	    ((smu_funcs && smu_funcs->set_debug_mode) ||
1507 	     (mca_funcs && mca_funcs->mca_set_debug_mode)))
1508 		return -EOPNOTSUPP;
1509 
1510 	if (block_obj->hw_ops->reset_ras_error_count)
1511 		block_obj->hw_ops->reset_ras_error_count(adev);
1512 
1513 	return 0;
1514 }
1515 
1516 int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
1517 		enum amdgpu_ras_block block)
1518 {
1519 	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1520 
1521 	if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
1522 		return 0;
1523 
1524 	if ((block == AMDGPU_RAS_BLOCK__GFX) ||
1525 	    (block == AMDGPU_RAS_BLOCK__MMHUB)) {
1526 		if (block_obj->hw_ops->reset_ras_error_status)
1527 			block_obj->hw_ops->reset_ras_error_status(adev);
1528 	}
1529 
1530 	return 0;
1531 }
1532 
1533 /* wrapper of psp_ras_trigger_error */
1534 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
1535 		struct ras_inject_if *info)
1536 {
1537 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1538 	struct ta_ras_trigger_error_input block_info = {
1539 		.block_id =  amdgpu_ras_block_to_ta(info->head.block),
1540 		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
1541 		.sub_block_index = info->head.sub_block_index,
1542 		.address = info->address,
1543 		.value = info->value,
1544 	};
1545 	int ret = -EINVAL;
1546 	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
1547 							info->head.block,
1548 							info->head.sub_block_index);
1549 
1550 	/* inject on guest isn't allowed, return success directly */
1551 	if (amdgpu_sriov_vf(adev))
1552 		return 0;
1553 
1554 	if (!obj)
1555 		return -EINVAL;
1556 
1557 	if (!block_obj || !block_obj->hw_ops)	{
1558 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1559 			     get_ras_block_str(&info->head));
1560 		return -EINVAL;
1561 	}
1562 
1563 	/* Calculate XGMI relative offset */
1564 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
1565 	    info->head.block != AMDGPU_RAS_BLOCK__GFX) {
1566 		block_info.address =
1567 			amdgpu_xgmi_get_relative_phy_addr(adev,
1568 							  block_info.address);
1569 	}
1570 
1571 	if (block_obj->hw_ops->ras_error_inject) {
1572 		if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
1573 			ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
1574 		else /* Special ras_error_inject is defined (e.g: xgmi) */
1575 			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
1576 						info->instance_mask);
1577 	} else {
1578 		/* default path */
1579 		ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
1580 	}
1581 
1582 	if (ret)
1583 		dev_err(adev->dev, "ras inject %s failed %d\n",
1584 			get_ras_block_str(&info->head), ret);
1585 
1586 	return ret;
1587 }
1588 
1589 /**
1590  * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
1591  * @adev: pointer to AMD GPU device
1592  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1593  * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
1594  * @query_info: pointer to ras_query_if
1595  *
1596  * Return 0 on query success or if there is nothing to do, otherwise return
1597  * an error on failure
1598  */
1599 static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
1600 					       unsigned long *ce_count,
1601 					       unsigned long *ue_count,
1602 					       struct ras_query_if *query_info)
1603 {
1604 	int ret;
1605 
1606 	if (!query_info)
1607 		/* do nothing if query_info is not specified */
1608 		return 0;
1609 
1610 	ret = amdgpu_ras_query_error_status(adev, query_info);
1611 	if (ret)
1612 		return ret;
1613 
1614 	*ce_count += query_info->ce_count;
1615 	*ue_count += query_info->ue_count;
1616 
1617 	/* some hardware/IP supports read to clear,
1618 	 * so no need to explicitly reset the err status after the query call */
1619 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
1620 	    amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
1621 		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
1622 			dev_warn(adev->dev,
1623 				 "Failed to reset error counter and error status\n");
1624 	}
1625 
1626 	return 0;
1627 }
1628 
1629 /**
1630  * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
1631  * @adev: pointer to AMD GPU device
1632  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1633  * @ue_count: pointer to an integer to be set to the count of uncorrectable
1634  * errors.
1635  * @query_info: pointer to ras_query_if if the query request is only for a
1636  * specific ip block; if info is NULL, then the query request is for
1637  * all the ip blocks that support querying ras error counters/status
1638  *
1639  * If @ce_count or @ue_count is set, count and return the corresponding
1640  * error counts in those integer pointers. Return 0 if the device
1641  * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
1642  */
1643 int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
1644 				 unsigned long *ce_count,
1645 				 unsigned long *ue_count,
1646 				 struct ras_query_if *query_info)
1647 {
1648 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1649 	struct ras_manager *obj;
1650 	unsigned long ce, ue;
1651 	int ret;
1652 
1653 	if (!adev->ras_enabled || !con)
1654 		return -EOPNOTSUPP;
1655 
1656 	/* Don't count since no reporting.
1657 	 */
1658 	if (!ce_count && !ue_count)
1659 		return 0;
1660 
1661 	ce = 0;
1662 	ue = 0;
1663 	if (!query_info) {
1664 		/* query all the ip blocks that support ras query interface */
1665 		list_for_each_entry(obj, &con->head, node) {
1666 			struct ras_query_if info = {
1667 				.head = obj->head,
1668 			};
1669 
1670 			ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
1671 		}
1672 	} else {
1673 		/* query specific ip block */
1674 		ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
1675 	}
1676 
1677 	if (ret)
1678 		return ret;
1679 
1680 	if (ce_count)
1681 		*ce_count = ce;
1682 
1683 	if (ue_count)
1684 		*ue_count = ue;
1685 
1686 	return 0;
1687 }
1688 /* query/inject/cure end */
1689 
1690 
1691 /* sysfs begin */
1692 
1693 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1694 		struct ras_badpage **bps, unsigned int *count);
1695 
1696 static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
1697 {
1698 	switch (flags) {
1699 	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
1700 		return "R";
1701 	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
1702 		return "P";
1703 	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
1704 	default:
1705 		return "F";
1706 	}
1707 }
1708 
1709 /**
1710  * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
1711  *
1712  * It allows the user to read the bad pages of vram on the gpu through
1713  * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
1714  *
1715  * It outputs multiple lines, and each line stands for one gpu page.
1716  *
1717  * The format of one line is below,
1718  * gpu pfn : gpu page size : flags
1719  *
1720  * gpu pfn and gpu page size are printed in hex format.
1721  * flags can be one of the characters below,
1722  *
1723  * R: reserved, this gpu page is reserved and not able to be used.
1724  *
1725  * P: pending for reserve, this gpu page is marked as bad and will be reserved
1726  * in the next window of page_reserve.
1727  *
1728  * F: unable to reserve. This gpu page can't be reserved for some reason.
1729  *
1730  * Examples:
1731  *
1732  * .. code-block:: bash
1733  *
1734  *	0x00000001 : 0x00001000 : R
1735  *	0x00000002 : 0x00001000 : P
1736  *
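 * The list can be read with a plain cat (a hedged illustration; the file
 * is empty until bad pages have actually been retired):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/gpu_vram_bad_pages
 *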
1737  */
1738 
1739 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
1740 		struct kobject *kobj, struct bin_attribute *attr,
1741 		char *buf, loff_t ppos, size_t count)
1742 {
1743 	struct amdgpu_ras *con =
1744 		container_of(attr, struct amdgpu_ras, badpages_attr);
1745 	struct amdgpu_device *adev = con->adev;
1746 	const unsigned int element_size =
1747 		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
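	/*
	 * Every bad page is emitted as one fixed-width text record, so the
	 * file offset maps directly onto a record index: start is the first
	 * whole record at or after ppos, end covers the last record that
	 * still fits into the count bytes requested.
	 */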
1748 	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
1749 	unsigned int end = div64_ul(ppos + count - 1, element_size);
1750 	ssize_t s = 0;
1751 	struct ras_badpage *bps = NULL;
1752 	unsigned int bps_count = 0;
1753 
1754 	memset(buf, 0, count);
1755 
1756 	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
1757 		return 0;
1758 
1759 	for (; start < end && start < bps_count; start++)
1760 		s += scnprintf(&buf[s], element_size + 1,
1761 				"0x%08x : 0x%08x : %1s\n",
1762 				bps[start].bp,
1763 				bps[start].size,
1764 				amdgpu_ras_badpage_flags_str(bps[start].flags));
1765 
1766 	kfree(bps);
1767 
1768 	return s;
1769 }
1770 
1771 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
1772 		struct device_attribute *attr, char *buf)
1773 {
1774 	struct amdgpu_ras *con =
1775 		container_of(attr, struct amdgpu_ras, features_attr);
1776 
1777 	return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
1778 }
1779 
1780 static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
1781 		struct device_attribute *attr, char *buf)
1782 {
1783 	struct amdgpu_ras *con =
1784 		container_of(attr, struct amdgpu_ras, version_attr);
1785 	return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
1786 }
1787 
1788 static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
1789 		struct device_attribute *attr, char *buf)
1790 {
1791 	struct amdgpu_ras *con =
1792 		container_of(attr, struct amdgpu_ras, schema_attr);
1793 	return sysfs_emit(buf, "schema: 0x%x\n", con->schema);
1794 }
1795 
1796 static struct {
1797 	enum ras_event_type type;
1798 	const char *name;
1799 } dump_event[] = {
1800 	{RAS_EVENT_TYPE_FATAL, "Fatal Error"},
1801 	{RAS_EVENT_TYPE_POISON_CREATION, "Poison Creation"},
1802 	{RAS_EVENT_TYPE_POISON_CONSUMPTION, "Poison Consumption"},
1803 };
1804 
1805 static ssize_t amdgpu_ras_sysfs_event_state_show(struct device *dev,
1806 						 struct device_attribute *attr, char *buf)
1807 {
1808 	struct amdgpu_ras *con =
1809 		container_of(attr, struct amdgpu_ras, event_state_attr);
1810 	struct ras_event_manager *event_mgr = con->event_mgr;
1811 	struct ras_event_state *event_state;
1812 	int i, size = 0;
1813 
1814 	if (!event_mgr)
1815 		return -EINVAL;
1816 
1817 	size += sysfs_emit_at(buf, size, "current seqno: %llu\n", atomic64_read(&event_mgr->seqno));
1818 	for (i = 0; i < ARRAY_SIZE(dump_event); i++) {
1819 		event_state = &event_mgr->event_state[dump_event[i].type];
1820 		size += sysfs_emit_at(buf, size, "%s: count:%llu, last_seqno:%llu\n",
1821 				      dump_event[i].name,
1822 				      atomic64_read(&event_state->count),
1823 				      event_state->last_seqno);
1824 	}
1825 
1826 	return (ssize_t)size;
1827 }
1828 
1829 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
1830 {
1831 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1832 
1833 	if (adev->dev->kobj.sd)
1834 		sysfs_remove_file_from_group(&adev->dev->kobj,
1835 				&con->badpages_attr.attr,
1836 				RAS_FS_NAME);
1837 }
1838 
1839 static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev)
1840 {
1841 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1842 	struct attribute *attrs[] = {
1843 		&con->features_attr.attr,
1844 		&con->version_attr.attr,
1845 		&con->schema_attr.attr,
1846 		&con->event_state_attr.attr,
1847 		NULL
1848 	};
1849 	struct attribute_group group = {
1850 		.name = RAS_FS_NAME,
1851 		.attrs = attrs,
1852 	};
1853 
1854 	if (adev->dev->kobj.sd)
1855 		sysfs_remove_group(&adev->dev->kobj, &group);
1856 
1857 	return 0;
1858 }
1859 
1860 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
1861 		struct ras_common_if *head)
1862 {
1863 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1864 
1865 	if (amdgpu_aca_is_enabled(adev))
1866 		return 0;
1867 
1868 	if (!obj || obj->attr_inuse)
1869 		return -EINVAL;
1870 
1871 	if (amdgpu_sriov_vf(adev) && !amdgpu_virt_ras_telemetry_block_en(adev, head->block))
1872 		return 0;
1873 
1874 	get_obj(obj);
1875 
1876 	snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
1877 		"%s_err_count", head->name);
1878 
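	/*
	 * Each RAS block gets a read-only "<block>_err_count" file in the
	 * device's "ras" sysfs group, e.g. (illustrative)
	 * /sys/class/drm/card0/device/ras/umc_err_count.
	 */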
1879 	obj->sysfs_attr = (struct device_attribute){
1880 		.attr = {
1881 			.name = obj->fs_data.sysfs_name,
1882 			.mode = S_IRUGO,
1883 		},
1884 			.show = amdgpu_ras_sysfs_read,
1885 	};
1886 	sysfs_attr_init(&obj->sysfs_attr.attr);
1887 
1888 	if (sysfs_add_file_to_group(&adev->dev->kobj,
1889 				&obj->sysfs_attr.attr,
1890 				RAS_FS_NAME)) {
1891 		put_obj(obj);
1892 		return -EINVAL;
1893 	}
1894 
1895 	obj->attr_inuse = 1;
1896 
1897 	return 0;
1898 }
1899 
1900 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
1901 		struct ras_common_if *head)
1902 {
1903 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1904 
1905 	if (amdgpu_aca_is_enabled(adev))
1906 		return 0;
1907 
1908 	if (!obj || !obj->attr_inuse)
1909 		return -EINVAL;
1910 
1911 	if (adev->dev->kobj.sd)
1912 		sysfs_remove_file_from_group(&adev->dev->kobj,
1913 				&obj->sysfs_attr.attr,
1914 				RAS_FS_NAME);
1915 	obj->attr_inuse = 0;
1916 	put_obj(obj);
1917 
1918 	return 0;
1919 }
1920 
1921 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
1922 {
1923 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1924 	struct ras_manager *obj, *tmp;
1925 
1926 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
1927 		amdgpu_ras_sysfs_remove(adev, &obj->head);
1928 	}
1929 
1930 	if (amdgpu_bad_page_threshold != 0)
1931 		amdgpu_ras_sysfs_remove_bad_page_node(adev);
1932 
1933 	amdgpu_ras_sysfs_remove_dev_attr_node(adev);
1934 
1935 	return 0;
1936 }
1937 /* sysfs end */
1938 
1939 /**
1940  * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
1941  *
1942  * Normally when there is an uncorrectable error, the driver will reset
1943  * the GPU to recover.  However, in the event of an unrecoverable error,
1944  * the driver also provides an interface to reboot the system
1945  * automatically instead.
1946  *
1947  * The following file in debugfs provides that interface:
1948  * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
1949  *
1950  * Usage:
1951  *
1952  * .. code-block:: bash
1953  *
1954  *	echo true > .../ras/auto_reboot
1955  *
1956  */
1957 /* debugfs begin */
1958 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
1959 {
1960 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1961 	struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
1962 	struct drm_minor  *minor = adev_to_drm(adev)->primary;
1963 	struct dentry     *dir;
1964 
1965 	dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
1966 	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
1967 			    &amdgpu_ras_debugfs_ctrl_ops);
1968 	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
1969 			    &amdgpu_ras_debugfs_eeprom_ops);
1970 	debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
1971 			   &con->bad_page_cnt_threshold);
1972 	debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
1973 	debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
1974 	debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
1975 	debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
1976 			    &amdgpu_ras_debugfs_eeprom_size_ops);
1977 	con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
1978 						       S_IRUGO, dir, adev,
1979 						       &amdgpu_ras_debugfs_eeprom_table_ops);
1980 	amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
1981 
1982 	/*
1983 	 * After an uncorrectable error happens, GPU recovery is usually
1984 	 * scheduled. But because of the known problem of GPU recovery
1985 	 * failing to bring the GPU back, the interface below gives the user
1986 	 * a direct way to reboot the system automatically when an
1987 	 * ERREVENT_ATHUB_INTERRUPT is generated. In that case the normal
1988 	 * GPU recovery routine will never be called.
1989 	 */
1990 	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
1991 
1992 	/*
1993 	 * The user can set this so that the hardware error count registers
1994 	 * of the RAS IPs are not cleared during ras recovery.
1995 	 */
1996 	debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
1997 			    &con->disable_ras_err_cnt_harvest);
1998 	return dir;
1999 }
2000 
2001 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
2002 				      struct ras_fs_if *head,
2003 				      struct dentry *dir)
2004 {
2005 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
2006 
2007 	if (!obj || !dir)
2008 		return;
2009 
2010 	get_obj(obj);
2011 
2012 	memcpy(obj->fs_data.debugfs_name,
2013 			head->debugfs_name,
2014 			sizeof(obj->fs_data.debugfs_name));
2015 
2016 	debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
2017 			    obj, &amdgpu_ras_debugfs_ops);
2018 }
2019 
2020 static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev)
2021 {
2022 	bool ret;
2023 
2024 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2025 	case IP_VERSION(13, 0, 6):
2026 	case IP_VERSION(13, 0, 12):
2027 	case IP_VERSION(13, 0, 14):
2028 		ret = true;
2029 		break;
2030 	default:
2031 		ret = false;
2032 		break;
2033 	}
2034 
2035 	return ret;
2036 }
2037 
2038 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
2039 {
2040 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2041 	struct dentry *dir;
2042 	struct ras_manager *obj;
2043 	struct ras_fs_if fs_info;
2044 
2045 	/*
2046 	 * This won't be called in the resume path, so there is no need to
2047 	 * check the suspend and gpu reset status.
2048 	 */
2049 	if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
2050 		return;
2051 
2052 	dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
2053 
2054 	list_for_each_entry(obj, &con->head, node) {
2055 		if (amdgpu_ras_is_supported(adev, obj->head.block) &&
2056 			(obj->attr_inuse == 1)) {
2057 			sprintf(fs_info.debugfs_name, "%s_err_inject",
2058 					get_ras_block_str(&obj->head));
2059 			fs_info.head = obj->head;
2060 			amdgpu_ras_debugfs_create(adev, &fs_info, dir);
2061 		}
2062 	}
2063 
2064 	if (amdgpu_ras_aca_is_supported(adev)) {
2065 		if (amdgpu_aca_is_enabled(adev))
2066 			amdgpu_aca_smu_debugfs_init(adev, dir);
2067 		else
2068 			amdgpu_mca_smu_debugfs_init(adev, dir);
2069 	}
2070 }
2071 
2072 /* debugfs end */
2073 
2074 /* ras fs */
2075 static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
2076 		amdgpu_ras_sysfs_badpages_read, NULL, 0);
2077 static DEVICE_ATTR(features, S_IRUGO,
2078 		amdgpu_ras_sysfs_features_read, NULL);
2079 static DEVICE_ATTR(version, 0444,
2080 		amdgpu_ras_sysfs_version_show, NULL);
2081 static DEVICE_ATTR(schema, 0444,
2082 		amdgpu_ras_sysfs_schema_show, NULL);
2083 static DEVICE_ATTR(event_state, 0444,
2084 		   amdgpu_ras_sysfs_event_state_show, NULL);
2085 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
2086 {
2087 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2088 	struct attribute_group group = {
2089 		.name = RAS_FS_NAME,
2090 	};
2091 	struct attribute *attrs[] = {
2092 		&con->features_attr.attr,
2093 		&con->version_attr.attr,
2094 		&con->schema_attr.attr,
2095 		&con->event_state_attr.attr,
2096 		NULL
2097 	};
2098 	struct bin_attribute *bin_attrs[] = {
2099 		NULL,
2100 		NULL,
2101 	};
2102 	int r;
2103 
2104 	group.attrs = attrs;
2105 
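	/*
	 * The static DEVICE_ATTR/BIN_ATTR definitions above only act as
	 * templates; each device copies them into its own amdgpu_ras so the
	 * attribute lifetime matches the device, then runs sysfs_attr_init()
	 * on every copy before the group is created.
	 */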
2106 	/* add features entry */
2107 	con->features_attr = dev_attr_features;
2108 	sysfs_attr_init(attrs[0]);
2109 
2110 	/* add version entry */
2111 	con->version_attr = dev_attr_version;
2112 	sysfs_attr_init(attrs[1]);
2113 
2114 	/* add schema entry */
2115 	con->schema_attr = dev_attr_schema;
2116 	sysfs_attr_init(attrs[2]);
2117 
2118 	/* add event_state entry */
2119 	con->event_state_attr = dev_attr_event_state;
2120 	sysfs_attr_init(attrs[3]);
2121 
2122 	if (amdgpu_bad_page_threshold != 0) {
2123 		/* add bad_page_features entry */
2124 		bin_attr_gpu_vram_bad_pages.private = NULL;
2125 		con->badpages_attr = bin_attr_gpu_vram_bad_pages;
2126 		bin_attrs[0] = &con->badpages_attr;
2127 		group.bin_attrs = bin_attrs;
2128 		sysfs_bin_attr_init(bin_attrs[0]);
2129 	}
2130 
2131 	r = sysfs_create_group(&adev->dev->kobj, &group);
2132 	if (r)
2133 		dev_err(adev->dev, "Failed to create RAS sysfs group!");
2134 
2135 	return 0;
2136 }
2137 
2138 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
2139 {
2140 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2141 	struct ras_manager *con_obj, *ip_obj, *tmp;
2142 
2143 	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2144 		list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
2145 			ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
2146 			if (ip_obj)
2147 				put_obj(ip_obj);
2148 		}
2149 	}
2150 
2151 	amdgpu_ras_sysfs_remove_all(adev);
2152 	return 0;
2153 }
2154 /* ras fs end */
2155 
2156 /* ih begin */
2157 
2158 /* For hardware that cannot enable the bif ring for both the ras_controller_irq
2159  * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
2160  * register to check whether the interrupt has been triggered, and properly
2161  * ack the interrupt if it is there.
2162  */
2163 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
2164 {
2165 	/* Fatal error events are handled on host side */
2166 	if (amdgpu_sriov_vf(adev))
2167 		return;
2168 	/*
2169 	 * If the current interrupt is caused by a non-fatal RAS error, skip
2170 	 * check for fatal error. For fatal errors, FED status of all devices
2171 	 * in XGMI hive gets set when the first device gets fatal error
2172 	 * interrupt. The error gets propagated to other devices as well, so
2173 	 * make sure to ack the interrupt regardless of FED status.
2174 	 */
2175 	if (!amdgpu_ras_get_fed_status(adev) &&
2176 	    amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY))
2177 		return;
2178 
2179 	if (adev->nbio.ras &&
2180 	    adev->nbio.ras->handle_ras_controller_intr_no_bifring)
2181 		adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
2182 
2183 	if (adev->nbio.ras &&
2184 	    adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
2185 		adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
2186 }
2187 
2188 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
2189 				struct amdgpu_iv_entry *entry)
2190 {
2191 	bool poison_stat = false;
2192 	struct amdgpu_device *adev = obj->adev;
2193 	struct amdgpu_ras_block_object *block_obj =
2194 		amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
2195 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2196 	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
2197 	u64 event_id;
2198 	int ret;
2199 
2200 	if (!block_obj || !con)
2201 		return;
2202 
2203 	ret = amdgpu_ras_mark_ras_event(adev, type);
2204 	if (ret)
2205 		return;
2206 
2207 	amdgpu_ras_set_err_poison(adev, block_obj->ras_comm.block);
2208 	/* both query_poison_status and handle_poison_consumption are optional,
2209 	 * but at least one of them should be implemented if a poison
2210 	 * consumption handler is needed
2211 	 */
2212 	if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
2213 		poison_stat = block_obj->hw_ops->query_poison_status(adev);
2214 		if (!poison_stat) {
2215 			/* Not poison consumption interrupt, no need to handle it */
2216 			dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
2217 					block_obj->ras_comm.name);
2218 
2219 			return;
2220 		}
2221 	}
2222 
2223 	amdgpu_umc_poison_handler(adev, obj->head.block, 0);
2224 
2225 	if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
2226 		poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
2227 
2228 	/* gpu reset is the fallback for the failed and default cases.
2229 	 * For the RMA case, amdgpu_umc_poison_handler will handle the gpu reset.
2230 	 */
2231 	if (poison_stat && !amdgpu_ras_is_rma(adev)) {
2232 		event_id = amdgpu_ras_acquire_event_id(adev, type);
2233 		RAS_EVENT_LOG(adev, event_id,
2234 			      "GPU reset for %s RAS poison consumption is issued!\n",
2235 			      block_obj->ras_comm.name);
2236 		amdgpu_ras_reset_gpu(adev);
2237 	}
2238 
2239 	if (!poison_stat)
2240 		amdgpu_gfx_poison_consumption_handler(adev, entry);
2241 }
2242 
2243 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
2244 				struct amdgpu_iv_entry *entry)
2245 {
2246 	struct amdgpu_device *adev = obj->adev;
2247 	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
2248 	u64 event_id;
2249 	int ret;
2250 
2251 	ret = amdgpu_ras_mark_ras_event(adev, type);
2252 	if (ret)
2253 		return;
2254 
2255 	event_id = amdgpu_ras_acquire_event_id(adev, type);
2256 	RAS_EVENT_LOG(adev, event_id, "Poison is created\n");
2257 
2258 	if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) {
2259 		struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev);
2260 
2261 		atomic_inc(&con->page_retirement_req_cnt);
2262 		atomic_inc(&con->poison_creation_count);
2263 
2264 		wake_up(&con->page_retirement_wq);
2265 	}
2266 }
2267 
2268 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
2269 				struct amdgpu_iv_entry *entry)
2270 {
2271 	struct ras_ih_data *data = &obj->ih_data;
2272 	struct ras_err_data err_data;
2273 	int ret;
2274 
2275 	if (!data->cb)
2276 		return;
2277 
2278 	ret = amdgpu_ras_error_data_init(&err_data);
2279 	if (ret)
2280 		return;
2281 
2282 	/* Let the IP handle its data; maybe we need to get the output
2283 	 * from the callback to update the error type/count, etc.
2284 	 */
2285 	amdgpu_ras_set_fed(obj->adev, true);
2286 	ret = data->cb(obj->adev, &err_data, entry);
2287 	/* A ue will trigger an interrupt, and in that case
2288 	 * we need to do a reset to recover the whole system.
2289 	 * But leave it to the IP to do that recovery; here we just
2290 	 * dispatch the error.
2291 	 */
2292 	if (ret == AMDGPU_RAS_SUCCESS) {
2293 		/* these counts could be left as 0 if
2294 		 * some blocks do not count the number of errors
2295 		 */
2296 		obj->err_data.ue_count += err_data.ue_count;
2297 		obj->err_data.ce_count += err_data.ce_count;
2298 		obj->err_data.de_count += err_data.de_count;
2299 	}
2300 
2301 	amdgpu_ras_error_data_fini(&err_data);
2302 }
2303 
2304 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
2305 {
2306 	struct ras_ih_data *data = &obj->ih_data;
2307 	struct amdgpu_iv_entry entry;
2308 
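	/*
	 * ih_data acts as a simple single-producer/single-consumer ring:
	 * amdgpu_ras_interrupt_dispatch() copies an iv entry in and advances
	 * wptr, this handler copies it out and advances rptr. The barriers
	 * order each copy against the matching pointer update.
	 */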
2309 	while (data->rptr != data->wptr) {
2310 		rmb();
2311 		memcpy(&entry, &data->ring[data->rptr],
2312 				data->element_size);
2313 
2314 		wmb();
2315 		data->rptr = (data->aligned_element_size +
2316 				data->rptr) % data->ring_size;
2317 
2318 		if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
2319 			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2320 				amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
2321 			else
2322 				amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
2323 		} else {
2324 			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2325 				amdgpu_ras_interrupt_umc_handler(obj, &entry);
2326 			else
2327 				dev_warn(obj->adev->dev,
2328 					"No RAS interrupt handler for non-UMC block with poison disabled.\n");
2329 		}
2330 	}
2331 }
2332 
2333 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
2334 {
2335 	struct ras_ih_data *data =
2336 		container_of(work, struct ras_ih_data, ih_work);
2337 	struct ras_manager *obj =
2338 		container_of(data, struct ras_manager, ih_data);
2339 
2340 	amdgpu_ras_interrupt_handler(obj);
2341 }
2342 
2343 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
2344 		struct ras_dispatch_if *info)
2345 {
2346 	struct ras_manager *obj;
2347 	struct ras_ih_data *data;
2348 
2349 	obj = amdgpu_ras_find_obj(adev, &info->head);
2350 	if (!obj)
2351 		return -EINVAL;
2352 
2353 	data = &obj->ih_data;
2354 
2355 	if (data->inuse == 0)
2356 		return 0;
2357 
2358 	/* Might overflow; old, unhandled entries would be overwritten... */
2359 	memcpy(&data->ring[data->wptr], info->entry,
2360 			data->element_size);
2361 
2362 	wmb();
2363 	data->wptr = (data->aligned_element_size +
2364 			data->wptr) % data->ring_size;
2365 
2366 	schedule_work(&data->ih_work);
2367 
2368 	return 0;
2369 }
2370 
2371 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
2372 		struct ras_common_if *head)
2373 {
2374 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2375 	struct ras_ih_data *data;
2376 
2377 	if (!obj)
2378 		return -EINVAL;
2379 
2380 	data = &obj->ih_data;
2381 	if (data->inuse == 0)
2382 		return 0;
2383 
2384 	cancel_work_sync(&data->ih_work);
2385 
2386 	kfree(data->ring);
2387 	memset(data, 0, sizeof(*data));
2388 	put_obj(obj);
2389 
2390 	return 0;
2391 }
2392 
2393 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
2394 		struct ras_common_if *head)
2395 {
2396 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2397 	struct ras_ih_data *data;
2398 	struct amdgpu_ras_block_object *ras_obj;
2399 
2400 	if (!obj) {
2401 		/* in case we register the IH before enabling the ras feature */
2402 		obj = amdgpu_ras_create_obj(adev, head);
2403 		if (!obj)
2404 			return -EINVAL;
2405 	} else
2406 		get_obj(obj);
2407 
2408 	ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
2409 
2410 	data = &obj->ih_data;
2411 	/* add the callback, etc. */
2412 	*data = (struct ras_ih_data) {
2413 		.inuse = 0,
2414 		.cb = ras_obj->ras_cb,
2415 		.element_size = sizeof(struct amdgpu_iv_entry),
2416 		.rptr = 0,
2417 		.wptr = 0,
2418 	};
2419 
2420 	INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
2421 
2422 	data->aligned_element_size = ALIGN(data->element_size, 8);
2423 	/* the ring can store 64 iv entries. */
2424 	data->ring_size = 64 * data->aligned_element_size;
2425 	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
2426 	if (!data->ring) {
2427 		put_obj(obj);
2428 		return -ENOMEM;
2429 	}
2430 
2431 	/* IH is ready */
2432 	data->inuse = 1;
2433 
2434 	return 0;
2435 }
2436 
2437 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
2438 {
2439 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2440 	struct ras_manager *obj, *tmp;
2441 
2442 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
2443 		amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
2444 	}
2445 
2446 	return 0;
2447 }
2448 /* ih end */
2449 
2450 /* traverse all IPs except NBIO to query error counters */
2451 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev, enum ras_event_type type)
2452 {
2453 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2454 	struct ras_manager *obj;
2455 
2456 	if (!adev->ras_enabled || !con)
2457 		return;
2458 
2459 	list_for_each_entry(obj, &con->head, node) {
2460 		struct ras_query_if info = {
2461 			.head = obj->head,
2462 		};
2463 
2464 		/*
2465 		 * The PCIE_BIF IP has a separate isr for the ras controller
2466 		 * interrupt, and the specific ras counter query will be
2467 		 * done in that isr. So skip such blocks from the common
2468 		 * sync flood interrupt isr call.
2469 		 */
2470 		if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
2471 			continue;
2472 
2473 		/*
2474 		 * this is a workaround for aldebaran: skip sending the msg to
2475 		 * smu to get the ecc_info table because smu temporarily fails
2476 		 * to handle that request.
2477 		 * should be removed once smu fixes ecc_info table handling.
2478 		 */
2479 		if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
2480 		    (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2481 		     IP_VERSION(13, 0, 2)))
2482 			continue;
2483 
2484 		amdgpu_ras_query_error_status_with_event(adev, &info, type);
2485 
2486 		if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2487 			    IP_VERSION(11, 0, 2) &&
2488 		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2489 			    IP_VERSION(11, 0, 4) &&
2490 		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2491 			    IP_VERSION(13, 0, 0)) {
2492 			if (amdgpu_ras_reset_error_status(adev, info.head.block))
2493 				dev_warn(adev->dev, "Failed to reset error counter and error status");
2494 		}
2495 	}
2496 }
2497 
2498 /* Parse RdRspStatus and WrRspStatus */
2499 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
2500 					  struct ras_query_if *info)
2501 {
2502 	struct amdgpu_ras_block_object *block_obj;
2503 	/*
2504 	 * Only two blocks need to query the read/write
2505 	 * RspStatus at the current state
2506 	 */
2507 	if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
2508 		(info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
2509 		return;
2510 
2511 	block_obj = amdgpu_ras_get_ras_block(adev,
2512 					info->head.block,
2513 					info->head.sub_block_index);
2514 
2515 	if (!block_obj || !block_obj->hw_ops) {
2516 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
2517 			     get_ras_block_str(&info->head));
2518 		return;
2519 	}
2520 
2521 	if (block_obj->hw_ops->query_ras_error_status)
2522 		block_obj->hw_ops->query_ras_error_status(adev);
2523 
2524 }
2525 
2526 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
2527 {
2528 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2529 	struct ras_manager *obj;
2530 
2531 	if (!adev->ras_enabled || !con)
2532 		return;
2533 
2534 	list_for_each_entry(obj, &con->head, node) {
2535 		struct ras_query_if info = {
2536 			.head = obj->head,
2537 		};
2538 
2539 		amdgpu_ras_error_status_query(adev, &info);
2540 	}
2541 }
2542 
2543 /* recovery begin */
2544 
2545 /* return 0 on success.
2546  * the caller needs to free bps.
2547  */
2548 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
2549 		struct ras_badpage **bps, unsigned int *count)
2550 {
2551 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2552 	struct ras_err_handler_data *data;
2553 	int i = 0;
2554 	int ret = 0, status;
2555 
2556 	if (!con || !con->eh_data || !bps || !count)
2557 		return -EINVAL;
2558 
2559 	mutex_lock(&con->recovery_lock);
2560 	data = con->eh_data;
2561 	if (!data || data->count == 0) {
2562 		*bps = NULL;
2563 		ret = -EINVAL;
2564 		goto out;
2565 	}
2566 
2567 	*bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
2568 	if (!*bps) {
2569 		ret = -ENOMEM;
2570 		goto out;
2571 	}
2572 
2573 	for (; i < data->count; i++) {
2574 		(*bps)[i] = (struct ras_badpage){
2575 			.bp = data->bps[i].retired_page,
2576 			.size = AMDGPU_GPU_PAGE_SIZE,
2577 			.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
2578 		};
2579 		status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
2580 				data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);
2581 		if (status == -EBUSY)
2582 			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
2583 		else if (status == -ENOENT)
2584 			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
2585 	}
2586 
2587 	*count = data->count;
2588 out:
2589 	mutex_unlock(&con->recovery_lock);
2590 	return ret;
2591 }
2592 
2593 static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
2594 				   struct amdgpu_hive_info *hive, bool status)
2595 {
2596 	struct amdgpu_device *tmp_adev;
2597 
2598 	if (hive) {
2599 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
2600 			amdgpu_ras_set_fed(tmp_adev, status);
2601 	} else {
2602 		amdgpu_ras_set_fed(adev, status);
2603 	}
2604 }
2605 
2606 bool amdgpu_ras_in_recovery(struct amdgpu_device *adev)
2607 {
2608 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2609 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2610 	int hive_ras_recovery = 0;
2611 
2612 	if (hive) {
2613 		hive_ras_recovery = atomic_read(&hive->ras_recovery);
2614 		amdgpu_put_xgmi_hive(hive);
2615 	}
2616 
2617 	if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
2618 		return true;
2619 
2620 	return false;
2621 }
2622 
2623 static enum ras_event_type amdgpu_ras_get_fatal_error_event(struct amdgpu_device *adev)
2624 {
2625 	if (amdgpu_ras_intr_triggered())
2626 		return RAS_EVENT_TYPE_FATAL;
2627 	else
2628 		return RAS_EVENT_TYPE_POISON_CONSUMPTION;
2629 }
2630 
2631 static void amdgpu_ras_do_recovery(struct work_struct *work)
2632 {
2633 	struct amdgpu_ras *ras =
2634 		container_of(work, struct amdgpu_ras, recovery_work);
2635 	struct amdgpu_device *remote_adev = NULL;
2636 	struct amdgpu_device *adev = ras->adev;
2637 	struct list_head device_list, *device_list_handle =  NULL;
2638 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2639 	enum ras_event_type type;
2640 
2641 	if (hive) {
2642 		atomic_set(&hive->ras_recovery, 1);
2643 
2644 		/* If any device which is part of the hive received RAS fatal
2645 		 * error interrupt, set fatal error status on all. This
2646 		 * condition will need a recovery, and flag will be cleared
2647 		 * as part of recovery.
2648 		 */
2649 		list_for_each_entry(remote_adev, &hive->device_list,
2650 				    gmc.xgmi.head)
2651 			if (amdgpu_ras_get_fed_status(remote_adev)) {
2652 				amdgpu_ras_set_fed_all(adev, hive, true);
2653 				break;
2654 			}
2655 	}
2656 	if (!ras->disable_ras_err_cnt_harvest) {
2657 
2658 		/* Build list of devices to query RAS related errors */
2659 		if  (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
2660 			device_list_handle = &hive->device_list;
2661 		} else {
2662 			INIT_LIST_HEAD(&device_list);
2663 			list_add_tail(&adev->gmc.xgmi.head, &device_list);
2664 			device_list_handle = &device_list;
2665 		}
2666 
2667 		type = amdgpu_ras_get_fatal_error_event(adev);
2668 		list_for_each_entry(remote_adev,
2669 				device_list_handle, gmc.xgmi.head) {
2670 			amdgpu_ras_query_err_status(remote_adev);
2671 			amdgpu_ras_log_on_err_counter(remote_adev, type);
2672 		}
2673 
2674 	}
2675 
2676 	if (amdgpu_device_should_recover_gpu(ras->adev)) {
2677 		struct amdgpu_reset_context reset_context;
2678 		memset(&reset_context, 0, sizeof(reset_context));
2679 
2680 		reset_context.method = AMD_RESET_METHOD_NONE;
2681 		reset_context.reset_req_dev = adev;
2682 		reset_context.src = AMDGPU_RESET_SRC_RAS;
2683 		set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
2684 
2685 		/* Perform full reset in fatal error mode */
2686 		if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
2687 			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2688 		else {
2689 			clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2690 
2691 			if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
2692 				ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
2693 				reset_context.method = AMD_RESET_METHOD_MODE2;
2694 			}
2695 
2696 			/* Fatal error occurs in poison mode, mode1 reset is used to
2697 			 * recover gpu.
2698 			 */
2699 			if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
2700 				ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2701 				set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2702 
2703 				psp_fatal_error_recovery_quirk(&adev->psp);
2704 			}
2705 		}
2706 
2707 		amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2708 	}
2709 	atomic_set(&ras->in_recovery, 0);
2710 	if (hive) {
2711 		atomic_set(&hive->ras_recovery, 0);
2712 		amdgpu_put_xgmi_hive(hive);
2713 	}
2714 }
2715 
2716 /* alloc/realloc bps array */
2717 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2718 		struct ras_err_handler_data *data, int pages)
2719 {
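	/* Grow the bps array in 512-record granules so that repeated small
	 * retirements don't trigger a reallocation every time; existing
	 * records are copied over and the previous array is freed.
	 */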
2720 	unsigned int old_space = data->count + data->space_left;
2721 	unsigned int new_space = old_space + pages;
2722 	unsigned int align_space = ALIGN(new_space, 512);
2723 	void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
2724 
2725 	if (!bps) {
2726 		return -ENOMEM;
2727 	}
2728 
2729 	if (data->bps) {
2730 		memcpy(bps, data->bps,
2731 				data->count * sizeof(*data->bps));
2732 		kfree(data->bps);
2733 	}
2734 
2735 	data->bps = bps;
2736 	data->space_left += align_space - old_space;
2737 	return 0;
2738 }
2739 
2740 static int amdgpu_ras_mca2pa_by_idx(struct amdgpu_device *adev,
2741 			struct eeprom_table_record *bps,
2742 			struct ras_err_data *err_data)
2743 {
2744 	struct ta_ras_query_address_input addr_in;
2745 	uint32_t socket = 0;
2746 	int ret = 0;
2747 
2748 	if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
2749 		socket = adev->smuio.funcs->get_socket_id(adev);
2750 
2751 	/* reinit err_data */
2752 	err_data->err_addr_cnt = 0;
2753 	err_data->err_addr_len = adev->umc.retire_unit;
2754 
2755 	memset(&addr_in, 0, sizeof(addr_in));
2756 	addr_in.ma.err_addr = bps->address;
2757 	addr_in.ma.socket_id = socket;
2758 	addr_in.ma.ch_inst = bps->mem_channel;
2759 	/* tell RAS TA the node instance is not used */
2760 	addr_in.ma.node_inst = TA_RAS_INV_NODE;
2761 
2762 	if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
2763 		ret = adev->umc.ras->convert_ras_err_addr(adev, err_data,
2764 				&addr_in, NULL, false);
2765 
2766 	return ret;
2767 }
2768 
2769 static int amdgpu_ras_mca2pa(struct amdgpu_device *adev,
2770 			struct eeprom_table_record *bps,
2771 			struct ras_err_data *err_data)
2772 {
2773 	struct ta_ras_query_address_input addr_in;
2774 	uint32_t die_id, socket = 0;
2775 
2776 	if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
2777 		socket = adev->smuio.funcs->get_socket_id(adev);
2778 
2779 	/* although the die id is derived from the PA in nps1 mode, the id is
2780 	 * suitable for any nps mode
2781 	 */
2782 	if (adev->umc.ras && adev->umc.ras->get_die_id_from_pa)
2783 		die_id = adev->umc.ras->get_die_id_from_pa(adev, bps->address,
2784 					bps->retired_page << AMDGPU_GPU_PAGE_SHIFT);
2785 	else
2786 		return -EINVAL;
2787 
2788 	/* reinit err_data */
2789 	err_data->err_addr_cnt = 0;
2790 	err_data->err_addr_len = adev->umc.retire_unit;
2791 
2792 	memset(&addr_in, 0, sizeof(addr_in));
2793 	addr_in.ma.err_addr = bps->address;
2794 	addr_in.ma.ch_inst = bps->mem_channel;
2795 	addr_in.ma.umc_inst = bps->mcumc_id;
2796 	addr_in.ma.node_inst = die_id;
2797 	addr_in.ma.socket_id = socket;
2798 
2799 	if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
2800 		return adev->umc.ras->convert_ras_err_addr(adev, err_data,
2801 					&addr_in, NULL, false);
2802 	else
2803 		return  -EINVAL;
2804 }
2805 
2806 static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev,
2807 					struct eeprom_table_record *bps, int count)
2808 {
2809 	int j;
2810 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2811 	struct ras_err_handler_data *data = con->eh_data;
2812 
2813 	for (j = 0; j < count; j++) {
2814 		if (amdgpu_ras_check_bad_page_unlock(con,
2815 			bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2816 			continue;
2817 
2818 		if (!data->space_left &&
2819 		    amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2820 			return -ENOMEM;
2821 		}
2822 
2823 		amdgpu_ras_reserve_page(adev, bps[j].retired_page);
2824 
2825 		memcpy(&data->bps[data->count], &(bps[j]),
2826 				sizeof(struct eeprom_table_record));
2827 		data->count++;
2828 		data->space_left--;
2829 	}
2830 
2831 	return 0;
2832 }
2833 
2834 static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev,
2835 				struct eeprom_table_record *bps, struct ras_err_data *err_data,
2836 				enum amdgpu_memory_partition nps)
2837 {
2838 	int i = 0;
2839 	enum amdgpu_memory_partition save_nps;
2840 
2841 	save_nps = (bps[0].retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
2842 
2843 	/*old asics just have pa in eeprom*/
2844 	if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
2845 		memcpy(err_data->err_addr, bps,
2846 			sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
2847 		goto out;
2848 	}
2849 
2850 	for (i = 0; i < adev->umc.retire_unit; i++)
2851 		bps[i].retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
2852 
2853 	if (save_nps) {
2854 		if (save_nps == nps) {
2855 			if (amdgpu_umc_pages_in_a_row(adev, err_data,
2856 					bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2857 				return -EINVAL;
2858 		} else {
2859 			if (amdgpu_ras_mca2pa_by_idx(adev, &bps[0], err_data))
2860 				return -EINVAL;
2861 		}
2862 	} else {
2863 		if (amdgpu_ras_mca2pa(adev, &bps[0], err_data)) {
2864 			if (nps == AMDGPU_NPS1_PARTITION_MODE)
2865 				memcpy(err_data->err_addr, bps,
2866 					sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
2867 			else
2868 				return -EOPNOTSUPP;
2869 		}
2870 	}
2871 
2872 out:
2873 	return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr, adev->umc.retire_unit);
2874 }
2875 
2876 static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
2877 				struct eeprom_table_record *bps, struct ras_err_data *err_data,
2878 				enum amdgpu_memory_partition nps)
2879 {
2880 	enum amdgpu_memory_partition save_nps;
2881 
2882 	save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
2883 	bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
2884 
2885 	if (save_nps == nps) {
2886 		if (amdgpu_umc_pages_in_a_row(adev, err_data,
2887 				bps->retired_page << AMDGPU_GPU_PAGE_SHIFT))
2888 			return -EINVAL;
2889 	} else {
2890 		if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
2891 			return -EINVAL;
2892 	}
2893 	return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr,
2894 									adev->umc.retire_unit);
2895 }
2896 
2897 /* it deals with vram only. */
2898 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
2899 		struct eeprom_table_record *bps, int pages, bool from_rom)
2900 {
2901 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2902 	struct ras_err_data err_data;
2903 	struct amdgpu_ras_eeprom_control *control =
2904 			&adev->psp.ras_context.ras->eeprom_control;
2905 	enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
2906 	int ret = 0;
2907 	uint32_t i;
2908 
2909 	if (!con || !con->eh_data || !bps || pages <= 0)
2910 		return 0;
2911 
2912 	if (from_rom) {
2913 		err_data.err_addr =
2914 			kcalloc(adev->umc.retire_unit,
2915 				sizeof(struct eeprom_table_record), GFP_KERNEL);
2916 		if (!err_data.err_addr) {
2917 			dev_warn(adev->dev, "Failed to alloc UMC error address record in mca2pa conversion!\n");
2918 			return -ENOMEM;
2919 		}
2920 
2921 		if (adev->gmc.gmc_funcs->query_mem_partition_mode)
2922 			nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
2923 	}
2924 
2925 	mutex_lock(&con->recovery_lock);
2926 
2927 	if (from_rom) {
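		/*
		 * Records written per retire unit share the same MCA address
		 * and memory channel; when two consecutive records match,
		 * convert the whole unit in one call and skip ahead,
		 * otherwise fall back to converting records one by one below.
		 */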
2928 		for (i = 0; i < pages; i++) {
2929 			if (control->ras_num_recs - i >= adev->umc.retire_unit) {
2930 				if ((bps[i].address == bps[i + 1].address) &&
2931 				    (bps[i].mem_channel == bps[i + 1].mem_channel)) {
2932 					/* deal with retire_unit records at a time */
2933 					ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
2934 									&bps[i], &err_data, nps);
2935 					if (ret)
2936 						goto free;
2937 					i += (adev->umc.retire_unit - 1);
2938 				} else {
2939 					break;
2940 				}
2941 			} else {
2942 				break;
2943 			}
2944 		}
2945 		for (; i < pages; i++) {
2946 			ret = __amdgpu_ras_convert_rec_from_rom(adev,
2947 				&bps[i], &err_data, nps);
2948 			if (ret)
2949 				goto free;
2950 		}
2951 	} else {
2952 		ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
2953 	}
2954 
2955 free:
2956 	if (from_rom)
2957 		kfree(err_data.err_addr);
2958 	mutex_unlock(&con->recovery_lock);
2959 
2960 	return ret;
2961 }
2962 
2963 /*
2964  * write the error record array to eeprom; the function should be
2965  * protected by recovery_lock
2966  * new_cnt: newly added UE count, excluding reserved bad pages, can be NULL
2967  */
2968 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
2969 		unsigned long *new_cnt)
2970 {
2971 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2972 	struct ras_err_handler_data *data;
2973 	struct amdgpu_ras_eeprom_control *control;
2974 	int save_count, unit_num, bad_page_num, i;
2975 
2976 	if (!con || !con->eh_data) {
2977 		if (new_cnt)
2978 			*new_cnt = 0;
2979 
2980 		return 0;
2981 	}
2982 
2983 	mutex_lock(&con->recovery_lock);
2984 	control = &con->eeprom_control;
2985 	data = con->eh_data;
2986 	bad_page_num = control->ras_num_bad_pages;
2987 	save_count = data->count - bad_page_num;
2988 	mutex_unlock(&con->recovery_lock);
2989 
2990 	unit_num = save_count / adev->umc.retire_unit;
2991 	if (new_cnt)
2992 		*new_cnt = unit_num;
2993 
2994 	/* only new entries are saved */
2995 	if (save_count > 0) {
2996 		/* old asics only save the pa to eeprom, as before */
2997 		if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
2998 			if (amdgpu_ras_eeprom_append(control,
2999 					&data->bps[bad_page_num], save_count)) {
3000 				dev_err(adev->dev, "Failed to save EEPROM table data!");
3001 				return -EIO;
3002 			}
3003 		} else {
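			/*
			 * For newer asics each retire unit of pages is
			 * represented by a single record, so append one
			 * entry per unit rather than one per page.
			 */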
3004 			for (i = 0; i < unit_num; i++) {
3005 				if (amdgpu_ras_eeprom_append(control,
3006 						&data->bps[bad_page_num +
3007 						i * adev->umc.retire_unit], 1)) {
3008 					dev_err(adev->dev, "Failed to save EEPROM table data!");
3009 					return -EIO;
3010 				}
3011 			}
3012 		}
3013 
3014 		dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
3015 	}
3016 
3017 	return 0;
3018 }
3019 
3020 /*
3021  * read error record array in eeprom and reserve enough space for
3022  * storing new bad pages
3023  */
3024 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
3025 {
3026 	struct amdgpu_ras_eeprom_control *control =
3027 		&adev->psp.ras_context.ras->eeprom_control;
3028 	struct eeprom_table_record *bps;
3029 	int ret, i = 0;
3030 
3031 	/* no bad page record, skip eeprom access */
3032 	if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
3033 		return 0;
3034 
3035 	bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
3036 	if (!bps)
3037 		return -ENOMEM;
3038 
3039 	ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
3040 	if (ret) {
3041 		dev_err(adev->dev, "Failed to load EEPROM table records!");
3042 	} else {
3043 		if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
3044 			for (i = 0; i < control->ras_num_recs; i++) {
3045 				if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
3046 					if ((bps[i].address == bps[i + 1].address) &&
3047 						(bps[i].mem_channel == bps[i + 1].mem_channel)) {
3048 						control->ras_num_pa_recs += adev->umc.retire_unit;
3049 						i += (adev->umc.retire_unit - 1);
3050 					} else {
3051 						control->ras_num_mca_recs +=
3052 									(control->ras_num_recs - i);
3053 						break;
3054 					}
3055 				} else {
3056 					control->ras_num_mca_recs += (control->ras_num_recs - i);
3057 					break;
3058 				}
3059 			}
3060 		}
3061 
3062 		ret = amdgpu_ras_eeprom_check(control);
3063 		if (ret)
3064 			goto out;
3065 
3066 		/* HW not usable */
3067 		if (amdgpu_ras_is_rma(adev)) {
3068 			ret = -EHWPOISON;
3069 			goto out;
3070 		}
3071 
3072 		ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
3073 	}
3074 
3075 out:
3076 	kfree(bps);
3077 	return ret;
3078 }
3079 
3080 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
3081 				uint64_t addr)
3082 {
3083 	struct ras_err_handler_data *data = con->eh_data;
3084 	int i;
3085 
3086 	addr >>= AMDGPU_GPU_PAGE_SHIFT;
3087 	for (i = 0; i < data->count; i++)
3088 		if (addr == data->bps[i].retired_page)
3089 			return true;
3090 
3091 	return false;
3092 }
3093 
3094 /*
3095  * check if an address belongs to bad page
3096  *
3097  * Note: this check is only for umc block
3098  */
3099 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
3100 				uint64_t addr)
3101 {
3102 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3103 	bool ret = false;
3104 
3105 	if (!con || !con->eh_data)
3106 		return ret;
3107 
3108 	mutex_lock(&con->recovery_lock);
3109 	ret = amdgpu_ras_check_bad_page_unlock(con, addr);
3110 	mutex_unlock(&con->recovery_lock);
3111 	return ret;
3112 }
3113 
3114 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
3115 					  uint32_t max_count)
3116 {
3117 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3118 
3119 	/*
3120 	 * amdgpu_bad_page_threshold is used to configure
3121 	 * the threshold for the number of bad pages.
3122 	 * -1:  Threshold is set to the default value.
3123 	 *      Driver will issue a warning message when the threshold is reached
3124 	 *      and continue runtime services.
3125 	 * 0:   Disable bad page retirement.
3126 	 *      Driver will not retire bad pages,
3127 	 *      which is intended for debugging purposes.
3128 	 * -2:  Threshold is determined by a formula
3129 	 *      that assumes 1 bad page per 100M of local memory.
3130 	 *      Driver will continue runtime services when the threshold is reached.
3131 	 * 0 < threshold < max number of bad page records in EEPROM:
3132 	 *      A user-defined threshold is set.
3133 	 *      Driver will halt runtime services when this custom threshold is reached.
3134 	 */
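	/*
	 * Illustrative example: with the "-2" setting and 16G of local
	 * memory, the assumed rate of 1 bad page per 100M gives a default
	 * threshold of roughly 16384 / 100 ~= 163 records, clamped to
	 * max_count.
	 */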
3135 	if (amdgpu_bad_page_threshold == -2) {
3136 		u64 val = adev->gmc.mc_vram_size;
3137 
3138 		do_div(val, RAS_BAD_PAGE_COVER);
3139 		con->bad_page_cnt_threshold = min(lower_32_bits(val),
3140 						  max_count);
3141 	} else if (amdgpu_bad_page_threshold == -1) {
3142 		con->bad_page_cnt_threshold = ((con->reserved_pages_in_bytes) >> 21) << 4;
3143 	} else {
3144 		con->bad_page_cnt_threshold = min_t(int, max_count,
3145 						    amdgpu_bad_page_threshold);
3146 	}
3147 }
3148 
3149 int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
3150 		enum amdgpu_ras_block block, uint16_t pasid,
3151 		pasid_notify pasid_fn, void *data, uint32_t reset)
3152 {
3153 	int ret = 0;
3154 	struct ras_poison_msg poison_msg;
3155 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3156 
3157 	memset(&poison_msg, 0, sizeof(poison_msg));
3158 	poison_msg.block = block;
3159 	poison_msg.pasid = pasid;
3160 	poison_msg.reset = reset;
3161 	poison_msg.pasid_fn = pasid_fn;
3162 	poison_msg.data = data;
3163 
3164 	ret = kfifo_put(&con->poison_fifo, poison_msg);
3165 	if (!ret) {
3166 		dev_err(adev->dev, "Poison message fifo is full!\n");
3167 		return -ENOSPC;
3168 	}
3169 
3170 	return 0;
3171 }
3172 
3173 static int amdgpu_ras_get_poison_req(struct amdgpu_device *adev,
3174 		struct ras_poison_msg *poison_msg)
3175 {
3176 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3177 
3178 	return kfifo_get(&con->poison_fifo, poison_msg);
3179 }
3180 
3181 static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
3182 {
3183 	mutex_init(&ecc_log->lock);
3184 
3185 	INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
3186 	ecc_log->de_queried_count = 0;
3187 	ecc_log->prev_de_queried_count = 0;
3188 }
3189 
3190 static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
3191 {
3192 	struct radix_tree_iter iter;
3193 	void __rcu **slot;
3194 	struct ras_ecc_err *ecc_err;
3195 
3196 	mutex_lock(&ecc_log->lock);
3197 	radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) {
3198 		ecc_err = radix_tree_deref_slot(slot);
3199 		kfree(ecc_err->err_pages.pfn);
3200 		kfree(ecc_err);
3201 		radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot);
3202 	}
3203 	mutex_unlock(&ecc_log->lock);
3204 
3205 	mutex_destroy(&ecc_log->lock);
3206 	ecc_log->de_queried_count = 0;
3207 	ecc_log->prev_de_queried_count = 0;
3208 }
3209 
3210 static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con,
3211 				uint32_t delayed_ms)
3212 {
3213 	int ret;
3214 
3215 	mutex_lock(&con->umc_ecc_log.lock);
3216 	ret = radix_tree_tagged(&con->umc_ecc_log.de_page_tree,
3217 			UMC_ECC_NEW_DETECTED_TAG);
3218 	mutex_unlock(&con->umc_ecc_log.lock);
3219 
3220 	if (ret)
3221 		schedule_delayed_work(&con->page_retirement_dwork,
3222 			msecs_to_jiffies(delayed_ms));
3223 
3224 	return ret ? true : false;
3225 }
3226 
3227 static void amdgpu_ras_do_page_retirement(struct work_struct *work)
3228 {
3229 	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
3230 					      page_retirement_dwork.work);
3231 	struct amdgpu_device *adev = con->adev;
3232 	struct ras_err_data err_data;
3233 	unsigned long err_cnt;
3234 
3235 	/* If gpu reset is ongoing, delay retiring the bad pages */
3236 	if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
3237 		amdgpu_ras_schedule_retirement_dwork(con,
3238 				AMDGPU_RAS_RETIRE_PAGE_INTERVAL * 3);
3239 		return;
3240 	}
3241 
3242 	amdgpu_ras_error_data_init(&err_data);
3243 
3244 	amdgpu_umc_handle_bad_pages(adev, &err_data);
3245 	err_cnt = err_data.err_addr_cnt;
3246 
3247 	amdgpu_ras_error_data_fini(&err_data);
3248 
3249 	if (err_cnt && amdgpu_ras_is_rma(adev))
3250 		amdgpu_ras_reset_gpu(adev);
3251 
3252 	amdgpu_ras_schedule_retirement_dwork(con,
3253 			AMDGPU_RAS_RETIRE_PAGE_INTERVAL);
3254 }
3255 
3256 static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
3257 				uint32_t poison_creation_count)
3258 {
3259 	int ret = 0;
3260 	struct ras_ecc_log_info *ecc_log;
3261 	struct ras_query_if info;
3262 	uint32_t timeout = 0;
3263 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3264 	uint64_t de_queried_count;
3265 	uint32_t new_detect_count, total_detect_count;
3266 	uint32_t need_query_count = poison_creation_count;
3267 	bool query_data_timeout = false;
3268 	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
3269 
3270 	memset(&info, 0, sizeof(info));
3271 	info.head.block = AMDGPU_RAS_BLOCK__UMC;
3272 
3273 	ecc_log = &ras->umc_ecc_log;
3274 	total_detect_count = 0;
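	/*
	 * Poll until the number of newly queried deferred errors matches the
	 * number of poison creation interrupts we were asked to handle; if no
	 * new errors show up, count down a timeout of
	 * MAX_UMC_POISON_POLLING_TIME_ASYNC iterations (1 ms apart) before
	 * giving up.
	 */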
3275 	do {
3276 		ret = amdgpu_ras_query_error_status_with_event(adev, &info, type);
3277 		if (ret)
3278 			return ret;
3279 
3280 		de_queried_count = ecc_log->de_queried_count;
3281 		if (de_queried_count > ecc_log->prev_de_queried_count) {
3282 			new_detect_count = de_queried_count - ecc_log->prev_de_queried_count;
3283 			ecc_log->prev_de_queried_count = de_queried_count;
3284 			timeout = 0;
3285 		} else {
3286 			new_detect_count = 0;
3287 		}
3288 
3289 		if (new_detect_count) {
3290 			total_detect_count += new_detect_count;
3291 		} else {
3292 			if (!timeout && need_query_count)
3293 				timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
3294 
3295 			if (timeout) {
3296 				if (!--timeout) {
3297 					query_data_timeout = true;
3298 					break;
3299 				}
3300 				msleep(1);
3301 			}
3302 		}
3303 	} while (total_detect_count < need_query_count);
3304 
3305 	if (query_data_timeout) {
3306 		dev_warn(adev->dev, "Can't find deferred error! count: %u\n",
3307 			(need_query_count - total_detect_count));
3308 		return -ENOENT;
3309 	}
3310 
3311 	if (total_detect_count)
3312 		schedule_delayed_work(&ras->page_retirement_dwork, 0);
3313 
3314 	return 0;
3315 }
3316 
3317 static void amdgpu_ras_clear_poison_fifo(struct amdgpu_device *adev)
3318 {
3319 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3320 	struct ras_poison_msg msg;
3321 	int ret;
3322 
3323 	do {
3324 		ret = kfifo_get(&con->poison_fifo, &msg);
3325 	} while (ret);
3326 }
3327 
3328 static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
3329 			uint32_t msg_count, uint32_t *gpu_reset)
3330 {
3331 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3332 	uint32_t reset_flags = 0, reset = 0;
3333 	struct ras_poison_msg msg;
3334 	int ret, i;
3335 
3336 	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
3337 
3338 	for (i = 0; i < msg_count; i++) {
3339 		ret = amdgpu_ras_get_poison_req(adev, &msg);
3340 		if (!ret)
3341 			continue;
3342 
3343 		if (msg.pasid_fn)
3344 			msg.pasid_fn(adev, msg.pasid, msg.data);
3345 
3346 		reset_flags |= msg.reset;
3347 	}
3348 
3349 	/* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
3350 	if (reset_flags && !amdgpu_ras_is_rma(adev)) {
3351 		if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
3352 			reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
3353 		else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET)
3354 			reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
3355 		else
3356 			reset = reset_flags;
3357 
3358 		flush_delayed_work(&con->page_retirement_dwork);
3359 
3360 		con->gpu_reset_flags |= reset;
3361 		amdgpu_ras_reset_gpu(adev);
3362 
3363 		*gpu_reset = reset;
3364 
3365 		/* Wait for gpu recovery to complete */
3366 		flush_work(&con->recovery_work);
3367 	}
3368 
3369 	return 0;
3370 }
3371 
3372 static int amdgpu_ras_page_retirement_thread(void *param)
3373 {
3374 	struct amdgpu_device *adev = (struct amdgpu_device *)param;
3375 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3376 	uint32_t poison_creation_count, msg_count;
3377 	uint32_t gpu_reset;
3378 	int ret;
3379 
3380 	while (!kthread_should_stop()) {
3381 
3382 		wait_event_interruptible(con->page_retirement_wq,
3383 				kthread_should_stop() ||
3384 				atomic_read(&con->page_retirement_req_cnt));
3385 
3386 		if (kthread_should_stop())
3387 			break;
3388 
3389 		gpu_reset = 0;
3390 
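		/*
		 * Drain all pending poison creation requests (UMC deferred
		 * errors) first, then handle any queued poison consumption
		 * messages. When a mode-1 reset is involved (or the query
		 * returns -EIO), all pending requests are discarded below
		 * since the reset invalidates them.
		 */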
3391 		do {
3392 			poison_creation_count = atomic_read(&con->poison_creation_count);
3393 			ret = amdgpu_ras_poison_creation_handler(adev, poison_creation_count);
3394 			if (ret == -EIO)
3395 				break;
3396 
3397 			if (poison_creation_count) {
3398 				atomic_sub(poison_creation_count, &con->poison_creation_count);
3399 				atomic_sub(poison_creation_count, &con->page_retirement_req_cnt);
3400 			}
3401 		} while (atomic_read(&con->poison_creation_count));
3402 
3403 		if (ret != -EIO) {
3404 			msg_count = kfifo_len(&con->poison_fifo);
3405 			if (msg_count) {
3406 				ret = amdgpu_ras_poison_consumption_handler(adev,
3407 						msg_count, &gpu_reset);
3408 				if ((ret != -EIO) &&
3409 				    (gpu_reset != AMDGPU_RAS_GPU_RESET_MODE1_RESET))
3410 					atomic_sub(msg_count, &con->page_retirement_req_cnt);
3411 			}
3412 		}
3413 
3414 		if ((ret == -EIO) || (gpu_reset == AMDGPU_RAS_GPU_RESET_MODE1_RESET)) {
3415 			/* a gpu mode-1 reset is ongoing, or a ras mode-1 reset just completed */
3416 			/* Clear poison creation request */
3417 			atomic_set(&con->poison_creation_count, 0);
3418 
3419 			/* Clear poison fifo */
3420 			amdgpu_ras_clear_poison_fifo(adev);
3421 
3422 			/* Clear all poison requests */
3423 			atomic_set(&con->page_retirement_req_cnt, 0);
3424 
3425 			if (ret == -EIO) {
3426 				/* Wait for mode-1 reset to complete */
3427 				down_read(&adev->reset_domain->sem);
3428 				up_read(&adev->reset_domain->sem);
3429 			}
3430 
3431 			/* Wake up work to save bad pages to eeprom */
3432 			schedule_delayed_work(&con->page_retirement_dwork, 0);
3433 		} else if (gpu_reset) {
3434 			/* gpu just completed mode-2 reset or other reset */
3435 			/* Clear poison consumption messages cached in fifo */
3436 			msg_count = kfifo_len(&con->poison_fifo);
3437 			if (msg_count) {
3438 				amdgpu_ras_clear_poison_fifo(adev);
3439 				atomic_sub(msg_count, &con->page_retirement_req_cnt);
3440 			}
3441 
3442 			/* Wake up work to save bad pages to eeprom */
3443 			schedule_delayed_work(&con->page_retirement_dwork, 0);
3444 		}
3445 	}
3446 
3447 	return 0;
3448 }
3449 
3450 int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
3451 {
3452 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3453 	struct amdgpu_ras_eeprom_control *control;
3454 	int ret;
3455 
3456 	if (!con || amdgpu_sriov_vf(adev))
3457 		return 0;
3458 
3459 	control = &con->eeprom_control;
3460 	ret = amdgpu_ras_eeprom_init(control);
3461 	if (ret)
3462 		return ret;
3463 
3464 	if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
3465 		control->ras_num_pa_recs = control->ras_num_recs;
3466 
3467 	if (control->ras_num_recs) {
3468 		ret = amdgpu_ras_load_bad_pages(adev);
3469 		if (ret)
3470 			return ret;
3471 
3472 		amdgpu_dpm_send_hbm_bad_pages_num(
3473 			adev, control->ras_num_bad_pages);
3474 
3475 		if (con->update_channel_flag == true) {
3476 			amdgpu_dpm_send_hbm_bad_channel_flag(
3477 				adev, control->bad_channel_bitmap);
3478 			con->update_channel_flag = false;
3479 		}
3480 
3481 		/* The format action is only applied to new ASICs */
3482 		if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) >= 12 &&
3483 		    control->tbl_hdr.version < RAS_TABLE_VER_V3)
3484 			if (!amdgpu_ras_eeprom_reset_table(control))
3485 				if (amdgpu_ras_save_bad_pages(adev, NULL))
3486 					dev_warn(adev->dev, "Failed to format RAS EEPROM data in V3 version!\n");
3487 	}
3488 
3489 	return ret;
3490 }
3491 
3492 int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
3493 {
3494 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3495 	struct ras_err_handler_data **data;
3496 	u32  max_eeprom_records_count = 0;
3497 	int ret;
3498 
3499 	if (!con || amdgpu_sriov_vf(adev))
3500 		return 0;
3501 
3502 	/* Allow access to RAS EEPROM via debugfs, when the ASIC
3503 	 * supports RAS and debugfs is enabled, even when
3504 	 * adev->ras_enabled is unset, i.e. when "ras_enable"
3505 	 * module parameter is set to 0.
3506 	 */
3507 	con->adev = adev;
3508 
3509 	if (!adev->ras_enabled)
3510 		return 0;
3511 
3512 	data = &con->eh_data;
3513 	*data = kzalloc(sizeof(**data), GFP_KERNEL);
3514 	if (!*data) {
3515 		ret = -ENOMEM;
3516 		goto out;
3517 	}
3518 
3519 	mutex_init(&con->recovery_lock);
3520 	INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
3521 	atomic_set(&con->in_recovery, 0);
3522 	con->eeprom_control.bad_channel_bitmap = 0;
3523 
3524 	max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
3525 	amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
3526 
3527 	if (init_bp_info) {
3528 		ret = amdgpu_ras_init_badpage_info(adev);
3529 		if (ret)
3530 			goto free;
3531 	}
3532 
3533 	mutex_init(&con->page_rsv_lock);
3534 	INIT_KFIFO(con->poison_fifo);
3535 	mutex_init(&con->page_retirement_lock);
3536 	init_waitqueue_head(&con->page_retirement_wq);
3537 	atomic_set(&con->page_retirement_req_cnt, 0);
3538 	atomic_set(&con->poison_creation_count, 0);
3539 	con->page_retirement_thread =
3540 		kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
3541 	if (IS_ERR(con->page_retirement_thread)) {
3542 		con->page_retirement_thread = NULL;
3543 		dev_warn(adev->dev, "Failed to create umc_page_retirement thread!!!\n");
3544 	}
3545 
3546 	INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement);
3547 	amdgpu_ras_ecc_log_init(&con->umc_ecc_log);
3548 #ifdef CONFIG_X86_MCE_AMD
3549 	if ((adev->asic_type == CHIP_ALDEBARAN) &&
3550 	    (adev->gmc.xgmi.connected_to_cpu))
3551 		amdgpu_register_bad_pages_mca_notifier(adev);
3552 #endif
3553 	return 0;
3554 
3555 free:
3556 	kfree((*data)->bps);
3557 	kfree(*data);
3558 	con->eh_data = NULL;
3559 out:
3560 	dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
3561 
3562 	/*
3563 	 * Except for the error-threshold-exceeded (RMA) case, failures in this
3564 	 * function do not fail amdgpu driver init.
3565 	 */
3566 	if (!amdgpu_ras_is_rma(adev))
3567 		ret = 0;
3568 	else
3569 		ret = -EINVAL;
3570 
3571 	return ret;
3572 }
3573 
3574 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
3575 {
3576 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3577 	struct ras_err_handler_data *data = con->eh_data;
3578 	int max_flush_timeout = MAX_FLUSH_RETIRE_DWORK_TIMES;
3579 	bool ret;
3580 
3581 	/* recovery_init failed to init it, so there is nothing to fini */
3582 	if (!data)
3583 		return 0;
3584 
3585 	/* Save all cached bad pages to eeprom */
3586 	do {
3587 		flush_delayed_work(&con->page_retirement_dwork);
3588 		ret = amdgpu_ras_schedule_retirement_dwork(con, 0);
3589 	} while (ret && max_flush_timeout--);
3590 
3591 	if (con->page_retirement_thread)
3592 		kthread_stop(con->page_retirement_thread);
3593 
3594 	atomic_set(&con->page_retirement_req_cnt, 0);
3595 	atomic_set(&con->poison_creation_count, 0);
3596 
3597 	mutex_destroy(&con->page_rsv_lock);
3598 
3599 	cancel_work_sync(&con->recovery_work);
3600 
3601 	cancel_delayed_work_sync(&con->page_retirement_dwork);
3602 
3603 	amdgpu_ras_ecc_log_fini(&con->umc_ecc_log);
3604 
3605 	mutex_lock(&con->recovery_lock);
3606 	con->eh_data = NULL;
3607 	kfree(data->bps);
3608 	kfree(data);
3609 	mutex_unlock(&con->recovery_lock);
3610 
3611 	return 0;
3612 }
3613 /* recovery end */
3614 
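/* Check whether RAS is supported on this ASIC, for bare metal and SRIOV */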
3615 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
3616 {
3617 	if (amdgpu_sriov_vf(adev)) {
3618 		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3619 		case IP_VERSION(13, 0, 2):
3620 		case IP_VERSION(13, 0, 6):
3621 		case IP_VERSION(13, 0, 12):
3622 		case IP_VERSION(13, 0, 14):
3623 			return true;
3624 		default:
3625 			return false;
3626 		}
3627 	}
3628 
3629 	if (adev->asic_type == CHIP_IP_DISCOVERY) {
3630 		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3631 		case IP_VERSION(13, 0, 0):
3632 		case IP_VERSION(13, 0, 6):
3633 		case IP_VERSION(13, 0, 10):
3634 		case IP_VERSION(13, 0, 12):
3635 		case IP_VERSION(13, 0, 14):
3636 		case IP_VERSION(14, 0, 3):
3637 			return true;
3638 		default:
3639 			return false;
3640 		}
3641 	}
3642 
3643 	return adev->asic_type == CHIP_VEGA10 ||
3644 		adev->asic_type == CHIP_VEGA20 ||
3645 		adev->asic_type == CHIP_ARCTURUS ||
3646 		adev->asic_type == CHIP_ALDEBARAN ||
3647 		adev->asic_type == CHIP_SIENNA_CICHLID;
3648 }
3649 
3650 /*
3651  * This is a workaround for the vega20 workstation SKU:
3652  * force enable gfx RAS and ignore the vbios gfx RAS flag,
3653  * because GC EDC cannot be written.
3654  */
3655 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
3656 {
3657 	struct atom_context *ctx = adev->mode_info.atom_context;
3658 
3659 	if (!ctx)
3660 		return;
3661 
3662 	if (strnstr(ctx->vbios_pn, "D16406",
3663 		    sizeof(ctx->vbios_pn)) ||
3664 		strnstr(ctx->vbios_pn, "D36002",
3665 			sizeof(ctx->vbios_pn)))
3666 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
3667 }
3668 
3669 /* Query ras capability via atomfirmware interface */
3670 static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev)
3671 {
3672 	/* mem_ecc cap */
3673 	if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
3674 		dev_info(adev->dev, "MEM ECC is active.\n");
3675 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
3676 					 1 << AMDGPU_RAS_BLOCK__DF);
3677 	} else {
3678 		dev_info(adev->dev, "MEM ECC is not present.\n");
3679 	}
3680 
3681 	/* sram_ecc cap */
3682 	if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
3683 		dev_info(adev->dev, "SRAM ECC is active.\n");
3684 		if (!amdgpu_sriov_vf(adev))
3685 			adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
3686 						  1 << AMDGPU_RAS_BLOCK__DF);
3687 		else
3688 			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
3689 						 1 << AMDGPU_RAS_BLOCK__SDMA |
3690 						 1 << AMDGPU_RAS_BLOCK__GFX);
3691 
3692 		/*
3693 		 * VCN/JPEG RAS can be supported on both bare metal and
3694 		 * SRIOV environments
3695 		 */
3696 		if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
3697 		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
3698 		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
3699 			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
3700 						 1 << AMDGPU_RAS_BLOCK__JPEG);
3701 		else
3702 			adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
3703 						  1 << AMDGPU_RAS_BLOCK__JPEG);
3704 
3705 		/*
3706 		 * XGMI RAS is not supported if xgmi num physical nodes
3707 		 * is zero
3708 		 */
3709 		if (!adev->gmc.xgmi.num_physical_nodes)
3710 			adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
3711 	} else {
3712 		dev_info(adev->dev, "SRAM ECC is not present.\n");
3713 	}
3714 }
3715 
3716 /* Query poison mode from umc/df IP callbacks */
3717 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
3718 {
3719 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3720 	bool df_poison, umc_poison;
3721 
3722 	/* poison setting is useless on SRIOV guest */
3723 	if (amdgpu_sriov_vf(adev) || !con)
3724 		return;
3725 
3726 	/* Init poison supported flag, the default value is false */
3727 	if (adev->gmc.xgmi.connected_to_cpu ||
3728 	    adev->gmc.is_app_apu) {
3729 		/* enabled by default when GPU is connected to CPU */
3730 		con->poison_supported = true;
3731 	} else if (adev->df.funcs &&
3732 	    adev->df.funcs->query_ras_poison_mode &&
3733 	    adev->umc.ras &&
3734 	    adev->umc.ras->query_ras_poison_mode) {
3735 		df_poison =
3736 			adev->df.funcs->query_ras_poison_mode(adev);
3737 		umc_poison =
3738 			adev->umc.ras->query_ras_poison_mode(adev);
3739 
3740 		/* Only if poison is set in both DF and UMC can we support it */
3741 		if (df_poison && umc_poison)
3742 			con->poison_supported = true;
3743 		else if (df_poison != umc_poison)
3744 			dev_warn(adev->dev,
3745 				"Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
3746 				df_poison, umc_poison);
3747 	}
3748 }
3749 
3750 /*
3751  * Check the hardware's RAS capability, which is saved in hw_supported.
3752  * If the hardware does not support RAS, we can skip some RAS initialization
3753  * and forbid some RAS operations from the IP blocks.
3754  * If software itself (say, a boot parameter) limits the RAS capability, we
3755  * still need to allow the IP blocks to do some limited operations, like
3756  * disable. In such a case we have to initialize RAS as normal, but each
3757  * function needs to check whether the operation is allowed.
3758  */
3759 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
3760 {
3761 	adev->ras_hw_enabled = adev->ras_enabled = 0;
3762 
3763 	if (!amdgpu_ras_asic_supported(adev))
3764 		return;
3765 
3766 	if (amdgpu_sriov_vf(adev)) {
3767 		if (amdgpu_virt_get_ras_capability(adev))
3768 			goto init_ras_enabled_flag;
3769 	}
3770 
3771 	/* query ras capability from psp */
3772 	if (amdgpu_psp_get_ras_capability(&adev->psp))
3773 		goto init_ras_enabled_flag;
3774 
3775 	/* query ras capability from vbios */
3776 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
3777 		amdgpu_ras_query_ras_capablity_from_vbios(adev);
3778 	} else {
3779 		/* the driver only manages the RAS feature of a few IP blocks
3780 		 * when the GPU is connected to the CPU through XGMI */
3781 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
3782 					   1 << AMDGPU_RAS_BLOCK__SDMA |
3783 					   1 << AMDGPU_RAS_BLOCK__MMHUB);
3784 	}
3785 
3786 	/* apply asic specific settings (vega20 only for now) */
3787 	amdgpu_ras_get_quirks(adev);
3788 
3789 	/* query poison mode from umc/df ip callback */
3790 	amdgpu_ras_query_poison_mode(adev);
3791 
3792 init_ras_enabled_flag:
3793 	/* hw_supported needs to be aligned with RAS block mask. */
3794 	adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
3795 
3796 	adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
3797 		adev->ras_hw_enabled & amdgpu_ras_mask;
3798 
3799 	/* aca is disabled by default except for psp v13_0_6/v13_0_12/v13_0_14 */
3800 	if (!amdgpu_sriov_vf(adev)) {
3801 		adev->aca.is_enabled =
3802 			(amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
3803 			amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
3804 			amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14));
3805 	}
3806 
3807 	/* bad page feature is not applicable to specific app platform */
3808 	if (adev->gmc.is_app_apu &&
3809 	    amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(12, 0, 0))
3810 		amdgpu_bad_page_threshold = 0;
3811 }
3812 
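/*
 * Delayed work that refreshes the cached CE/UE error counts (ras_ce_count /
 * ras_ue_count) while holding a runtime PM reference on the device.
 */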
3813 static void amdgpu_ras_counte_dw(struct work_struct *work)
3814 {
3815 	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
3816 					      ras_counte_delay_work.work);
3817 	struct amdgpu_device *adev = con->adev;
3818 	struct drm_device *dev = adev_to_drm(adev);
3819 	unsigned long ce_count, ue_count;
3820 	int res;
3821 
3822 	res = pm_runtime_get_sync(dev->dev);
3823 	if (res < 0)
3824 		goto Out;
3825 
3826 	/* Cache new values.
3827 	 */
3828 	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
3829 		atomic_set(&con->ras_ce_count, ce_count);
3830 		atomic_set(&con->ras_ue_count, ue_count);
3831 	}
3832 
3833 	pm_runtime_mark_last_busy(dev->dev);
3834 Out:
3835 	pm_runtime_put_autosuspend(dev->dev);
3836 }
3837 
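/*
 * Build the RAS error-type schema for this SOC: parity, correctable and
 * uncorrectable errors are always advertised, and poison is added when
 * poison mode is supported.
 */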
3838 static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
3839 {
3840 	return (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) |
3841 			AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
3842 			AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
3843 			AMDGPU_RAS_ERROR__PARITY;
3844 }
3845 
3846 static void ras_event_mgr_init(struct ras_event_manager *mgr)
3847 {
3848 	struct ras_event_state *event_state;
3849 	int i;
3850 
3851 	memset(mgr, 0, sizeof(*mgr));
3852 	atomic64_set(&mgr->seqno, 0);
3853 
3854 	for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) {
3855 		event_state = &mgr->event_state[i];
3856 		event_state->last_seqno = RAS_EVENT_INVALID_ID;
3857 		atomic64_set(&event_state->count, 0);
3858 	}
3859 }
3860 
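/*
 * Pick the RAS event manager: the hive-wide one on an XGMI system, otherwise
 * the per-device one. It is (re)initialized only outside of reset recovery,
 * and only by node 0 when a hive is present.
 */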
3861 static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev)
3862 {
3863 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3864 	struct amdgpu_hive_info *hive;
3865 
3866 	if (!ras)
3867 		return;
3868 
3869 	hive = amdgpu_get_xgmi_hive(adev);
3870 	ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr;
3871 
3872 	/* init event manager with node 0 on xgmi system */
3873 	if (!amdgpu_reset_in_recovery(adev)) {
3874 		if (!hive || adev->gmc.xgmi.node_id == 0)
3875 			ras_event_mgr_init(ras->event_mgr);
3876 	}
3877 
3878 	if (hive)
3879 		amdgpu_put_xgmi_hive(hive);
3880 }
3881 
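/*
 * Select how much VRAM to reserve for RAS based on the MP0 IP version;
 * APUs and unhandled versions leave reserved_pages_in_bytes unchanged.
 */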
3882 static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev)
3883 {
3884 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3885 
3886 	if (!con || (adev->flags & AMD_IS_APU))
3887 		return;
3888 
3889 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3890 	case IP_VERSION(13, 0, 2):
3891 	case IP_VERSION(13, 0, 6):
3892 	case IP_VERSION(13, 0, 12):
3893 		con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT;
3894 		break;
3895 	case IP_VERSION(13, 0, 14):
3896 		con->reserved_pages_in_bytes = (AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT << 1);
3897 		break;
3898 	default:
3899 		break;
3900 	}
3901 }
3902 
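/*
 * Top-level RAS init: allocate the per-device RAS context, detect hardware
 * RAS support, hook up the nbio RAS fatal error interrupts, create the RAS
 * fs nodes and initialize ACA or MCA where supported.
 */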
3903 int amdgpu_ras_init(struct amdgpu_device *adev)
3904 {
3905 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3906 	int r;
3907 
3908 	if (con)
3909 		return 0;
3910 
3911 	con = kzalloc(sizeof(*con) +
3912 			sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
3913 			sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
3914 			GFP_KERNEL);
3915 	if (!con)
3916 		return -ENOMEM;
3917 
3918 	con->adev = adev;
3919 	INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
3920 	atomic_set(&con->ras_ce_count, 0);
3921 	atomic_set(&con->ras_ue_count, 0);
3922 
3923 	con->objs = (struct ras_manager *)(con + 1);
3924 
3925 	amdgpu_ras_set_context(adev, con);
3926 
3927 	amdgpu_ras_check_supported(adev);
3928 
3929 	if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
3930 		/* Set the gfx block ras context feature for VEGA20 Gaming,
3931 		 * so a ras disable cmd is sent to the ras TA during ras late init.
3932 		 */
3933 		if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
3934 			con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
3935 
3936 			return 0;
3937 		}
3938 
3939 		r = 0;
3940 		goto release_con;
3941 	}
3942 
3943 	con->update_channel_flag = false;
3944 	con->features = 0;
3945 	con->schema = 0;
3946 	INIT_LIST_HEAD(&con->head);
3947 	/* Might need to get this flag from vbios. */
3948 	con->flags = RAS_DEFAULT_FLAGS;
3949 
3950 	/* initialize the nbio ras function ahead of any other
3951 	 * ras functions, so the hardware fatal error interrupt
3952 	 * can be enabled as early as possible */
3953 	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
3954 	case IP_VERSION(7, 4, 0):
3955 	case IP_VERSION(7, 4, 1):
3956 	case IP_VERSION(7, 4, 4):
3957 		if (!adev->gmc.xgmi.connected_to_cpu)
3958 			adev->nbio.ras = &nbio_v7_4_ras;
3959 		break;
3960 	case IP_VERSION(4, 3, 0):
3961 		if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
3962 			/* unlike other generations of nbio ras,
3963 			 * nbio v4_3 only supports the fatal error interrupt,
3964 			 * to inform software that DF is frozen due to a
3965 			 * system fatal error event. The driver should not
3966 			 * enable nbio ras in such a case. Instead,
3967 			 * check DF RAS */
3968 			adev->nbio.ras = &nbio_v4_3_ras;
3969 		break;
3970 	case IP_VERSION(6, 3, 1):
3971 		if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
3972 			/* unlike other generations of nbio ras,
3973 			 * nbif v6_3_1 only supports the fatal error interrupt,
3974 			 * to inform software that DF is frozen due to a
3975 			 * system fatal error event. The driver should not
3976 			 * enable nbio ras in such a case. Instead,
3977 			 * check DF RAS
3978 			 */
3979 			adev->nbio.ras = &nbif_v6_3_1_ras;
3980 		break;
3981 	case IP_VERSION(7, 9, 0):
3982 	case IP_VERSION(7, 9, 1):
3983 		if (!adev->gmc.is_app_apu)
3984 			adev->nbio.ras = &nbio_v7_9_ras;
3985 		break;
3986 	default:
3987 		/* nbio ras is not available */
3988 		break;
3989 	}
3990 
3991 	/* the nbio ras block needs to be enabled ahead of other ras blocks
3992 	 * to handle fatal errors */
3993 	r = amdgpu_nbio_ras_sw_init(adev);
3994 	if (r)
3995 		return r;
3996 
3997 	if (adev->nbio.ras &&
3998 	    adev->nbio.ras->init_ras_controller_interrupt) {
3999 		r = adev->nbio.ras->init_ras_controller_interrupt(adev);
4000 		if (r)
4001 			goto release_con;
4002 	}
4003 
4004 	if (adev->nbio.ras &&
4005 	    adev->nbio.ras->init_ras_err_event_athub_interrupt) {
4006 		r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
4007 		if (r)
4008 			goto release_con;
4009 	}
4010 
4011 	/* Pack the socket_id into ras feature mask bits [31:29] */
4012 	if (adev->smuio.funcs &&
4013 	    adev->smuio.funcs->get_socket_id)
4014 		con->features |= ((adev->smuio.funcs->get_socket_id(adev)) <<
4015 					AMDGPU_RAS_FEATURES_SOCKETID_SHIFT);
4016 
4017 	/* Get RAS schema for particular SOC */
4018 	con->schema = amdgpu_get_ras_schema(adev);
4019 
4020 	amdgpu_ras_init_reserved_vram_size(adev);
4021 
4022 	if (amdgpu_ras_fs_init(adev)) {
4023 		r = -EINVAL;
4024 		goto release_con;
4025 	}
4026 
4027 	if (amdgpu_ras_aca_is_supported(adev)) {
4028 		if (amdgpu_aca_is_enabled(adev))
4029 			r = amdgpu_aca_init(adev);
4030 		else
4031 			r = amdgpu_mca_init(adev);
4032 		if (r)
4033 			goto release_con;
4034 	}
4035 
4036 	dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
4037 		 "hardware ability[%x] ras_mask[%x]\n",
4038 		 adev->ras_hw_enabled, adev->ras_enabled);
4039 
4040 	return 0;
4041 release_con:
4042 	amdgpu_ras_set_context(adev, NULL);
4043 	kfree(con);
4044 
4045 	return r;
4046 }
4047 
4048 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
4049 {
4050 	if (adev->gmc.xgmi.connected_to_cpu ||
4051 	    adev->gmc.is_app_apu)
4052 		return 1;
4053 	return 0;
4054 }
4055 
4056 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
4057 					struct ras_common_if *ras_block)
4058 {
4059 	struct ras_query_if info = {
4060 		.head = *ras_block,
4061 	};
4062 
4063 	if (!amdgpu_persistent_edc_harvesting_supported(adev))
4064 		return 0;
4065 
4066 	if (amdgpu_ras_query_error_status(adev, &info) != 0)
4067 		DRM_WARN("RAS init harvest failure");
4068 
4069 	if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
4070 		DRM_WARN("RAS init harvest reset failure");
4071 
4072 	return 0;
4073 }
4074 
4075 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
4076 {
4077 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4078 
4079 	if (!con)
4080 		return false;
4081 
4082 	return con->poison_supported;
4083 }
4084 
4085 /* helper function to handle common stuff in ip late init phase */
4086 int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
4087 			 struct ras_common_if *ras_block)
4088 {
4089 	struct amdgpu_ras_block_object *ras_obj = NULL;
4090 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4091 	struct ras_query_if *query_info;
4092 	unsigned long ue_count, ce_count;
4093 	int r;
4094 
4095 	/* disable RAS feature per IP block if it is not supported */
4096 	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
4097 		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
4098 		return 0;
4099 	}
4100 
4101 	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
4102 	if (r) {
4103 		if (adev->in_suspend || amdgpu_reset_in_recovery(adev)) {
4104 			/* in the resume phase, if we fail to enable ras,
4105 			 * clean up all ras fs nodes and disable ras */
4106 			goto cleanup;
4107 		} else
4108 			return r;
4109 	}
4110 
4111 	/* check for errors on ASICs that support persistent EDC harvesting across a warm reset */
4112 	amdgpu_persistent_edc_harvesting(adev, ras_block);
4113 
4114 	/* in resume phase, no need to create ras fs node */
4115 	if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
4116 		return 0;
4117 
4118 	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
4119 	if (ras_obj->ras_cb || (ras_obj->hw_ops &&
4120 	    (ras_obj->hw_ops->query_poison_status ||
4121 	    ras_obj->hw_ops->handle_poison_consumption))) {
4122 		r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
4123 		if (r)
4124 			goto cleanup;
4125 	}
4126 
4127 	if (ras_obj->hw_ops &&
4128 	    (ras_obj->hw_ops->query_ras_error_count ||
4129 	     ras_obj->hw_ops->query_ras_error_status)) {
4130 		r = amdgpu_ras_sysfs_create(adev, ras_block);
4131 		if (r)
4132 			goto interrupt;
4133 
4134 		/* Those are the cached values at init.
4135 		 */
4136 		query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
4137 		if (!query_info)
4138 			return -ENOMEM;
4139 		memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
4140 
4141 		if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
4142 			atomic_set(&con->ras_ce_count, ce_count);
4143 			atomic_set(&con->ras_ue_count, ue_count);
4144 		}
4145 
4146 		kfree(query_info);
4147 	}
4148 
4149 	return 0;
4150 
4151 interrupt:
4152 	if (ras_obj->ras_cb)
4153 		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
4154 cleanup:
4155 	amdgpu_ras_feature_enable(adev, ras_block, 0);
4156 	return r;
4157 }
4158 
4159 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
4160 			 struct ras_common_if *ras_block)
4161 {
4162 	return amdgpu_ras_block_late_init(adev, ras_block);
4163 }
4164 
4165 /* helper function to remove ras fs node and interrupt handler */
4166 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
4167 			  struct ras_common_if *ras_block)
4168 {
4169 	struct amdgpu_ras_block_object *ras_obj;
4170 	if (!ras_block)
4171 		return;
4172 
4173 	amdgpu_ras_sysfs_remove(adev, ras_block);
4174 
4175 	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
4176 	if (ras_obj->ras_cb)
4177 		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
4178 }
4179 
4180 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
4181 			  struct ras_common_if *ras_block)
4182 {
4183 	return amdgpu_ras_block_late_fini(adev, ras_block);
4184 }
4185 
4186 /* Do some init work after IP late init, as a dependency.
4187  * It runs in the resume, gpu reset and boot-up cases.
4188  */
4189 void amdgpu_ras_resume(struct amdgpu_device *adev)
4190 {
4191 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4192 	struct ras_manager *obj, *tmp;
4193 
4194 	if (!adev->ras_enabled || !con) {
4195 		/* clean ras context for VEGA20 Gaming after send ras disable cmd */
4196 		amdgpu_release_ras_context(adev);
4197 
4198 		return;
4199 	}
4200 
4201 	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
4202 		/* Set up all other IPs which are not implemented. The tricky
4203 		 * part is that an IP's actual ras error type should be
4204 		 * MULTI_UNCORRECTABLE, but since the driver does not handle it,
4205 		 * ERROR_NONE makes sense anyway.
4206 		 */
4207 		amdgpu_ras_enable_all_features(adev, 1);
4208 
4209 		/* We enable ras on all hw_supported blocks, but a boot
4210 		 * parameter might disable some of them and one or more IPs may
4211 		 * not be implemented yet. So we disable those on their behalf.
4212 		 */
4213 		list_for_each_entry_safe(obj, tmp, &con->head, node) {
4214 			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
4215 				amdgpu_ras_feature_enable(adev, &obj->head, 0);
4216 				/* there should not be any reference. */
4217 				WARN_ON(alive_obj(obj));
4218 			}
4219 		}
4220 	}
4221 }
4222 
4223 void amdgpu_ras_suspend(struct amdgpu_device *adev)
4224 {
4225 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4226 
4227 	if (!adev->ras_enabled || !con)
4228 		return;
4229 
4230 	amdgpu_ras_disable_all_features(adev, 0);
4231 	/* Make sure all ras objects are disabled. */
4232 	if (AMDGPU_RAS_GET_FEATURES(con->features))
4233 		amdgpu_ras_disable_all_features(adev, 1);
4234 }
4235 
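/*
 * RAS late init: set up the event manager, reset ACA/MCA state when coming
 * back from a GPU reset, and run each registered RAS block's late init
 * callback (or the default one).
 */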
4236 int amdgpu_ras_late_init(struct amdgpu_device *adev)
4237 {
4238 	struct amdgpu_ras_block_list *node, *tmp;
4239 	struct amdgpu_ras_block_object *obj;
4240 	int r;
4241 
4242 	amdgpu_ras_event_mgr_init(adev);
4243 
4244 	if (amdgpu_ras_aca_is_supported(adev)) {
4245 		if (amdgpu_reset_in_recovery(adev)) {
4246 			if (amdgpu_aca_is_enabled(adev))
4247 				r = amdgpu_aca_reset(adev);
4248 			else
4249 				r = amdgpu_mca_reset(adev);
4250 			if (r)
4251 				return r;
4252 		}
4253 
4254 		if (!amdgpu_sriov_vf(adev)) {
4255 			if (amdgpu_aca_is_enabled(adev))
4256 				amdgpu_ras_set_aca_debug_mode(adev, false);
4257 			else
4258 				amdgpu_ras_set_mca_debug_mode(adev, false);
4259 		}
4260 	}
4261 
4262 	/* The guest side doesn't need to init the ras feature */
4263 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_ras_telemetry_en(adev))
4264 		return 0;
4265 
4266 	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
4267 		obj = node->ras_obj;
4268 		if (!obj) {
4269 			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
4270 			continue;
4271 		}
4272 
4273 		if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block))
4274 			continue;
4275 
4276 		if (obj->ras_late_init) {
4277 			r = obj->ras_late_init(adev, &obj->ras_comm);
4278 			if (r) {
4279 				dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
4280 					obj->ras_comm.name, r);
4281 				return r;
4282 			}
4283 		} else
4284 			amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
4285 	}
4286 
4287 	return 0;
4288 }
4289 
4290 /* do some fini work before IP fini as dependence */
4291 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
4292 {
4293 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4294 
4295 	if (!adev->ras_enabled || !con)
4296 		return 0;
4297 
4298 
4299 	/* Need disable ras on all IPs here before ip [hw/sw]fini */
4300 	if (AMDGPU_RAS_GET_FEATURES(con->features))
4301 		amdgpu_ras_disable_all_features(adev, 0);
4302 	amdgpu_ras_recovery_fini(adev);
4303 	return 0;
4304 }
4305 
4306 int amdgpu_ras_fini(struct amdgpu_device *adev)
4307 {
4308 	struct amdgpu_ras_block_list *ras_node, *tmp;
4309 	struct amdgpu_ras_block_object *obj = NULL;
4310 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4311 
4312 	if (!adev->ras_enabled || !con)
4313 		return 0;
4314 
4315 	list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
4316 		if (ras_node->ras_obj) {
4317 			obj = ras_node->ras_obj;
4318 			if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
4319 			    obj->ras_fini)
4320 				obj->ras_fini(adev, &obj->ras_comm);
4321 			else
4322 				amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
4323 		}
4324 
4325 		/* Clear ras blocks from ras_list and free ras block list node */
4326 		list_del(&ras_node->node);
4327 		kfree(ras_node);
4328 	}
4329 
4330 	amdgpu_ras_fs_fini(adev);
4331 	amdgpu_ras_interrupt_remove_all(adev);
4332 
4333 	if (amdgpu_ras_aca_is_supported(adev)) {
4334 		if (amdgpu_aca_is_enabled(adev))
4335 			amdgpu_aca_fini(adev);
4336 		else
4337 			amdgpu_mca_fini(adev);
4338 	}
4339 
4340 	WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");
4341 
4342 	if (AMDGPU_RAS_GET_FEATURES(con->features))
4343 		amdgpu_ras_disable_all_features(adev, 0);
4344 
4345 	cancel_delayed_work_sync(&con->ras_counte_delay_work);
4346 
4347 	amdgpu_ras_set_context(adev, NULL);
4348 	kfree(con);
4349 
4350 	return 0;
4351 }
4352 
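/*
 * The fatal error detected (FED) status is tracked with the reserved
 * AMDGPU_RAS_BLOCK__LAST bit in ras_err_state; the per-block bits below
 * track poison errors for individual blocks.
 */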
4353 bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
4354 {
4355 	struct amdgpu_ras *ras;
4356 
4357 	ras = amdgpu_ras_get_context(adev);
4358 	if (!ras)
4359 		return false;
4360 
4361 	return test_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
4362 }
4363 
4364 void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
4365 {
4366 	struct amdgpu_ras *ras;
4367 
4368 	ras = amdgpu_ras_get_context(adev);
4369 	if (ras) {
4370 		if (status)
4371 			set_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
4372 		else
4373 			clear_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
4374 	}
4375 }
4376 
4377 void amdgpu_ras_clear_err_state(struct amdgpu_device *adev)
4378 {
4379 	struct amdgpu_ras *ras;
4380 
4381 	ras = amdgpu_ras_get_context(adev);
4382 	if (ras)
4383 		ras->ras_err_state = 0;
4384 }
4385 
4386 void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
4387 			       enum amdgpu_ras_block block)
4388 {
4389 	struct amdgpu_ras *ras;
4390 
4391 	ras = amdgpu_ras_get_context(adev);
4392 	if (ras)
4393 		set_bit(block, &ras->ras_err_state);
4394 }
4395 
4396 bool amdgpu_ras_is_err_state(struct amdgpu_device *adev, int block)
4397 {
4398 	struct amdgpu_ras *ras;
4399 
4400 	ras = amdgpu_ras_get_context(adev);
4401 	if (ras) {
4402 		if (block == AMDGPU_RAS_BLOCK__ANY)
4403 			return (ras->ras_err_state != 0);
4404 		else
4405 			return test_bit(block, &ras->ras_err_state) ||
4406 			       test_bit(AMDGPU_RAS_BLOCK__LAST,
4407 					&ras->ras_err_state);
4408 	}
4409 
4410 	return false;
4411 }
4412 
4413 static struct ras_event_manager *__get_ras_event_mgr(struct amdgpu_device *adev)
4414 {
4415 	struct amdgpu_ras *ras;
4416 
4417 	ras = amdgpu_ras_get_context(adev);
4418 	if (!ras)
4419 		return NULL;
4420 
4421 	return ras->event_mgr;
4422 }
4423 
4424 int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type,
4425 				     const void *caller)
4426 {
4427 	struct ras_event_manager *event_mgr;
4428 	struct ras_event_state *event_state;
4429 	int ret = 0;
4430 
4431 	if (type >= RAS_EVENT_TYPE_COUNT) {
4432 		ret = -EINVAL;
4433 		goto out;
4434 	}
4435 
4436 	event_mgr = __get_ras_event_mgr(adev);
4437 	if (!event_mgr) {
4438 		ret = -EINVAL;
4439 		goto out;
4440 	}
4441 
4442 	event_state = &event_mgr->event_state[type];
4443 	event_state->last_seqno = atomic64_inc_return(&event_mgr->seqno);
4444 	atomic64_inc(&event_state->count);
4445 
4446 out:
4447 	if (ret && caller)
4448 		dev_warn(adev->dev, "failed mark ras event (%d) in %ps, ret:%d\n",
4449 			 (int)type, caller, ret);
4450 
4451 	return ret;
4452 }
4453 
4454 u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type)
4455 {
4456 	struct ras_event_manager *event_mgr;
4457 	u64 id;
4458 
4459 	if (type >= RAS_EVENT_TYPE_COUNT)
4460 		return RAS_EVENT_INVALID_ID;
4461 
4462 	switch (type) {
4463 	case RAS_EVENT_TYPE_FATAL:
4464 	case RAS_EVENT_TYPE_POISON_CREATION:
4465 	case RAS_EVENT_TYPE_POISON_CONSUMPTION:
4466 		event_mgr = __get_ras_event_mgr(adev);
4467 		if (!event_mgr)
4468 			return RAS_EVENT_INVALID_ID;
4469 
4470 		id = event_mgr->event_state[type].last_seqno;
4471 		break;
4472 	case RAS_EVENT_TYPE_INVALID:
4473 	default:
4474 		id = RAS_EVENT_INVALID_ID;
4475 		break;
4476 	}
4477 
4478 	return id;
4479 }
4480 
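/*
 * Global RAS ISR for the fatal error (ERREVENT_ATHUB) interrupt: mark a
 * fatal RAS event, set the FED status and schedule a mode-1 GPU reset.
 * amdgpu_ras_in_intr ensures only the first interrupt is handled.
 */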
4481 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
4482 {
4483 	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
4484 		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4485 		enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
4486 		u64 event_id;
4487 
4488 		if (amdgpu_ras_mark_ras_event(adev, type))
4489 			return;
4490 
4491 		event_id = amdgpu_ras_acquire_event_id(adev, type);
4492 
4493 		RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error "
4494 			      "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
4495 
4496 		amdgpu_ras_set_fed(adev, true);
4497 		ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
4498 		amdgpu_ras_reset_gpu(adev);
4499 	}
4500 }
4501 
4502 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
4503 {
4504 	if (adev->asic_type == CHIP_VEGA20 &&
4505 	    adev->pm.fw_version <= 0x283400) {
4506 		return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
4507 				amdgpu_ras_intr_triggered();
4508 	}
4509 
4510 	return false;
4511 }
4512 
4513 void amdgpu_release_ras_context(struct amdgpu_device *adev)
4514 {
4515 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4516 
4517 	if (!con)
4518 		return;
4519 
4520 	if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
4521 		con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
4522 		amdgpu_ras_set_context(adev, NULL);
4523 		kfree(con);
4524 	}
4525 }
4526 
4527 #ifdef CONFIG_X86_MCE_AMD
4528 static struct amdgpu_device *find_adev(uint32_t node_id)
4529 {
4530 	int i;
4531 	struct amdgpu_device *adev = NULL;
4532 
4533 	for (i = 0; i < mce_adev_list.num_gpu; i++) {
4534 		adev = mce_adev_list.devs[i];
4535 
4536 		if (adev && adev->gmc.xgmi.connected_to_cpu &&
4537 		    adev->gmc.xgmi.physical_node_id == node_id)
4538 			break;
4539 		adev = NULL;
4540 	}
4541 
4542 	return adev;
4543 }
4544 
4545 #define GET_MCA_IPID_GPUID(m)	(((m) >> 44) & 0xF)
4546 #define GET_UMC_INST(m)		(((m) >> 21) & 0x7)
4547 #define GET_CHAN_INDEX(m)	((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
4548 #define GPU_ID_OFFSET		8
4549 
4550 static int amdgpu_bad_page_notifier(struct notifier_block *nb,
4551 				    unsigned long val, void *data)
4552 {
4553 	struct mce *m = (struct mce *)data;
4554 	struct amdgpu_device *adev = NULL;
4555 	uint32_t gpu_id = 0;
4556 	uint32_t umc_inst = 0, ch_inst = 0;
4557 
4558 	/*
4559 	 * If the error was generated in UMC_V2, which belongs to GPU UMCs,
4560 	 * and the error occurred in DramECC (extended error code = 0), then
4561 	 * process the error; otherwise bail out.
4562 	 */
4563 	if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
4564 		    (XEC(m->status, 0x3f) == 0x0)))
4565 		return NOTIFY_DONE;
4566 
4567 	/*
4568 	 * If it is a correctable error, return.
4569 	 */
4570 	if (mce_is_correctable(m))
4571 		return NOTIFY_OK;
4572 
4573 	/*
4574 	 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
4575 	 */
4576 	gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
4577 
4578 	adev = find_adev(gpu_id);
4579 	if (!adev) {
4580 		DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
4581 								gpu_id);
4582 		return NOTIFY_DONE;
4583 	}
4584 
4585 	/*
4586 	 * If it is an uncorrectable error, then find out the UMC instance and
4587 	 * channel index.
4588 	 */
4589 	umc_inst = GET_UMC_INST(m->ipid);
4590 	ch_inst = GET_CHAN_INDEX(m->ipid);
4591 
4592 	dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
4593 			     umc_inst, ch_inst);
4594 
4595 	if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
4596 		return NOTIFY_OK;
4597 	else
4598 		return NOTIFY_DONE;
4599 }
4600 
4601 static struct notifier_block amdgpu_bad_page_nb = {
4602 	.notifier_call  = amdgpu_bad_page_notifier,
4603 	.priority       = MCE_PRIO_UC,
4604 };
4605 
4606 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
4607 {
4608 	/*
4609 	 * Add the adev to the mce_adev_list.
4610 	 * During a mode2 reset, the amdgpu device is temporarily
4611 	 * removed from the mgpu_info list, which can cause
4612 	 * page retirement to fail.
4613 	 * Use this list instead of mgpu_info to find the amdgpu
4614 	 * device on which the UMC error was reported.
4615 	 */
4616 	mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
4617 
4618 	/*
4619 	 * Register the x86 notifier only once
4620 	 * with MCE subsystem.
4621 	 */
4622 	if (!notifier_registered) {
4623 		mce_register_decode_chain(&amdgpu_bad_page_nb);
4624 		notifier_registered = true;
4625 	}
4626 }
4627 #endif
4628 
4629 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
4630 {
4631 	if (!adev)
4632 		return NULL;
4633 
4634 	return adev->psp.ras_context.ras;
4635 }
4636 
4637 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
4638 {
4639 	if (!adev)
4640 		return -EINVAL;
4641 
4642 	adev->psp.ras_context.ras = ras_con;
4643 	return 0;
4644 }
4645 
4646 /* check if ras is supported on block, say, sdma, gfx */
4647 int amdgpu_ras_is_supported(struct amdgpu_device *adev,
4648 		unsigned int block)
4649 {
4650 	int ret = 0;
4651 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4652 
4653 	if (block >= AMDGPU_RAS_BLOCK_COUNT)
4654 		return 0;
4655 
4656 	ret = ras && (adev->ras_enabled & (1 << block));
4657 
4658 	/* For the special case of an asic with mem ecc enabled but sram
4659 	 * ecc not enabled: even if the ras block is not marked supported
4660 	 * in .ras_enabled, if the asic supports poison mode and the
4661 	 * ras block has a ras configuration, the ras block can be
4662 	 * considered to support the ras function.
4663 	 */
4664 	if (!ret &&
4665 	    (block == AMDGPU_RAS_BLOCK__GFX ||
4666 	     block == AMDGPU_RAS_BLOCK__SDMA ||
4667 	     block == AMDGPU_RAS_BLOCK__VCN ||
4668 	     block == AMDGPU_RAS_BLOCK__JPEG) &&
4669 		(amdgpu_ras_mask & (1 << block)) &&
4670 	    amdgpu_ras_is_poison_mode_supported(adev) &&
4671 	    amdgpu_ras_get_ras_block(adev, block, 0))
4672 		ret = 1;
4673 
4674 	return ret;
4675 }
4676 
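/*
 * Schedule the RAS recovery (GPU reset) work on the reset domain. If another
 * GPU in the XGMI hive has already started hive-wide recovery, this device
 * does not trigger it again. An RMA device is forced to a mode-1 reset.
 */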
4677 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
4678 {
4679 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4680 
4681 	/* mode-1 reset is the only option when the device is in RMA status */
4682 	if (amdgpu_ras_is_rma(adev)) {
4683 		ras->gpu_reset_flags = 0;
4684 		ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
4685 	}
4686 
4687 	if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) {
4688 		struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
4689 		int hive_ras_recovery = 0;
4690 
4691 		if (hive) {
4692 			hive_ras_recovery = atomic_read(&hive->ras_recovery);
4693 			amdgpu_put_xgmi_hive(hive);
4694 		}
4695 		/* In the case of multiple GPUs, after a GPU has started
4696 		 * resetting all GPUs on the hive, the other GPUs do not need
4697 		 * to trigger a GPU reset again.
4698 		 */
4699 		if (!hive_ras_recovery)
4700 			amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
4701 		else
4702 			atomic_set(&ras->in_recovery, 0);
4703 	} else {
4704 		flush_work(&ras->recovery_work);
4705 		amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
4706 	}
4707 
4708 	return 0;
4709 }
4710 
4711 int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
4712 {
4713 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4714 	int ret = 0;
4715 
4716 	if (con) {
4717 		ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
4718 		if (!ret)
4719 			con->is_aca_debug_mode = enable;
4720 	}
4721 
4722 	return ret;
4723 }
4724 
4725 int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable)
4726 {
4727 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4728 	int ret = 0;
4729 
4730 	if (con) {
4731 		if (amdgpu_aca_is_enabled(adev))
4732 			ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
4733 		else
4734 			ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
4735 		if (!ret)
4736 			con->is_aca_debug_mode = enable;
4737 	}
4738 
4739 	return ret;
4740 }
4741 
4742 bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev)
4743 {
4744 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4745 	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
4746 	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
4747 
4748 	if (!con)
4749 		return false;
4750 
4751 	if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) ||
4752 	    (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode))
4753 		return con->is_aca_debug_mode;
4754 	else
4755 		return true;
4756 }
4757 
4758 bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
4759 				     unsigned int *error_query_mode)
4760 {
4761 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4762 	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
4763 	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
4764 
4765 	if (!con) {
4766 		*error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
4767 		return false;
4768 	}
4769 
4770 	if (amdgpu_sriov_vf(adev)) {
4771 		*error_query_mode = AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY;
4772 	} else if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) {
4773 		*error_query_mode =
4774 			(con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
4775 	} else {
4776 		*error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
4777 	}
4778 
4779 	return true;
4780 }
4781 
4782 /* Register each ip ras block into amdgpu ras */
4783 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
4784 		struct amdgpu_ras_block_object *ras_block_obj)
4785 {
4786 	struct amdgpu_ras_block_list *ras_node;
4787 	if (!adev || !ras_block_obj)
4788 		return -EINVAL;
4789 
4790 	ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
4791 	if (!ras_node)
4792 		return -ENOMEM;
4793 
4794 	INIT_LIST_HEAD(&ras_node->node);
4795 	ras_node->ras_obj = ras_block_obj;
4796 	list_add_tail(&ras_node->node, &adev->ras_list);
4797 
4798 	return 0;
4799 }
4800 
4801 void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
4802 {
4803 	if (!err_type_name)
4804 		return;
4805 
4806 	switch (err_type) {
4807 	case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
4808 		sprintf(err_type_name, "correctable");
4809 		break;
4810 	case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
4811 		sprintf(err_type_name, "uncorrectable");
4812 		break;
4813 	default:
4814 		sprintf(err_type_name, "unknown");
4815 		break;
4816 	}
4817 }
4818 
4819 bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
4820 					 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
4821 					 uint32_t instance,
4822 					 uint32_t *memory_id)
4823 {
4824 	uint32_t err_status_lo_data, err_status_lo_offset;
4825 
4826 	if (!reg_entry)
4827 		return false;
4828 
4829 	err_status_lo_offset =
4830 		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
4831 					    reg_entry->seg_lo, reg_entry->reg_lo);
4832 	err_status_lo_data = RREG32(err_status_lo_offset);
4833 
4834 	if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
4835 	    !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
4836 		return false;
4837 
4838 	*memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);
4839 
4840 	return true;
4841 }
4842 
4843 bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
4844 				       const struct amdgpu_ras_err_status_reg_entry *reg_entry,
4845 				       uint32_t instance,
4846 				       unsigned long *err_cnt)
4847 {
4848 	uint32_t err_status_hi_data, err_status_hi_offset;
4849 
4850 	if (!reg_entry)
4851 		return false;
4852 
4853 	err_status_hi_offset =
4854 		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
4855 					    reg_entry->seg_hi, reg_entry->reg_hi);
4856 	err_status_hi_data = RREG32(err_status_hi_offset);
4857 
4858 	if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
4859 	    !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
4860 		/* keep the check here in case we need to refer to the result later */
4861 		dev_dbg(adev->dev, "Invalid err_info field\n");
4862 
4863 	/* read err count */
4864 	*err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);
4865 
4866 	return true;
4867 }
4868 
4869 void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
4870 					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
4871 					   uint32_t reg_list_size,
4872 					   const struct amdgpu_ras_memory_id_entry *mem_list,
4873 					   uint32_t mem_list_size,
4874 					   uint32_t instance,
4875 					   uint32_t err_type,
4876 					   unsigned long *err_count)
4877 {
4878 	uint32_t memory_id;
4879 	unsigned long err_cnt;
4880 	char err_type_name[16];
4881 	uint32_t i, j;
4882 
4883 	for (i = 0; i < reg_list_size; i++) {
4884 		/* query memory_id from err_status_lo */
4885 		if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
4886 							 instance, &memory_id))
4887 			continue;
4888 
4889 		/* query err_cnt from err_status_hi */
4890 		if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
4891 						       instance, &err_cnt) ||
4892 		    !err_cnt)
4893 			continue;
4894 
4895 		*err_count += err_cnt;
4896 
4897 		/* log the errors */
4898 		amdgpu_ras_get_error_type_name(err_type, err_type_name);
4899 		if (!mem_list) {
4900 			/* memory_list is not supported */
4901 			dev_info(adev->dev,
4902 				 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
4903 				 err_cnt, err_type_name,
4904 				 reg_list[i].block_name,
4905 				 instance, memory_id);
4906 		} else {
4907 			for (j = 0; j < mem_list_size; j++) {
4908 				if (memory_id == mem_list[j].memory_id) {
4909 					dev_info(adev->dev,
4910 						 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
4911 						 err_cnt, err_type_name,
4912 						 reg_list[i].block_name,
4913 						 instance, mem_list[j].name);
4914 					break;
4915 				}
4916 			}
4917 		}
4918 	}
4919 }
4920 
4921 void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
4922 					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
4923 					   uint32_t reg_list_size,
4924 					   uint32_t instance)
4925 {
4926 	uint32_t err_status_lo_offset, err_status_hi_offset;
4927 	uint32_t i;
4928 
4929 	for (i = 0; i < reg_list_size; i++) {
4930 		err_status_lo_offset =
4931 			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
4932 						    reg_list[i].seg_lo, reg_list[i].reg_lo);
4933 		err_status_hi_offset =
4934 			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
4935 						    reg_list[i].seg_hi, reg_list[i].reg_hi);
4936 		WREG32(err_status_lo_offset, 0);
4937 		WREG32(err_status_hi_offset, 0);
4938 	}
4939 }
4940 
4941 int amdgpu_ras_error_data_init(struct ras_err_data *err_data)
4942 {
4943 	memset(err_data, 0, sizeof(*err_data));
4944 
4945 	INIT_LIST_HEAD(&err_data->err_node_list);
4946 
4947 	return 0;
4948 }
4949 
4950 static void amdgpu_ras_error_node_release(struct ras_err_node *err_node)
4951 {
4952 	if (!err_node)
4953 		return;
4954 
4955 	list_del(&err_node->node);
4956 	kvfree(err_node);
4957 }
4958 
4959 void amdgpu_ras_error_data_fini(struct ras_err_data *err_data)
4960 {
4961 	struct ras_err_node *err_node, *tmp;
4962 
4963 	list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
4964 		amdgpu_ras_error_node_release(err_node);
4965 }
4966 
4967 static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data,
4968 							     struct amdgpu_smuio_mcm_config_info *mcm_info)
4969 {
4970 	struct ras_err_node *err_node;
4971 	struct amdgpu_smuio_mcm_config_info *ref_id;
4972 
4973 	if (!err_data || !mcm_info)
4974 		return NULL;
4975 
4976 	for_each_ras_error(err_node, err_data) {
4977 		ref_id = &err_node->err_info.mcm_info;
4978 
4979 		if (mcm_info->socket_id == ref_id->socket_id &&
4980 		    mcm_info->die_id == ref_id->die_id)
4981 			return err_node;
4982 	}
4983 
4984 	return NULL;
4985 }
4986 
4987 static struct ras_err_node *amdgpu_ras_error_node_new(void)
4988 {
4989 	struct ras_err_node *err_node;
4990 
4991 	err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL);
4992 	if (!err_node)
4993 		return NULL;
4994 
4995 	INIT_LIST_HEAD(&err_node->node);
4996 
4997 	return err_node;
4998 }
4999 
5000 static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
5001 {
5002 	struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
5003 	struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
5004 	struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
5005 	struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;
5006 
5007 	if (unlikely(infoa->socket_id != infob->socket_id))
5008 		return infoa->socket_id - infob->socket_id;
5009 	else
5010 		return infoa->die_id - infob->die_id;
5013 }
5014 
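/*
 * Look up the per-(socket, die) error info entry for mcm_info, creating a
 * new node and keeping the list sorted by socket/die id if none exists yet.
 */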
5015 static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
5016 				struct amdgpu_smuio_mcm_config_info *mcm_info)
5017 {
5018 	struct ras_err_node *err_node;
5019 
5020 	err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info);
5021 	if (err_node)
5022 		return &err_node->err_info;
5023 
5024 	err_node = amdgpu_ras_error_node_new();
5025 	if (!err_node)
5026 		return NULL;
5027 
5028 	memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
5029 
5030 	err_data->err_list_count++;
5031 	list_add_tail(&err_node->node, &err_data->err_node_list);
5032 	list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
5033 
5034 	return &err_node->err_info;
5035 }
5036 
5037 int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
5038 					struct amdgpu_smuio_mcm_config_info *mcm_info,
5039 					u64 count)
5040 {
5041 	struct ras_err_info *err_info;
5042 
5043 	if (!err_data || !mcm_info)
5044 		return -EINVAL;
5045 
5046 	if (!count)
5047 		return 0;
5048 
5049 	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
5050 	if (!err_info)
5051 		return -EINVAL;
5052 
5053 	err_info->ue_count += count;
5054 	err_data->ue_count += count;
5055 
5056 	return 0;
5057 }
5058 
5059 int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
5060 					struct amdgpu_smuio_mcm_config_info *mcm_info,
5061 					u64 count)
5062 {
5063 	struct ras_err_info *err_info;
5064 
5065 	if (!err_data || !mcm_info)
5066 		return -EINVAL;
5067 
5068 	if (!count)
5069 		return 0;
5070 
5071 	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
5072 	if (!err_info)
5073 		return -EINVAL;
5074 
5075 	err_info->ce_count += count;
5076 	err_data->ce_count += count;
5077 
5078 	return 0;
5079 }
5080 
5081 int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
5082 					struct amdgpu_smuio_mcm_config_info *mcm_info,
5083 					u64 count)
5084 {
5085 	struct ras_err_info *err_info;
5086 
5087 	if (!err_data || !mcm_info)
5088 		return -EINVAL;
5089 
5090 	if (!count)
5091 		return 0;
5092 
5093 	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
5094 	if (!err_info)
5095 		return -EINVAL;
5096 
5097 	err_info->de_count += count;
5098 	err_data->de_count += count;
5099 
5100 	return 0;
5101 }
5102 
5103 #define mmMP0_SMN_C2PMSG_92	0x1609C
5104 #define mmMP0_SMN_C2PMSG_126	0x160BE
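/*
 * Decode and log the boot-time errors that the firmware reports through the
 * MP0 C2PMSG_92 (fw_status) and C2PMSG_126 (boot_error) scratch registers
 * for the given instance.
 */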
5105 static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
5106 						 u32 instance)
5107 {
5108 	u32 socket_id, aid_id, hbm_id;
5109 	u32 fw_status;
5110 	u32 boot_error;
5111 	u64 reg_addr;
5112 
5113 	/* The pattern for smn addressing in other SOCs could be different from
5114 	 * the one for aqua_vanjaram. We should revisit this code if the pattern
5115 	 * changes; in that case, replace the aqua_vanjaram implementation
5116 	 * with a more common helper */
5117 	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
5118 		   aqua_vanjaram_encode_ext_smn_addressing(instance);
5119 	fw_status = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
5120 
5121 	reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
5122 		   aqua_vanjaram_encode_ext_smn_addressing(instance);
5123 	boot_error = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
5124 
5125 	socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
5126 	aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error);
5127 	hbm_id = ((1 == AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error)) ? 0 : 1);
5128 
5129 	if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
5130 		dev_info(adev->dev,
5131 			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, memory training failed\n",
5132 			 socket_id, aid_id, hbm_id, fw_status);
5133 
5134 	if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
5135 		dev_info(adev->dev,
5136 			 "socket: %d, aid: %d, fw_status: 0x%x, firmware load failed at boot time\n",
5137 			 socket_id, aid_id, fw_status);
5138 
5139 	if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
5140 		dev_info(adev->dev,
5141 			 "socket: %d, aid: %d, fw_status: 0x%x, wafl link training failed\n",
5142 			 socket_id, aid_id, fw_status);
5143 
5144 	if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
5145 		dev_info(adev->dev,
5146 			 "socket: %d, aid: %d, fw_status: 0x%x, xgmi link training failed\n",
5147 			 socket_id, aid_id, fw_status);
5148 
5149 	if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
5150 		dev_info(adev->dev,
5151 			 "socket: %d, aid: %d, fw_status: 0x%x, usr cp link training failed\n",
5152 			 socket_id, aid_id, fw_status);
5153 
5154 	if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
5155 		dev_info(adev->dev,
5156 			 "socket: %d, aid: %d, fw_status: 0x%x, usr dp link training failed\n",
5157 			 socket_id, aid_id, fw_status);
5158 
5159 	if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
5160 		dev_info(adev->dev,
5161 			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm memory test failed\n",
5162 			 socket_id, aid_id, hbm_id, fw_status);
5163 
5164 	if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
5165 		dev_info(adev->dev,
5166 			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n",
5167 			 socket_id, aid_id, hbm_id, fw_status);
5168 
5169 	if (AMDGPU_RAS_GPU_ERR_DATA_ABORT(boot_error))
5170 		dev_info(adev->dev,
5171 			 "socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
5172 			 socket_id, aid_id, fw_status);
5173 
5174 	if (AMDGPU_RAS_GPU_ERR_GENERIC(boot_error))
5175 		dev_info(adev->dev,
5176 			 "socket: %d, aid: %d, fw_status: 0x%x, Boot Controller Generic Error\n",
5177 			 socket_id, aid_id, fw_status);
5178 }
5179 
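/*
 * Poll C2PMSG_92 until the boot status reaches steady state; return true if
 * it never does within the polling limit, i.e. a boot error is likely.
 */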
5180 static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev,
5181 					   u32 instance)
5182 {
5183 	u64 reg_addr;
5184 	u32 reg_data;
5185 	int retry_loop;
5186 
5187 	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
5188 		   aqua_vanjaram_encode_ext_smn_addressing(instance);
5189 
5190 	for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
5191 		reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
5192 		if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS)
5193 			return false;
5194 		else
5195 			msleep(1);
5196 	}
5197 
5198 	return true;
5199 }
5200 
5201 void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
5202 {
5203 	u32 i;
5204 
5205 	for (i = 0; i < num_instances; i++) {
5206 		if (amdgpu_ras_boot_error_detected(adev, i))
5207 			amdgpu_ras_boot_time_error_reporting(adev, i);
5208 	}
5209 }
5210 
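/*
 * Reserve a single bad page in the VRAM manager (if it is not reserved
 * already) so that it is never handed out for allocation again.
 */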
5211 int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
5212 {
5213 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5214 	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
5215 	uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
5216 	int ret = 0;
5217 
5218 	mutex_lock(&con->page_rsv_lock);
5219 	ret = amdgpu_vram_mgr_query_page_status(mgr, start);
5220 	if (ret == -ENOENT)
5221 		ret = amdgpu_vram_mgr_reserve_range(mgr, start, AMDGPU_GPU_PAGE_SIZE);
5222 	mutex_unlock(&con->page_rsv_lock);
5223 
5224 	return ret;
5225 }
5226 
5227 void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
5228 				const char *fmt, ...)
5229 {
5230 	struct va_format vaf;
5231 	va_list args;
5232 
5233 	va_start(args, fmt);
5234 	vaf.fmt = fmt;
5235 	vaf.va = &args;
5236 
5237 	if (RAS_EVENT_ID_IS_VALID(event_id))
5238 		dev_printk(KERN_INFO, adev->dev, "{%llu}%pV", event_id, &vaf);
5239 	else
5240 		dev_printk(KERN_INFO, adev->dev, "%pV", &vaf);
5241 
5242 	va_end(args);
5243 }
5244 
5245 bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
5246 {
5247 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5248 
5249 	if (!con)
5250 		return false;
5251 
5252 	return con->is_rma;
5253 }
5254