xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c (revision a52a3c18cdf369a713aca7593332bbb998c71d96)
1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  *
23  */
24 #include <linux/debugfs.h>
25 #include <linux/list.h>
26 #include <linux/module.h>
27 #include <linux/uaccess.h>
28 #include <linux/reboot.h>
29 #include <linux/syscalls.h>
30 #include <linux/pm_runtime.h>
31 #include <linux/list_sort.h>
32 
33 #include "amdgpu.h"
34 #include "amdgpu_ras.h"
35 #include "amdgpu_atomfirmware.h"
36 #include "amdgpu_xgmi.h"
37 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
38 #include "nbio_v4_3.h"
39 #include "nbif_v6_3_1.h"
40 #include "nbio_v7_9.h"
41 #include "atom.h"
42 #include "amdgpu_reset.h"
43 #include "amdgpu_psp.h"
44 
45 #ifdef CONFIG_X86_MCE_AMD
46 #include <asm/mce.h>
47 
48 static bool notifier_registered;
49 #endif
50 static const char *RAS_FS_NAME = "ras";
51 
52 const char *ras_error_string[] = {
53 	"none",
54 	"parity",
55 	"single_correctable",
56 	"multi_uncorrectable",
57 	"poison",
58 };
59 
60 const char *ras_block_string[] = {
61 	"umc",
62 	"sdma",
63 	"gfx",
64 	"mmhub",
65 	"athub",
66 	"pcie_bif",
67 	"hdp",
68 	"xgmi_wafl",
69 	"df",
70 	"smn",
71 	"sem",
72 	"mp0",
73 	"mp1",
74 	"fuse",
75 	"mca",
76 	"vcn",
77 	"jpeg",
78 	"ih",
79 	"mpio",
80 };
81 
82 const char *ras_mca_block_string[] = {
83 	"mca_mp0",
84 	"mca_mp1",
85 	"mca_mpio",
86 	"mca_iohc",
87 };
88 
89 struct amdgpu_ras_block_list {
90 	/* ras block link */
91 	struct list_head node;
92 
93 	struct amdgpu_ras_block_object *ras_obj;
94 };
95 
96 const char *get_ras_block_str(struct ras_common_if *ras_block)
97 {
98 	if (!ras_block)
99 		return "NULL";
100 
101 	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
102 	    ras_block->block >= ARRAY_SIZE(ras_block_string))
103 		return "OUT OF RANGE";
104 
105 	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
106 		return ras_mca_block_string[ras_block->sub_block_index];
107 
108 	return ras_block_string[ras_block->block];
109 }
110 
111 #define ras_block_str(_BLOCK_) \
112 	(((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")
113 
114 #define ras_err_str(i) (ras_error_string[ffs(i)])
115 
116 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
117 
118 /* inject address is 52 bits */
119 #define	RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)
120 
121 /* typical ECC bad page rate is 1 bad page per 100MB VRAM */
122 #define RAS_BAD_PAGE_COVER              (100 * 1024 * 1024ULL)
123 
124 #define MAX_UMC_POISON_POLLING_TIME_ASYNC  300  //ms
125 
126 #define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100  //ms
127 
128 #define MAX_FLUSH_RETIRE_DWORK_TIMES  100
129 
130 enum amdgpu_ras_retire_page_reservation {
131 	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
132 	AMDGPU_RAS_RETIRE_PAGE_PENDING,
133 	AMDGPU_RAS_RETIRE_PAGE_FAULT,
134 };
135 
136 atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
137 
138 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
139 				uint64_t addr);
140 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
141 				uint64_t addr);
142 #ifdef CONFIG_X86_MCE_AMD
143 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
144 struct mce_notifier_adev_list {
145 	struct amdgpu_device *devs[MAX_GPU_INSTANCE];
146 	int num_gpu;
147 };
148 static struct mce_notifier_adev_list mce_adev_list;
149 #endif
150 
151 void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
152 {
153 	if (adev && amdgpu_ras_get_context(adev))
154 		amdgpu_ras_get_context(adev)->error_query_ready = ready;
155 }
156 
157 static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
158 {
159 	if (adev && amdgpu_ras_get_context(adev))
160 		return amdgpu_ras_get_context(adev)->error_query_ready;
161 
162 	return false;
163 }
164 
165 static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
166 {
167 	struct ras_err_data err_data;
168 	struct eeprom_table_record err_rec;
169 	int ret;
170 
171 	if ((address >= adev->gmc.mc_vram_size) ||
172 	    (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
173 		dev_warn(adev->dev,
174 		         "RAS WARN: input address 0x%llx is invalid.\n",
175 		         address);
176 		return -EINVAL;
177 	}
178 
179 	if (amdgpu_ras_check_bad_page(adev, address)) {
180 		dev_warn(adev->dev,
181 			 "RAS WARN: 0x%llx has already been marked as bad page!\n",
182 			 address);
183 		return 0;
184 	}
185 
186 	ret = amdgpu_ras_error_data_init(&err_data);
187 	if (ret)
188 		return ret;
189 
190 	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
191 	err_data.err_addr = &err_rec;
192 	amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);
193 
194 	if (amdgpu_bad_page_threshold != 0) {
195 		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
196 					 err_data.err_addr_cnt, false);
197 		amdgpu_ras_save_bad_pages(adev, NULL);
198 	}
199 
200 	amdgpu_ras_error_data_fini(&err_data);
201 
202 	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
203 	dev_warn(adev->dev, "Clear EEPROM:\n");
204 	dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
205 
206 	return 0;
207 }
208 
209 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
210 					size_t size, loff_t *pos)
211 {
212 	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
213 	struct ras_query_if info = {
214 		.head = obj->head,
215 	};
216 	ssize_t s;
217 	char val[128];
218 
219 	if (amdgpu_ras_query_error_status(obj->adev, &info))
220 		return -EINVAL;
221 
222 	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
223 	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
224 	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
225 		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
226 			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
227 	}
228 
229 	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
230 			"ue", info.ue_count,
231 			"ce", info.ce_count);
232 	if (*pos >= s)
233 		return 0;
234 
235 	s -= *pos;
236 	s = min_t(u64, s, size);
237 
238 
239 	if (copy_to_user(buf, &val[*pos], s))
240 		return -EINVAL;
241 
242 	*pos += s;
243 
244 	return s;
245 }
246 
247 static const struct file_operations amdgpu_ras_debugfs_ops = {
248 	.owner = THIS_MODULE,
249 	.read = amdgpu_ras_debugfs_read,
250 	.write = NULL,
251 	.llseek = default_llseek
252 };
253 
254 static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
255 {
256 	int i;
257 
258 	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
259 		*block_id = i;
260 		if (strcmp(name, ras_block_string[i]) == 0)
261 			return 0;
262 	}
263 	return -EINVAL;
264 }
265 
266 static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
267 		const char __user *buf, size_t size,
268 		loff_t *pos, struct ras_debug_if *data)
269 {
270 	ssize_t s = min_t(u64, 64, size);
271 	char str[65];
272 	char block_name[33];
273 	char err[9] = "ue";
274 	int op = -1;
275 	int block_id;
276 	uint32_t sub_block;
277 	u64 address, value;
278 	/* default value is 0 if the mask is not set by user */
279 	u32 instance_mask = 0;
280 
281 	if (*pos)
282 		return -EINVAL;
283 	*pos = size;
284 
285 	memset(str, 0, sizeof(str));
286 	memset(data, 0, sizeof(*data));
287 
288 	if (copy_from_user(str, buf, s))
289 		return -EINVAL;
290 
291 	if (sscanf(str, "disable %32s", block_name) == 1)
292 		op = 0;
293 	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
294 		op = 1;
295 	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
296 		op = 2;
297 	else if (strstr(str, "retire_page") != NULL)
298 		op = 3;
299 	else if (str[0] && str[1] && str[2] && str[3])
300 		/* ascii string, but commands are not matched. */
301 		return -EINVAL;
302 
303 	if (op != -1) {
304 		if (op == 3) {
305 			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
306 			    sscanf(str, "%*s %llu", &address) != 1)
307 				return -EINVAL;
308 
309 			data->op = op;
310 			data->inject.address = address;
311 
312 			return 0;
313 		}
314 
315 		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
316 			return -EINVAL;
317 
318 		data->head.block = block_id;
319 		/* only ue, ce and poison errors are supported */
320 		if (!memcmp("ue", err, 2))
321 			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
322 		else if (!memcmp("ce", err, 2))
323 			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
324 		else if (!memcmp("poison", err, 6))
325 			data->head.type = AMDGPU_RAS_ERROR__POISON;
326 		else
327 			return -EINVAL;
328 
329 		data->op = op;
330 
331 		if (op == 2) {
332 			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
333 				   &sub_block, &address, &value, &instance_mask) != 4 &&
334 			    sscanf(str, "%*s %*s %*s %u %llu %llu %u",
335 				   &sub_block, &address, &value, &instance_mask) != 4 &&
336 				sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
337 				   &sub_block, &address, &value) != 3 &&
338 			    sscanf(str, "%*s %*s %*s %u %llu %llu",
339 				   &sub_block, &address, &value) != 3)
340 				return -EINVAL;
341 			data->head.sub_block_index = sub_block;
342 			data->inject.address = address;
343 			data->inject.value = value;
344 			data->inject.instance_mask = instance_mask;
345 		}
346 	} else {
347 		if (size < sizeof(*data))
348 			return -EINVAL;
349 
350 		if (copy_from_user(data, buf, sizeof(*data)))
351 			return -EINVAL;
352 	}
353 
354 	return 0;
355 }
356 
357 static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
358 				struct ras_debug_if *data)
359 {
360 	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
361 	uint32_t mask, inst_mask = data->inject.instance_mask;
362 
363 	/* no need to set instance mask if there is only one instance */
364 	if (num_xcc <= 1 && inst_mask) {
365 		data->inject.instance_mask = 0;
366 		dev_dbg(adev->dev,
367 			"RAS inject mask(0x%x) isn't supported and force it to 0.\n",
368 			inst_mask);
369 
370 		return;
371 	}
372 
373 	switch (data->head.block) {
374 	case AMDGPU_RAS_BLOCK__GFX:
375 		mask = GENMASK(num_xcc - 1, 0);
376 		break;
377 	case AMDGPU_RAS_BLOCK__SDMA:
378 		mask = GENMASK(adev->sdma.num_instances - 1, 0);
379 		break;
380 	case AMDGPU_RAS_BLOCK__VCN:
381 	case AMDGPU_RAS_BLOCK__JPEG:
382 		mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
383 		break;
384 	default:
385 		mask = inst_mask;
386 		break;
387 	}
388 
389 	/* remove invalid bits in instance mask */
390 	data->inject.instance_mask &= mask;
391 	if (inst_mask != data->inject.instance_mask)
392 		dev_dbg(adev->dev,
393 			"Adjust RAS inject mask 0x%x to 0x%x\n",
394 			inst_mask, data->inject.instance_mask);
395 }
396 
397 /**
398  * DOC: AMDGPU RAS debugfs control interface
399  *
400  * The control interface accepts struct ras_debug_if which has two members.
401  *
402  * First member: ras_debug_if::head or ras_debug_if::inject.
403  *
404  * head is used to indicate which IP block will be under control.
405  *
406  * head has four members, they are block, type, sub_block_index, name.
407  * block: which IP will be under control.
408  * type: what kind of error will be enabled/disabled/injected.
409  * sub_block_index: some IPs have subcomponets. say, GFX, sDMA.
410  * name: the name of IP.
411  *
412  * inject has three more members than head, they are address, value and mask.
413  * As their names indicate, inject operation will write the
414  * value to the address.
415  *
416  * The second member: struct ras_debug_if::op.
417  * It has three kinds of operations.
418  *
419  * - 0: disable RAS on the block. Take ::head as its data.
420  * - 1: enable RAS on the block. Take ::head as its data.
421  * - 2: inject errors on the block. Take ::inject as its data.
422  *
423  * How to use the interface?
424  *
425  * In a program
426  *
427  * Copy the struct ras_debug_if in your code and initialize it.
428  * Write the struct to the control interface.
429  *
430  * From shell
431  *
432  * .. code-block:: bash
433  *
434  *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
435  *	echo "enable  <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
436  *	echo "inject  <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
437  *
438  * Where N, is the card which you want to affect.
439  *
440  * "disable" requires only the block.
441  * "enable" requires the block and error type.
442  * "inject" requires the block, error type, address, and value.
443  *
444  * The block is one of: umc, sdma, gfx, etc.
445  *	see ras_block_string[] for details
446  *
447  * The error type is one of: ue, ce and poison where,
448  *	ue is multi-uncorrectable
449  *	ce is single-correctable
450  *	poison is poison
451  *
452  * The sub-block is a the sub-block index, pass 0 if there is no sub-block.
453  * The address and value are hexadecimal numbers, leading 0x is optional.
454  * The mask means instance mask, is optional, default value is 0x1.
455  *
456  * For instance,
457  *
458  * .. code-block:: bash
459  *
460  *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
461  *	echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
462  *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
463  *
464  * How to check the result of the operation?
465  *
466  * To check disable/enable, see "ras" features at,
467  * /sys/class/drm/card[0/1/2...]/device/ras/features
468  *
469  * To check inject, see the corresponding error count at,
470  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
471  *
472  * .. note::
473  *	Operations are only allowed on blocks which are supported.
474  *	Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
475  *	to see which blocks support RAS on a particular asic.
476  *
477  */
478 static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
479 					     const char __user *buf,
480 					     size_t size, loff_t *pos)
481 {
482 	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
483 	struct ras_debug_if data;
484 	int ret = 0;
485 
486 	if (!amdgpu_ras_get_error_query_ready(adev)) {
487 		dev_warn(adev->dev, "RAS WARN: error injection "
488 				"currently inaccessible\n");
489 		return size;
490 	}
491 
492 	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
493 	if (ret)
494 		return ret;
495 
496 	if (data.op == 3) {
497 		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
498 		if (!ret)
499 			return size;
500 		else
501 			return ret;
502 	}
503 
504 	if (!amdgpu_ras_is_supported(adev, data.head.block))
505 		return -EINVAL;
506 
507 	switch (data.op) {
508 	case 0:
509 		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
510 		break;
511 	case 1:
512 		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
513 		break;
514 	case 2:
515 		if ((data.inject.address >= adev->gmc.mc_vram_size &&
516 		    adev->gmc.mc_vram_size) ||
517 		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
518 			dev_warn(adev->dev, "RAS WARN: input address "
519 					"0x%llx is invalid.",
520 					data.inject.address);
521 			ret = -EINVAL;
522 			break;
523 		}
524 
525 		/* umc ce/ue error injection for a bad page is not allowed */
526 		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
527 		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
528 			dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
529 				 "already been marked as bad!\n",
530 				 data.inject.address);
531 			break;
532 		}
533 
534 		amdgpu_ras_instance_mask_check(adev, &data);
535 
536 		/* data.inject.address is offset instead of absolute gpu address */
537 		ret = amdgpu_ras_error_inject(adev, &data.inject);
538 		break;
539 	default:
540 		ret = -EINVAL;
541 		break;
542 	}
543 
544 	if (ret)
545 		return ret;
546 
547 	return size;
548 }
549 
550 /**
551  * DOC: AMDGPU RAS debugfs EEPROM table reset interface
552  *
553  * Some boards contain an EEPROM which is used to persistently store a list of
554  * bad pages which experiences ECC errors in vram.  This interface provides
555  * a way to reset the EEPROM, e.g., after testing error injection.
556  *
557  * Usage:
558  *
559  * .. code-block:: bash
560  *
561  *	echo 1 > ../ras/ras_eeprom_reset
562  *
563  * will reset EEPROM table to 0 entries.
564  *
565  */
566 static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
567 					       const char __user *buf,
568 					       size_t size, loff_t *pos)
569 {
570 	struct amdgpu_device *adev =
571 		(struct amdgpu_device *)file_inode(f)->i_private;
572 	int ret;
573 
574 	ret = amdgpu_ras_eeprom_reset_table(
575 		&(amdgpu_ras_get_context(adev)->eeprom_control));
576 
577 	if (!ret) {
578 		/* Something was written to EEPROM.
579 		 */
580 		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
581 		return size;
582 	} else {
583 		return ret;
584 	}
585 }
586 
587 static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
588 	.owner = THIS_MODULE,
589 	.read = NULL,
590 	.write = amdgpu_ras_debugfs_ctrl_write,
591 	.llseek = default_llseek
592 };
593 
594 static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
595 	.owner = THIS_MODULE,
596 	.read = NULL,
597 	.write = amdgpu_ras_debugfs_eeprom_write,
598 	.llseek = default_llseek
599 };
600 
601 /**
602  * DOC: AMDGPU RAS sysfs Error Count Interface
603  *
604  * It allows the user to read the error count for each IP block on the gpu through
605  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
606  *
607  * It outputs the multiple lines which report the uncorrected (ue) and corrected
608  * (ce) error counts.
609  *
610  * The format of one line is below,
611  *
612  * [ce|ue]: count
613  *
614  * Example:
615  *
616  * .. code-block:: bash
617  *
618  *	ue: 0
619  *	ce: 1
620  *
621  */
622 static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
623 		struct device_attribute *attr, char *buf)
624 {
625 	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
626 	struct ras_query_if info = {
627 		.head = obj->head,
628 	};
629 
630 	if (!amdgpu_ras_get_error_query_ready(obj->adev))
631 		return sysfs_emit(buf, "Query currently inaccessible\n");
632 
633 	if (amdgpu_ras_query_error_status(obj->adev, &info))
634 		return -EINVAL;
635 
636 	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
637 	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
638 		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
639 			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
640 	}
641 
642 	if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
643 		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
644 				"ce", info.ce_count, "de", info.de_count);
645 	else
646 		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
647 				"ce", info.ce_count);
648 }
649 
650 /* obj begin */
651 
652 #define get_obj(obj) do { (obj)->use++; } while (0)
653 #define alive_obj(obj) ((obj)->use)
654 
655 static inline void put_obj(struct ras_manager *obj)
656 {
657 	if (obj && (--obj->use == 0)) {
658 		list_del(&obj->node);
659 		amdgpu_ras_error_data_fini(&obj->err_data);
660 	}
661 
662 	if (obj && (obj->use < 0))
663 		DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", get_ras_block_str(&obj->head));
664 }
665 
666 /* make one obj and return it. */
667 static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
668 		struct ras_common_if *head)
669 {
670 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
671 	struct ras_manager *obj;
672 
673 	if (!adev->ras_enabled || !con)
674 		return NULL;
675 
676 	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
677 		return NULL;
678 
679 	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
680 		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
681 			return NULL;
682 
683 		obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
684 	} else
685 		obj = &con->objs[head->block];
686 
687 	/* already exist. return obj? */
688 	if (alive_obj(obj))
689 		return NULL;
690 
691 	if (amdgpu_ras_error_data_init(&obj->err_data))
692 		return NULL;
693 
694 	obj->head = *head;
695 	obj->adev = adev;
696 	list_add(&obj->node, &con->head);
697 	get_obj(obj);
698 
699 	return obj;
700 }
701 
702 /* return an obj equal to head, or the first when head is NULL */
703 struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
704 		struct ras_common_if *head)
705 {
706 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
707 	struct ras_manager *obj;
708 	int i;
709 
710 	if (!adev->ras_enabled || !con)
711 		return NULL;
712 
713 	if (head) {
714 		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
715 			return NULL;
716 
717 		if (head->block == AMDGPU_RAS_BLOCK__MCA) {
718 			if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
719 				return NULL;
720 
721 			obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
722 		} else
723 			obj = &con->objs[head->block];
724 
725 		if (alive_obj(obj))
726 			return obj;
727 	} else {
728 		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
729 			obj = &con->objs[i];
730 			if (alive_obj(obj))
731 				return obj;
732 		}
733 	}
734 
735 	return NULL;
736 }
737 /* obj end */
738 
739 /* feature ctl begin */
740 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
741 					 struct ras_common_if *head)
742 {
743 	return adev->ras_hw_enabled & BIT(head->block);
744 }
745 
746 static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
747 		struct ras_common_if *head)
748 {
749 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
750 
751 	return con->features & BIT(head->block);
752 }
753 
754 /*
755  * if obj is not created, then create one.
756  * set feature enable flag.
757  */
758 static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
759 		struct ras_common_if *head, int enable)
760 {
761 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
762 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
763 
764 	/* If hardware does not support ras, then do not create obj.
765 	 * But if hardware support ras, we can create the obj.
766 	 * Ras framework checks con->hw_supported to see if it need do
767 	 * corresponding initialization.
768 	 * IP checks con->support to see if it need disable ras.
769 	 */
770 	if (!amdgpu_ras_is_feature_allowed(adev, head))
771 		return 0;
772 
773 	if (enable) {
774 		if (!obj) {
775 			obj = amdgpu_ras_create_obj(adev, head);
776 			if (!obj)
777 				return -EINVAL;
778 		} else {
779 			/* In case we create obj somewhere else */
780 			get_obj(obj);
781 		}
782 		con->features |= BIT(head->block);
783 	} else {
784 		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
785 			con->features &= ~BIT(head->block);
786 			put_obj(obj);
787 		}
788 	}
789 
790 	return 0;
791 }
792 
793 /* wrapper of psp_ras_enable_features */
794 int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
795 		struct ras_common_if *head, bool enable)
796 {
797 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
798 	union ta_ras_cmd_input *info;
799 	int ret;
800 
801 	if (!con)
802 		return -EINVAL;
803 
804 	/* For non-gfx ip, do not enable ras feature if it is not allowed */
805 	/* For gfx ip, regardless of feature support status, */
806 	/* Force issue enable or disable ras feature commands */
807 	if (head->block != AMDGPU_RAS_BLOCK__GFX &&
808 	    !amdgpu_ras_is_feature_allowed(adev, head))
809 		return 0;
810 
811 	/* Only enable gfx ras feature from host side */
812 	if (head->block == AMDGPU_RAS_BLOCK__GFX &&
813 	    !amdgpu_sriov_vf(adev) &&
814 	    !amdgpu_ras_intr_triggered()) {
815 		info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
816 		if (!info)
817 			return -ENOMEM;
818 
819 		if (!enable) {
820 			info->disable_features = (struct ta_ras_disable_features_input) {
821 				.block_id =  amdgpu_ras_block_to_ta(head->block),
822 				.error_type = amdgpu_ras_error_to_ta(head->type),
823 			};
824 		} else {
825 			info->enable_features = (struct ta_ras_enable_features_input) {
826 				.block_id =  amdgpu_ras_block_to_ta(head->block),
827 				.error_type = amdgpu_ras_error_to_ta(head->type),
828 			};
829 		}
830 
831 		ret = psp_ras_enable_features(&adev->psp, info, enable);
832 		if (ret) {
833 			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
834 				enable ? "enable":"disable",
835 				get_ras_block_str(head),
836 				amdgpu_ras_is_poison_mode_supported(adev), ret);
837 			kfree(info);
838 			return ret;
839 		}
840 
841 		kfree(info);
842 	}
843 
844 	/* setup the obj */
845 	__amdgpu_ras_feature_enable(adev, head, enable);
846 
847 	return 0;
848 }
849 
850 /* Only used in device probe stage and called only once. */
851 int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
852 		struct ras_common_if *head, bool enable)
853 {
854 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
855 	int ret;
856 
857 	if (!con)
858 		return -EINVAL;
859 
860 	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
861 		if (enable) {
862 			/* There is no harm to issue a ras TA cmd regardless of
863 			 * the currecnt ras state.
864 			 * If current state == target state, it will do nothing
865 			 * But sometimes it requests driver to reset and repost
866 			 * with error code -EAGAIN.
867 			 */
868 			ret = amdgpu_ras_feature_enable(adev, head, 1);
869 			/* With old ras TA, we might fail to enable ras.
870 			 * Log it and just setup the object.
871 			 * TODO need remove this WA in the future.
872 			 */
873 			if (ret == -EINVAL) {
874 				ret = __amdgpu_ras_feature_enable(adev, head, 1);
875 				if (!ret)
876 					dev_info(adev->dev,
877 						"RAS INFO: %s setup object\n",
878 						get_ras_block_str(head));
879 			}
880 		} else {
881 			/* setup the object then issue a ras TA disable cmd.*/
882 			ret = __amdgpu_ras_feature_enable(adev, head, 1);
883 			if (ret)
884 				return ret;
885 
886 			/* gfx block ras disable cmd must send to ras-ta */
887 			if (head->block == AMDGPU_RAS_BLOCK__GFX)
888 				con->features |= BIT(head->block);
889 
890 			ret = amdgpu_ras_feature_enable(adev, head, 0);
891 
892 			/* clean gfx block ras features flag */
893 			if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
894 				con->features &= ~BIT(head->block);
895 		}
896 	} else
897 		ret = amdgpu_ras_feature_enable(adev, head, enable);
898 
899 	return ret;
900 }
901 
902 static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
903 		bool bypass)
904 {
905 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
906 	struct ras_manager *obj, *tmp;
907 
908 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
909 		/* bypass psp.
910 		 * aka just release the obj and corresponding flags
911 		 */
912 		if (bypass) {
913 			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
914 				break;
915 		} else {
916 			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
917 				break;
918 		}
919 	}
920 
921 	return con->features;
922 }
923 
924 static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
925 		bool bypass)
926 {
927 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
928 	int i;
929 	const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;
930 
931 	for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
932 		struct ras_common_if head = {
933 			.block = i,
934 			.type = default_ras_type,
935 			.sub_block_index = 0,
936 		};
937 
938 		if (i == AMDGPU_RAS_BLOCK__MCA)
939 			continue;
940 
941 		if (bypass) {
942 			/*
943 			 * bypass psp. vbios enable ras for us.
944 			 * so just create the obj
945 			 */
946 			if (__amdgpu_ras_feature_enable(adev, &head, 1))
947 				break;
948 		} else {
949 			if (amdgpu_ras_feature_enable(adev, &head, 1))
950 				break;
951 		}
952 	}
953 
954 	for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
955 		struct ras_common_if head = {
956 			.block = AMDGPU_RAS_BLOCK__MCA,
957 			.type = default_ras_type,
958 			.sub_block_index = i,
959 		};
960 
961 		if (bypass) {
962 			/*
963 			 * bypass psp. vbios enable ras for us.
964 			 * so just create the obj
965 			 */
966 			if (__amdgpu_ras_feature_enable(adev, &head, 1))
967 				break;
968 		} else {
969 			if (amdgpu_ras_feature_enable(adev, &head, 1))
970 				break;
971 		}
972 	}
973 
974 	return con->features;
975 }
976 /* feature ctl end */
977 
978 static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
979 		enum amdgpu_ras_block block)
980 {
981 	if (!block_obj)
982 		return -EINVAL;
983 
984 	if (block_obj->ras_comm.block == block)
985 		return 0;
986 
987 	return -EINVAL;
988 }
989 
990 static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
991 					enum amdgpu_ras_block block, uint32_t sub_block_index)
992 {
993 	struct amdgpu_ras_block_list *node, *tmp;
994 	struct amdgpu_ras_block_object *obj;
995 
996 	if (block >= AMDGPU_RAS_BLOCK__LAST)
997 		return NULL;
998 
999 	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
1000 		if (!node->ras_obj) {
1001 			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
1002 			continue;
1003 		}
1004 
1005 		obj = node->ras_obj;
1006 		if (obj->ras_block_match) {
1007 			if (obj->ras_block_match(obj, block, sub_block_index) == 0)
1008 				return obj;
1009 		} else {
1010 			if (amdgpu_ras_block_match_default(obj, block) == 0)
1011 				return obj;
1012 		}
1013 	}
1014 
1015 	return NULL;
1016 }
1017 
1018 static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
1019 {
1020 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
1021 	int ret = 0;
1022 
1023 	/*
1024 	 * choosing right query method according to
1025 	 * whether smu support query error information
1026 	 */
1027 	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
1028 	if (ret == -EOPNOTSUPP) {
1029 		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1030 			adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
1031 			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
1032 
1033 		/* umc query_ras_error_address is also responsible for clearing
1034 		 * error status
1035 		 */
1036 		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1037 		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
1038 			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
1039 	} else if (!ret) {
1040 		if (adev->umc.ras &&
1041 			adev->umc.ras->ecc_info_query_ras_error_count)
1042 			adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);
1043 
1044 		if (adev->umc.ras &&
1045 			adev->umc.ras->ecc_info_query_ras_error_address)
1046 			adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
1047 	}
1048 }
1049 
1050 static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
1051 					      struct ras_manager *ras_mgr,
1052 					      struct ras_err_data *err_data,
1053 					      struct ras_query_context *qctx,
1054 					      const char *blk_name,
1055 					      bool is_ue,
1056 					      bool is_de)
1057 {
1058 	struct amdgpu_smuio_mcm_config_info *mcm_info;
1059 	struct ras_err_node *err_node;
1060 	struct ras_err_info *err_info;
1061 	u64 event_id = qctx->evid.event_id;
1062 
1063 	if (is_ue) {
1064 		for_each_ras_error(err_node, err_data) {
1065 			err_info = &err_node->err_info;
1066 			mcm_info = &err_info->mcm_info;
1067 			if (err_info->ue_count) {
1068 				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1069 					      "%lld new uncorrectable hardware errors detected in %s block\n",
1070 					      mcm_info->socket_id,
1071 					      mcm_info->die_id,
1072 					      err_info->ue_count,
1073 					      blk_name);
1074 			}
1075 		}
1076 
1077 		for_each_ras_error(err_node, &ras_mgr->err_data) {
1078 			err_info = &err_node->err_info;
1079 			mcm_info = &err_info->mcm_info;
1080 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1081 				      "%lld uncorrectable hardware errors detected in total in %s block\n",
1082 				      mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
1083 		}
1084 
1085 	} else {
1086 		if (is_de) {
1087 			for_each_ras_error(err_node, err_data) {
1088 				err_info = &err_node->err_info;
1089 				mcm_info = &err_info->mcm_info;
1090 				if (err_info->de_count) {
1091 					RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1092 						      "%lld new deferred hardware errors detected in %s block\n",
1093 						      mcm_info->socket_id,
1094 						      mcm_info->die_id,
1095 						      err_info->de_count,
1096 						      blk_name);
1097 				}
1098 			}
1099 
1100 			for_each_ras_error(err_node, &ras_mgr->err_data) {
1101 				err_info = &err_node->err_info;
1102 				mcm_info = &err_info->mcm_info;
1103 				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1104 					      "%lld deferred hardware errors detected in total in %s block\n",
1105 					      mcm_info->socket_id, mcm_info->die_id,
1106 					      err_info->de_count, blk_name);
1107 			}
1108 		} else {
1109 			for_each_ras_error(err_node, err_data) {
1110 				err_info = &err_node->err_info;
1111 				mcm_info = &err_info->mcm_info;
1112 				if (err_info->ce_count) {
1113 					RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1114 						      "%lld new correctable hardware errors detected in %s block\n",
1115 						      mcm_info->socket_id,
1116 						      mcm_info->die_id,
1117 						      err_info->ce_count,
1118 						      blk_name);
1119 				}
1120 			}
1121 
1122 			for_each_ras_error(err_node, &ras_mgr->err_data) {
1123 				err_info = &err_node->err_info;
1124 				mcm_info = &err_info->mcm_info;
1125 				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1126 					      "%lld correctable hardware errors detected in total in %s block\n",
1127 					      mcm_info->socket_id, mcm_info->die_id,
1128 					      err_info->ce_count, blk_name);
1129 			}
1130 		}
1131 	}
1132 }
1133 
1134 static inline bool err_data_has_source_info(struct ras_err_data *data)
1135 {
1136 	return !list_empty(&data->err_node_list);
1137 }
1138 
1139 static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
1140 					     struct ras_query_if *query_if,
1141 					     struct ras_err_data *err_data,
1142 					     struct ras_query_context *qctx)
1143 {
1144 	struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
1145 	const char *blk_name = get_ras_block_str(&query_if->head);
1146 	u64 event_id = qctx->evid.event_id;
1147 
1148 	if (err_data->ce_count) {
1149 		if (err_data_has_source_info(err_data)) {
1150 			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1151 							  blk_name, false, false);
1152 		} else if (!adev->aid_mask &&
1153 			   adev->smuio.funcs &&
1154 			   adev->smuio.funcs->get_socket_id &&
1155 			   adev->smuio.funcs->get_die_id) {
1156 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1157 				      "%ld correctable hardware errors "
1158 				      "detected in %s block\n",
1159 				      adev->smuio.funcs->get_socket_id(adev),
1160 				      adev->smuio.funcs->get_die_id(adev),
1161 				      ras_mgr->err_data.ce_count,
1162 				      blk_name);
1163 		} else {
1164 			RAS_EVENT_LOG(adev, event_id, "%ld correctable hardware errors "
1165 				      "detected in %s block\n",
1166 				      ras_mgr->err_data.ce_count,
1167 				      blk_name);
1168 		}
1169 	}
1170 
1171 	if (err_data->ue_count) {
1172 		if (err_data_has_source_info(err_data)) {
1173 			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1174 							  blk_name, true, false);
1175 		} else if (!adev->aid_mask &&
1176 			   adev->smuio.funcs &&
1177 			   adev->smuio.funcs->get_socket_id &&
1178 			   adev->smuio.funcs->get_die_id) {
1179 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1180 				      "%ld uncorrectable hardware errors "
1181 				      "detected in %s block\n",
1182 				      adev->smuio.funcs->get_socket_id(adev),
1183 				      adev->smuio.funcs->get_die_id(adev),
1184 				      ras_mgr->err_data.ue_count,
1185 				      blk_name);
1186 		} else {
1187 			RAS_EVENT_LOG(adev, event_id, "%ld uncorrectable hardware errors "
1188 				      "detected in %s block\n",
1189 				      ras_mgr->err_data.ue_count,
1190 				      blk_name);
1191 		}
1192 	}
1193 
1194 	if (err_data->de_count) {
1195 		if (err_data_has_source_info(err_data)) {
1196 			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1197 							  blk_name, false, true);
1198 		} else if (!adev->aid_mask &&
1199 			   adev->smuio.funcs &&
1200 			   adev->smuio.funcs->get_socket_id &&
1201 			   adev->smuio.funcs->get_die_id) {
1202 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1203 				      "%ld deferred hardware errors "
1204 				      "detected in %s block\n",
1205 				      adev->smuio.funcs->get_socket_id(adev),
1206 				      adev->smuio.funcs->get_die_id(adev),
1207 				      ras_mgr->err_data.de_count,
1208 				      blk_name);
1209 		} else {
1210 			RAS_EVENT_LOG(adev, event_id, "%ld deferred hardware errors "
1211 				      "detected in %s block\n",
1212 				      ras_mgr->err_data.de_count,
1213 				      blk_name);
1214 		}
1215 	}
1216 }
1217 
1218 static void amdgpu_ras_virt_error_generate_report(struct amdgpu_device *adev,
1219 						  struct ras_query_if *query_if,
1220 						  struct ras_err_data *err_data,
1221 						  struct ras_query_context *qctx)
1222 {
1223 	unsigned long new_ue, new_ce, new_de;
1224 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &query_if->head);
1225 	const char *blk_name = get_ras_block_str(&query_if->head);
1226 	u64 event_id = qctx->evid.event_id;
1227 
1228 	new_ce = err_data->ce_count - obj->err_data.ce_count;
1229 	new_ue = err_data->ue_count - obj->err_data.ue_count;
1230 	new_de = err_data->de_count - obj->err_data.de_count;
1231 
1232 	if (new_ce) {
1233 		RAS_EVENT_LOG(adev, event_id, "%lu correctable hardware errors "
1234 			      "detected in %s block\n",
1235 			      new_ce,
1236 			      blk_name);
1237 	}
1238 
1239 	if (new_ue) {
1240 		RAS_EVENT_LOG(adev, event_id, "%lu uncorrectable hardware errors "
1241 			      "detected in %s block\n",
1242 			      new_ue,
1243 			      blk_name);
1244 	}
1245 
1246 	if (new_de) {
1247 		RAS_EVENT_LOG(adev, event_id, "%lu deferred hardware errors "
1248 			      "detected in %s block\n",
1249 			      new_de,
1250 			      blk_name);
1251 	}
1252 }
1253 
1254 static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
1255 {
1256 	struct ras_err_node *err_node;
1257 	struct ras_err_info *err_info;
1258 
1259 	if (err_data_has_source_info(err_data)) {
1260 		for_each_ras_error(err_node, err_data) {
1261 			err_info = &err_node->err_info;
1262 			amdgpu_ras_error_statistic_de_count(&obj->err_data,
1263 					&err_info->mcm_info, err_info->de_count);
1264 			amdgpu_ras_error_statistic_ce_count(&obj->err_data,
1265 					&err_info->mcm_info, err_info->ce_count);
1266 			amdgpu_ras_error_statistic_ue_count(&obj->err_data,
1267 					&err_info->mcm_info, err_info->ue_count);
1268 		}
1269 	} else {
1270 		/* for legacy asic path which doesn't has error source info */
1271 		obj->err_data.ue_count += err_data->ue_count;
1272 		obj->err_data.ce_count += err_data->ce_count;
1273 		obj->err_data.de_count += err_data->de_count;
1274 	}
1275 }
1276 
1277 static void amdgpu_ras_mgr_virt_error_data_statistics_update(struct ras_manager *obj,
1278 							     struct ras_err_data *err_data)
1279 {
1280 	/* Host reports absolute counts */
1281 	obj->err_data.ue_count = err_data->ue_count;
1282 	obj->err_data.ce_count = err_data->ce_count;
1283 	obj->err_data.de_count = err_data->de_count;
1284 }
1285 
1286 static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
1287 {
1288 	struct ras_common_if head;
1289 
1290 	memset(&head, 0, sizeof(head));
1291 	head.block = blk;
1292 
1293 	return amdgpu_ras_find_obj(adev, &head);
1294 }
1295 
1296 int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
1297 			const struct aca_info *aca_info, void *data)
1298 {
1299 	struct ras_manager *obj;
1300 
1301 	/* in resume phase, no need to create aca fs node */
1302 	if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
1303 		return 0;
1304 
1305 	obj = get_ras_manager(adev, blk);
1306 	if (!obj)
1307 		return -EINVAL;
1308 
1309 	return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
1310 }
1311 
1312 int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
1313 {
1314 	struct ras_manager *obj;
1315 
1316 	obj = get_ras_manager(adev, blk);
1317 	if (!obj)
1318 		return -EINVAL;
1319 
1320 	amdgpu_aca_remove_handle(&obj->aca_handle);
1321 
1322 	return 0;
1323 }
1324 
1325 static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
1326 					 enum aca_error_type type, struct ras_err_data *err_data,
1327 					 struct ras_query_context *qctx)
1328 {
1329 	struct ras_manager *obj;
1330 
1331 	obj = get_ras_manager(adev, blk);
1332 	if (!obj)
1333 		return -EINVAL;
1334 
1335 	return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data, qctx);
1336 }
1337 
1338 ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
1339 				  struct aca_handle *handle, char *buf, void *data)
1340 {
1341 	struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
1342 	struct ras_query_if info = {
1343 		.head = obj->head,
1344 	};
1345 
1346 	if (!amdgpu_ras_get_error_query_ready(obj->adev))
1347 		return sysfs_emit(buf, "Query currently inaccessible\n");
1348 
1349 	if (amdgpu_ras_query_error_status(obj->adev, &info))
1350 		return -EINVAL;
1351 
1352 	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
1353 			  "ce", info.ce_count, "de", info.de_count);
1354 }
1355 
1356 static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
1357 						struct ras_query_if *info,
1358 						struct ras_err_data *err_data,
1359 						struct ras_query_context *qctx,
1360 						unsigned int error_query_mode)
1361 {
1362 	enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
1363 	struct amdgpu_ras_block_object *block_obj = NULL;
1364 	int ret;
1365 
1366 	if (blk == AMDGPU_RAS_BLOCK_COUNT)
1367 		return -EINVAL;
1368 
1369 	if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
1370 		return -EINVAL;
1371 
1372 	if (error_query_mode == AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
1373 		return amdgpu_virt_req_ras_err_count(adev, blk, err_data);
1374 	} else if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
1375 		if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
1376 			amdgpu_ras_get_ecc_info(adev, err_data);
1377 		} else {
1378 			block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
1379 			if (!block_obj || !block_obj->hw_ops) {
1380 				dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1381 					     get_ras_block_str(&info->head));
1382 				return -EINVAL;
1383 			}
1384 
1385 			if (block_obj->hw_ops->query_ras_error_count)
1386 				block_obj->hw_ops->query_ras_error_count(adev, err_data);
1387 
1388 			if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
1389 			    (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
1390 			    (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
1391 				if (block_obj->hw_ops->query_ras_error_status)
1392 					block_obj->hw_ops->query_ras_error_status(adev);
1393 			}
1394 		}
1395 	} else {
1396 		if (amdgpu_aca_is_enabled(adev)) {
1397 			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data, qctx);
1398 			if (ret)
1399 				return ret;
1400 
1401 			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data, qctx);
1402 			if (ret)
1403 				return ret;
1404 
1405 			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data, qctx);
1406 			if (ret)
1407 				return ret;
1408 		} else {
1409 			/* FIXME: add code to check return value later */
1410 			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx);
1411 			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data, qctx);
1412 		}
1413 	}
1414 
1415 	return 0;
1416 }
1417 
1418 /* query/inject/cure begin */
1419 static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev,
1420 						    struct ras_query_if *info,
1421 						    enum ras_event_type type)
1422 {
1423 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1424 	struct ras_err_data err_data;
1425 	struct ras_query_context qctx;
1426 	unsigned int error_query_mode;
1427 	int ret;
1428 
1429 	if (!obj)
1430 		return -EINVAL;
1431 
1432 	ret = amdgpu_ras_error_data_init(&err_data);
1433 	if (ret)
1434 		return ret;
1435 
1436 	if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
1437 		return -EINVAL;
1438 
1439 	memset(&qctx, 0, sizeof(qctx));
1440 	qctx.evid.type = type;
1441 	qctx.evid.event_id = amdgpu_ras_acquire_event_id(adev, type);
1442 
1443 	if (!down_read_trylock(&adev->reset_domain->sem)) {
1444 		ret = -EIO;
1445 		goto out_fini_err_data;
1446 	}
1447 
1448 	ret = amdgpu_ras_query_error_status_helper(adev, info,
1449 						   &err_data,
1450 						   &qctx,
1451 						   error_query_mode);
1452 	up_read(&adev->reset_domain->sem);
1453 	if (ret)
1454 		goto out_fini_err_data;
1455 
1456 	if (error_query_mode != AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
1457 		amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
1458 		amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
1459 	} else {
1460 		/* Host provides absolute error counts. First generate the report
1461 		 * using the previous VF internal count against new host count.
1462 		 * Then Update VF internal count.
1463 		 */
1464 		amdgpu_ras_virt_error_generate_report(adev, info, &err_data, &qctx);
1465 		amdgpu_ras_mgr_virt_error_data_statistics_update(obj, &err_data);
1466 	}
1467 
1468 	info->ue_count = obj->err_data.ue_count;
1469 	info->ce_count = obj->err_data.ce_count;
1470 	info->de_count = obj->err_data.de_count;
1471 
1472 out_fini_err_data:
1473 	amdgpu_ras_error_data_fini(&err_data);
1474 
1475 	return ret;
1476 }
1477 
1478 int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
1479 {
1480 	return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
1481 }
1482 
1483 int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
1484 		enum amdgpu_ras_block block)
1485 {
1486 	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1487 	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
1488 	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
1489 
1490 	if (!block_obj || !block_obj->hw_ops) {
1491 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1492 				ras_block_str(block));
1493 		return -EOPNOTSUPP;
1494 	}
1495 
1496 	if (!amdgpu_ras_is_supported(adev, block) ||
1497 	    !amdgpu_ras_get_aca_debug_mode(adev))
1498 		return -EOPNOTSUPP;
1499 
1500 	/* skip ras error reset in gpu reset */
1501 	if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) &&
1502 	    ((smu_funcs && smu_funcs->set_debug_mode) ||
1503 	     (mca_funcs && mca_funcs->mca_set_debug_mode)))
1504 		return -EOPNOTSUPP;
1505 
1506 	if (block_obj->hw_ops->reset_ras_error_count)
1507 		block_obj->hw_ops->reset_ras_error_count(adev);
1508 
1509 	return 0;
1510 }
1511 
1512 int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
1513 		enum amdgpu_ras_block block)
1514 {
1515 	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1516 
1517 	if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
1518 		return 0;
1519 
1520 	if ((block == AMDGPU_RAS_BLOCK__GFX) ||
1521 	    (block == AMDGPU_RAS_BLOCK__MMHUB)) {
1522 		if (block_obj->hw_ops->reset_ras_error_status)
1523 			block_obj->hw_ops->reset_ras_error_status(adev);
1524 	}
1525 
1526 	return 0;
1527 }
1528 
1529 /* wrapper of psp_ras_trigger_error */
1530 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
1531 		struct ras_inject_if *info)
1532 {
1533 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1534 	struct ta_ras_trigger_error_input block_info = {
1535 		.block_id =  amdgpu_ras_block_to_ta(info->head.block),
1536 		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
1537 		.sub_block_index = info->head.sub_block_index,
1538 		.address = info->address,
1539 		.value = info->value,
1540 	};
1541 	int ret = -EINVAL;
1542 	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
1543 							info->head.block,
1544 							info->head.sub_block_index);
1545 
1546 	/* inject on guest isn't allowed, return success directly */
1547 	if (amdgpu_sriov_vf(adev))
1548 		return 0;
1549 
1550 	if (!obj)
1551 		return -EINVAL;
1552 
1553 	if (!block_obj || !block_obj->hw_ops)	{
1554 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1555 			     get_ras_block_str(&info->head));
1556 		return -EINVAL;
1557 	}
1558 
1559 	/* Calculate XGMI relative offset */
1560 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
1561 	    info->head.block != AMDGPU_RAS_BLOCK__GFX) {
1562 		block_info.address =
1563 			amdgpu_xgmi_get_relative_phy_addr(adev,
1564 							  block_info.address);
1565 	}
1566 
1567 	if (block_obj->hw_ops->ras_error_inject) {
1568 		if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
1569 			ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
1570 		else /* Special ras_error_inject is defined (e.g: xgmi) */
1571 			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
1572 						info->instance_mask);
1573 	} else {
1574 		/* default path */
1575 		ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
1576 	}
1577 
1578 	if (ret)
1579 		dev_err(adev->dev, "ras inject %s failed %d\n",
1580 			get_ras_block_str(&info->head), ret);
1581 
1582 	return ret;
1583 }
1584 
1585 /**
1586  * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
1587  * @adev: pointer to AMD GPU device
1588  * @ce_count: pointer to an integer to be set to the count of correctible errors.
1589  * @ue_count: pointer to an integer to be set to the count of uncorrectible errors.
1590  * @query_info: pointer to ras_query_if
1591  *
1592  * Return 0 for query success or do nothing, otherwise return an error
1593  * on failures
1594  */
1595 static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
1596 					       unsigned long *ce_count,
1597 					       unsigned long *ue_count,
1598 					       struct ras_query_if *query_info)
1599 {
1600 	int ret;
1601 
1602 	if (!query_info)
1603 		/* do nothing if query_info is not specified */
1604 		return 0;
1605 
1606 	ret = amdgpu_ras_query_error_status(adev, query_info);
1607 	if (ret)
1608 		return ret;
1609 
1610 	*ce_count += query_info->ce_count;
1611 	*ue_count += query_info->ue_count;
1612 
1613 	/* some hardware/IP supports read to clear
1614 	 * no need to explictly reset the err status after the query call */
1615 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
1616 	    amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
1617 		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
1618 			dev_warn(adev->dev,
1619 				 "Failed to reset error counter and error status\n");
1620 	}
1621 
1622 	return 0;
1623 }
1624 
1625 /**
1626  * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
1627  * @adev: pointer to AMD GPU device
1628  * @ce_count: pointer to an integer to be set to the count of correctible errors.
1629  * @ue_count: pointer to an integer to be set to the count of uncorrectible
1630  * errors.
1631  * @query_info: pointer to ras_query_if if the query request is only for
1632  * specific ip block; if info is NULL, then the qurey request is for
1633  * all the ip blocks that support query ras error counters/status
1634  *
1635  * If set, @ce_count or @ue_count, count and return the corresponding
1636  * error counts in those integer pointers. Return 0 if the device
1637  * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
1638  */
1639 int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
1640 				 unsigned long *ce_count,
1641 				 unsigned long *ue_count,
1642 				 struct ras_query_if *query_info)
1643 {
1644 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1645 	struct ras_manager *obj;
1646 	unsigned long ce, ue;
1647 	int ret;
1648 
1649 	if (!adev->ras_enabled || !con)
1650 		return -EOPNOTSUPP;
1651 
1652 	/* Don't count since no reporting.
1653 	 */
1654 	if (!ce_count && !ue_count)
1655 		return 0;
1656 
1657 	ce = 0;
1658 	ue = 0;
1659 	if (!query_info) {
1660 		/* query all the ip blocks that support ras query interface */
1661 		list_for_each_entry(obj, &con->head, node) {
1662 			struct ras_query_if info = {
1663 				.head = obj->head,
1664 			};
1665 
1666 			ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
1667 		}
1668 	} else {
1669 		/* query specific ip block */
1670 		ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
1671 	}
1672 
1673 	if (ret)
1674 		return ret;
1675 
1676 	if (ce_count)
1677 		*ce_count = ce;
1678 
1679 	if (ue_count)
1680 		*ue_count = ue;
1681 
1682 	return 0;
1683 }
1684 /* query/inject/cure end */
1685 
1686 
1687 /* sysfs begin */
1688 
1689 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1690 		struct ras_badpage **bps, unsigned int *count);
1691 
1692 static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
1693 {
1694 	switch (flags) {
1695 	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
1696 		return "R";
1697 	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
1698 		return "P";
1699 	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
1700 	default:
1701 		return "F";
1702 	}
1703 }
1704 
1705 /**
1706  * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
1707  *
1708  * It allows user to read the bad pages of vram on the gpu through
1709  * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
1710  *
1711  * It outputs multiple lines, and each line stands for one gpu page.
1712  *
1713  * The format of one line is below,
1714  * gpu pfn : gpu page size : flags
1715  *
1716  * gpu pfn and gpu page size are printed in hex format.
1717  * flags can be one of below character,
1718  *
1719  * R: reserved, this gpu page is reserved and not able to use.
1720  *
1721  * P: pending for reserve, this gpu page is marked as bad, will be reserved
1722  * in next window of page_reserve.
1723  *
1724  * F: unable to reserve. this gpu page can't be reserved due to some reasons.
1725  *
1726  * Examples:
1727  *
1728  * .. code-block:: bash
1729  *
1730  *	0x00000001 : 0x00001000 : R
1731  *	0x00000002 : 0x00001000 : P
1732  *
1733  */
1734 
1735 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
1736 		struct kobject *kobj, const struct bin_attribute *attr,
1737 		char *buf, loff_t ppos, size_t count)
1738 {
1739 	struct amdgpu_ras *con =
1740 		container_of(attr, struct amdgpu_ras, badpages_attr);
1741 	struct amdgpu_device *adev = con->adev;
1742 	const unsigned int element_size =
1743 		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
1744 	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
1745 	unsigned int end = div64_ul(ppos + count - 1, element_size);
1746 	ssize_t s = 0;
1747 	struct ras_badpage *bps = NULL;
1748 	unsigned int bps_count = 0;
1749 
1750 	memset(buf, 0, count);
1751 
1752 	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
1753 		return 0;
1754 
1755 	for (; start < end && start < bps_count; start++)
1756 		s += scnprintf(&buf[s], element_size + 1,
1757 				"0x%08x : 0x%08x : %1s\n",
1758 				bps[start].bp,
1759 				bps[start].size,
1760 				amdgpu_ras_badpage_flags_str(bps[start].flags));
1761 
1762 	kfree(bps);
1763 
1764 	return s;
1765 }
1766 
1767 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
1768 		struct device_attribute *attr, char *buf)
1769 {
1770 	struct amdgpu_ras *con =
1771 		container_of(attr, struct amdgpu_ras, features_attr);
1772 
1773 	return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
1774 }
1775 
1776 static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
1777 		struct device_attribute *attr, char *buf)
1778 {
1779 	struct amdgpu_ras *con =
1780 		container_of(attr, struct amdgpu_ras, version_attr);
1781 	return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
1782 }
1783 
1784 static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
1785 		struct device_attribute *attr, char *buf)
1786 {
1787 	struct amdgpu_ras *con =
1788 		container_of(attr, struct amdgpu_ras, schema_attr);
1789 	return sysfs_emit(buf, "schema: 0x%x\n", con->schema);
1790 }
1791 
1792 static struct {
1793 	enum ras_event_type type;
1794 	const char *name;
1795 } dump_event[] = {
1796 	{RAS_EVENT_TYPE_FATAL, "Fatal Error"},
1797 	{RAS_EVENT_TYPE_POISON_CREATION, "Poison Creation"},
1798 	{RAS_EVENT_TYPE_POISON_CONSUMPTION, "Poison Consumption"},
1799 };
1800 
1801 static ssize_t amdgpu_ras_sysfs_event_state_show(struct device *dev,
1802 						 struct device_attribute *attr, char *buf)
1803 {
1804 	struct amdgpu_ras *con =
1805 		container_of(attr, struct amdgpu_ras, event_state_attr);
1806 	struct ras_event_manager *event_mgr = con->event_mgr;
1807 	struct ras_event_state *event_state;
1808 	int i, size = 0;
1809 
1810 	if (!event_mgr)
1811 		return -EINVAL;
1812 
1813 	size += sysfs_emit_at(buf, size, "current seqno: %llu\n", atomic64_read(&event_mgr->seqno));
1814 	for (i = 0; i < ARRAY_SIZE(dump_event); i++) {
1815 		event_state = &event_mgr->event_state[dump_event[i].type];
1816 		size += sysfs_emit_at(buf, size, "%s: count:%llu, last_seqno:%llu\n",
1817 				      dump_event[i].name,
1818 				      atomic64_read(&event_state->count),
1819 				      event_state->last_seqno);
1820 	}
1821 
1822 	return (ssize_t)size;
1823 }
1824 
1825 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
1826 {
1827 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1828 
1829 	if (adev->dev->kobj.sd)
1830 		sysfs_remove_file_from_group(&adev->dev->kobj,
1831 				&con->badpages_attr.attr,
1832 				RAS_FS_NAME);
1833 }
1834 
1835 static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev)
1836 {
1837 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1838 	struct attribute *attrs[] = {
1839 		&con->features_attr.attr,
1840 		&con->version_attr.attr,
1841 		&con->schema_attr.attr,
1842 		&con->event_state_attr.attr,
1843 		NULL
1844 	};
1845 	struct attribute_group group = {
1846 		.name = RAS_FS_NAME,
1847 		.attrs = attrs,
1848 	};
1849 
1850 	if (adev->dev->kobj.sd)
1851 		sysfs_remove_group(&adev->dev->kobj, &group);
1852 
1853 	return 0;
1854 }
1855 
1856 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
1857 		struct ras_common_if *head)
1858 {
1859 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1860 
1861 	if (amdgpu_aca_is_enabled(adev))
1862 		return 0;
1863 
1864 	if (!obj || obj->attr_inuse)
1865 		return -EINVAL;
1866 
1867 	if (amdgpu_sriov_vf(adev) && !amdgpu_virt_ras_telemetry_block_en(adev, head->block))
1868 		return 0;
1869 
1870 	get_obj(obj);
1871 
1872 	snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
1873 		"%s_err_count", head->name);
1874 
1875 	obj->sysfs_attr = (struct device_attribute){
1876 		.attr = {
1877 			.name = obj->fs_data.sysfs_name,
1878 			.mode = S_IRUGO,
1879 		},
1880 			.show = amdgpu_ras_sysfs_read,
1881 	};
1882 	sysfs_attr_init(&obj->sysfs_attr.attr);
1883 
1884 	if (sysfs_add_file_to_group(&adev->dev->kobj,
1885 				&obj->sysfs_attr.attr,
1886 				RAS_FS_NAME)) {
1887 		put_obj(obj);
1888 		return -EINVAL;
1889 	}
1890 
1891 	obj->attr_inuse = 1;
1892 
1893 	return 0;
1894 }
1895 
1896 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
1897 		struct ras_common_if *head)
1898 {
1899 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1900 
1901 	if (amdgpu_aca_is_enabled(adev))
1902 		return 0;
1903 
1904 	if (!obj || !obj->attr_inuse)
1905 		return -EINVAL;
1906 
1907 	if (adev->dev->kobj.sd)
1908 		sysfs_remove_file_from_group(&adev->dev->kobj,
1909 				&obj->sysfs_attr.attr,
1910 				RAS_FS_NAME);
1911 	obj->attr_inuse = 0;
1912 	put_obj(obj);
1913 
1914 	return 0;
1915 }
1916 
1917 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
1918 {
1919 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1920 	struct ras_manager *obj, *tmp;
1921 
1922 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
1923 		amdgpu_ras_sysfs_remove(adev, &obj->head);
1924 	}
1925 
1926 	if (amdgpu_bad_page_threshold != 0)
1927 		amdgpu_ras_sysfs_remove_bad_page_node(adev);
1928 
1929 	amdgpu_ras_sysfs_remove_dev_attr_node(adev);
1930 
1931 	return 0;
1932 }
1933 /* sysfs end */
1934 
1935 /**
1936  * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
1937  *
1938  * Normally when there is an uncorrectable error, the driver will reset
1939  * the GPU to recover.  However, in the event of an unrecoverable error,
1940  * the driver provides an interface to reboot the system automatically
1941  * in that event.
1942  *
1943  * The following file in debugfs provides that interface:
1944  * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
1945  *
1946  * Usage:
1947  *
1948  * .. code-block:: bash
1949  *
1950  *	echo true > .../ras/auto_reboot
1951  *
1952  */
1953 /* debugfs begin */
1954 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
1955 {
1956 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1957 	struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
1958 	struct drm_minor  *minor = adev_to_drm(adev)->primary;
1959 	struct dentry     *dir;
1960 
1961 	dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
1962 	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
1963 			    &amdgpu_ras_debugfs_ctrl_ops);
1964 	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
1965 			    &amdgpu_ras_debugfs_eeprom_ops);
1966 	debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
1967 			   &con->bad_page_cnt_threshold);
1968 	debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
1969 	debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
1970 	debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
1971 	debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
1972 			    &amdgpu_ras_debugfs_eeprom_size_ops);
1973 	con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
1974 						       S_IRUGO, dir, adev,
1975 						       &amdgpu_ras_debugfs_eeprom_table_ops);
1976 	amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
1977 
1978 	/*
1979 	 * After one uncorrectable error happens, usually GPU recovery will
1980 	 * be scheduled. But due to the known problem in GPU recovery failing
1981 	 * to bring GPU back, below interface provides one direct way to
1982 	 * user to reboot system automatically in such case within
1983 	 * ERREVENT_ATHUB_INTERRUPT generated. Normal GPU recovery routine
1984 	 * will never be called.
1985 	 */
1986 	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
1987 
1988 	/*
1989 	 * User could set this not to clean up hardware's error count register
1990 	 * of RAS IPs during ras recovery.
1991 	 */
1992 	debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
1993 			    &con->disable_ras_err_cnt_harvest);
1994 	return dir;
1995 }
1996 
1997 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
1998 				      struct ras_fs_if *head,
1999 				      struct dentry *dir)
2000 {
2001 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
2002 
2003 	if (!obj || !dir)
2004 		return;
2005 
2006 	get_obj(obj);
2007 
2008 	memcpy(obj->fs_data.debugfs_name,
2009 			head->debugfs_name,
2010 			sizeof(obj->fs_data.debugfs_name));
2011 
2012 	debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
2013 			    obj, &amdgpu_ras_debugfs_ops);
2014 }
2015 
2016 static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev)
2017 {
2018 	bool ret;
2019 
2020 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2021 	case IP_VERSION(13, 0, 6):
2022 	case IP_VERSION(13, 0, 12):
2023 	case IP_VERSION(13, 0, 14):
2024 		ret = true;
2025 		break;
2026 	default:
2027 		ret = false;
2028 		break;
2029 	}
2030 
2031 	return ret;
2032 }
2033 
2034 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
2035 {
2036 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2037 	struct dentry *dir;
2038 	struct ras_manager *obj;
2039 	struct ras_fs_if fs_info;
2040 
2041 	/*
2042 	 * it won't be called in resume path, no need to check
2043 	 * suspend and gpu reset status
2044 	 */
2045 	if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
2046 		return;
2047 
2048 	dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
2049 
2050 	list_for_each_entry(obj, &con->head, node) {
2051 		if (amdgpu_ras_is_supported(adev, obj->head.block) &&
2052 			(obj->attr_inuse == 1)) {
2053 			sprintf(fs_info.debugfs_name, "%s_err_inject",
2054 					get_ras_block_str(&obj->head));
2055 			fs_info.head = obj->head;
2056 			amdgpu_ras_debugfs_create(adev, &fs_info, dir);
2057 		}
2058 	}
2059 
2060 	if (amdgpu_ras_aca_is_supported(adev)) {
2061 		if (amdgpu_aca_is_enabled(adev))
2062 			amdgpu_aca_smu_debugfs_init(adev, dir);
2063 		else
2064 			amdgpu_mca_smu_debugfs_init(adev, dir);
2065 	}
2066 }
2067 
2068 /* debugfs end */
2069 
2070 /* ras fs */
2071 static const BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
2072 		      amdgpu_ras_sysfs_badpages_read, NULL, 0);
2073 static DEVICE_ATTR(features, S_IRUGO,
2074 		amdgpu_ras_sysfs_features_read, NULL);
2075 static DEVICE_ATTR(version, 0444,
2076 		amdgpu_ras_sysfs_version_show, NULL);
2077 static DEVICE_ATTR(schema, 0444,
2078 		amdgpu_ras_sysfs_schema_show, NULL);
2079 static DEVICE_ATTR(event_state, 0444,
2080 		   amdgpu_ras_sysfs_event_state_show, NULL);
2081 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
2082 {
2083 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2084 	struct attribute_group group = {
2085 		.name = RAS_FS_NAME,
2086 	};
2087 	struct attribute *attrs[] = {
2088 		&con->features_attr.attr,
2089 		&con->version_attr.attr,
2090 		&con->schema_attr.attr,
2091 		&con->event_state_attr.attr,
2092 		NULL
2093 	};
2094 	const struct bin_attribute *bin_attrs[] = {
2095 		NULL,
2096 		NULL,
2097 	};
2098 	int r;
2099 
2100 	group.attrs = attrs;
2101 
2102 	/* add features entry */
2103 	con->features_attr = dev_attr_features;
2104 	sysfs_attr_init(attrs[0]);
2105 
2106 	/* add version entry */
2107 	con->version_attr = dev_attr_version;
2108 	sysfs_attr_init(attrs[1]);
2109 
2110 	/* add schema entry */
2111 	con->schema_attr = dev_attr_schema;
2112 	sysfs_attr_init(attrs[2]);
2113 
2114 	/* add event_state entry */
2115 	con->event_state_attr = dev_attr_event_state;
2116 	sysfs_attr_init(attrs[3]);
2117 
2118 	if (amdgpu_bad_page_threshold != 0) {
2119 		/* add bad_page_features entry */
2120 		con->badpages_attr = bin_attr_gpu_vram_bad_pages;
2121 		sysfs_bin_attr_init(&con->badpages_attr);
2122 		bin_attrs[0] = &con->badpages_attr;
2123 		group.bin_attrs_new = bin_attrs;
2124 	}
2125 
2126 	r = sysfs_create_group(&adev->dev->kobj, &group);
2127 	if (r)
2128 		dev_err(adev->dev, "Failed to create RAS sysfs group!");
2129 
2130 	return 0;
2131 }
2132 
2133 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
2134 {
2135 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2136 	struct ras_manager *con_obj, *ip_obj, *tmp;
2137 
2138 	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2139 		list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
2140 			ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
2141 			if (ip_obj)
2142 				put_obj(ip_obj);
2143 		}
2144 	}
2145 
2146 	amdgpu_ras_sysfs_remove_all(adev);
2147 	return 0;
2148 }
2149 /* ras fs end */
2150 
2151 /* ih begin */
2152 
2153 /* For the hardware that cannot enable bif ring for both ras_controller_irq
2154  * and ras_err_evnet_athub_irq ih cookies, the driver has to poll status
2155  * register to check whether the interrupt is triggered or not, and properly
2156  * ack the interrupt if it is there
2157  */
2158 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
2159 {
2160 	/* Fatal error events are handled on host side */
2161 	if (amdgpu_sriov_vf(adev))
2162 		return;
2163 	/**
2164 	 * If the current interrupt is caused by a non-fatal RAS error, skip
2165 	 * check for fatal error. For fatal errors, FED status of all devices
2166 	 * in XGMI hive gets set when the first device gets fatal error
2167 	 * interrupt. The error gets propagated to other devices as well, so
2168 	 * make sure to ack the interrupt regardless of FED status.
2169 	 */
2170 	if (!amdgpu_ras_get_fed_status(adev) &&
2171 	    amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY))
2172 		return;
2173 
2174 	if (adev->nbio.ras &&
2175 	    adev->nbio.ras->handle_ras_controller_intr_no_bifring)
2176 		adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
2177 
2178 	if (adev->nbio.ras &&
2179 	    adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
2180 		adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
2181 }
2182 
2183 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
2184 				struct amdgpu_iv_entry *entry)
2185 {
2186 	bool poison_stat = false;
2187 	struct amdgpu_device *adev = obj->adev;
2188 	struct amdgpu_ras_block_object *block_obj =
2189 		amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
2190 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2191 	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
2192 	u64 event_id;
2193 	int ret;
2194 
2195 	if (!block_obj || !con)
2196 		return;
2197 
2198 	ret = amdgpu_ras_mark_ras_event(adev, type);
2199 	if (ret)
2200 		return;
2201 
2202 	amdgpu_ras_set_err_poison(adev, block_obj->ras_comm.block);
2203 	/* both query_poison_status and handle_poison_consumption are optional,
2204 	 * but at least one of them should be implemented if we need poison
2205 	 * consumption handler
2206 	 */
2207 	if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
2208 		poison_stat = block_obj->hw_ops->query_poison_status(adev);
2209 		if (!poison_stat) {
2210 			/* Not poison consumption interrupt, no need to handle it */
2211 			dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
2212 					block_obj->ras_comm.name);
2213 
2214 			return;
2215 		}
2216 	}
2217 
2218 	amdgpu_umc_poison_handler(adev, obj->head.block, 0);
2219 
2220 	if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
2221 		poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
2222 
2223 	/* gpu reset is fallback for failed and default cases.
2224 	 * For RMA case, amdgpu_umc_poison_handler will handle gpu reset.
2225 	 */
2226 	if (poison_stat && !amdgpu_ras_is_rma(adev)) {
2227 		event_id = amdgpu_ras_acquire_event_id(adev, type);
2228 		RAS_EVENT_LOG(adev, event_id,
2229 			      "GPU reset for %s RAS poison consumption is issued!\n",
2230 			      block_obj->ras_comm.name);
2231 		amdgpu_ras_reset_gpu(adev);
2232 	}
2233 
2234 	if (!poison_stat)
2235 		amdgpu_gfx_poison_consumption_handler(adev, entry);
2236 }
2237 
2238 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
2239 				struct amdgpu_iv_entry *entry)
2240 {
2241 	struct amdgpu_device *adev = obj->adev;
2242 	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
2243 	u64 event_id;
2244 	int ret;
2245 
2246 	ret = amdgpu_ras_mark_ras_event(adev, type);
2247 	if (ret)
2248 		return;
2249 
2250 	event_id = amdgpu_ras_acquire_event_id(adev, type);
2251 	RAS_EVENT_LOG(adev, event_id, "Poison is created\n");
2252 
2253 	if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) {
2254 		struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev);
2255 
2256 		atomic_inc(&con->page_retirement_req_cnt);
2257 		atomic_inc(&con->poison_creation_count);
2258 
2259 		wake_up(&con->page_retirement_wq);
2260 	}
2261 }
2262 
2263 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
2264 				struct amdgpu_iv_entry *entry)
2265 {
2266 	struct ras_ih_data *data = &obj->ih_data;
2267 	struct ras_err_data err_data;
2268 	int ret;
2269 
2270 	if (!data->cb)
2271 		return;
2272 
2273 	ret = amdgpu_ras_error_data_init(&err_data);
2274 	if (ret)
2275 		return;
2276 
2277 	/* Let IP handle its data, maybe we need get the output
2278 	 * from the callback to update the error type/count, etc
2279 	 */
2280 	amdgpu_ras_set_fed(obj->adev, true);
2281 	ret = data->cb(obj->adev, &err_data, entry);
2282 	/* ue will trigger an interrupt, and in that case
2283 	 * we need do a reset to recovery the whole system.
2284 	 * But leave IP do that recovery, here we just dispatch
2285 	 * the error.
2286 	 */
2287 	if (ret == AMDGPU_RAS_SUCCESS) {
2288 		/* these counts could be left as 0 if
2289 		 * some blocks do not count error number
2290 		 */
2291 		obj->err_data.ue_count += err_data.ue_count;
2292 		obj->err_data.ce_count += err_data.ce_count;
2293 		obj->err_data.de_count += err_data.de_count;
2294 	}
2295 
2296 	amdgpu_ras_error_data_fini(&err_data);
2297 }
2298 
2299 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
2300 {
2301 	struct ras_ih_data *data = &obj->ih_data;
2302 	struct amdgpu_iv_entry entry;
2303 
2304 	while (data->rptr != data->wptr) {
2305 		rmb();
2306 		memcpy(&entry, &data->ring[data->rptr],
2307 				data->element_size);
2308 
2309 		wmb();
2310 		data->rptr = (data->aligned_element_size +
2311 				data->rptr) % data->ring_size;
2312 
2313 		if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
2314 			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2315 				amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
2316 			else
2317 				amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
2318 		} else {
2319 			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2320 				amdgpu_ras_interrupt_umc_handler(obj, &entry);
2321 			else
2322 				dev_warn(obj->adev->dev,
2323 					"No RAS interrupt handler for non-UMC block with poison disabled.\n");
2324 		}
2325 	}
2326 }
2327 
2328 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
2329 {
2330 	struct ras_ih_data *data =
2331 		container_of(work, struct ras_ih_data, ih_work);
2332 	struct ras_manager *obj =
2333 		container_of(data, struct ras_manager, ih_data);
2334 
2335 	amdgpu_ras_interrupt_handler(obj);
2336 }
2337 
2338 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
2339 		struct ras_dispatch_if *info)
2340 {
2341 	struct ras_manager *obj;
2342 	struct ras_ih_data *data;
2343 
2344 	obj = amdgpu_ras_find_obj(adev, &info->head);
2345 	if (!obj)
2346 		return -EINVAL;
2347 
2348 	data = &obj->ih_data;
2349 
2350 	if (data->inuse == 0)
2351 		return 0;
2352 
2353 	/* Might be overflow... */
2354 	memcpy(&data->ring[data->wptr], info->entry,
2355 			data->element_size);
2356 
2357 	wmb();
2358 	data->wptr = (data->aligned_element_size +
2359 			data->wptr) % data->ring_size;
2360 
2361 	schedule_work(&data->ih_work);
2362 
2363 	return 0;
2364 }
2365 
2366 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
2367 		struct ras_common_if *head)
2368 {
2369 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2370 	struct ras_ih_data *data;
2371 
2372 	if (!obj)
2373 		return -EINVAL;
2374 
2375 	data = &obj->ih_data;
2376 	if (data->inuse == 0)
2377 		return 0;
2378 
2379 	cancel_work_sync(&data->ih_work);
2380 
2381 	kfree(data->ring);
2382 	memset(data, 0, sizeof(*data));
2383 	put_obj(obj);
2384 
2385 	return 0;
2386 }
2387 
2388 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
2389 		struct ras_common_if *head)
2390 {
2391 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2392 	struct ras_ih_data *data;
2393 	struct amdgpu_ras_block_object *ras_obj;
2394 
2395 	if (!obj) {
2396 		/* in case we registe the IH before enable ras feature */
2397 		obj = amdgpu_ras_create_obj(adev, head);
2398 		if (!obj)
2399 			return -EINVAL;
2400 	} else
2401 		get_obj(obj);
2402 
2403 	ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
2404 
2405 	data = &obj->ih_data;
2406 	/* add the callback.etc */
2407 	*data = (struct ras_ih_data) {
2408 		.inuse = 0,
2409 		.cb = ras_obj->ras_cb,
2410 		.element_size = sizeof(struct amdgpu_iv_entry),
2411 		.rptr = 0,
2412 		.wptr = 0,
2413 	};
2414 
2415 	INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
2416 
2417 	data->aligned_element_size = ALIGN(data->element_size, 8);
2418 	/* the ring can store 64 iv entries. */
2419 	data->ring_size = 64 * data->aligned_element_size;
2420 	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
2421 	if (!data->ring) {
2422 		put_obj(obj);
2423 		return -ENOMEM;
2424 	}
2425 
2426 	/* IH is ready */
2427 	data->inuse = 1;
2428 
2429 	return 0;
2430 }
2431 
2432 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
2433 {
2434 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2435 	struct ras_manager *obj, *tmp;
2436 
2437 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
2438 		amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
2439 	}
2440 
2441 	return 0;
2442 }
2443 /* ih end */
2444 
2445 /* traversal all IPs except NBIO to query error counter */
2446 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev, enum ras_event_type type)
2447 {
2448 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2449 	struct ras_manager *obj;
2450 
2451 	if (!adev->ras_enabled || !con)
2452 		return;
2453 
2454 	list_for_each_entry(obj, &con->head, node) {
2455 		struct ras_query_if info = {
2456 			.head = obj->head,
2457 		};
2458 
2459 		/*
2460 		 * PCIE_BIF IP has one different isr by ras controller
2461 		 * interrupt, the specific ras counter query will be
2462 		 * done in that isr. So skip such block from common
2463 		 * sync flood interrupt isr calling.
2464 		 */
2465 		if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
2466 			continue;
2467 
2468 		/*
2469 		 * this is a workaround for aldebaran, skip send msg to
2470 		 * smu to get ecc_info table due to smu handle get ecc
2471 		 * info table failed temporarily.
2472 		 * should be removed until smu fix handle ecc_info table.
2473 		 */
2474 		if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
2475 		    (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2476 		     IP_VERSION(13, 0, 2)))
2477 			continue;
2478 
2479 		amdgpu_ras_query_error_status_with_event(adev, &info, type);
2480 
2481 		if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2482 			    IP_VERSION(11, 0, 2) &&
2483 		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2484 			    IP_VERSION(11, 0, 4) &&
2485 		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2486 			    IP_VERSION(13, 0, 0)) {
2487 			if (amdgpu_ras_reset_error_status(adev, info.head.block))
2488 				dev_warn(adev->dev, "Failed to reset error counter and error status");
2489 		}
2490 	}
2491 }
2492 
2493 /* Parse RdRspStatus and WrRspStatus */
2494 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
2495 					  struct ras_query_if *info)
2496 {
2497 	struct amdgpu_ras_block_object *block_obj;
2498 	/*
2499 	 * Only two block need to query read/write
2500 	 * RspStatus at current state
2501 	 */
2502 	if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
2503 		(info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
2504 		return;
2505 
2506 	block_obj = amdgpu_ras_get_ras_block(adev,
2507 					info->head.block,
2508 					info->head.sub_block_index);
2509 
2510 	if (!block_obj || !block_obj->hw_ops) {
2511 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
2512 			     get_ras_block_str(&info->head));
2513 		return;
2514 	}
2515 
2516 	if (block_obj->hw_ops->query_ras_error_status)
2517 		block_obj->hw_ops->query_ras_error_status(adev);
2518 
2519 }
2520 
2521 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
2522 {
2523 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2524 	struct ras_manager *obj;
2525 
2526 	if (!adev->ras_enabled || !con)
2527 		return;
2528 
2529 	list_for_each_entry(obj, &con->head, node) {
2530 		struct ras_query_if info = {
2531 			.head = obj->head,
2532 		};
2533 
2534 		amdgpu_ras_error_status_query(adev, &info);
2535 	}
2536 }
2537 
2538 /* recovery begin */
2539 
2540 /* return 0 on success.
2541  * caller need free bps.
2542  */
2543 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
2544 		struct ras_badpage **bps, unsigned int *count)
2545 {
2546 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2547 	struct ras_err_handler_data *data;
2548 	int i = 0;
2549 	int ret = 0, status;
2550 
2551 	if (!con || !con->eh_data || !bps || !count)
2552 		return -EINVAL;
2553 
2554 	mutex_lock(&con->recovery_lock);
2555 	data = con->eh_data;
2556 	if (!data || data->count == 0) {
2557 		*bps = NULL;
2558 		ret = -EINVAL;
2559 		goto out;
2560 	}
2561 
2562 	*bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
2563 	if (!*bps) {
2564 		ret = -ENOMEM;
2565 		goto out;
2566 	}
2567 
2568 	for (; i < data->count; i++) {
2569 		(*bps)[i] = (struct ras_badpage){
2570 			.bp = data->bps[i].retired_page,
2571 			.size = AMDGPU_GPU_PAGE_SIZE,
2572 			.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
2573 		};
2574 		status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
2575 				data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);
2576 		if (status == -EBUSY)
2577 			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
2578 		else if (status == -ENOENT)
2579 			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
2580 	}
2581 
2582 	*count = data->count;
2583 out:
2584 	mutex_unlock(&con->recovery_lock);
2585 	return ret;
2586 }
2587 
2588 static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
2589 				   struct amdgpu_hive_info *hive, bool status)
2590 {
2591 	struct amdgpu_device *tmp_adev;
2592 
2593 	if (hive) {
2594 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
2595 			amdgpu_ras_set_fed(tmp_adev, status);
2596 	} else {
2597 		amdgpu_ras_set_fed(adev, status);
2598 	}
2599 }
2600 
2601 bool amdgpu_ras_in_recovery(struct amdgpu_device *adev)
2602 {
2603 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2604 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2605 	int hive_ras_recovery = 0;
2606 
2607 	if (hive) {
2608 		hive_ras_recovery = atomic_read(&hive->ras_recovery);
2609 		amdgpu_put_xgmi_hive(hive);
2610 	}
2611 
2612 	if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
2613 		return true;
2614 
2615 	return false;
2616 }
2617 
2618 static enum ras_event_type amdgpu_ras_get_fatal_error_event(struct amdgpu_device *adev)
2619 {
2620 	if (amdgpu_ras_intr_triggered())
2621 		return RAS_EVENT_TYPE_FATAL;
2622 	else
2623 		return RAS_EVENT_TYPE_POISON_CONSUMPTION;
2624 }
2625 
2626 static void amdgpu_ras_do_recovery(struct work_struct *work)
2627 {
2628 	struct amdgpu_ras *ras =
2629 		container_of(work, struct amdgpu_ras, recovery_work);
2630 	struct amdgpu_device *remote_adev = NULL;
2631 	struct amdgpu_device *adev = ras->adev;
2632 	struct list_head device_list, *device_list_handle =  NULL;
2633 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2634 	enum ras_event_type type;
2635 
2636 	if (hive) {
2637 		atomic_set(&hive->ras_recovery, 1);
2638 
2639 		/* If any device which is part of the hive received RAS fatal
2640 		 * error interrupt, set fatal error status on all. This
2641 		 * condition will need a recovery, and flag will be cleared
2642 		 * as part of recovery.
2643 		 */
2644 		list_for_each_entry(remote_adev, &hive->device_list,
2645 				    gmc.xgmi.head)
2646 			if (amdgpu_ras_get_fed_status(remote_adev)) {
2647 				amdgpu_ras_set_fed_all(adev, hive, true);
2648 				break;
2649 			}
2650 	}
2651 	if (!ras->disable_ras_err_cnt_harvest) {
2652 
2653 		/* Build list of devices to query RAS related errors */
2654 		if  (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
2655 			device_list_handle = &hive->device_list;
2656 		} else {
2657 			INIT_LIST_HEAD(&device_list);
2658 			list_add_tail(&adev->gmc.xgmi.head, &device_list);
2659 			device_list_handle = &device_list;
2660 		}
2661 
2662 		type = amdgpu_ras_get_fatal_error_event(adev);
2663 		list_for_each_entry(remote_adev,
2664 				device_list_handle, gmc.xgmi.head) {
2665 			amdgpu_ras_query_err_status(remote_adev);
2666 			amdgpu_ras_log_on_err_counter(remote_adev, type);
2667 		}
2668 
2669 	}
2670 
2671 	if (amdgpu_device_should_recover_gpu(ras->adev)) {
2672 		struct amdgpu_reset_context reset_context;
2673 		memset(&reset_context, 0, sizeof(reset_context));
2674 
2675 		reset_context.method = AMD_RESET_METHOD_NONE;
2676 		reset_context.reset_req_dev = adev;
2677 		reset_context.src = AMDGPU_RESET_SRC_RAS;
2678 		set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
2679 
2680 		/* Perform full reset in fatal error mode */
2681 		if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
2682 			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2683 		else {
2684 			clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2685 
2686 			if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
2687 				ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
2688 				reset_context.method = AMD_RESET_METHOD_MODE2;
2689 			}
2690 
2691 			/* Fatal error occurs in poison mode, mode1 reset is used to
2692 			 * recover gpu.
2693 			 */
2694 			if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
2695 				ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2696 				set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2697 
2698 				psp_fatal_error_recovery_quirk(&adev->psp);
2699 			}
2700 		}
2701 
2702 		amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2703 	}
2704 	atomic_set(&ras->in_recovery, 0);
2705 	if (hive) {
2706 		atomic_set(&hive->ras_recovery, 0);
2707 		amdgpu_put_xgmi_hive(hive);
2708 	}
2709 }
2710 
2711 /* alloc/realloc bps array */
2712 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2713 		struct ras_err_handler_data *data, int pages)
2714 {
2715 	unsigned int old_space = data->count + data->space_left;
2716 	unsigned int new_space = old_space + pages;
2717 	unsigned int align_space = ALIGN(new_space, 512);
2718 	void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
2719 
2720 	if (!bps) {
2721 		return -ENOMEM;
2722 	}
2723 
2724 	if (data->bps) {
2725 		memcpy(bps, data->bps,
2726 				data->count * sizeof(*data->bps));
2727 		kfree(data->bps);
2728 	}
2729 
2730 	data->bps = bps;
2731 	data->space_left += align_space - old_space;
2732 	return 0;
2733 }
2734 
2735 static int amdgpu_ras_mca2pa_by_idx(struct amdgpu_device *adev,
2736 			struct eeprom_table_record *bps,
2737 			struct ras_err_data *err_data)
2738 {
2739 	struct ta_ras_query_address_input addr_in;
2740 	uint32_t socket = 0;
2741 	int ret = 0;
2742 
2743 	if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
2744 		socket = adev->smuio.funcs->get_socket_id(adev);
2745 
2746 	/* reinit err_data */
2747 	err_data->err_addr_cnt = 0;
2748 	err_data->err_addr_len = adev->umc.retire_unit;
2749 
2750 	memset(&addr_in, 0, sizeof(addr_in));
2751 	addr_in.ma.err_addr = bps->address;
2752 	addr_in.ma.socket_id = socket;
2753 	addr_in.ma.ch_inst = bps->mem_channel;
2754 	/* tell RAS TA the node instance is not used */
2755 	addr_in.ma.node_inst = TA_RAS_INV_NODE;
2756 
2757 	if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
2758 		ret = adev->umc.ras->convert_ras_err_addr(adev, err_data,
2759 				&addr_in, NULL, false);
2760 
2761 	return ret;
2762 }
2763 
2764 static int amdgpu_ras_mca2pa(struct amdgpu_device *adev,
2765 			struct eeprom_table_record *bps,
2766 			struct ras_err_data *err_data)
2767 {
2768 	struct ta_ras_query_address_input addr_in;
2769 	uint32_t die_id, socket = 0;
2770 
2771 	if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
2772 		socket = adev->smuio.funcs->get_socket_id(adev);
2773 
2774 	/* although die id is gotten from PA in nps1 mode, the id is
2775 	 * fitable for any nps mode
2776 	 */
2777 	if (adev->umc.ras && adev->umc.ras->get_die_id_from_pa)
2778 		die_id = adev->umc.ras->get_die_id_from_pa(adev, bps->address,
2779 					bps->retired_page << AMDGPU_GPU_PAGE_SHIFT);
2780 	else
2781 		return -EINVAL;
2782 
2783 	/* reinit err_data */
2784 	err_data->err_addr_cnt = 0;
2785 	err_data->err_addr_len = adev->umc.retire_unit;
2786 
2787 	memset(&addr_in, 0, sizeof(addr_in));
2788 	addr_in.ma.err_addr = bps->address;
2789 	addr_in.ma.ch_inst = bps->mem_channel;
2790 	addr_in.ma.umc_inst = bps->mcumc_id;
2791 	addr_in.ma.node_inst = die_id;
2792 	addr_in.ma.socket_id = socket;
2793 
2794 	if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
2795 		return adev->umc.ras->convert_ras_err_addr(adev, err_data,
2796 					&addr_in, NULL, false);
2797 	else
2798 		return  -EINVAL;
2799 }
2800 
2801 static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev,
2802 					struct eeprom_table_record *bps, int count)
2803 {
2804 	int j;
2805 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2806 	struct ras_err_handler_data *data = con->eh_data;
2807 
2808 	for (j = 0; j < count; j++) {
2809 		if (amdgpu_ras_check_bad_page_unlock(con,
2810 			bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2811 			continue;
2812 
2813 		if (!data->space_left &&
2814 		    amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2815 			return -ENOMEM;
2816 		}
2817 
2818 		amdgpu_ras_reserve_page(adev, bps[j].retired_page);
2819 
2820 		memcpy(&data->bps[data->count], &(bps[j]),
2821 				sizeof(struct eeprom_table_record));
2822 		data->count++;
2823 		data->space_left--;
2824 	}
2825 
2826 	return 0;
2827 }
2828 
2829 static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev,
2830 				struct eeprom_table_record *bps, struct ras_err_data *err_data,
2831 				enum amdgpu_memory_partition nps)
2832 {
2833 	int i = 0;
2834 	enum amdgpu_memory_partition save_nps;
2835 
2836 	save_nps = (bps[0].retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
2837 
2838 	/*old asics just have pa in eeprom*/
2839 	if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
2840 		memcpy(err_data->err_addr, bps,
2841 			sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
2842 		goto out;
2843 	}
2844 
2845 	for (i = 0; i < adev->umc.retire_unit; i++)
2846 		bps[i].retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
2847 
2848 	if (save_nps) {
2849 		if (save_nps == nps) {
2850 			if (amdgpu_umc_pages_in_a_row(adev, err_data,
2851 					bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2852 				return -EINVAL;
2853 		} else {
2854 			if (amdgpu_ras_mca2pa_by_idx(adev, &bps[0], err_data))
2855 				return -EINVAL;
2856 		}
2857 	} else {
2858 		if (amdgpu_ras_mca2pa(adev, &bps[0], err_data)) {
2859 			if (nps == AMDGPU_NPS1_PARTITION_MODE)
2860 				memcpy(err_data->err_addr, bps,
2861 					sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
2862 			else
2863 				return -EOPNOTSUPP;
2864 		}
2865 	}
2866 
2867 out:
2868 	return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr, adev->umc.retire_unit);
2869 }
2870 
2871 static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
2872 				struct eeprom_table_record *bps, struct ras_err_data *err_data,
2873 				enum amdgpu_memory_partition nps)
2874 {
2875 	enum amdgpu_memory_partition save_nps;
2876 
2877 	save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
2878 	bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
2879 
2880 	if (save_nps == nps) {
2881 		if (amdgpu_umc_pages_in_a_row(adev, err_data,
2882 				bps->retired_page << AMDGPU_GPU_PAGE_SHIFT))
2883 			return -EINVAL;
2884 	} else {
2885 		if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
2886 			return -EINVAL;
2887 	}
2888 	return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr,
2889 									adev->umc.retire_unit);
2890 }
2891 
2892 /* it deal with vram only. */
2893 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
2894 		struct eeprom_table_record *bps, int pages, bool from_rom)
2895 {
2896 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2897 	struct ras_err_data err_data;
2898 	struct amdgpu_ras_eeprom_control *control =
2899 			&adev->psp.ras_context.ras->eeprom_control;
2900 	enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
2901 	int ret = 0;
2902 	uint32_t i;
2903 
2904 	if (!con || !con->eh_data || !bps || pages <= 0)
2905 		return 0;
2906 
2907 	if (from_rom) {
2908 		err_data.err_addr =
2909 			kcalloc(adev->umc.retire_unit,
2910 				sizeof(struct eeprom_table_record), GFP_KERNEL);
2911 		if (!err_data.err_addr) {
2912 			dev_warn(adev->dev, "Failed to alloc UMC error address record in mca2pa conversion!\n");
2913 			return -ENOMEM;
2914 		}
2915 
2916 		if (adev->gmc.gmc_funcs->query_mem_partition_mode)
2917 			nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
2918 	}
2919 
2920 	mutex_lock(&con->recovery_lock);
2921 
2922 	if (from_rom) {
2923 		for (i = 0; i < pages; i++) {
2924 			if (control->ras_num_recs - i >= adev->umc.retire_unit) {
2925 				if ((bps[i].address == bps[i + 1].address) &&
2926 				    (bps[i].mem_channel == bps[i + 1].mem_channel)) {
2927 					//deal with retire_unit records a time
2928 					ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
2929 									&bps[i], &err_data, nps);
2930 					if (ret)
2931 						goto free;
2932 					i += (adev->umc.retire_unit - 1);
2933 				} else {
2934 					break;
2935 				}
2936 			} else {
2937 				break;
2938 			}
2939 		}
2940 		for (; i < pages; i++) {
2941 			ret = __amdgpu_ras_convert_rec_from_rom(adev,
2942 				&bps[i], &err_data, nps);
2943 			if (ret)
2944 				goto free;
2945 		}
2946 	} else {
2947 		ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
2948 	}
2949 
2950 free:
2951 	if (from_rom)
2952 		kfree(err_data.err_addr);
2953 	mutex_unlock(&con->recovery_lock);
2954 
2955 	return ret;
2956 }
2957 
2958 /*
2959  * write error record array to eeprom, the function should be
2960  * protected by recovery_lock
2961  * new_cnt: new added UE count, excluding reserved bad pages, can be NULL
2962  */
2963 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
2964 		unsigned long *new_cnt)
2965 {
2966 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2967 	struct ras_err_handler_data *data;
2968 	struct amdgpu_ras_eeprom_control *control;
2969 	int save_count, unit_num, bad_page_num, i;
2970 
2971 	if (!con || !con->eh_data) {
2972 		if (new_cnt)
2973 			*new_cnt = 0;
2974 
2975 		return 0;
2976 	}
2977 
2978 	mutex_lock(&con->recovery_lock);
2979 	control = &con->eeprom_control;
2980 	data = con->eh_data;
2981 	bad_page_num = control->ras_num_bad_pages;
2982 	save_count = data->count - bad_page_num;
2983 	mutex_unlock(&con->recovery_lock);
2984 
2985 	unit_num = save_count / adev->umc.retire_unit;
2986 	if (new_cnt)
2987 		*new_cnt = unit_num;
2988 
2989 	/* only new entries are saved */
2990 	if (save_count > 0) {
2991 		/*old asics only save pa to eeprom like before*/
2992 		if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
2993 			if (amdgpu_ras_eeprom_append(control,
2994 					&data->bps[bad_page_num], save_count)) {
2995 				dev_err(adev->dev, "Failed to save EEPROM table data!");
2996 				return -EIO;
2997 			}
2998 		} else {
2999 			for (i = 0; i < unit_num; i++) {
3000 				if (amdgpu_ras_eeprom_append(control,
3001 						&data->bps[bad_page_num +
3002 						i * adev->umc.retire_unit], 1)) {
3003 					dev_err(adev->dev, "Failed to save EEPROM table data!");
3004 					return -EIO;
3005 				}
3006 			}
3007 		}
3008 
3009 		dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
3010 	}
3011 
3012 	return 0;
3013 }
3014 
3015 /*
3016  * read error record array in eeprom and reserve enough space for
3017  * storing new bad pages
3018  */
3019 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
3020 {
3021 	struct amdgpu_ras_eeprom_control *control =
3022 		&adev->psp.ras_context.ras->eeprom_control;
3023 	struct eeprom_table_record *bps;
3024 	int ret, i = 0;
3025 
3026 	/* no bad page record, skip eeprom access */
3027 	if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
3028 		return 0;
3029 
3030 	bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
3031 	if (!bps)
3032 		return -ENOMEM;
3033 
3034 	ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
3035 	if (ret) {
3036 		dev_err(adev->dev, "Failed to load EEPROM table records!");
3037 	} else {
3038 		if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
3039 			for (i = 0; i < control->ras_num_recs; i++) {
3040 				if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
3041 					if ((bps[i].address == bps[i + 1].address) &&
3042 						(bps[i].mem_channel == bps[i + 1].mem_channel)) {
3043 						control->ras_num_pa_recs += adev->umc.retire_unit;
3044 						i += (adev->umc.retire_unit - 1);
3045 					} else {
3046 						control->ras_num_mca_recs +=
3047 									(control->ras_num_recs - i);
3048 						break;
3049 					}
3050 				} else {
3051 					control->ras_num_mca_recs += (control->ras_num_recs - i);
3052 					break;
3053 				}
3054 			}
3055 		}
3056 
3057 		ret = amdgpu_ras_eeprom_check(control);
3058 		if (ret)
3059 			goto out;
3060 
3061 		/* HW not usable */
3062 		if (amdgpu_ras_is_rma(adev)) {
3063 			ret = -EHWPOISON;
3064 			goto out;
3065 		}
3066 
3067 		ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
3068 	}
3069 
3070 out:
3071 	kfree(bps);
3072 	return ret;
3073 }
3074 
3075 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
3076 				uint64_t addr)
3077 {
3078 	struct ras_err_handler_data *data = con->eh_data;
3079 	int i;
3080 
3081 	addr >>= AMDGPU_GPU_PAGE_SHIFT;
3082 	for (i = 0; i < data->count; i++)
3083 		if (addr == data->bps[i].retired_page)
3084 			return true;
3085 
3086 	return false;
3087 }
3088 
3089 /*
3090  * check if an address belongs to bad page
3091  *
3092  * Note: this check is only for umc block
3093  */
3094 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
3095 				uint64_t addr)
3096 {
3097 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3098 	bool ret = false;
3099 
3100 	if (!con || !con->eh_data)
3101 		return ret;
3102 
3103 	mutex_lock(&con->recovery_lock);
3104 	ret = amdgpu_ras_check_bad_page_unlock(con, addr);
3105 	mutex_unlock(&con->recovery_lock);
3106 	return ret;
3107 }
3108 
3109 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
3110 					  uint32_t max_count)
3111 {
3112 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3113 
3114 	/*
3115 	 * amdgpu_bad_page_threshold is used to config
3116 	 * the threshold for the number of bad pages.
3117 	 * -1:  Threshold is set to default value
3118 	 *      Driver will issue a warning message when threshold is reached
3119 	 *      and continue runtime services.
3120 	 * 0:   Disable bad page retirement
3121 	 *      Driver will not retire bad pages
3122 	 *      which is intended for debugging purpose.
3123 	 * -2:  Threshold is determined by a formula
3124 	 *      that assumes 1 bad page per 100M of local memory.
3125 	 *      Driver will continue runtime services when threhold is reached.
3126 	 * 0 < threshold < max number of bad page records in EEPROM,
3127 	 *      A user-defined threshold is set
3128 	 *      Driver will halt runtime services when this custom threshold is reached.
3129 	 */
3130 	if (amdgpu_bad_page_threshold == -2) {
3131 		u64 val = adev->gmc.mc_vram_size;
3132 
3133 		do_div(val, RAS_BAD_PAGE_COVER);
3134 		con->bad_page_cnt_threshold = min(lower_32_bits(val),
3135 						  max_count);
3136 	} else if (amdgpu_bad_page_threshold == -1) {
3137 		con->bad_page_cnt_threshold = ((con->reserved_pages_in_bytes) >> 21) << 4;
3138 	} else {
3139 		con->bad_page_cnt_threshold = min_t(int, max_count,
3140 						    amdgpu_bad_page_threshold);
3141 	}
3142 }
3143 
3144 int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
3145 		enum amdgpu_ras_block block, uint16_t pasid,
3146 		pasid_notify pasid_fn, void *data, uint32_t reset)
3147 {
3148 	int ret = 0;
3149 	struct ras_poison_msg poison_msg;
3150 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3151 
3152 	memset(&poison_msg, 0, sizeof(poison_msg));
3153 	poison_msg.block = block;
3154 	poison_msg.pasid = pasid;
3155 	poison_msg.reset = reset;
3156 	poison_msg.pasid_fn = pasid_fn;
3157 	poison_msg.data = data;
3158 
3159 	ret = kfifo_put(&con->poison_fifo, poison_msg);
3160 	if (!ret) {
3161 		dev_err(adev->dev, "Poison message fifo is full!\n");
3162 		return -ENOSPC;
3163 	}
3164 
3165 	return 0;
3166 }
3167 
3168 static int amdgpu_ras_get_poison_req(struct amdgpu_device *adev,
3169 		struct ras_poison_msg *poison_msg)
3170 {
3171 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3172 
3173 	return kfifo_get(&con->poison_fifo, poison_msg);
3174 }
3175 
3176 static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
3177 {
3178 	mutex_init(&ecc_log->lock);
3179 
3180 	INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
3181 	ecc_log->de_queried_count = 0;
3182 	ecc_log->prev_de_queried_count = 0;
3183 }
3184 
3185 static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
3186 {
3187 	struct radix_tree_iter iter;
3188 	void __rcu **slot;
3189 	struct ras_ecc_err *ecc_err;
3190 
3191 	mutex_lock(&ecc_log->lock);
3192 	radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) {
3193 		ecc_err = radix_tree_deref_slot(slot);
3194 		kfree(ecc_err->err_pages.pfn);
3195 		kfree(ecc_err);
3196 		radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot);
3197 	}
3198 	mutex_unlock(&ecc_log->lock);
3199 
3200 	mutex_destroy(&ecc_log->lock);
3201 	ecc_log->de_queried_count = 0;
3202 	ecc_log->prev_de_queried_count = 0;
3203 }
3204 
3205 static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con,
3206 				uint32_t delayed_ms)
3207 {
3208 	int ret;
3209 
3210 	mutex_lock(&con->umc_ecc_log.lock);
3211 	ret = radix_tree_tagged(&con->umc_ecc_log.de_page_tree,
3212 			UMC_ECC_NEW_DETECTED_TAG);
3213 	mutex_unlock(&con->umc_ecc_log.lock);
3214 
3215 	if (ret)
3216 		schedule_delayed_work(&con->page_retirement_dwork,
3217 			msecs_to_jiffies(delayed_ms));
3218 
3219 	return ret ? true : false;
3220 }
3221 
3222 static void amdgpu_ras_do_page_retirement(struct work_struct *work)
3223 {
3224 	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
3225 					      page_retirement_dwork.work);
3226 	struct amdgpu_device *adev = con->adev;
3227 	struct ras_err_data err_data;
3228 	unsigned long err_cnt;
3229 
3230 	/* If gpu reset is ongoing, delay retiring the bad pages */
3231 	if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
3232 		amdgpu_ras_schedule_retirement_dwork(con,
3233 				AMDGPU_RAS_RETIRE_PAGE_INTERVAL * 3);
3234 		return;
3235 	}
3236 
3237 	amdgpu_ras_error_data_init(&err_data);
3238 
3239 	amdgpu_umc_handle_bad_pages(adev, &err_data);
3240 	err_cnt = err_data.err_addr_cnt;
3241 
3242 	amdgpu_ras_error_data_fini(&err_data);
3243 
3244 	if (err_cnt && amdgpu_ras_is_rma(adev))
3245 		amdgpu_ras_reset_gpu(adev);
3246 
3247 	amdgpu_ras_schedule_retirement_dwork(con,
3248 			AMDGPU_RAS_RETIRE_PAGE_INTERVAL);
3249 }
3250 
3251 static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
3252 				uint32_t poison_creation_count)
3253 {
3254 	int ret = 0;
3255 	struct ras_ecc_log_info *ecc_log;
3256 	struct ras_query_if info;
3257 	uint32_t timeout = 0;
3258 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3259 	uint64_t de_queried_count;
3260 	uint32_t new_detect_count, total_detect_count;
3261 	uint32_t need_query_count = poison_creation_count;
3262 	bool query_data_timeout = false;
3263 	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
3264 
3265 	memset(&info, 0, sizeof(info));
3266 	info.head.block = AMDGPU_RAS_BLOCK__UMC;
3267 
3268 	ecc_log = &ras->umc_ecc_log;
3269 	total_detect_count = 0;
3270 	do {
3271 		ret = amdgpu_ras_query_error_status_with_event(adev, &info, type);
3272 		if (ret)
3273 			return ret;
3274 
3275 		de_queried_count = ecc_log->de_queried_count;
3276 		if (de_queried_count > ecc_log->prev_de_queried_count) {
3277 			new_detect_count = de_queried_count - ecc_log->prev_de_queried_count;
3278 			ecc_log->prev_de_queried_count = de_queried_count;
3279 			timeout = 0;
3280 		} else {
3281 			new_detect_count = 0;
3282 		}
3283 
3284 		if (new_detect_count) {
3285 			total_detect_count += new_detect_count;
3286 		} else {
3287 			if (!timeout && need_query_count)
3288 				timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
3289 
3290 			if (timeout) {
3291 				if (!--timeout) {
3292 					query_data_timeout = true;
3293 					break;
3294 				}
3295 				msleep(1);
3296 			}
3297 		}
3298 	} while (total_detect_count < need_query_count);
3299 
3300 	if (query_data_timeout) {
3301 		dev_warn(adev->dev, "Can't find deferred error! count: %u\n",
3302 			(need_query_count - total_detect_count));
3303 		return -ENOENT;
3304 	}
3305 
3306 	if (total_detect_count)
3307 		schedule_delayed_work(&ras->page_retirement_dwork, 0);
3308 
3309 	return 0;
3310 }
3311 
3312 static void amdgpu_ras_clear_poison_fifo(struct amdgpu_device *adev)
3313 {
3314 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3315 	struct ras_poison_msg msg;
3316 	int ret;
3317 
3318 	do {
3319 		ret = kfifo_get(&con->poison_fifo, &msg);
3320 	} while (ret);
3321 }
3322 
3323 static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
3324 			uint32_t msg_count, uint32_t *gpu_reset)
3325 {
3326 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3327 	uint32_t reset_flags = 0, reset = 0;
3328 	struct ras_poison_msg msg;
3329 	int ret, i;
3330 
3331 	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
3332 
3333 	for (i = 0; i < msg_count; i++) {
3334 		ret = amdgpu_ras_get_poison_req(adev, &msg);
3335 		if (!ret)
3336 			continue;
3337 
3338 		if (msg.pasid_fn)
3339 			msg.pasid_fn(adev, msg.pasid, msg.data);
3340 
3341 		reset_flags |= msg.reset;
3342 	}
3343 
3344 	/* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
3345 	if (reset_flags && !amdgpu_ras_is_rma(adev)) {
3346 		if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
3347 			reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
3348 		else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET)
3349 			reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
3350 		else
3351 			reset = reset_flags;
3352 
3353 		flush_delayed_work(&con->page_retirement_dwork);
3354 
3355 		con->gpu_reset_flags |= reset;
3356 		amdgpu_ras_reset_gpu(adev);
3357 
3358 		*gpu_reset = reset;
3359 
3360 		/* Wait for gpu recovery to complete */
3361 		flush_work(&con->recovery_work);
3362 	}
3363 
3364 	return 0;
3365 }
3366 
3367 static int amdgpu_ras_page_retirement_thread(void *param)
3368 {
3369 	struct amdgpu_device *adev = (struct amdgpu_device *)param;
3370 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3371 	uint32_t poison_creation_count, msg_count;
3372 	uint32_t gpu_reset;
3373 	int ret;
3374 
3375 	while (!kthread_should_stop()) {
3376 
3377 		wait_event_interruptible(con->page_retirement_wq,
3378 				kthread_should_stop() ||
3379 				atomic_read(&con->page_retirement_req_cnt));
3380 
3381 		if (kthread_should_stop())
3382 			break;
3383 
3384 		gpu_reset = 0;
3385 
3386 		do {
3387 			poison_creation_count = atomic_read(&con->poison_creation_count);
3388 			ret = amdgpu_ras_poison_creation_handler(adev, poison_creation_count);
3389 			if (ret == -EIO)
3390 				break;
3391 
3392 			if (poison_creation_count) {
3393 				atomic_sub(poison_creation_count, &con->poison_creation_count);
3394 				atomic_sub(poison_creation_count, &con->page_retirement_req_cnt);
3395 			}
3396 		} while (atomic_read(&con->poison_creation_count));
3397 
3398 		if (ret != -EIO) {
3399 			msg_count = kfifo_len(&con->poison_fifo);
3400 			if (msg_count) {
3401 				ret = amdgpu_ras_poison_consumption_handler(adev,
3402 						msg_count, &gpu_reset);
3403 				if ((ret != -EIO) &&
3404 				    (gpu_reset != AMDGPU_RAS_GPU_RESET_MODE1_RESET))
3405 					atomic_sub(msg_count, &con->page_retirement_req_cnt);
3406 			}
3407 		}
3408 
3409 		if ((ret == -EIO) || (gpu_reset == AMDGPU_RAS_GPU_RESET_MODE1_RESET)) {
3410 			/* gpu mode-1 reset is ongoing or just completed ras mode-1 reset */
3411 			/* Clear poison creation request */
3412 			atomic_set(&con->poison_creation_count, 0);
3413 
3414 			/* Clear poison fifo */
3415 			amdgpu_ras_clear_poison_fifo(adev);
3416 
3417 			/* Clear all poison requests */
3418 			atomic_set(&con->page_retirement_req_cnt, 0);
3419 
3420 			if (ret == -EIO) {
3421 				/* Wait for mode-1 reset to complete */
3422 				down_read(&adev->reset_domain->sem);
3423 				up_read(&adev->reset_domain->sem);
3424 			}
3425 
3426 			/* Wake up work to save bad pages to eeprom */
3427 			schedule_delayed_work(&con->page_retirement_dwork, 0);
3428 		} else if (gpu_reset) {
3429 			/* gpu just completed mode-2 reset or other reset */
3430 			/* Clear poison consumption messages cached in fifo */
3431 			msg_count = kfifo_len(&con->poison_fifo);
3432 			if (msg_count) {
3433 				amdgpu_ras_clear_poison_fifo(adev);
3434 				atomic_sub(msg_count, &con->page_retirement_req_cnt);
3435 			}
3436 
3437 			/* Wake up work to save bad pages to eeprom */
3438 			schedule_delayed_work(&con->page_retirement_dwork, 0);
3439 		}
3440 	}
3441 
3442 	return 0;
3443 }
3444 
3445 int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
3446 {
3447 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3448 	struct amdgpu_ras_eeprom_control *control;
3449 	int ret;
3450 
3451 	if (!con || amdgpu_sriov_vf(adev))
3452 		return 0;
3453 
3454 	control = &con->eeprom_control;
3455 	ret = amdgpu_ras_eeprom_init(control);
3456 	if (ret)
3457 		return ret;
3458 
3459 	if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
3460 		control->ras_num_pa_recs = control->ras_num_recs;
3461 
3462 	if (control->ras_num_recs) {
3463 		ret = amdgpu_ras_load_bad_pages(adev);
3464 		if (ret)
3465 			return ret;
3466 
3467 		amdgpu_dpm_send_hbm_bad_pages_num(
3468 			adev, control->ras_num_bad_pages);
3469 
3470 		if (con->update_channel_flag == true) {
3471 			amdgpu_dpm_send_hbm_bad_channel_flag(
3472 				adev, control->bad_channel_bitmap);
3473 			con->update_channel_flag = false;
3474 		}
3475 
3476 		/* The format action is only applied to new ASICs */
3477 		if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) >= 12 &&
3478 		    control->tbl_hdr.version < RAS_TABLE_VER_V3)
3479 			if (!amdgpu_ras_eeprom_reset_table(control))
3480 				if (amdgpu_ras_save_bad_pages(adev, NULL))
3481 					dev_warn(adev->dev, "Failed to format RAS EEPROM data in V3 version!\n");
3482 	}
3483 
3484 	return ret;
3485 }
3486 
3487 int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
3488 {
3489 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3490 	struct ras_err_handler_data **data;
3491 	u32  max_eeprom_records_count = 0;
3492 	int ret;
3493 
3494 	if (!con || amdgpu_sriov_vf(adev))
3495 		return 0;
3496 
3497 	/* Allow access to RAS EEPROM via debugfs, when the ASIC
3498 	 * supports RAS and debugfs is enabled, but when
3499 	 * adev->ras_enabled is unset, i.e. when "ras_enable"
3500 	 * module parameter is set to 0.
3501 	 */
3502 	con->adev = adev;
3503 
3504 	if (!adev->ras_enabled)
3505 		return 0;
3506 
3507 	data = &con->eh_data;
3508 	*data = kzalloc(sizeof(**data), GFP_KERNEL);
3509 	if (!*data) {
3510 		ret = -ENOMEM;
3511 		goto out;
3512 	}
3513 
3514 	mutex_init(&con->recovery_lock);
3515 	INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
3516 	atomic_set(&con->in_recovery, 0);
3517 	con->eeprom_control.bad_channel_bitmap = 0;
3518 
3519 	max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
3520 	amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
3521 
3522 	if (init_bp_info) {
3523 		ret = amdgpu_ras_init_badpage_info(adev);
3524 		if (ret)
3525 			goto free;
3526 	}
3527 
3528 	mutex_init(&con->page_rsv_lock);
3529 	INIT_KFIFO(con->poison_fifo);
3530 	mutex_init(&con->page_retirement_lock);
3531 	init_waitqueue_head(&con->page_retirement_wq);
3532 	atomic_set(&con->page_retirement_req_cnt, 0);
3533 	atomic_set(&con->poison_creation_count, 0);
3534 	con->page_retirement_thread =
3535 		kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
3536 	if (IS_ERR(con->page_retirement_thread)) {
3537 		con->page_retirement_thread = NULL;
3538 		dev_warn(adev->dev, "Failed to create umc_page_retirement thread!!!\n");
3539 	}
3540 
3541 	INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement);
3542 	amdgpu_ras_ecc_log_init(&con->umc_ecc_log);
3543 #ifdef CONFIG_X86_MCE_AMD
3544 	if ((adev->asic_type == CHIP_ALDEBARAN) &&
3545 	    (adev->gmc.xgmi.connected_to_cpu))
3546 		amdgpu_register_bad_pages_mca_notifier(adev);
3547 #endif
3548 	return 0;
3549 
3550 free:
3551 	kfree((*data)->bps);
3552 	kfree(*data);
3553 	con->eh_data = NULL;
3554 out:
3555 	dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
3556 
3557 	/*
3558 	 * Except error threshold exceeding case, other failure cases in this
3559 	 * function would not fail amdgpu driver init.
3560 	 */
3561 	if (!amdgpu_ras_is_rma(adev))
3562 		ret = 0;
3563 	else
3564 		ret = -EINVAL;
3565 
3566 	return ret;
3567 }
3568 
3569 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
3570 {
3571 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3572 	struct ras_err_handler_data *data = con->eh_data;
3573 	int max_flush_timeout = MAX_FLUSH_RETIRE_DWORK_TIMES;
3574 	bool ret;
3575 
3576 	/* recovery_init failed to init it, fini is useless */
3577 	if (!data)
3578 		return 0;
3579 
3580 	/* Save all cached bad pages to eeprom */
3581 	do {
3582 		flush_delayed_work(&con->page_retirement_dwork);
3583 		ret = amdgpu_ras_schedule_retirement_dwork(con, 0);
3584 	} while (ret && max_flush_timeout--);
3585 
3586 	if (con->page_retirement_thread)
3587 		kthread_stop(con->page_retirement_thread);
3588 
3589 	atomic_set(&con->page_retirement_req_cnt, 0);
3590 	atomic_set(&con->poison_creation_count, 0);
3591 
3592 	mutex_destroy(&con->page_rsv_lock);
3593 
3594 	cancel_work_sync(&con->recovery_work);
3595 
3596 	cancel_delayed_work_sync(&con->page_retirement_dwork);
3597 
3598 	amdgpu_ras_ecc_log_fini(&con->umc_ecc_log);
3599 
3600 	mutex_lock(&con->recovery_lock);
3601 	con->eh_data = NULL;
3602 	kfree(data->bps);
3603 	kfree(data);
3604 	mutex_unlock(&con->recovery_lock);
3605 
3606 	return 0;
3607 }
3608 /* recovery end */
3609 
3610 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
3611 {
3612 	if (amdgpu_sriov_vf(adev)) {
3613 		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3614 		case IP_VERSION(13, 0, 2):
3615 		case IP_VERSION(13, 0, 6):
3616 		case IP_VERSION(13, 0, 12):
3617 		case IP_VERSION(13, 0, 14):
3618 			return true;
3619 		default:
3620 			return false;
3621 		}
3622 	}
3623 
3624 	if (adev->asic_type == CHIP_IP_DISCOVERY) {
3625 		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3626 		case IP_VERSION(13, 0, 0):
3627 		case IP_VERSION(13, 0, 6):
3628 		case IP_VERSION(13, 0, 10):
3629 		case IP_VERSION(13, 0, 12):
3630 		case IP_VERSION(13, 0, 14):
3631 		case IP_VERSION(14, 0, 3):
3632 			return true;
3633 		default:
3634 			return false;
3635 		}
3636 	}
3637 
3638 	return adev->asic_type == CHIP_VEGA10 ||
3639 		adev->asic_type == CHIP_VEGA20 ||
3640 		adev->asic_type == CHIP_ARCTURUS ||
3641 		adev->asic_type == CHIP_ALDEBARAN ||
3642 		adev->asic_type == CHIP_SIENNA_CICHLID;
3643 }
3644 
3645 /*
3646  * this is workaround for vega20 workstation sku,
3647  * force enable gfx ras, ignore vbios gfx ras flag
3648  * due to GC EDC can not write
3649  */
3650 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
3651 {
3652 	struct atom_context *ctx = adev->mode_info.atom_context;
3653 
3654 	if (!ctx)
3655 		return;
3656 
3657 	if (strnstr(ctx->vbios_pn, "D16406",
3658 		    sizeof(ctx->vbios_pn)) ||
3659 		strnstr(ctx->vbios_pn, "D36002",
3660 			sizeof(ctx->vbios_pn)))
3661 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
3662 }
3663 
3664 /* Query ras capablity via atomfirmware interface */
3665 static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev)
3666 {
3667 	/* mem_ecc cap */
3668 	if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
3669 		dev_info(adev->dev, "MEM ECC is active.\n");
3670 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
3671 					 1 << AMDGPU_RAS_BLOCK__DF);
3672 	} else {
3673 		dev_info(adev->dev, "MEM ECC is not presented.\n");
3674 	}
3675 
3676 	/* sram_ecc cap */
3677 	if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
3678 		dev_info(adev->dev, "SRAM ECC is active.\n");
3679 		if (!amdgpu_sriov_vf(adev))
3680 			adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
3681 						  1 << AMDGPU_RAS_BLOCK__DF);
3682 		else
3683 			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
3684 						 1 << AMDGPU_RAS_BLOCK__SDMA |
3685 						 1 << AMDGPU_RAS_BLOCK__GFX);
3686 
3687 		/*
3688 		 * VCN/JPEG RAS can be supported on both bare metal and
3689 		 * SRIOV environment
3690 		 */
3691 		if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
3692 		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
3693 		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
3694 			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
3695 						 1 << AMDGPU_RAS_BLOCK__JPEG);
3696 		else
3697 			adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
3698 						  1 << AMDGPU_RAS_BLOCK__JPEG);
3699 
3700 		/*
3701 		 * XGMI RAS is not supported if xgmi num physical nodes
3702 		 * is zero
3703 		 */
3704 		if (!adev->gmc.xgmi.num_physical_nodes)
3705 			adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
3706 	} else {
3707 		dev_info(adev->dev, "SRAM ECC is not presented.\n");
3708 	}
3709 }
3710 
3711 /* Query poison mode from umc/df IP callbacks */
3712 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
3713 {
3714 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3715 	bool df_poison, umc_poison;
3716 
3717 	/* poison setting is useless on SRIOV guest */
3718 	if (amdgpu_sriov_vf(adev) || !con)
3719 		return;
3720 
3721 	/* Init poison supported flag, the default value is false */
3722 	if (adev->gmc.xgmi.connected_to_cpu ||
3723 	    adev->gmc.is_app_apu) {
3724 		/* enabled by default when GPU is connected to CPU */
3725 		con->poison_supported = true;
3726 	} else if (adev->df.funcs &&
3727 	    adev->df.funcs->query_ras_poison_mode &&
3728 	    adev->umc.ras &&
3729 	    adev->umc.ras->query_ras_poison_mode) {
3730 		df_poison =
3731 			adev->df.funcs->query_ras_poison_mode(adev);
3732 		umc_poison =
3733 			adev->umc.ras->query_ras_poison_mode(adev);
3734 
3735 		/* Only poison is set in both DF and UMC, we can support it */
3736 		if (df_poison && umc_poison)
3737 			con->poison_supported = true;
3738 		else if (df_poison != umc_poison)
3739 			dev_warn(adev->dev,
3740 				"Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
3741 				df_poison, umc_poison);
3742 	}
3743 }
3744 
3745 /*
3746  * check hardware's ras ability which will be saved in hw_supported.
3747  * if hardware does not support ras, we can skip some ras initializtion and
3748  * forbid some ras operations from IP.
3749  * if software itself, say boot parameter, limit the ras ability. We still
3750  * need allow IP do some limited operations, like disable. In such case,
3751  * we have to initialize ras as normal. but need check if operation is
3752  * allowed or not in each function.
3753  */
3754 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
3755 {
3756 	adev->ras_hw_enabled = adev->ras_enabled = 0;
3757 
3758 	if (!amdgpu_ras_asic_supported(adev))
3759 		return;
3760 
3761 	if (amdgpu_sriov_vf(adev)) {
3762 		if (amdgpu_virt_get_ras_capability(adev))
3763 			goto init_ras_enabled_flag;
3764 	}
3765 
3766 	/* query ras capability from psp */
3767 	if (amdgpu_psp_get_ras_capability(&adev->psp))
3768 		goto init_ras_enabled_flag;
3769 
3770 	/* query ras capablity from bios */
3771 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
3772 		amdgpu_ras_query_ras_capablity_from_vbios(adev);
3773 	} else {
3774 		/* driver only manages a few IP blocks RAS feature
3775 		 * when GPU is connected cpu through XGMI */
3776 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
3777 					   1 << AMDGPU_RAS_BLOCK__SDMA |
3778 					   1 << AMDGPU_RAS_BLOCK__MMHUB);
3779 	}
3780 
3781 	/* apply asic specific settings (vega20 only for now) */
3782 	amdgpu_ras_get_quirks(adev);
3783 
3784 	/* query poison mode from umc/df ip callback */
3785 	amdgpu_ras_query_poison_mode(adev);
3786 
3787 init_ras_enabled_flag:
3788 	/* hw_supported needs to be aligned with RAS block mask. */
3789 	adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
3790 
3791 	adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
3792 		adev->ras_hw_enabled & amdgpu_ras_mask;
3793 
3794 	/* aca is disabled by default except for psp v13_0_6/v13_0_12/v13_0_14 */
3795 	adev->aca.is_enabled =
3796 		(amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
3797 		 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
3798 		 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14));
3799 
3800 	/* bad page feature is not applicable to specific app platform */
3801 	if (adev->gmc.is_app_apu &&
3802 	    amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(12, 0, 0))
3803 		amdgpu_bad_page_threshold = 0;
3804 }
3805 
3806 static void amdgpu_ras_counte_dw(struct work_struct *work)
3807 {
3808 	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
3809 					      ras_counte_delay_work.work);
3810 	struct amdgpu_device *adev = con->adev;
3811 	struct drm_device *dev = adev_to_drm(adev);
3812 	unsigned long ce_count, ue_count;
3813 	int res;
3814 
3815 	res = pm_runtime_get_sync(dev->dev);
3816 	if (res < 0)
3817 		goto Out;
3818 
3819 	/* Cache new values.
3820 	 */
3821 	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
3822 		atomic_set(&con->ras_ce_count, ce_count);
3823 		atomic_set(&con->ras_ue_count, ue_count);
3824 	}
3825 
3826 	pm_runtime_mark_last_busy(dev->dev);
3827 Out:
3828 	pm_runtime_put_autosuspend(dev->dev);
3829 }
3830 
3831 static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
3832 {
3833 	return  amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0 |
3834 			AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
3835 			AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
3836 			AMDGPU_RAS_ERROR__PARITY;
3837 }
3838 
3839 static void ras_event_mgr_init(struct ras_event_manager *mgr)
3840 {
3841 	struct ras_event_state *event_state;
3842 	int i;
3843 
3844 	memset(mgr, 0, sizeof(*mgr));
3845 	atomic64_set(&mgr->seqno, 0);
3846 
3847 	for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) {
3848 		event_state = &mgr->event_state[i];
3849 		event_state->last_seqno = RAS_EVENT_INVALID_ID;
3850 		atomic64_set(&event_state->count, 0);
3851 	}
3852 }
3853 
3854 static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev)
3855 {
3856 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3857 	struct amdgpu_hive_info *hive;
3858 
3859 	if (!ras)
3860 		return;
3861 
3862 	hive = amdgpu_get_xgmi_hive(adev);
3863 	ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr;
3864 
3865 	/* init event manager with node 0 on xgmi system */
3866 	if (!amdgpu_reset_in_recovery(adev)) {
3867 		if (!hive || adev->gmc.xgmi.node_id == 0)
3868 			ras_event_mgr_init(ras->event_mgr);
3869 	}
3870 
3871 	if (hive)
3872 		amdgpu_put_xgmi_hive(hive);
3873 }
3874 
3875 static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev)
3876 {
3877 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3878 
3879 	if (!con || (adev->flags & AMD_IS_APU))
3880 		return;
3881 
3882 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3883 	case IP_VERSION(13, 0, 2):
3884 	case IP_VERSION(13, 0, 6):
3885 	case IP_VERSION(13, 0, 12):
3886 		con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT;
3887 		break;
3888 	case IP_VERSION(13, 0, 14):
3889 		con->reserved_pages_in_bytes = (AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT << 1);
3890 		break;
3891 	default:
3892 		break;
3893 	}
3894 }
3895 
3896 int amdgpu_ras_init(struct amdgpu_device *adev)
3897 {
3898 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3899 	int r;
3900 
3901 	if (con)
3902 		return 0;
3903 
3904 	con = kzalloc(sizeof(*con) +
3905 			sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
3906 			sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
3907 			GFP_KERNEL);
3908 	if (!con)
3909 		return -ENOMEM;
3910 
3911 	con->adev = adev;
3912 	INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
3913 	atomic_set(&con->ras_ce_count, 0);
3914 	atomic_set(&con->ras_ue_count, 0);
3915 
3916 	con->objs = (struct ras_manager *)(con + 1);
3917 
3918 	amdgpu_ras_set_context(adev, con);
3919 
3920 	amdgpu_ras_check_supported(adev);
3921 
3922 	if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
3923 		/* set gfx block ras context feature for VEGA20 Gaming
3924 		 * send ras disable cmd to ras ta during ras late init.
3925 		 */
3926 		if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
3927 			con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
3928 
3929 			return 0;
3930 		}
3931 
3932 		r = 0;
3933 		goto release_con;
3934 	}
3935 
3936 	con->update_channel_flag = false;
3937 	con->features = 0;
3938 	con->schema = 0;
3939 	INIT_LIST_HEAD(&con->head);
3940 	/* Might need get this flag from vbios. */
3941 	con->flags = RAS_DEFAULT_FLAGS;
3942 
3943 	/* initialize nbio ras function ahead of any other
3944 	 * ras functions so hardware fatal error interrupt
3945 	 * can be enabled as early as possible */
3946 	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
3947 	case IP_VERSION(7, 4, 0):
3948 	case IP_VERSION(7, 4, 1):
3949 	case IP_VERSION(7, 4, 4):
3950 		if (!adev->gmc.xgmi.connected_to_cpu)
3951 			adev->nbio.ras = &nbio_v7_4_ras;
3952 		break;
3953 	case IP_VERSION(4, 3, 0):
3954 		if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
3955 			/* unlike other generation of nbio ras,
3956 			 * nbio v4_3 only support fatal error interrupt
3957 			 * to inform software that DF is freezed due to
3958 			 * system fatal error event. driver should not
3959 			 * enable nbio ras in such case. Instead,
3960 			 * check DF RAS */
3961 			adev->nbio.ras = &nbio_v4_3_ras;
3962 		break;
3963 	case IP_VERSION(6, 3, 1):
3964 		if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
3965 			/* unlike other generation of nbio ras,
3966 			 * nbif v6_3_1 only support fatal error interrupt
3967 			 * to inform software that DF is freezed due to
3968 			 * system fatal error event. driver should not
3969 			 * enable nbio ras in such case. Instead,
3970 			 * check DF RAS
3971 			 */
3972 			adev->nbio.ras = &nbif_v6_3_1_ras;
3973 		break;
3974 	case IP_VERSION(7, 9, 0):
3975 	case IP_VERSION(7, 9, 1):
3976 		if (!adev->gmc.is_app_apu)
3977 			adev->nbio.ras = &nbio_v7_9_ras;
3978 		break;
3979 	default:
3980 		/* nbio ras is not available */
3981 		break;
3982 	}
3983 
3984 	/* nbio ras block needs to be enabled ahead of other ras blocks
3985 	 * to handle fatal error */
3986 	r = amdgpu_nbio_ras_sw_init(adev);
3987 	if (r)
3988 		return r;
3989 
3990 	if (adev->nbio.ras &&
3991 	    adev->nbio.ras->init_ras_controller_interrupt) {
3992 		r = adev->nbio.ras->init_ras_controller_interrupt(adev);
3993 		if (r)
3994 			goto release_con;
3995 	}
3996 
3997 	if (adev->nbio.ras &&
3998 	    adev->nbio.ras->init_ras_err_event_athub_interrupt) {
3999 		r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
4000 		if (r)
4001 			goto release_con;
4002 	}
4003 
4004 	/* Packed socket_id to ras feature mask bits[31:29] */
4005 	if (adev->smuio.funcs &&
4006 	    adev->smuio.funcs->get_socket_id)
4007 		con->features |= ((adev->smuio.funcs->get_socket_id(adev)) <<
4008 					AMDGPU_RAS_FEATURES_SOCKETID_SHIFT);
4009 
4010 	/* Get RAS schema for particular SOC */
4011 	con->schema = amdgpu_get_ras_schema(adev);
4012 
4013 	amdgpu_ras_init_reserved_vram_size(adev);
4014 
4015 	if (amdgpu_ras_fs_init(adev)) {
4016 		r = -EINVAL;
4017 		goto release_con;
4018 	}
4019 
4020 	if (amdgpu_ras_aca_is_supported(adev)) {
4021 		if (amdgpu_aca_is_enabled(adev))
4022 			r = amdgpu_aca_init(adev);
4023 		else
4024 			r = amdgpu_mca_init(adev);
4025 		if (r)
4026 			goto release_con;
4027 	}
4028 
4029 	dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
4030 		 "hardware ability[%x] ras_mask[%x]\n",
4031 		 adev->ras_hw_enabled, adev->ras_enabled);
4032 
4033 	return 0;
4034 release_con:
4035 	amdgpu_ras_set_context(adev, NULL);
4036 	kfree(con);
4037 
4038 	return r;
4039 }
4040 
4041 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
4042 {
4043 	if (adev->gmc.xgmi.connected_to_cpu ||
4044 	    adev->gmc.is_app_apu)
4045 		return 1;
4046 	return 0;
4047 }
4048 
4049 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
4050 					struct ras_common_if *ras_block)
4051 {
4052 	struct ras_query_if info = {
4053 		.head = *ras_block,
4054 	};
4055 
4056 	if (!amdgpu_persistent_edc_harvesting_supported(adev))
4057 		return 0;
4058 
4059 	if (amdgpu_ras_query_error_status(adev, &info) != 0)
4060 		DRM_WARN("RAS init harvest failure");
4061 
4062 	if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
4063 		DRM_WARN("RAS init harvest reset failure");
4064 
4065 	return 0;
4066 }
4067 
4068 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
4069 {
4070        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4071 
4072        if (!con)
4073                return false;
4074 
4075        return con->poison_supported;
4076 }
4077 
4078 /* helper function to handle common stuff in ip late init phase */
4079 int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
4080 			 struct ras_common_if *ras_block)
4081 {
4082 	struct amdgpu_ras_block_object *ras_obj = NULL;
4083 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4084 	struct ras_query_if *query_info;
4085 	unsigned long ue_count, ce_count;
4086 	int r;
4087 
4088 	/* disable RAS feature per IP block if it is not supported */
4089 	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
4090 		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
4091 		return 0;
4092 	}
4093 
4094 	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
4095 	if (r) {
4096 		if (adev->in_suspend || amdgpu_reset_in_recovery(adev)) {
4097 			/* in resume phase, if fail to enable ras,
4098 			 * clean up all ras fs nodes, and disable ras */
4099 			goto cleanup;
4100 		} else
4101 			return r;
4102 	}
4103 
4104 	/* check for errors on warm reset edc persisant supported ASIC */
4105 	amdgpu_persistent_edc_harvesting(adev, ras_block);
4106 
4107 	/* in resume phase, no need to create ras fs node */
4108 	if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
4109 		return 0;
4110 
4111 	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
4112 	if (ras_obj->ras_cb || (ras_obj->hw_ops &&
4113 	    (ras_obj->hw_ops->query_poison_status ||
4114 	    ras_obj->hw_ops->handle_poison_consumption))) {
4115 		r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
4116 		if (r)
4117 			goto cleanup;
4118 	}
4119 
4120 	if (ras_obj->hw_ops &&
4121 	    (ras_obj->hw_ops->query_ras_error_count ||
4122 	     ras_obj->hw_ops->query_ras_error_status)) {
4123 		r = amdgpu_ras_sysfs_create(adev, ras_block);
4124 		if (r)
4125 			goto interrupt;
4126 
4127 		/* Those are the cached values at init.
4128 		 */
4129 		query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
4130 		if (!query_info)
4131 			return -ENOMEM;
4132 		memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
4133 
4134 		if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
4135 			atomic_set(&con->ras_ce_count, ce_count);
4136 			atomic_set(&con->ras_ue_count, ue_count);
4137 		}
4138 
4139 		kfree(query_info);
4140 	}
4141 
4142 	return 0;
4143 
4144 interrupt:
4145 	if (ras_obj->ras_cb)
4146 		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
4147 cleanup:
4148 	amdgpu_ras_feature_enable(adev, ras_block, 0);
4149 	return r;
4150 }
4151 
4152 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
4153 			 struct ras_common_if *ras_block)
4154 {
4155 	return amdgpu_ras_block_late_init(adev, ras_block);
4156 }
4157 
4158 /* helper function to remove ras fs node and interrupt handler */
4159 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
4160 			  struct ras_common_if *ras_block)
4161 {
4162 	struct amdgpu_ras_block_object *ras_obj;
4163 	if (!ras_block)
4164 		return;
4165 
4166 	amdgpu_ras_sysfs_remove(adev, ras_block);
4167 
4168 	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
4169 	if (ras_obj->ras_cb)
4170 		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
4171 }
4172 
4173 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
4174 			  struct ras_common_if *ras_block)
4175 {
4176 	return amdgpu_ras_block_late_fini(adev, ras_block);
4177 }
4178 
4179 /* do some init work after IP late init as dependence.
4180  * and it runs in resume/gpu reset/booting up cases.
4181  */
4182 void amdgpu_ras_resume(struct amdgpu_device *adev)
4183 {
4184 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4185 	struct ras_manager *obj, *tmp;
4186 
4187 	if (!adev->ras_enabled || !con) {
4188 		/* clean ras context for VEGA20 Gaming after send ras disable cmd */
4189 		amdgpu_release_ras_context(adev);
4190 
4191 		return;
4192 	}
4193 
4194 	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
4195 		/* Set up all other IPs which are not implemented. There is a
4196 		 * tricky thing that IP's actual ras error type should be
4197 		 * MULTI_UNCORRECTABLE, but as driver does not handle it, so
4198 		 * ERROR_NONE make sense anyway.
4199 		 */
4200 		amdgpu_ras_enable_all_features(adev, 1);
4201 
4202 		/* We enable ras on all hw_supported block, but as boot
4203 		 * parameter might disable some of them and one or more IP has
4204 		 * not implemented yet. So we disable them on behalf.
4205 		 */
4206 		list_for_each_entry_safe(obj, tmp, &con->head, node) {
4207 			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
4208 				amdgpu_ras_feature_enable(adev, &obj->head, 0);
4209 				/* there should be no any reference. */
4210 				WARN_ON(alive_obj(obj));
4211 			}
4212 		}
4213 	}
4214 }
4215 
4216 void amdgpu_ras_suspend(struct amdgpu_device *adev)
4217 {
4218 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4219 
4220 	if (!adev->ras_enabled || !con)
4221 		return;
4222 
4223 	amdgpu_ras_disable_all_features(adev, 0);
4224 	/* Make sure all ras objects are disabled. */
4225 	if (AMDGPU_RAS_GET_FEATURES(con->features))
4226 		amdgpu_ras_disable_all_features(adev, 1);
4227 }
4228 
4229 int amdgpu_ras_late_init(struct amdgpu_device *adev)
4230 {
4231 	struct amdgpu_ras_block_list *node, *tmp;
4232 	struct amdgpu_ras_block_object *obj;
4233 	int r;
4234 
4235 	amdgpu_ras_event_mgr_init(adev);
4236 
4237 	if (amdgpu_ras_aca_is_supported(adev)) {
4238 		if (amdgpu_reset_in_recovery(adev)) {
4239 			if (amdgpu_aca_is_enabled(adev))
4240 				r = amdgpu_aca_reset(adev);
4241 			else
4242 				r = amdgpu_mca_reset(adev);
4243 			if (r)
4244 				return r;
4245 		}
4246 
4247 		if (!amdgpu_sriov_vf(adev)) {
4248 			if (amdgpu_aca_is_enabled(adev))
4249 				amdgpu_ras_set_aca_debug_mode(adev, false);
4250 			else
4251 				amdgpu_ras_set_mca_debug_mode(adev, false);
4252 		}
4253 	}
4254 
4255 	/* Guest side doesn't need init ras feature */
4256 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_ras_telemetry_en(adev))
4257 		return 0;
4258 
4259 	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
4260 		obj = node->ras_obj;
4261 		if (!obj) {
4262 			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
4263 			continue;
4264 		}
4265 
4266 		if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block))
4267 			continue;
4268 
4269 		if (obj->ras_late_init) {
4270 			r = obj->ras_late_init(adev, &obj->ras_comm);
4271 			if (r) {
4272 				dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
4273 					obj->ras_comm.name, r);
4274 				return r;
4275 			}
4276 		} else
4277 			amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
4278 	}
4279 
4280 	return 0;
4281 }
4282 
4283 /* do some fini work before IP fini as dependence */
4284 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
4285 {
4286 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4287 
4288 	if (!adev->ras_enabled || !con)
4289 		return 0;
4290 
4291 
4292 	/* Need disable ras on all IPs here before ip [hw/sw]fini */
4293 	if (AMDGPU_RAS_GET_FEATURES(con->features))
4294 		amdgpu_ras_disable_all_features(adev, 0);
4295 	amdgpu_ras_recovery_fini(adev);
4296 	return 0;
4297 }
4298 
4299 int amdgpu_ras_fini(struct amdgpu_device *adev)
4300 {
4301 	struct amdgpu_ras_block_list *ras_node, *tmp;
4302 	struct amdgpu_ras_block_object *obj = NULL;
4303 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4304 
4305 	if (!adev->ras_enabled || !con)
4306 		return 0;
4307 
4308 	list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
4309 		if (ras_node->ras_obj) {
4310 			obj = ras_node->ras_obj;
4311 			if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
4312 			    obj->ras_fini)
4313 				obj->ras_fini(adev, &obj->ras_comm);
4314 			else
4315 				amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
4316 		}
4317 
4318 		/* Clear ras blocks from ras_list and free ras block list node */
4319 		list_del(&ras_node->node);
4320 		kfree(ras_node);
4321 	}
4322 
4323 	amdgpu_ras_fs_fini(adev);
4324 	amdgpu_ras_interrupt_remove_all(adev);
4325 
4326 	if (amdgpu_ras_aca_is_supported(adev)) {
4327 		if (amdgpu_aca_is_enabled(adev))
4328 			amdgpu_aca_fini(adev);
4329 		else
4330 			amdgpu_mca_fini(adev);
4331 	}
4332 
4333 	WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");
4334 
4335 	if (AMDGPU_RAS_GET_FEATURES(con->features))
4336 		amdgpu_ras_disable_all_features(adev, 0);
4337 
4338 	cancel_delayed_work_sync(&con->ras_counte_delay_work);
4339 
4340 	amdgpu_ras_set_context(adev, NULL);
4341 	kfree(con);
4342 
4343 	return 0;
4344 }
4345 
4346 bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
4347 {
4348 	struct amdgpu_ras *ras;
4349 
4350 	ras = amdgpu_ras_get_context(adev);
4351 	if (!ras)
4352 		return false;
4353 
4354 	return test_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
4355 }
4356 
4357 void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
4358 {
4359 	struct amdgpu_ras *ras;
4360 
4361 	ras = amdgpu_ras_get_context(adev);
4362 	if (ras) {
4363 		if (status)
4364 			set_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
4365 		else
4366 			clear_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
4367 	}
4368 }
4369 
4370 void amdgpu_ras_clear_err_state(struct amdgpu_device *adev)
4371 {
4372 	struct amdgpu_ras *ras;
4373 
4374 	ras = amdgpu_ras_get_context(adev);
4375 	if (ras)
4376 		ras->ras_err_state = 0;
4377 }
4378 
4379 void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
4380 			       enum amdgpu_ras_block block)
4381 {
4382 	struct amdgpu_ras *ras;
4383 
4384 	ras = amdgpu_ras_get_context(adev);
4385 	if (ras)
4386 		set_bit(block, &ras->ras_err_state);
4387 }
4388 
4389 bool amdgpu_ras_is_err_state(struct amdgpu_device *adev, int block)
4390 {
4391 	struct amdgpu_ras *ras;
4392 
4393 	ras = amdgpu_ras_get_context(adev);
4394 	if (ras) {
4395 		if (block == AMDGPU_RAS_BLOCK__ANY)
4396 			return (ras->ras_err_state != 0);
4397 		else
4398 			return test_bit(block, &ras->ras_err_state) ||
4399 			       test_bit(AMDGPU_RAS_BLOCK__LAST,
4400 					&ras->ras_err_state);
4401 	}
4402 
4403 	return false;
4404 }
4405 
4406 static struct ras_event_manager *__get_ras_event_mgr(struct amdgpu_device *adev)
4407 {
4408 	struct amdgpu_ras *ras;
4409 
4410 	ras = amdgpu_ras_get_context(adev);
4411 	if (!ras)
4412 		return NULL;
4413 
4414 	return ras->event_mgr;
4415 }
4416 
4417 int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type,
4418 				     const void *caller)
4419 {
4420 	struct ras_event_manager *event_mgr;
4421 	struct ras_event_state *event_state;
4422 	int ret = 0;
4423 
4424 	if (type >= RAS_EVENT_TYPE_COUNT) {
4425 		ret = -EINVAL;
4426 		goto out;
4427 	}
4428 
4429 	event_mgr = __get_ras_event_mgr(adev);
4430 	if (!event_mgr) {
4431 		ret = -EINVAL;
4432 		goto out;
4433 	}
4434 
4435 	event_state = &event_mgr->event_state[type];
4436 	event_state->last_seqno = atomic64_inc_return(&event_mgr->seqno);
4437 	atomic64_inc(&event_state->count);
4438 
4439 out:
4440 	if (ret && caller)
4441 		dev_warn(adev->dev, "failed mark ras event (%d) in %ps, ret:%d\n",
4442 			 (int)type, caller, ret);
4443 
4444 	return ret;
4445 }
4446 
4447 u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type)
4448 {
4449 	struct ras_event_manager *event_mgr;
4450 	u64 id;
4451 
4452 	if (type >= RAS_EVENT_TYPE_COUNT)
4453 		return RAS_EVENT_INVALID_ID;
4454 
4455 	switch (type) {
4456 	case RAS_EVENT_TYPE_FATAL:
4457 	case RAS_EVENT_TYPE_POISON_CREATION:
4458 	case RAS_EVENT_TYPE_POISON_CONSUMPTION:
4459 		event_mgr = __get_ras_event_mgr(adev);
4460 		if (!event_mgr)
4461 			return RAS_EVENT_INVALID_ID;
4462 
4463 		id = event_mgr->event_state[type].last_seqno;
4464 		break;
4465 	case RAS_EVENT_TYPE_INVALID:
4466 	default:
4467 		id = RAS_EVENT_INVALID_ID;
4468 		break;
4469 	}
4470 
4471 	return id;
4472 }
4473 
4474 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
4475 {
4476 	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
4477 		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4478 		enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
4479 		u64 event_id;
4480 
4481 		if (amdgpu_ras_mark_ras_event(adev, type))
4482 			return;
4483 
4484 		event_id = amdgpu_ras_acquire_event_id(adev, type);
4485 
4486 		RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error"
4487 			      "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
4488 
4489 		amdgpu_ras_set_fed(adev, true);
4490 		ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
4491 		amdgpu_ras_reset_gpu(adev);
4492 	}
4493 }
4494 
4495 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
4496 {
4497 	if (adev->asic_type == CHIP_VEGA20 &&
4498 	    adev->pm.fw_version <= 0x283400) {
4499 		return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
4500 				amdgpu_ras_intr_triggered();
4501 	}
4502 
4503 	return false;
4504 }
4505 
4506 void amdgpu_release_ras_context(struct amdgpu_device *adev)
4507 {
4508 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4509 
4510 	if (!con)
4511 		return;
4512 
4513 	if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
4514 		con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
4515 		amdgpu_ras_set_context(adev, NULL);
4516 		kfree(con);
4517 	}
4518 }
4519 
4520 #ifdef CONFIG_X86_MCE_AMD
4521 static struct amdgpu_device *find_adev(uint32_t node_id)
4522 {
4523 	int i;
4524 	struct amdgpu_device *adev = NULL;
4525 
4526 	for (i = 0; i < mce_adev_list.num_gpu; i++) {
4527 		adev = mce_adev_list.devs[i];
4528 
4529 		if (adev && adev->gmc.xgmi.connected_to_cpu &&
4530 		    adev->gmc.xgmi.physical_node_id == node_id)
4531 			break;
4532 		adev = NULL;
4533 	}
4534 
4535 	return adev;
4536 }
4537 
4538 #define GET_MCA_IPID_GPUID(m)	(((m) >> 44) & 0xF)
4539 #define GET_UMC_INST(m)		(((m) >> 21) & 0x7)
4540 #define GET_CHAN_INDEX(m)	((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
4541 #define GPU_ID_OFFSET		8
4542 
4543 static int amdgpu_bad_page_notifier(struct notifier_block *nb,
4544 				    unsigned long val, void *data)
4545 {
4546 	struct mce *m = (struct mce *)data;
4547 	struct amdgpu_device *adev = NULL;
4548 	uint32_t gpu_id = 0;
4549 	uint32_t umc_inst = 0, ch_inst = 0;
4550 
4551 	/*
4552 	 * If the error was generated in UMC_V2, which belongs to GPU UMCs,
4553 	 * and error occurred in DramECC (Extended error code = 0) then only
4554 	 * process the error, else bail out.
4555 	 */
4556 	if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
4557 		    (XEC(m->status, 0x3f) == 0x0)))
4558 		return NOTIFY_DONE;
4559 
4560 	/*
4561 	 * If it is correctable error, return.
4562 	 */
4563 	if (mce_is_correctable(m))
4564 		return NOTIFY_OK;
4565 
4566 	/*
4567 	 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
4568 	 */
4569 	gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
4570 
4571 	adev = find_adev(gpu_id);
4572 	if (!adev) {
4573 		DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
4574 								gpu_id);
4575 		return NOTIFY_DONE;
4576 	}
4577 
4578 	/*
4579 	 * If it is uncorrectable error, then find out UMC instance and
4580 	 * channel index.
4581 	 */
4582 	umc_inst = GET_UMC_INST(m->ipid);
4583 	ch_inst = GET_CHAN_INDEX(m->ipid);
4584 
4585 	dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
4586 			     umc_inst, ch_inst);
4587 
4588 	if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
4589 		return NOTIFY_OK;
4590 	else
4591 		return NOTIFY_DONE;
4592 }
4593 
4594 static struct notifier_block amdgpu_bad_page_nb = {
4595 	.notifier_call  = amdgpu_bad_page_notifier,
4596 	.priority       = MCE_PRIO_UC,
4597 };
4598 
4599 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
4600 {
4601 	/*
4602 	 * Add the adev to the mce_adev_list.
4603 	 * During mode2 reset, amdgpu device is temporarily
4604 	 * removed from the mgpu_info list which can cause
4605 	 * page retirement to fail.
4606 	 * Use this list instead of mgpu_info to find the amdgpu
4607 	 * device on which the UMC error was reported.
4608 	 */
4609 	mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
4610 
4611 	/*
4612 	 * Register the x86 notifier only once
4613 	 * with MCE subsystem.
4614 	 */
4615 	if (notifier_registered == false) {
4616 		mce_register_decode_chain(&amdgpu_bad_page_nb);
4617 		notifier_registered = true;
4618 	}
4619 }
4620 #endif
4621 
4622 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
4623 {
4624 	if (!adev)
4625 		return NULL;
4626 
4627 	return adev->psp.ras_context.ras;
4628 }
4629 
4630 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
4631 {
4632 	if (!adev)
4633 		return -EINVAL;
4634 
4635 	adev->psp.ras_context.ras = ras_con;
4636 	return 0;
4637 }
4638 
4639 /* check if ras is supported on block, say, sdma, gfx */
4640 int amdgpu_ras_is_supported(struct amdgpu_device *adev,
4641 		unsigned int block)
4642 {
4643 	int ret = 0;
4644 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4645 
4646 	if (block >= AMDGPU_RAS_BLOCK_COUNT)
4647 		return 0;
4648 
4649 	ret = ras && (adev->ras_enabled & (1 << block));
4650 
4651 	/* For the special asic with mem ecc enabled but sram ecc
4652 	 * not enabled, even if the ras block is not supported on
4653 	 * .ras_enabled, if the asic supports poison mode and the
4654 	 * ras block has ras configuration, it can be considered
4655 	 * that the ras block supports ras function.
4656 	 */
4657 	if (!ret &&
4658 	    (block == AMDGPU_RAS_BLOCK__GFX ||
4659 	     block == AMDGPU_RAS_BLOCK__SDMA ||
4660 	     block == AMDGPU_RAS_BLOCK__VCN ||
4661 	     block == AMDGPU_RAS_BLOCK__JPEG) &&
4662 		(amdgpu_ras_mask & (1 << block)) &&
4663 	    amdgpu_ras_is_poison_mode_supported(adev) &&
4664 	    amdgpu_ras_get_ras_block(adev, block, 0))
4665 		ret = 1;
4666 
4667 	return ret;
4668 }
4669 
4670 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
4671 {
4672 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4673 
4674 	/* mode1 is the only selection for RMA status */
4675 	if (amdgpu_ras_is_rma(adev)) {
4676 		ras->gpu_reset_flags = 0;
4677 		ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
4678 	}
4679 
4680 	if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) {
4681 		struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
4682 		int hive_ras_recovery = 0;
4683 
4684 		if (hive) {
4685 			hive_ras_recovery = atomic_read(&hive->ras_recovery);
4686 			amdgpu_put_xgmi_hive(hive);
4687 		}
4688 		/* In the case of multiple GPUs, after a GPU has started
4689 		 * resetting all GPUs on hive, other GPUs do not need to
4690 		 * trigger GPU reset again.
4691 		 */
4692 		if (!hive_ras_recovery)
4693 			amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
4694 		else
4695 			atomic_set(&ras->in_recovery, 0);
4696 	} else {
4697 		flush_work(&ras->recovery_work);
4698 		amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
4699 	}
4700 
4701 	return 0;
4702 }
4703 
4704 int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
4705 {
4706 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4707 	int ret = 0;
4708 
4709 	if (con) {
4710 		ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
4711 		if (!ret)
4712 			con->is_aca_debug_mode = enable;
4713 	}
4714 
4715 	return ret;
4716 }
4717 
4718 int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable)
4719 {
4720 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4721 	int ret = 0;
4722 
4723 	if (con) {
4724 		if (amdgpu_aca_is_enabled(adev))
4725 			ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
4726 		else
4727 			ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
4728 		if (!ret)
4729 			con->is_aca_debug_mode = enable;
4730 	}
4731 
4732 	return ret;
4733 }
4734 
4735 bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev)
4736 {
4737 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4738 	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
4739 	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
4740 
4741 	if (!con)
4742 		return false;
4743 
4744 	if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) ||
4745 	    (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode))
4746 		return con->is_aca_debug_mode;
4747 	else
4748 		return true;
4749 }
4750 
4751 bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
4752 				     unsigned int *error_query_mode)
4753 {
4754 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4755 	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
4756 	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
4757 
4758 	if (!con) {
4759 		*error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
4760 		return false;
4761 	}
4762 
4763 	if (amdgpu_sriov_vf(adev)) {
4764 		*error_query_mode = AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY;
4765 	} else if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) {
4766 		*error_query_mode =
4767 			(con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
4768 	} else {
4769 		*error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
4770 	}
4771 
4772 	return true;
4773 }
4774 
4775 /* Register each ip ras block into amdgpu ras */
4776 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
4777 		struct amdgpu_ras_block_object *ras_block_obj)
4778 {
4779 	struct amdgpu_ras_block_list *ras_node;
4780 	if (!adev || !ras_block_obj)
4781 		return -EINVAL;
4782 
4783 	ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
4784 	if (!ras_node)
4785 		return -ENOMEM;
4786 
4787 	INIT_LIST_HEAD(&ras_node->node);
4788 	ras_node->ras_obj = ras_block_obj;
4789 	list_add_tail(&ras_node->node, &adev->ras_list);
4790 
4791 	return 0;
4792 }
4793 
4794 void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
4795 {
4796 	if (!err_type_name)
4797 		return;
4798 
4799 	switch (err_type) {
4800 	case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
4801 		sprintf(err_type_name, "correctable");
4802 		break;
4803 	case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
4804 		sprintf(err_type_name, "uncorrectable");
4805 		break;
4806 	default:
4807 		sprintf(err_type_name, "unknown");
4808 		break;
4809 	}
4810 }
4811 
4812 bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
4813 					 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
4814 					 uint32_t instance,
4815 					 uint32_t *memory_id)
4816 {
4817 	uint32_t err_status_lo_data, err_status_lo_offset;
4818 
4819 	if (!reg_entry)
4820 		return false;
4821 
4822 	err_status_lo_offset =
4823 		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
4824 					    reg_entry->seg_lo, reg_entry->reg_lo);
4825 	err_status_lo_data = RREG32(err_status_lo_offset);
4826 
4827 	if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
4828 	    !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
4829 		return false;
4830 
4831 	*memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);
4832 
4833 	return true;
4834 }
4835 
4836 bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
4837 				       const struct amdgpu_ras_err_status_reg_entry *reg_entry,
4838 				       uint32_t instance,
4839 				       unsigned long *err_cnt)
4840 {
4841 	uint32_t err_status_hi_data, err_status_hi_offset;
4842 
4843 	if (!reg_entry)
4844 		return false;
4845 
4846 	err_status_hi_offset =
4847 		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
4848 					    reg_entry->seg_hi, reg_entry->reg_hi);
4849 	err_status_hi_data = RREG32(err_status_hi_offset);
4850 
4851 	if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
4852 	    !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
4853 		/* keep the check here in case we need to refer to the result later */
4854 		dev_dbg(adev->dev, "Invalid err_info field\n");
4855 
4856 	/* read err count */
4857 	*err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);
4858 
4859 	return true;
4860 }
4861 
4862 void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
4863 					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
4864 					   uint32_t reg_list_size,
4865 					   const struct amdgpu_ras_memory_id_entry *mem_list,
4866 					   uint32_t mem_list_size,
4867 					   uint32_t instance,
4868 					   uint32_t err_type,
4869 					   unsigned long *err_count)
4870 {
4871 	uint32_t memory_id;
4872 	unsigned long err_cnt;
4873 	char err_type_name[16];
4874 	uint32_t i, j;
4875 
4876 	for (i = 0; i < reg_list_size; i++) {
4877 		/* query memory_id from err_status_lo */
4878 		if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
4879 							 instance, &memory_id))
4880 			continue;
4881 
4882 		/* query err_cnt from err_status_hi */
4883 		if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
4884 						       instance, &err_cnt) ||
4885 		    !err_cnt)
4886 			continue;
4887 
4888 		*err_count += err_cnt;
4889 
4890 		/* log the errors */
4891 		amdgpu_ras_get_error_type_name(err_type, err_type_name);
4892 		if (!mem_list) {
4893 			/* memory_list is not supported */
4894 			dev_info(adev->dev,
4895 				 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
4896 				 err_cnt, err_type_name,
4897 				 reg_list[i].block_name,
4898 				 instance, memory_id);
4899 		} else {
4900 			for (j = 0; j < mem_list_size; j++) {
4901 				if (memory_id == mem_list[j].memory_id) {
4902 					dev_info(adev->dev,
4903 						 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
4904 						 err_cnt, err_type_name,
4905 						 reg_list[i].block_name,
4906 						 instance, mem_list[j].name);
4907 					break;
4908 				}
4909 			}
4910 		}
4911 	}
4912 }
4913 
4914 void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
4915 					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
4916 					   uint32_t reg_list_size,
4917 					   uint32_t instance)
4918 {
4919 	uint32_t err_status_lo_offset, err_status_hi_offset;
4920 	uint32_t i;
4921 
4922 	for (i = 0; i < reg_list_size; i++) {
4923 		err_status_lo_offset =
4924 			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
4925 						    reg_list[i].seg_lo, reg_list[i].reg_lo);
4926 		err_status_hi_offset =
4927 			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
4928 						    reg_list[i].seg_hi, reg_list[i].reg_hi);
4929 		WREG32(err_status_lo_offset, 0);
4930 		WREG32(err_status_hi_offset, 0);
4931 	}
4932 }
4933 
4934 int amdgpu_ras_error_data_init(struct ras_err_data *err_data)
4935 {
4936 	memset(err_data, 0, sizeof(*err_data));
4937 
4938 	INIT_LIST_HEAD(&err_data->err_node_list);
4939 
4940 	return 0;
4941 }
4942 
4943 static void amdgpu_ras_error_node_release(struct ras_err_node *err_node)
4944 {
4945 	if (!err_node)
4946 		return;
4947 
4948 	list_del(&err_node->node);
4949 	kvfree(err_node);
4950 }
4951 
4952 void amdgpu_ras_error_data_fini(struct ras_err_data *err_data)
4953 {
4954 	struct ras_err_node *err_node, *tmp;
4955 
4956 	list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
4957 		amdgpu_ras_error_node_release(err_node);
4958 }
4959 
4960 static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data,
4961 							     struct amdgpu_smuio_mcm_config_info *mcm_info)
4962 {
4963 	struct ras_err_node *err_node;
4964 	struct amdgpu_smuio_mcm_config_info *ref_id;
4965 
4966 	if (!err_data || !mcm_info)
4967 		return NULL;
4968 
4969 	for_each_ras_error(err_node, err_data) {
4970 		ref_id = &err_node->err_info.mcm_info;
4971 
4972 		if (mcm_info->socket_id == ref_id->socket_id &&
4973 		    mcm_info->die_id == ref_id->die_id)
4974 			return err_node;
4975 	}
4976 
4977 	return NULL;
4978 }
4979 
4980 static struct ras_err_node *amdgpu_ras_error_node_new(void)
4981 {
4982 	struct ras_err_node *err_node;
4983 
4984 	err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL);
4985 	if (!err_node)
4986 		return NULL;
4987 
4988 	INIT_LIST_HEAD(&err_node->node);
4989 
4990 	return err_node;
4991 }
4992 
4993 static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
4994 {
4995 	struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
4996 	struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
4997 	struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
4998 	struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;
4999 
5000 	if (unlikely(infoa->socket_id != infob->socket_id))
5001 		return infoa->socket_id - infob->socket_id;
5002 	else
5003 		return infoa->die_id - infob->die_id;
5004 
5005 	return 0;
5006 }
5007 
5008 static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
5009 				struct amdgpu_smuio_mcm_config_info *mcm_info)
5010 {
5011 	struct ras_err_node *err_node;
5012 
5013 	err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info);
5014 	if (err_node)
5015 		return &err_node->err_info;
5016 
5017 	err_node = amdgpu_ras_error_node_new();
5018 	if (!err_node)
5019 		return NULL;
5020 
5021 	memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
5022 
5023 	err_data->err_list_count++;
5024 	list_add_tail(&err_node->node, &err_data->err_node_list);
5025 	list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
5026 
5027 	return &err_node->err_info;
5028 }
5029 
5030 int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
5031 					struct amdgpu_smuio_mcm_config_info *mcm_info,
5032 					u64 count)
5033 {
5034 	struct ras_err_info *err_info;
5035 
5036 	if (!err_data || !mcm_info)
5037 		return -EINVAL;
5038 
5039 	if (!count)
5040 		return 0;
5041 
5042 	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
5043 	if (!err_info)
5044 		return -EINVAL;
5045 
5046 	err_info->ue_count += count;
5047 	err_data->ue_count += count;
5048 
5049 	return 0;
5050 }
5051 
5052 int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
5053 					struct amdgpu_smuio_mcm_config_info *mcm_info,
5054 					u64 count)
5055 {
5056 	struct ras_err_info *err_info;
5057 
5058 	if (!err_data || !mcm_info)
5059 		return -EINVAL;
5060 
5061 	if (!count)
5062 		return 0;
5063 
5064 	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
5065 	if (!err_info)
5066 		return -EINVAL;
5067 
5068 	err_info->ce_count += count;
5069 	err_data->ce_count += count;
5070 
5071 	return 0;
5072 }
5073 
5074 int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
5075 					struct amdgpu_smuio_mcm_config_info *mcm_info,
5076 					u64 count)
5077 {
5078 	struct ras_err_info *err_info;
5079 
5080 	if (!err_data || !mcm_info)
5081 		return -EINVAL;
5082 
5083 	if (!count)
5084 		return 0;
5085 
5086 	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
5087 	if (!err_info)
5088 		return -EINVAL;
5089 
5090 	err_info->de_count += count;
5091 	err_data->de_count += count;
5092 
5093 	return 0;
5094 }
5095 
5096 #define mmMP0_SMN_C2PMSG_92	0x1609C
5097 #define mmMP0_SMN_C2PMSG_126	0x160BE
5098 static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
5099 						 u32 instance)
5100 {
5101 	u32 socket_id, aid_id, hbm_id;
5102 	u32 fw_status;
5103 	u32 boot_error;
5104 	u64 reg_addr;
5105 
5106 	/* The pattern for smn addressing in other SOC could be different from
5107 	 * the one for aqua_vanjaram. We should revisit the code if the pattern
5108 	 * is changed. In such case, replace the aqua_vanjaram implementation
5109 	 * with more common helper */
5110 	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
5111 		   aqua_vanjaram_encode_ext_smn_addressing(instance);
5112 	fw_status = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
5113 
5114 	reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
5115 		   aqua_vanjaram_encode_ext_smn_addressing(instance);
5116 	boot_error = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
5117 
5118 	socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
5119 	aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error);
5120 	hbm_id = ((1 == AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error)) ? 0 : 1);
5121 
5122 	if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
5123 		dev_info(adev->dev,
5124 			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, memory training failed\n",
5125 			 socket_id, aid_id, hbm_id, fw_status);
5126 
5127 	if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
5128 		dev_info(adev->dev,
5129 			 "socket: %d, aid: %d, fw_status: 0x%x, firmware load failed at boot time\n",
5130 			 socket_id, aid_id, fw_status);
5131 
5132 	if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
5133 		dev_info(adev->dev,
5134 			 "socket: %d, aid: %d, fw_status: 0x%x, wafl link training failed\n",
5135 			 socket_id, aid_id, fw_status);
5136 
5137 	if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
5138 		dev_info(adev->dev,
5139 			 "socket: %d, aid: %d, fw_status: 0x%x, xgmi link training failed\n",
5140 			 socket_id, aid_id, fw_status);
5141 
5142 	if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
5143 		dev_info(adev->dev,
5144 			 "socket: %d, aid: %d, fw_status: 0x%x, usr cp link training failed\n",
5145 			 socket_id, aid_id, fw_status);
5146 
5147 	if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
5148 		dev_info(adev->dev,
5149 			 "socket: %d, aid: %d, fw_status: 0x%x, usr dp link training failed\n",
5150 			 socket_id, aid_id, fw_status);
5151 
5152 	if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
5153 		dev_info(adev->dev,
5154 			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm memory test failed\n",
5155 			 socket_id, aid_id, hbm_id, fw_status);
5156 
5157 	if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
5158 		dev_info(adev->dev,
5159 			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n",
5160 			 socket_id, aid_id, hbm_id, fw_status);
5161 
5162 	if (AMDGPU_RAS_GPU_ERR_DATA_ABORT(boot_error))
5163 		dev_info(adev->dev,
5164 			 "socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
5165 			 socket_id, aid_id, fw_status);
5166 
5167 	if (AMDGPU_RAS_GPU_ERR_GENERIC(boot_error))
5168 		dev_info(adev->dev,
5169 			 "socket: %d, aid: %d, fw_status: 0x%x, Boot Controller Generic Error\n",
5170 			 socket_id, aid_id, fw_status);
5171 }
5172 
5173 static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev,
5174 					   u32 instance)
5175 {
5176 	u64 reg_addr;
5177 	u32 reg_data;
5178 	int retry_loop;
5179 
5180 	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
5181 		   aqua_vanjaram_encode_ext_smn_addressing(instance);
5182 
5183 	for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
5184 		reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
5185 		if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS)
5186 			return false;
5187 		else
5188 			msleep(1);
5189 	}
5190 
5191 	return true;
5192 }
5193 
5194 void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
5195 {
5196 	u32 i;
5197 
5198 	for (i = 0; i < num_instances; i++) {
5199 		if (amdgpu_ras_boot_error_detected(adev, i))
5200 			amdgpu_ras_boot_time_error_reporting(adev, i);
5201 	}
5202 }
5203 
5204 int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
5205 {
5206 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5207 	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
5208 	uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
5209 	int ret = 0;
5210 
5211 	mutex_lock(&con->page_rsv_lock);
5212 	ret = amdgpu_vram_mgr_query_page_status(mgr, start);
5213 	if (ret == -ENOENT)
5214 		ret = amdgpu_vram_mgr_reserve_range(mgr, start, AMDGPU_GPU_PAGE_SIZE);
5215 	mutex_unlock(&con->page_rsv_lock);
5216 
5217 	return ret;
5218 }
5219 
5220 void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
5221 				const char *fmt, ...)
5222 {
5223 	struct va_format vaf;
5224 	va_list args;
5225 
5226 	va_start(args, fmt);
5227 	vaf.fmt = fmt;
5228 	vaf.va = &args;
5229 
5230 	if (RAS_EVENT_ID_IS_VALID(event_id))
5231 		dev_printk(KERN_INFO, adev->dev, "{%llu}%pV", event_id, &vaf);
5232 	else
5233 		dev_printk(KERN_INFO, adev->dev, "%pV", &vaf);
5234 
5235 	va_end(args);
5236 }
5237 
5238 bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
5239 {
5240 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5241 
5242 	if (!con)
5243 		return false;
5244 
5245 	return con->is_rma;
5246 }
5247