xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c (revision e77a8005748547fb1f10645097f13ccdd804d7e5)
1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  *
23  */
24 #include <linux/debugfs.h>
25 #include <linux/list.h>
26 #include <linux/module.h>
27 #include <linux/uaccess.h>
28 #include <linux/reboot.h>
29 #include <linux/syscalls.h>
30 #include <linux/pm_runtime.h>
31 #include <linux/list_sort.h>
32 
33 #include "amdgpu.h"
34 #include "amdgpu_ras.h"
35 #include "amdgpu_atomfirmware.h"
36 #include "amdgpu_xgmi.h"
37 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
38 #include "nbio_v4_3.h"
39 #include "nbio_v7_9.h"
40 #include "atom.h"
41 #include "amdgpu_reset.h"
42 #include "amdgpu_psp.h"
43 
44 #ifdef CONFIG_X86_MCE_AMD
45 #include <asm/mce.h>
46 
47 static bool notifier_registered;
48 #endif
49 static const char *RAS_FS_NAME = "ras";
50 
51 const char *ras_error_string[] = {
52 	"none",
53 	"parity",
54 	"single_correctable",
55 	"multi_uncorrectable",
56 	"poison",
57 };
58 
59 const char *ras_block_string[] = {
60 	"umc",
61 	"sdma",
62 	"gfx",
63 	"mmhub",
64 	"athub",
65 	"pcie_bif",
66 	"hdp",
67 	"xgmi_wafl",
68 	"df",
69 	"smn",
70 	"sem",
71 	"mp0",
72 	"mp1",
73 	"fuse",
74 	"mca",
75 	"vcn",
76 	"jpeg",
77 	"ih",
78 	"mpio",
79 };
80 
81 const char *ras_mca_block_string[] = {
82 	"mca_mp0",
83 	"mca_mp1",
84 	"mca_mpio",
85 	"mca_iohc",
86 };
87 
88 struct amdgpu_ras_block_list {
89 	/* ras block link */
90 	struct list_head node;
91 
92 	struct amdgpu_ras_block_object *ras_obj;
93 };
94 
95 const char *get_ras_block_str(struct ras_common_if *ras_block)
96 {
97 	if (!ras_block)
98 		return "NULL";
99 
100 	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
101 	    ras_block->block >= ARRAY_SIZE(ras_block_string))
102 		return "OUT OF RANGE";
103 
104 	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
105 		return ras_mca_block_string[ras_block->sub_block_index];
106 
107 	return ras_block_string[ras_block->block];
108 }
109 
110 #define ras_block_str(_BLOCK_) \
111 	(((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")
112 
113 #define ras_err_str(i) (ras_error_string[ffs(i)])
114 
115 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
116 
117 /* inject address is 52 bits */
118 #define	RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)
119 
120 /* typical ECC bad page rate is 1 bad page per 100MB VRAM */
121 #define RAS_BAD_PAGE_COVER              (100 * 1024 * 1024ULL)
122 
123 #define MAX_UMC_POISON_POLLING_TIME_ASYNC  300  //ms
124 
125 #define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100  //ms
126 
127 #define MAX_FLUSH_RETIRE_DWORK_TIMES  100
128 
129 enum amdgpu_ras_retire_page_reservation {
130 	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
131 	AMDGPU_RAS_RETIRE_PAGE_PENDING,
132 	AMDGPU_RAS_RETIRE_PAGE_FAULT,
133 };
134 
135 atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
136 
137 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
138 				uint64_t addr);
139 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
140 				uint64_t addr);
141 #ifdef CONFIG_X86_MCE_AMD
142 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
143 struct mce_notifier_adev_list {
144 	struct amdgpu_device *devs[MAX_GPU_INSTANCE];
145 	int num_gpu;
146 };
147 static struct mce_notifier_adev_list mce_adev_list;
148 #endif
149 
150 void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
151 {
152 	if (adev && amdgpu_ras_get_context(adev))
153 		amdgpu_ras_get_context(adev)->error_query_ready = ready;
154 }
155 
156 static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
157 {
158 	if (adev && amdgpu_ras_get_context(adev))
159 		return amdgpu_ras_get_context(adev)->error_query_ready;
160 
161 	return false;
162 }
163 
164 static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
165 {
166 	struct ras_err_data err_data;
167 	struct eeprom_table_record err_rec;
168 	int ret;
169 
170 	if ((address >= adev->gmc.mc_vram_size) ||
171 	    (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
172 		dev_warn(adev->dev,
173 		         "RAS WARN: input address 0x%llx is invalid.\n",
174 		         address);
175 		return -EINVAL;
176 	}
177 
178 	if (amdgpu_ras_check_bad_page(adev, address)) {
179 		dev_warn(adev->dev,
180 			 "RAS WARN: 0x%llx has already been marked as bad page!\n",
181 			 address);
182 		return 0;
183 	}
184 
185 	ret = amdgpu_ras_error_data_init(&err_data);
186 	if (ret)
187 		return ret;
188 
189 	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
190 	err_data.err_addr = &err_rec;
191 	amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);
192 
193 	if (amdgpu_bad_page_threshold != 0) {
194 		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
195 					 err_data.err_addr_cnt);
196 		amdgpu_ras_save_bad_pages(adev, NULL);
197 	}
198 
199 	amdgpu_ras_error_data_fini(&err_data);
200 
201 	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
202 	dev_warn(adev->dev, "Clear EEPROM:\n");
203 	dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
204 
205 	return 0;
206 }
207 
208 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
209 					size_t size, loff_t *pos)
210 {
211 	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
212 	struct ras_query_if info = {
213 		.head = obj->head,
214 	};
215 	ssize_t s;
216 	char val[128];
217 
218 	if (amdgpu_ras_query_error_status(obj->adev, &info))
219 		return -EINVAL;
220 
221 	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
222 	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
223 	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
224 		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
225 			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
226 	}
227 
228 	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
229 			"ue", info.ue_count,
230 			"ce", info.ce_count);
231 	if (*pos >= s)
232 		return 0;
233 
234 	s -= *pos;
235 	s = min_t(u64, s, size);
236 
237 
238 	if (copy_to_user(buf, &val[*pos], s))
239 		return -EINVAL;
240 
241 	*pos += s;
242 
243 	return s;
244 }
245 
246 static const struct file_operations amdgpu_ras_debugfs_ops = {
247 	.owner = THIS_MODULE,
248 	.read = amdgpu_ras_debugfs_read,
249 	.write = NULL,
250 	.llseek = default_llseek
251 };
252 
253 static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
254 {
255 	int i;
256 
257 	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
258 		*block_id = i;
259 		if (strcmp(name, ras_block_string[i]) == 0)
260 			return 0;
261 	}
262 	return -EINVAL;
263 }
264 
265 static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
266 		const char __user *buf, size_t size,
267 		loff_t *pos, struct ras_debug_if *data)
268 {
269 	ssize_t s = min_t(u64, 64, size);
270 	char str[65];
271 	char block_name[33];
272 	char err[9] = "ue";
273 	int op = -1;
274 	int block_id;
275 	uint32_t sub_block;
276 	u64 address, value;
277 	/* default value is 0 if the mask is not set by user */
278 	u32 instance_mask = 0;
279 
280 	if (*pos)
281 		return -EINVAL;
282 	*pos = size;
283 
284 	memset(str, 0, sizeof(str));
285 	memset(data, 0, sizeof(*data));
286 
287 	if (copy_from_user(str, buf, s))
288 		return -EINVAL;
289 
290 	if (sscanf(str, "disable %32s", block_name) == 1)
291 		op = 0;
292 	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
293 		op = 1;
294 	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
295 		op = 2;
296 	else if (strstr(str, "retire_page") != NULL)
297 		op = 3;
298 	else if (str[0] && str[1] && str[2] && str[3])
299 		/* ascii string, but commands are not matched. */
300 		return -EINVAL;
301 
302 	if (op != -1) {
303 		if (op == 3) {
304 			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
305 			    sscanf(str, "%*s %llu", &address) != 1)
306 				return -EINVAL;
307 
308 			data->op = op;
309 			data->inject.address = address;
310 
311 			return 0;
312 		}
313 
314 		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
315 			return -EINVAL;
316 
317 		data->head.block = block_id;
318 		/* only ue, ce and poison errors are supported */
319 		if (!memcmp("ue", err, 2))
320 			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
321 		else if (!memcmp("ce", err, 2))
322 			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
323 		else if (!memcmp("poison", err, 6))
324 			data->head.type = AMDGPU_RAS_ERROR__POISON;
325 		else
326 			return -EINVAL;
327 
328 		data->op = op;
329 
330 		if (op == 2) {
331 			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
332 				   &sub_block, &address, &value, &instance_mask) != 4 &&
333 			    sscanf(str, "%*s %*s %*s %u %llu %llu %u",
334 				   &sub_block, &address, &value, &instance_mask) != 4 &&
335 				sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
336 				   &sub_block, &address, &value) != 3 &&
337 			    sscanf(str, "%*s %*s %*s %u %llu %llu",
338 				   &sub_block, &address, &value) != 3)
339 				return -EINVAL;
340 			data->head.sub_block_index = sub_block;
341 			data->inject.address = address;
342 			data->inject.value = value;
343 			data->inject.instance_mask = instance_mask;
344 		}
345 	} else {
346 		if (size < sizeof(*data))
347 			return -EINVAL;
348 
349 		if (copy_from_user(data, buf, sizeof(*data)))
350 			return -EINVAL;
351 	}
352 
353 	return 0;
354 }
355 
356 static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
357 				struct ras_debug_if *data)
358 {
359 	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
360 	uint32_t mask, inst_mask = data->inject.instance_mask;
361 
362 	/* no need to set instance mask if there is only one instance */
363 	if (num_xcc <= 1 && inst_mask) {
364 		data->inject.instance_mask = 0;
365 		dev_dbg(adev->dev,
366 			"RAS inject mask (0x%x) is not supported, forcing it to 0.\n",
367 			inst_mask);
368 
369 		return;
370 	}
371 
372 	switch (data->head.block) {
373 	case AMDGPU_RAS_BLOCK__GFX:
374 		mask = GENMASK(num_xcc - 1, 0);
375 		break;
376 	case AMDGPU_RAS_BLOCK__SDMA:
377 		mask = GENMASK(adev->sdma.num_instances - 1, 0);
378 		break;
379 	case AMDGPU_RAS_BLOCK__VCN:
380 	case AMDGPU_RAS_BLOCK__JPEG:
381 		mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
382 		break;
383 	default:
384 		mask = inst_mask;
385 		break;
386 	}
387 
388 	/* remove invalid bits in instance mask */
389 	data->inject.instance_mask &= mask;
390 	if (inst_mask != data->inject.instance_mask)
391 		dev_dbg(adev->dev,
392 			"Adjust RAS inject mask 0x%x to 0x%x\n",
393 			inst_mask, data->inject.instance_mask);
394 }
395 
396 /**
397  * DOC: AMDGPU RAS debugfs control interface
398  *
399  * The control interface accepts struct ras_debug_if which has two members.
400  *
401  * First member: ras_debug_if::head or ras_debug_if::inject.
402  *
403  * head is used to indicate which IP block will be under control.
404  *
405  * head has four members: block, type, sub_block_index and name.
406  * block: which IP will be under control.
407  * type: what kind of error will be enabled/disabled/injected.
408  * sub_block_index: some IPs have subcomponents, e.g. GFX, SDMA.
409  * name: the name of the IP.
410  *
411  * inject has three more members than head: address, value and mask.
412  * As their names indicate, the inject operation will write the
413  * value to the address.
414  *
415  * The second member: struct ras_debug_if::op.
416  * It has three kinds of operations.
417  *
418  * - 0: disable RAS on the block. Take ::head as its data.
419  * - 1: enable RAS on the block. Take ::head as its data.
420  * - 2: inject errors on the block. Take ::inject as its data.
421  *
422  * How to use the interface?
423  *
424  * In a program
425  *
426  * Copy the struct ras_debug_if in your code and initialize it.
427  * Write the struct to the control interface.
428  *
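 * As an illustration only (this sketch is not part of the driver, and its
 * user-space boilerplate is assumed), a program using this path might copy
 * struct ras_debug_if (and the block/error enums it references) from
 * amdgpu_ras.h and then do something like:
 *
 * .. code-block:: c
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct ras_debug_if data = { 0 };
 *		int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *
 *		data.op = 1;	// op 1 == enable RAS on the block
 *		data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *		data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *
 *		if (write(fd, &data, sizeof(data)) != sizeof(data))
 *			perror("ras_ctrl write");
 *		close(fd);
 *		return 0;
 *	}
 *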
429  * From shell
430  *
431  * .. code-block:: bash
432  *
433  *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
434  *	echo "enable  <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
435  *	echo "inject  <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
436  *
437  * Where N is the card which you want to affect.
438  *
439  * "disable" requires only the block.
440  * "enable" requires the block and error type.
441  * "inject" requires the block, error type, address, and value.
442  *
443  * The block is one of: umc, sdma, gfx, etc.
444  *	see ras_block_string[] for details
445  *
446  * The error type is one of: ue, ce and poison where,
447  *	ue is multi-uncorrectable
448  *	ce is single-correctable
449  *	poison is poison
450  *
451  * The sub-block is the sub-block index; pass 0 if there is no sub-block.
452  * The address and value are hexadecimal numbers; the leading 0x is optional.
453  * The mask is the instance mask; it is optional and the default value is 0x1.
454  *
455  * For instance,
456  *
457  * .. code-block:: bash
458  *
459  *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
460  *	echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
461  *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
462  *
463  * How to check the result of the operation?
464  *
465  * To check disable/enable, see "ras" features at,
466  * /sys/class/drm/card[0/1/2...]/device/ras/features
467  *
468  * To check inject, see the corresponding error count at,
469  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
470  *
471  * .. note::
472  *	Operations are only allowed on blocks which are supported.
473  *	Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
474  *	to see which blocks support RAS on a particular asic.
475  *
476  */
477 static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
478 					     const char __user *buf,
479 					     size_t size, loff_t *pos)
480 {
481 	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
482 	struct ras_debug_if data;
483 	int ret = 0;
484 
485 	if (!amdgpu_ras_get_error_query_ready(adev)) {
486 		dev_warn(adev->dev, "RAS WARN: error injection "
487 				"currently inaccessible\n");
488 		return size;
489 	}
490 
491 	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
492 	if (ret)
493 		return ret;
494 
495 	if (data.op == 3) {
496 		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
497 		if (!ret)
498 			return size;
499 		else
500 			return ret;
501 	}
502 
503 	if (!amdgpu_ras_is_supported(adev, data.head.block))
504 		return -EINVAL;
505 
506 	switch (data.op) {
507 	case 0:
508 		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
509 		break;
510 	case 1:
511 		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
512 		break;
513 	case 2:
514 		if ((data.inject.address >= adev->gmc.mc_vram_size &&
515 		    adev->gmc.mc_vram_size) ||
516 		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
517 			dev_warn(adev->dev, "RAS WARN: input address "
518 					"0x%llx is invalid.",
519 					data.inject.address);
520 			ret = -EINVAL;
521 			break;
522 		}
523 
524 		/* umc ce/ue error injection for a bad page is not allowed */
525 		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
526 		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
527 			dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
528 				 "already been marked as bad!\n",
529 				 data.inject.address);
530 			break;
531 		}
532 
533 		amdgpu_ras_instance_mask_check(adev, &data);
534 
535 		/* data.inject.address is offset instead of absolute gpu address */
536 		ret = amdgpu_ras_error_inject(adev, &data.inject);
537 		break;
538 	default:
539 		ret = -EINVAL;
540 		break;
541 	}
542 
543 	if (ret)
544 		return ret;
545 
546 	return size;
547 }
548 
549 /**
550  * DOC: AMDGPU RAS debugfs EEPROM table reset interface
551  *
552  * Some boards contain an EEPROM which is used to persistently store a list of
553  * bad pages which experience ECC errors in vram.  This interface provides
554  * a way to reset the EEPROM, e.g., after testing error injection.
555  *
556  * Usage:
557  *
558  * .. code-block:: bash
559  *
560  *	echo 1 > ../ras/ras_eeprom_reset
561  *
562  * will reset EEPROM table to 0 entries.
563  *
564  */
565 static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
566 					       const char __user *buf,
567 					       size_t size, loff_t *pos)
568 {
569 	struct amdgpu_device *adev =
570 		(struct amdgpu_device *)file_inode(f)->i_private;
571 	int ret;
572 
573 	ret = amdgpu_ras_eeprom_reset_table(
574 		&(amdgpu_ras_get_context(adev)->eeprom_control));
575 
576 	if (!ret) {
577 		/* Something was written to EEPROM.
578 		 */
579 		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
580 		return size;
581 	} else {
582 		return ret;
583 	}
584 }
585 
586 static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
587 	.owner = THIS_MODULE,
588 	.read = NULL,
589 	.write = amdgpu_ras_debugfs_ctrl_write,
590 	.llseek = default_llseek
591 };
592 
593 static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
594 	.owner = THIS_MODULE,
595 	.read = NULL,
596 	.write = amdgpu_ras_debugfs_eeprom_write,
597 	.llseek = default_llseek
598 };
599 
600 /**
601  * DOC: AMDGPU RAS sysfs Error Count Interface
602  *
603  * It allows the user to read the error count for each IP block on the gpu through
604  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
605  *
606  * It outputs multiple lines which report the uncorrected (ue) and corrected
607  * (ce) error counts.
608  *
609  * The format of one line is below,
610  *
611  * [ce|ue]: count
612  *
613  * Example:
614  *
615  * .. code-block:: bash
616  *
617  *	ue: 0
618  *	ce: 1
619  *
620  */
621 static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
622 		struct device_attribute *attr, char *buf)
623 {
624 	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
625 	struct ras_query_if info = {
626 		.head = obj->head,
627 	};
628 
629 	if (!amdgpu_ras_get_error_query_ready(obj->adev))
630 		return sysfs_emit(buf, "Query currently inaccessible\n");
631 
632 	if (amdgpu_ras_query_error_status(obj->adev, &info))
633 		return -EINVAL;
634 
635 	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
636 	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
637 		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
638 			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
639 	}
640 
641 	if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
642 		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
643 				"ce", info.ce_count, "de", info.de_count);
644 	else
645 		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
646 				"ce", info.ce_count);
647 }
648 
649 /* obj begin */
650 
651 #define get_obj(obj) do { (obj)->use++; } while (0)
652 #define alive_obj(obj) ((obj)->use)
653 
654 static inline void put_obj(struct ras_manager *obj)
655 {
656 	if (obj && (--obj->use == 0)) {
657 		list_del(&obj->node);
658 		amdgpu_ras_error_data_fini(&obj->err_data);
659 	}
660 
661 	if (obj && (obj->use < 0))
662 		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
663 }
664 
665 /* make one obj and return it. */
666 static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
667 		struct ras_common_if *head)
668 {
669 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
670 	struct ras_manager *obj;
671 
672 	if (!adev->ras_enabled || !con)
673 		return NULL;
674 
675 	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
676 		return NULL;
677 
678 	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
679 		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
680 			return NULL;
681 
682 		obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
683 	} else
684 		obj = &con->objs[head->block];
685 
686 	/* already exist. return obj? */
687 	if (alive_obj(obj))
688 		return NULL;
689 
690 	if (amdgpu_ras_error_data_init(&obj->err_data))
691 		return NULL;
692 
693 	obj->head = *head;
694 	obj->adev = adev;
695 	list_add(&obj->node, &con->head);
696 	get_obj(obj);
697 
698 	return obj;
699 }
700 
701 /* return an obj equal to head, or the first when head is NULL */
702 struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
703 		struct ras_common_if *head)
704 {
705 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
706 	struct ras_manager *obj;
707 	int i;
708 
709 	if (!adev->ras_enabled || !con)
710 		return NULL;
711 
712 	if (head) {
713 		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
714 			return NULL;
715 
716 		if (head->block == AMDGPU_RAS_BLOCK__MCA) {
717 			if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
718 				return NULL;
719 
720 			obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
721 		} else
722 			obj = &con->objs[head->block];
723 
724 		if (alive_obj(obj))
725 			return obj;
726 	} else {
727 		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
728 			obj = &con->objs[i];
729 			if (alive_obj(obj))
730 				return obj;
731 		}
732 	}
733 
734 	return NULL;
735 }
736 /* obj end */
737 
738 /* feature ctl begin */
739 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
740 					 struct ras_common_if *head)
741 {
742 	return adev->ras_hw_enabled & BIT(head->block);
743 }
744 
745 static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
746 		struct ras_common_if *head)
747 {
748 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
749 
750 	return con->features & BIT(head->block);
751 }
752 
753 /*
754  * if obj is not created, then create one.
755  * set feature enable flag.
756  */
757 static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
758 		struct ras_common_if *head, int enable)
759 {
760 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
761 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
762 
763 	/* If hardware does not support ras, then do not create the obj.
764 	 * But if hardware supports ras, we can create the obj.
765 	 * The ras framework checks con->hw_supported to see if it needs to do
766 	 * the corresponding initialization.
767 	 * The IP checks con->support to see if it needs to disable ras.
768 	 */
769 	if (!amdgpu_ras_is_feature_allowed(adev, head))
770 		return 0;
771 
772 	if (enable) {
773 		if (!obj) {
774 			obj = amdgpu_ras_create_obj(adev, head);
775 			if (!obj)
776 				return -EINVAL;
777 		} else {
778 			/* In case we create obj somewhere else */
779 			get_obj(obj);
780 		}
781 		con->features |= BIT(head->block);
782 	} else {
783 		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
784 			con->features &= ~BIT(head->block);
785 			put_obj(obj);
786 		}
787 	}
788 
789 	return 0;
790 }
791 
792 /* wrapper of psp_ras_enable_features */
793 int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
794 		struct ras_common_if *head, bool enable)
795 {
796 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
797 	union ta_ras_cmd_input *info;
798 	int ret;
799 
800 	if (!con)
801 		return -EINVAL;
802 
803 	/* For non-gfx ip, do not enable ras feature if it is not allowed */
804 	/* For gfx ip, regardless of feature support status, */
805 	/* Force issue enable or disable ras feature commands */
806 	if (head->block != AMDGPU_RAS_BLOCK__GFX &&
807 	    !amdgpu_ras_is_feature_allowed(adev, head))
808 		return 0;
809 
810 	/* Only enable gfx ras feature from host side */
811 	if (head->block == AMDGPU_RAS_BLOCK__GFX &&
812 	    !amdgpu_sriov_vf(adev) &&
813 	    !amdgpu_ras_intr_triggered()) {
814 		info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
815 		if (!info)
816 			return -ENOMEM;
817 
818 		if (!enable) {
819 			info->disable_features = (struct ta_ras_disable_features_input) {
820 				.block_id =  amdgpu_ras_block_to_ta(head->block),
821 				.error_type = amdgpu_ras_error_to_ta(head->type),
822 			};
823 		} else {
824 			info->enable_features = (struct ta_ras_enable_features_input) {
825 				.block_id =  amdgpu_ras_block_to_ta(head->block),
826 				.error_type = amdgpu_ras_error_to_ta(head->type),
827 			};
828 		}
829 
830 		ret = psp_ras_enable_features(&adev->psp, info, enable);
831 		if (ret) {
832 			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
833 				enable ? "enable":"disable",
834 				get_ras_block_str(head),
835 				amdgpu_ras_is_poison_mode_supported(adev), ret);
836 			kfree(info);
837 			return ret;
838 		}
839 
840 		kfree(info);
841 	}
842 
843 	/* setup the obj */
844 	__amdgpu_ras_feature_enable(adev, head, enable);
845 
846 	return 0;
847 }
848 
849 /* Only used in device probe stage and called only once. */
850 int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
851 		struct ras_common_if *head, bool enable)
852 {
853 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
854 	int ret;
855 
856 	if (!con)
857 		return -EINVAL;
858 
859 	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
860 		if (enable) {
861 			/* There is no harm in issuing a ras TA cmd regardless of
862 			 * the current ras state.
863 			 * If current state == target state, it will do nothing.
864 			 * But sometimes it requests the driver to reset and repost
865 			 * with error code -EAGAIN.
866 			 */
867 			ret = amdgpu_ras_feature_enable(adev, head, 1);
868 			/* With old ras TA, we might fail to enable ras.
869 			 * Log it and just set up the object.
870 			 * TODO: remove this WA in the future.
871 			 */
872 			if (ret == -EINVAL) {
873 				ret = __amdgpu_ras_feature_enable(adev, head, 1);
874 				if (!ret)
875 					dev_info(adev->dev,
876 						"RAS INFO: %s setup object\n",
877 						get_ras_block_str(head));
878 			}
879 		} else {
880 			/* setup the object then issue a ras TA disable cmd.*/
881 			ret = __amdgpu_ras_feature_enable(adev, head, 1);
882 			if (ret)
883 				return ret;
884 
885 			/* gfx block ras disable cmd must be sent to ras-ta */
886 			if (head->block == AMDGPU_RAS_BLOCK__GFX)
887 				con->features |= BIT(head->block);
888 
889 			ret = amdgpu_ras_feature_enable(adev, head, 0);
890 
891 			/* clean gfx block ras features flag */
892 			if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
893 				con->features &= ~BIT(head->block);
894 		}
895 	} else
896 		ret = amdgpu_ras_feature_enable(adev, head, enable);
897 
898 	return ret;
899 }
900 
901 static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
902 		bool bypass)
903 {
904 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
905 	struct ras_manager *obj, *tmp;
906 
907 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
908 		/* bypass psp.
909 		 * aka just release the obj and corresponding flags
910 		 */
911 		if (bypass) {
912 			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
913 				break;
914 		} else {
915 			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
916 				break;
917 		}
918 	}
919 
920 	return con->features;
921 }
922 
923 static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
924 		bool bypass)
925 {
926 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
927 	int i;
928 	const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;
929 
930 	for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
931 		struct ras_common_if head = {
932 			.block = i,
933 			.type = default_ras_type,
934 			.sub_block_index = 0,
935 		};
936 
937 		if (i == AMDGPU_RAS_BLOCK__MCA)
938 			continue;
939 
940 		if (bypass) {
941 			/*
942 			 * bypass psp. vbios enables ras for us,
943 			 * so just create the obj
944 			 */
945 			if (__amdgpu_ras_feature_enable(adev, &head, 1))
946 				break;
947 		} else {
948 			if (amdgpu_ras_feature_enable(adev, &head, 1))
949 				break;
950 		}
951 	}
952 
953 	for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
954 		struct ras_common_if head = {
955 			.block = AMDGPU_RAS_BLOCK__MCA,
956 			.type = default_ras_type,
957 			.sub_block_index = i,
958 		};
959 
960 		if (bypass) {
961 			/*
962 			 * bypass psp. vbios enables ras for us,
963 			 * so just create the obj
964 			 */
965 			if (__amdgpu_ras_feature_enable(adev, &head, 1))
966 				break;
967 		} else {
968 			if (amdgpu_ras_feature_enable(adev, &head, 1))
969 				break;
970 		}
971 	}
972 
973 	return con->features;
974 }
975 /* feature ctl end */
976 
977 static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
978 		enum amdgpu_ras_block block)
979 {
980 	if (!block_obj)
981 		return -EINVAL;
982 
983 	if (block_obj->ras_comm.block == block)
984 		return 0;
985 
986 	return -EINVAL;
987 }
988 
989 static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
990 					enum amdgpu_ras_block block, uint32_t sub_block_index)
991 {
992 	struct amdgpu_ras_block_list *node, *tmp;
993 	struct amdgpu_ras_block_object *obj;
994 
995 	if (block >= AMDGPU_RAS_BLOCK__LAST)
996 		return NULL;
997 
998 	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
999 		if (!node->ras_obj) {
1000 			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
1001 			continue;
1002 		}
1003 
1004 		obj = node->ras_obj;
1005 		if (obj->ras_block_match) {
1006 			if (obj->ras_block_match(obj, block, sub_block_index) == 0)
1007 				return obj;
1008 		} else {
1009 			if (amdgpu_ras_block_match_default(obj, block) == 0)
1010 				return obj;
1011 		}
1012 	}
1013 
1014 	return NULL;
1015 }
1016 
1017 static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
1018 {
1019 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
1020 	int ret = 0;
1021 
1022 	/*
1023 	 * choosing the right query method according to
1024 	 * whether the smu supports querying error information
1025 	 */
1026 	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
1027 	if (ret == -EOPNOTSUPP) {
1028 		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1029 			adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
1030 			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
1031 
1032 		/* umc query_ras_error_address is also responsible for clearing
1033 		 * error status
1034 		 */
1035 		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1036 		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
1037 			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
1038 	} else if (!ret) {
1039 		if (adev->umc.ras &&
1040 			adev->umc.ras->ecc_info_query_ras_error_count)
1041 			adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);
1042 
1043 		if (adev->umc.ras &&
1044 			adev->umc.ras->ecc_info_query_ras_error_address)
1045 			adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
1046 	}
1047 }
1048 
1049 static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
1050 					      struct ras_manager *ras_mgr,
1051 					      struct ras_err_data *err_data,
1052 					      struct ras_query_context *qctx,
1053 					      const char *blk_name,
1054 					      bool is_ue,
1055 					      bool is_de)
1056 {
1057 	struct amdgpu_smuio_mcm_config_info *mcm_info;
1058 	struct ras_err_node *err_node;
1059 	struct ras_err_info *err_info;
1060 	u64 event_id = qctx->evid.event_id;
1061 
1062 	if (is_ue) {
1063 		for_each_ras_error(err_node, err_data) {
1064 			err_info = &err_node->err_info;
1065 			mcm_info = &err_info->mcm_info;
1066 			if (err_info->ue_count) {
1067 				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1068 					      "%lld new uncorrectable hardware errors detected in %s block\n",
1069 					      mcm_info->socket_id,
1070 					      mcm_info->die_id,
1071 					      err_info->ue_count,
1072 					      blk_name);
1073 			}
1074 		}
1075 
1076 		for_each_ras_error(err_node, &ras_mgr->err_data) {
1077 			err_info = &err_node->err_info;
1078 			mcm_info = &err_info->mcm_info;
1079 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1080 				      "%lld uncorrectable hardware errors detected in total in %s block\n",
1081 				      mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
1082 		}
1083 
1084 	} else {
1085 		if (is_de) {
1086 			for_each_ras_error(err_node, err_data) {
1087 				err_info = &err_node->err_info;
1088 				mcm_info = &err_info->mcm_info;
1089 				if (err_info->de_count) {
1090 					RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1091 						      "%lld new deferred hardware errors detected in %s block\n",
1092 						      mcm_info->socket_id,
1093 						      mcm_info->die_id,
1094 						      err_info->de_count,
1095 						      blk_name);
1096 				}
1097 			}
1098 
1099 			for_each_ras_error(err_node, &ras_mgr->err_data) {
1100 				err_info = &err_node->err_info;
1101 				mcm_info = &err_info->mcm_info;
1102 				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1103 					      "%lld deferred hardware errors detected in total in %s block\n",
1104 					      mcm_info->socket_id, mcm_info->die_id,
1105 					      err_info->de_count, blk_name);
1106 			}
1107 		} else {
1108 			for_each_ras_error(err_node, err_data) {
1109 				err_info = &err_node->err_info;
1110 				mcm_info = &err_info->mcm_info;
1111 				if (err_info->ce_count) {
1112 					RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1113 						      "%lld new correctable hardware errors detected in %s block\n",
1114 						      mcm_info->socket_id,
1115 						      mcm_info->die_id,
1116 						      err_info->ce_count,
1117 						      blk_name);
1118 				}
1119 			}
1120 
1121 			for_each_ras_error(err_node, &ras_mgr->err_data) {
1122 				err_info = &err_node->err_info;
1123 				mcm_info = &err_info->mcm_info;
1124 				RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1125 					      "%lld correctable hardware errors detected in total in %s block\n",
1126 					      mcm_info->socket_id, mcm_info->die_id,
1127 					      err_info->ce_count, blk_name);
1128 			}
1129 		}
1130 	}
1131 }
1132 
1133 static inline bool err_data_has_source_info(struct ras_err_data *data)
1134 {
1135 	return !list_empty(&data->err_node_list);
1136 }
1137 
1138 static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
1139 					     struct ras_query_if *query_if,
1140 					     struct ras_err_data *err_data,
1141 					     struct ras_query_context *qctx)
1142 {
1143 	struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
1144 	const char *blk_name = get_ras_block_str(&query_if->head);
1145 	u64 event_id = qctx->evid.event_id;
1146 
1147 	if (err_data->ce_count) {
1148 		if (err_data_has_source_info(err_data)) {
1149 			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1150 							  blk_name, false, false);
1151 		} else if (!adev->aid_mask &&
1152 			   adev->smuio.funcs &&
1153 			   adev->smuio.funcs->get_socket_id &&
1154 			   adev->smuio.funcs->get_die_id) {
1155 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1156 				      "%ld correctable hardware errors "
1157 				      "detected in %s block\n",
1158 				      adev->smuio.funcs->get_socket_id(adev),
1159 				      adev->smuio.funcs->get_die_id(adev),
1160 				      ras_mgr->err_data.ce_count,
1161 				      blk_name);
1162 		} else {
1163 			RAS_EVENT_LOG(adev, event_id, "%ld correctable hardware errors "
1164 				      "detected in %s block\n",
1165 				      ras_mgr->err_data.ce_count,
1166 				      blk_name);
1167 		}
1168 	}
1169 
1170 	if (err_data->ue_count) {
1171 		if (err_data_has_source_info(err_data)) {
1172 			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1173 							  blk_name, true, false);
1174 		} else if (!adev->aid_mask &&
1175 			   adev->smuio.funcs &&
1176 			   adev->smuio.funcs->get_socket_id &&
1177 			   adev->smuio.funcs->get_die_id) {
1178 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1179 				      "%ld uncorrectable hardware errors "
1180 				      "detected in %s block\n",
1181 				      adev->smuio.funcs->get_socket_id(adev),
1182 				      adev->smuio.funcs->get_die_id(adev),
1183 				      ras_mgr->err_data.ue_count,
1184 				      blk_name);
1185 		} else {
1186 			RAS_EVENT_LOG(adev, event_id, "%ld uncorrectable hardware errors "
1187 				      "detected in %s block\n",
1188 				      ras_mgr->err_data.ue_count,
1189 				      blk_name);
1190 		}
1191 	}
1192 
1193 	if (err_data->de_count) {
1194 		if (err_data_has_source_info(err_data)) {
1195 			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1196 							  blk_name, false, true);
1197 		} else if (!adev->aid_mask &&
1198 			   adev->smuio.funcs &&
1199 			   adev->smuio.funcs->get_socket_id &&
1200 			   adev->smuio.funcs->get_die_id) {
1201 			RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1202 				      "%ld deferred hardware errors "
1203 				      "detected in %s block\n",
1204 				      adev->smuio.funcs->get_socket_id(adev),
1205 				      adev->smuio.funcs->get_die_id(adev),
1206 				      ras_mgr->err_data.de_count,
1207 				      blk_name);
1208 		} else {
1209 			RAS_EVENT_LOG(adev, event_id, "%ld deferred hardware errors "
1210 				      "detected in %s block\n",
1211 				      ras_mgr->err_data.de_count,
1212 				      blk_name);
1213 		}
1214 	}
1215 }
1216 
1217 static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
1218 {
1219 	struct ras_err_node *err_node;
1220 	struct ras_err_info *err_info;
1221 
1222 	if (err_data_has_source_info(err_data)) {
1223 		for_each_ras_error(err_node, err_data) {
1224 			err_info = &err_node->err_info;
1225 			amdgpu_ras_error_statistic_de_count(&obj->err_data,
1226 					&err_info->mcm_info, err_info->de_count);
1227 			amdgpu_ras_error_statistic_ce_count(&obj->err_data,
1228 					&err_info->mcm_info, err_info->ce_count);
1229 			amdgpu_ras_error_statistic_ue_count(&obj->err_data,
1230 					&err_info->mcm_info, err_info->ue_count);
1231 		}
1232 	} else {
1233 		/* for legacy asic path which doesn't have error source info */
1234 		obj->err_data.ue_count += err_data->ue_count;
1235 		obj->err_data.ce_count += err_data->ce_count;
1236 		obj->err_data.de_count += err_data->de_count;
1237 	}
1238 }
1239 
1240 static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
1241 {
1242 	struct ras_common_if head;
1243 
1244 	memset(&head, 0, sizeof(head));
1245 	head.block = blk;
1246 
1247 	return amdgpu_ras_find_obj(adev, &head);
1248 }
1249 
1250 int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
1251 			const struct aca_info *aca_info, void *data)
1252 {
1253 	struct ras_manager *obj;
1254 
1255 	/* in resume phase, no need to create aca fs node */
1256 	if (adev->in_suspend || amdgpu_in_reset(adev))
1257 		return 0;
1258 
1259 	obj = get_ras_manager(adev, blk);
1260 	if (!obj)
1261 		return -EINVAL;
1262 
1263 	return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
1264 }
1265 
1266 int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
1267 {
1268 	struct ras_manager *obj;
1269 
1270 	obj = get_ras_manager(adev, blk);
1271 	if (!obj)
1272 		return -EINVAL;
1273 
1274 	amdgpu_aca_remove_handle(&obj->aca_handle);
1275 
1276 	return 0;
1277 }
1278 
1279 static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
1280 					 enum aca_error_type type, struct ras_err_data *err_data,
1281 					 struct ras_query_context *qctx)
1282 {
1283 	struct ras_manager *obj;
1284 
1285 	obj = get_ras_manager(adev, blk);
1286 	if (!obj)
1287 		return -EINVAL;
1288 
1289 	return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data, qctx);
1290 }
1291 
1292 ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
1293 				  struct aca_handle *handle, char *buf, void *data)
1294 {
1295 	struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
1296 	struct ras_query_if info = {
1297 		.head = obj->head,
1298 	};
1299 
1300 	if (!amdgpu_ras_get_error_query_ready(obj->adev))
1301 		return sysfs_emit(buf, "Query currently inaccessible\n");
1302 
1303 	if (amdgpu_ras_query_error_status(obj->adev, &info))
1304 		return -EINVAL;
1305 
1306 	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
1307 			  "ce", info.ce_count, "de", info.de_count);
1308 }
1309 
1310 static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
1311 						struct ras_query_if *info,
1312 						struct ras_err_data *err_data,
1313 						struct ras_query_context *qctx,
1314 						unsigned int error_query_mode)
1315 {
1316 	enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
1317 	struct amdgpu_ras_block_object *block_obj = NULL;
1318 	int ret;
1319 
1320 	if (blk == AMDGPU_RAS_BLOCK_COUNT)
1321 		return -EINVAL;
1322 
1323 	if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
1324 		return -EINVAL;
1325 
1326 	if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
1327 		if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
1328 			amdgpu_ras_get_ecc_info(adev, err_data);
1329 		} else {
1330 			block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
1331 			if (!block_obj || !block_obj->hw_ops) {
1332 				dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1333 					     get_ras_block_str(&info->head));
1334 				return -EINVAL;
1335 			}
1336 
1337 			if (block_obj->hw_ops->query_ras_error_count)
1338 				block_obj->hw_ops->query_ras_error_count(adev, err_data);
1339 
1340 			if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
1341 			    (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
1342 			    (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
1343 				if (block_obj->hw_ops->query_ras_error_status)
1344 					block_obj->hw_ops->query_ras_error_status(adev);
1345 			}
1346 		}
1347 	} else {
1348 		if (amdgpu_aca_is_enabled(adev)) {
1349 			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data, qctx);
1350 			if (ret)
1351 				return ret;
1352 
1353 			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data, qctx);
1354 			if (ret)
1355 				return ret;
1356 
1357 			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data, qctx);
1358 			if (ret)
1359 				return ret;
1360 		} else {
1361 			/* FIXME: add code to check return value later */
1362 			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx);
1363 			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data, qctx);
1364 		}
1365 	}
1366 
1367 	return 0;
1368 }
1369 
1370 /* query/inject/cure begin */
1371 static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev,
1372 						    struct ras_query_if *info,
1373 						    enum ras_event_type type)
1374 {
1375 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1376 	struct ras_err_data err_data;
1377 	struct ras_query_context qctx;
1378 	unsigned int error_query_mode;
1379 	int ret;
1380 
1381 	if (!obj)
1382 		return -EINVAL;
1383 
1384 	ret = amdgpu_ras_error_data_init(&err_data);
1385 	if (ret)
1386 		return ret;
1387 
1388 	if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
1389 		return -EINVAL;
1390 
1391 	memset(&qctx, 0, sizeof(qctx));
1392 	qctx.evid.type = type;
1393 	qctx.evid.event_id = amdgpu_ras_acquire_event_id(adev, type);
1394 
1395 	if (!down_read_trylock(&adev->reset_domain->sem)) {
1396 		ret = -EIO;
1397 		goto out_fini_err_data;
1398 	}
1399 
1400 	ret = amdgpu_ras_query_error_status_helper(adev, info,
1401 						   &err_data,
1402 						   &qctx,
1403 						   error_query_mode);
1404 	up_read(&adev->reset_domain->sem);
1405 	if (ret)
1406 		goto out_fini_err_data;
1407 
1408 	amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
1409 
1410 	info->ue_count = obj->err_data.ue_count;
1411 	info->ce_count = obj->err_data.ce_count;
1412 	info->de_count = obj->err_data.de_count;
1413 
1414 	amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
1415 
1416 out_fini_err_data:
1417 	amdgpu_ras_error_data_fini(&err_data);
1418 
1419 	return ret;
1420 }
1421 
1422 int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
1423 {
1424 	return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
1425 }
1426 
1427 int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
1428 		enum amdgpu_ras_block block)
1429 {
1430 	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1431 	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
1432 	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
1433 
1434 	if (!block_obj || !block_obj->hw_ops) {
1435 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1436 				ras_block_str(block));
1437 		return -EOPNOTSUPP;
1438 	}
1439 
1440 	if (!amdgpu_ras_is_supported(adev, block) ||
1441 	    !amdgpu_ras_get_aca_debug_mode(adev))
1442 		return -EOPNOTSUPP;
1443 
1444 	/* skip ras error reset in gpu reset */
1445 	if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) &&
1446 	    ((smu_funcs && smu_funcs->set_debug_mode) ||
1447 	     (mca_funcs && mca_funcs->mca_set_debug_mode)))
1448 		return -EOPNOTSUPP;
1449 
1450 	if (block_obj->hw_ops->reset_ras_error_count)
1451 		block_obj->hw_ops->reset_ras_error_count(adev);
1452 
1453 	return 0;
1454 }
1455 
1456 int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
1457 		enum amdgpu_ras_block block)
1458 {
1459 	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1460 
1461 	if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
1462 		return 0;
1463 
1464 	if ((block == AMDGPU_RAS_BLOCK__GFX) ||
1465 	    (block == AMDGPU_RAS_BLOCK__MMHUB)) {
1466 		if (block_obj->hw_ops->reset_ras_error_status)
1467 			block_obj->hw_ops->reset_ras_error_status(adev);
1468 	}
1469 
1470 	return 0;
1471 }
1472 
1473 /* wrapper of psp_ras_trigger_error */
1474 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
1475 		struct ras_inject_if *info)
1476 {
1477 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1478 	struct ta_ras_trigger_error_input block_info = {
1479 		.block_id =  amdgpu_ras_block_to_ta(info->head.block),
1480 		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
1481 		.sub_block_index = info->head.sub_block_index,
1482 		.address = info->address,
1483 		.value = info->value,
1484 	};
1485 	int ret = -EINVAL;
1486 	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
1487 							info->head.block,
1488 							info->head.sub_block_index);
1489 
1490 	/* inject on guest isn't allowed, return success directly */
1491 	if (amdgpu_sriov_vf(adev))
1492 		return 0;
1493 
1494 	if (!obj)
1495 		return -EINVAL;
1496 
1497 	if (!block_obj || !block_obj->hw_ops)	{
1498 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1499 			     get_ras_block_str(&info->head));
1500 		return -EINVAL;
1501 	}
1502 
1503 	/* Calculate XGMI relative offset */
1504 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
1505 	    info->head.block != AMDGPU_RAS_BLOCK__GFX) {
1506 		block_info.address =
1507 			amdgpu_xgmi_get_relative_phy_addr(adev,
1508 							  block_info.address);
1509 	}
1510 
1511 	if (block_obj->hw_ops->ras_error_inject) {
1512 		if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
1513 			ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
1514 		else /* Special ras_error_inject is defined (e.g: xgmi) */
1515 			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
1516 						info->instance_mask);
1517 	} else {
1518 		/* default path */
1519 		ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
1520 	}
1521 
1522 	if (ret)
1523 		dev_err(adev->dev, "ras inject %s failed %d\n",
1524 			get_ras_block_str(&info->head), ret);
1525 
1526 	return ret;
1527 }
1528 
1529 /**
1530  * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
1531  * @adev: pointer to AMD GPU device
1532  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1533  * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
1534  * @query_info: pointer to ras_query_if
1535  *
1536  * Return 0 on query success or if there is nothing to do, otherwise return an
1537  * error on failure
1538  */
1539 static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
1540 					       unsigned long *ce_count,
1541 					       unsigned long *ue_count,
1542 					       struct ras_query_if *query_info)
1543 {
1544 	int ret;
1545 
1546 	if (!query_info)
1547 		/* do nothing if query_info is not specified */
1548 		return 0;
1549 
1550 	ret = amdgpu_ras_query_error_status(adev, query_info);
1551 	if (ret)
1552 		return ret;
1553 
1554 	*ce_count += query_info->ce_count;
1555 	*ue_count += query_info->ue_count;
1556 
1557 	/* some hardware/IP supports read to clear
1558 	 * no need to explicitly reset the err status after the query call */
1559 	if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
1560 	    amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
1561 		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
1562 			dev_warn(adev->dev,
1563 				 "Failed to reset error counter and error status\n");
1564 	}
1565 
1566 	return 0;
1567 }
1568 
1569 /**
1570  * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
1571  * @adev: pointer to AMD GPU device
1572  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1573  * @ue_count: pointer to an integer to be set to the count of uncorrectable
1574  * errors.
1575  * @query_info: pointer to ras_query_if if the query request is only for a
1576  * specific ip block; if info is NULL, then the query request is for
1577  * all the ip blocks that support querying ras error counters/status
1578  *
1579  * If @ce_count or @ue_count is set, count the corresponding errors and return
1580  * them in those integer pointers. Return 0 if the device
1581  * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
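 *
 * A minimal, illustrative caller (not taken from existing driver code) might
 * look like the following, assuming a valid @adev:
 *
 * .. code-block:: c
 *
 *	unsigned long ce = 0, ue = 0;
 *
 *	// NULL query_info: sum counts over all RAS-capable IP blocks.
 *	if (!amdgpu_ras_query_error_count(adev, &ce, &ue, NULL))
 *		dev_info(adev->dev, "ce: %lu, ue: %lu\n", ce, ue);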
1582  */
1583 int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
1584 				 unsigned long *ce_count,
1585 				 unsigned long *ue_count,
1586 				 struct ras_query_if *query_info)
1587 {
1588 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1589 	struct ras_manager *obj;
1590 	unsigned long ce, ue;
1591 	int ret;
1592 
1593 	if (!adev->ras_enabled || !con)
1594 		return -EOPNOTSUPP;
1595 
1596 	/* Don't count since no reporting.
1597 	 */
1598 	if (!ce_count && !ue_count)
1599 		return 0;
1600 
1601 	ce = 0;
1602 	ue = 0;
1603 	if (!query_info) {
1604 		/* query all the ip blocks that support ras query interface */
1605 		list_for_each_entry(obj, &con->head, node) {
1606 			struct ras_query_if info = {
1607 				.head = obj->head,
1608 			};
1609 
1610 			ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
1611 		}
1612 	} else {
1613 		/* query specific ip block */
1614 		ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
1615 	}
1616 
1617 	if (ret)
1618 		return ret;
1619 
1620 	if (ce_count)
1621 		*ce_count = ce;
1622 
1623 	if (ue_count)
1624 		*ue_count = ue;
1625 
1626 	return 0;
1627 }
1628 /* query/inject/cure end */
1629 
1630 
1631 /* sysfs begin */
1632 
1633 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1634 		struct ras_badpage **bps, unsigned int *count);
1635 
1636 static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
1637 {
1638 	switch (flags) {
1639 	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
1640 		return "R";
1641 	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
1642 		return "P";
1643 	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
1644 	default:
1645 		return "F";
1646 	}
1647 }
1648 
1649 /**
1650  * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
1651  *
1652  * It allows user to read the bad pages of vram on the gpu through
1653  * It allows the user to read the bad pages of vram on the gpu through
1654  *
1655  * It outputs multiple lines, and each line stands for one gpu page.
1656  *
1657  * The format of one line is below,
1658  * gpu pfn : gpu page size : flags
1659  *
1660  * gpu pfn and gpu page size are printed in hex format.
1661  * flags can be one of below character,
1662  * flags can be one of the characters below,
1663  *
1664  * R: reserved, this gpu page is reserved and cannot be used.
1665  *
1666  * P: pending for reserve, this gpu page is marked as bad and will be reserved
1667  * in the next window of page_reserve.
1668  *
1669  * F: unable to reserve. This gpu page can't be reserved due to some reason.
1670  * Examples:
1671  *
1672  * .. code-block:: bash
1673  *
1674  *	0x00000001 : 0x00001000 : R
1675  *	0x00000002 : 0x00001000 : P
1676  *
1677  */
1678 
1679 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
1680 		struct kobject *kobj, struct bin_attribute *attr,
1681 		char *buf, loff_t ppos, size_t count)
1682 {
1683 	struct amdgpu_ras *con =
1684 		container_of(attr, struct amdgpu_ras, badpages_attr);
1685 	struct amdgpu_device *adev = con->adev;
1686 	const unsigned int element_size =
1687 		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
1688 	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
1689 	unsigned int end = div64_ul(ppos + count - 1, element_size);
1690 	ssize_t s = 0;
1691 	struct ras_badpage *bps = NULL;
1692 	unsigned int bps_count = 0;
1693 
1694 	memset(buf, 0, count);
1695 
1696 	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
1697 		return 0;
1698 
1699 	for (; start < end && start < bps_count; start++)
1700 		s += scnprintf(&buf[s], element_size + 1,
1701 				"0x%08x : 0x%08x : %1s\n",
1702 				bps[start].bp,
1703 				bps[start].size,
1704 				amdgpu_ras_badpage_flags_str(bps[start].flags));
1705 
1706 	kfree(bps);
1707 
1708 	return s;
1709 }
1710 
1711 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
1712 		struct device_attribute *attr, char *buf)
1713 {
1714 	struct amdgpu_ras *con =
1715 		container_of(attr, struct amdgpu_ras, features_attr);
1716 
1717 	return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
1718 }
1719 
1720 static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
1721 		struct device_attribute *attr, char *buf)
1722 {
1723 	struct amdgpu_ras *con =
1724 		container_of(attr, struct amdgpu_ras, version_attr);
1725 	return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
1726 }
1727 
1728 static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
1729 		struct device_attribute *attr, char *buf)
1730 {
1731 	struct amdgpu_ras *con =
1732 		container_of(attr, struct amdgpu_ras, schema_attr);
1733 	return sysfs_emit(buf, "schema: 0x%x\n", con->schema);
1734 }
1735 
1736 static struct {
1737 	enum ras_event_type type;
1738 	const char *name;
1739 } dump_event[] = {
1740 	{RAS_EVENT_TYPE_FATAL, "Fatal Error"},
1741 	{RAS_EVENT_TYPE_POISON_CREATION, "Poison Creation"},
1742 	{RAS_EVENT_TYPE_POISON_CONSUMPTION, "Poison Consumption"},
1743 };
1744 
1745 static ssize_t amdgpu_ras_sysfs_event_state_show(struct device *dev,
1746 						 struct device_attribute *attr, char *buf)
1747 {
1748 	struct amdgpu_ras *con =
1749 		container_of(attr, struct amdgpu_ras, event_state_attr);
1750 	struct ras_event_manager *event_mgr = con->event_mgr;
1751 	struct ras_event_state *event_state;
1752 	int i, size = 0;
1753 
1754 	if (!event_mgr)
1755 		return -EINVAL;
1756 
1757 	size += sysfs_emit_at(buf, size, "current seqno: %llu\n", atomic64_read(&event_mgr->seqno));
1758 	for (i = 0; i < ARRAY_SIZE(dump_event); i++) {
1759 		event_state = &event_mgr->event_state[dump_event[i].type];
1760 		size += sysfs_emit_at(buf, size, "%s: count:%llu, last_seqno:%llu\n",
1761 				      dump_event[i].name,
1762 				      atomic64_read(&event_state->count),
1763 				      event_state->last_seqno);
1764 	}
1765 
1766 	return (ssize_t)size;
1767 }
1768 
1769 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
1770 {
1771 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1772 
1773 	if (adev->dev->kobj.sd)
1774 		sysfs_remove_file_from_group(&adev->dev->kobj,
1775 				&con->badpages_attr.attr,
1776 				RAS_FS_NAME);
1777 }
1778 
1779 static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev)
1780 {
1781 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1782 	struct attribute *attrs[] = {
1783 		&con->features_attr.attr,
1784 		&con->version_attr.attr,
1785 		&con->schema_attr.attr,
1786 		&con->event_state_attr.attr,
1787 		NULL
1788 	};
1789 	struct attribute_group group = {
1790 		.name = RAS_FS_NAME,
1791 		.attrs = attrs,
1792 	};
1793 
1794 	if (adev->dev->kobj.sd)
1795 		sysfs_remove_group(&adev->dev->kobj, &group);
1796 
1797 	return 0;
1798 }
1799 
1800 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
1801 		struct ras_common_if *head)
1802 {
1803 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1804 
1805 	if (amdgpu_aca_is_enabled(adev))
1806 		return 0;
1807 
1808 	if (!obj || obj->attr_inuse)
1809 		return -EINVAL;
1810 
1811 	get_obj(obj);
1812 
1813 	snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
1814 		"%s_err_count", head->name);
1815 
1816 	obj->sysfs_attr = (struct device_attribute){
1817 		.attr = {
1818 			.name = obj->fs_data.sysfs_name,
1819 			.mode = S_IRUGO,
1820 		},
1821 			.show = amdgpu_ras_sysfs_read,
1822 	};
1823 	sysfs_attr_init(&obj->sysfs_attr.attr);
1824 
1825 	if (sysfs_add_file_to_group(&adev->dev->kobj,
1826 				&obj->sysfs_attr.attr,
1827 				RAS_FS_NAME)) {
1828 		put_obj(obj);
1829 		return -EINVAL;
1830 	}
1831 
1832 	obj->attr_inuse = 1;
1833 
1834 	return 0;
1835 }
1836 
1837 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
1838 		struct ras_common_if *head)
1839 {
1840 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1841 
1842 	if (amdgpu_aca_is_enabled(adev))
1843 		return 0;
1844 
1845 	if (!obj || !obj->attr_inuse)
1846 		return -EINVAL;
1847 
1848 	if (adev->dev->kobj.sd)
1849 		sysfs_remove_file_from_group(&adev->dev->kobj,
1850 				&obj->sysfs_attr.attr,
1851 				RAS_FS_NAME);
1852 	obj->attr_inuse = 0;
1853 	put_obj(obj);
1854 
1855 	return 0;
1856 }
1857 
1858 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
1859 {
1860 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1861 	struct ras_manager *obj, *tmp;
1862 
1863 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
1864 		amdgpu_ras_sysfs_remove(adev, &obj->head);
1865 	}
1866 
1867 	if (amdgpu_bad_page_threshold != 0)
1868 		amdgpu_ras_sysfs_remove_bad_page_node(adev);
1869 
1870 	amdgpu_ras_sysfs_remove_dev_attr_node(adev);
1871 
1872 	return 0;
1873 }
1874 /* sysfs end */
1875 
1876 /**
1877  * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
1878  *
1879  * Normally when there is an uncorrectable error, the driver will reset
1880  * the GPU to recover.  However, in the event of an unrecoverable error,
1881  * the driver provides an interface to reboot the system automatically
1882  * in that event.
1883  *
1884  * The following file in debugfs provides that interface:
1885  * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
1886  *
1887  * Usage:
1888  *
1889  * .. code-block:: bash
1890  *
1891  *	echo true > .../ras/auto_reboot
1892  *
1893  */
1894 /* debugfs begin */
1895 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
1896 {
1897 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1898 	struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
1899 	struct drm_minor  *minor = adev_to_drm(adev)->primary;
1900 	struct dentry     *dir;
1901 
1902 	dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
1903 	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
1904 			    &amdgpu_ras_debugfs_ctrl_ops);
1905 	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
1906 			    &amdgpu_ras_debugfs_eeprom_ops);
1907 	debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
1908 			   &con->bad_page_cnt_threshold);
1909 	debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
1910 	debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
1911 	debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
1912 	debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
1913 			    &amdgpu_ras_debugfs_eeprom_size_ops);
1914 	con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
1915 						       S_IRUGO, dir, adev,
1916 						       &amdgpu_ras_debugfs_eeprom_table_ops);
1917 	amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
1918 
1919 	/*
1920 	 * After an uncorrectable error happens, GPU recovery is usually
1921 	 * scheduled. But since GPU recovery may fail to bring the GPU back,
1922 	 * the interface below provides a direct way for the user to reboot
1923 	 * the system automatically when ERREVENT_ATHUB_INTERRUPT is
1924 	 * generated. In that case the normal GPU recovery routine will
1925 	 * never be called.
1926 	 */
1927 	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
1928 
1929 	/*
1930 	 * The user can set this so that the hardware error count registers
1931 	 * of the RAS IPs are not cleared during RAS recovery.
1932 	 */
1933 	debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
1934 			    &con->disable_ras_err_cnt_harvest);
1935 	return dir;
1936 }
1937 
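/*
 * Create a per-block debugfs node (named from head->debugfs_name, e.g.
 * "<block>_err_inject") under the ras directory for error injection.
 */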
1938 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
1939 				      struct ras_fs_if *head,
1940 				      struct dentry *dir)
1941 {
1942 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
1943 
1944 	if (!obj || !dir)
1945 		return;
1946 
1947 	get_obj(obj);
1948 
1949 	memcpy(obj->fs_data.debugfs_name,
1950 			head->debugfs_name,
1951 			sizeof(obj->fs_data.debugfs_name));
1952 
1953 	debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
1954 			    obj, &amdgpu_ras_debugfs_ops);
1955 }
1956 
1957 static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev)
1958 {
1959 	bool ret;
1960 
1961 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
1962 	case IP_VERSION(13, 0, 6):
1963 	case IP_VERSION(13, 0, 14):
1964 		ret = true;
1965 		break;
1966 	default:
1967 		ret = false;
1968 		break;
1969 	}
1970 
1971 	return ret;
1972 }
1973 
1974 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
1975 {
1976 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1977 	struct dentry *dir;
1978 	struct ras_manager *obj;
1979 	struct ras_fs_if fs_info;
1980 
1981 	/*
1982 	 * This function is not called in the resume path, so there is no
1983 	 * need to check the suspend and GPU reset status.
1984 	 */
1985 	if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
1986 		return;
1987 
1988 	dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
1989 
1990 	list_for_each_entry(obj, &con->head, node) {
1991 		if (amdgpu_ras_is_supported(adev, obj->head.block) &&
1992 			(obj->attr_inuse == 1)) {
1993 			sprintf(fs_info.debugfs_name, "%s_err_inject",
1994 					get_ras_block_str(&obj->head));
1995 			fs_info.head = obj->head;
1996 			amdgpu_ras_debugfs_create(adev, &fs_info, dir);
1997 		}
1998 	}
1999 
2000 	if (amdgpu_ras_aca_is_supported(adev)) {
2001 		if (amdgpu_aca_is_enabled(adev))
2002 			amdgpu_aca_smu_debugfs_init(adev, dir);
2003 		else
2004 			amdgpu_mca_smu_debugfs_init(adev, dir);
2005 	}
2006 }
2007 
2008 /* debugfs end */
2009 
2010 /* ras fs */
2011 static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
2012 		amdgpu_ras_sysfs_badpages_read, NULL, 0);
2013 static DEVICE_ATTR(features, S_IRUGO,
2014 		amdgpu_ras_sysfs_features_read, NULL);
2015 static DEVICE_ATTR(version, 0444,
2016 		amdgpu_ras_sysfs_version_show, NULL);
2017 static DEVICE_ATTR(schema, 0444,
2018 		amdgpu_ras_sysfs_schema_show, NULL);
2019 static DEVICE_ATTR(event_state, 0444,
2020 		   amdgpu_ras_sysfs_event_state_show, NULL);
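/*
 * Create the "ras" sysfs group on the device with the features, version,
 * schema and event_state attributes, plus the gpu_vram_bad_pages binary
 * attribute when bad page retirement is enabled.
 */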
2021 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
2022 {
2023 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2024 	struct attribute_group group = {
2025 		.name = RAS_FS_NAME,
2026 	};
2027 	struct attribute *attrs[] = {
2028 		&con->features_attr.attr,
2029 		&con->version_attr.attr,
2030 		&con->schema_attr.attr,
2031 		&con->event_state_attr.attr,
2032 		NULL
2033 	};
2034 	struct bin_attribute *bin_attrs[] = {
2035 		NULL,
2036 		NULL,
2037 	};
2038 	int r;
2039 
2040 	group.attrs = attrs;
2041 
2042 	/* add features entry */
2043 	con->features_attr = dev_attr_features;
2044 	sysfs_attr_init(attrs[0]);
2045 
2046 	/* add version entry */
2047 	con->version_attr = dev_attr_version;
2048 	sysfs_attr_init(attrs[1]);
2049 
2050 	/* add schema entry */
2051 	con->schema_attr = dev_attr_schema;
2052 	sysfs_attr_init(attrs[2]);
2053 
2054 	/* add event_state entry */
2055 	con->event_state_attr = dev_attr_event_state;
2056 	sysfs_attr_init(attrs[3]);
2057 
2058 	if (amdgpu_bad_page_threshold != 0) {
2059 		/* add bad_page_features entry */
2060 		bin_attr_gpu_vram_bad_pages.private = NULL;
2061 		con->badpages_attr = bin_attr_gpu_vram_bad_pages;
2062 		bin_attrs[0] = &con->badpages_attr;
2063 		group.bin_attrs = bin_attrs;
2064 		sysfs_bin_attr_init(bin_attrs[0]);
2065 	}
2066 
2067 	r = sysfs_create_group(&adev->dev->kobj, &group);
2068 	if (r)
2069 		dev_err(adev->dev, "Failed to create RAS sysfs group!");
2070 
2071 	return 0;
2072 }
2073 
2074 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
2075 {
2076 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2077 	struct ras_manager *con_obj, *ip_obj, *tmp;
2078 
2079 	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2080 		list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
2081 			ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
2082 			if (ip_obj)
2083 				put_obj(ip_obj);
2084 		}
2085 	}
2086 
2087 	amdgpu_ras_sysfs_remove_all(adev);
2088 	return 0;
2089 }
2090 /* ras fs end */
2091 
2092 /* ih begin */
2093 
2094 /* For hardware that cannot enable the bif ring for both the ras_controller_irq
2095  * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
2096  * register to check whether the interrupt has been triggered, and properly
2097  * ack the interrupt if it is there.
2098  */
2099 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
2100 {
2101 	/* Fatal error events are handled on host side */
2102 	if (amdgpu_sriov_vf(adev))
2103 		return;
2104 
2105 	if (adev->nbio.ras &&
2106 	    adev->nbio.ras->handle_ras_controller_intr_no_bifring)
2107 		adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
2108 
2109 	if (adev->nbio.ras &&
2110 	    adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
2111 		adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
2112 }
2113 
2114 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
2115 				struct amdgpu_iv_entry *entry)
2116 {
2117 	bool poison_stat = false;
2118 	struct amdgpu_device *adev = obj->adev;
2119 	struct amdgpu_ras_block_object *block_obj =
2120 		amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
2121 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2122 	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
2123 	u64 event_id;
2124 	int ret;
2125 
2126 	if (!block_obj || !con)
2127 		return;
2128 
2129 	ret = amdgpu_ras_mark_ras_event(adev, type);
2130 	if (ret)
2131 		return;
2132 
2133 	/* both query_poison_status and handle_poison_consumption are optional,
2134 	 * but at least one of them should be implemented if we need the poison
2135 	 * consumption handler
2136 	 */
2137 	if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
2138 		poison_stat = block_obj->hw_ops->query_poison_status(adev);
2139 		if (!poison_stat) {
2140 			/* Not poison consumption interrupt, no need to handle it */
2141 			dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
2142 					block_obj->ras_comm.name);
2143 
2144 			return;
2145 		}
2146 	}
2147 
2148 	amdgpu_umc_poison_handler(adev, obj->head.block, 0);
2149 
2150 	if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
2151 		poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
2152 
2153 	/* gpu reset is fallback for failed and default cases.
2154 	 * For RMA case, amdgpu_umc_poison_handler will handle gpu reset.
2155 	 */
2156 	if (poison_stat && !amdgpu_ras_is_rma(adev)) {
2157 		event_id = amdgpu_ras_acquire_event_id(adev, type);
2158 		RAS_EVENT_LOG(adev, event_id,
2159 			      "GPU reset for %s RAS poison consumption is issued!\n",
2160 			      block_obj->ras_comm.name);
2161 		amdgpu_ras_reset_gpu(adev);
2162 	}
2163 
2164 	if (!poison_stat)
2165 		amdgpu_gfx_poison_consumption_handler(adev, entry);
2166 }
2167 
2168 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
2169 				struct amdgpu_iv_entry *entry)
2170 {
2171 	struct amdgpu_device *adev = obj->adev;
2172 	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
2173 	u64 event_id;
2174 	int ret;
2175 
2176 	ret = amdgpu_ras_mark_ras_event(adev, type);
2177 	if (ret)
2178 		return;
2179 
2180 	event_id = amdgpu_ras_acquire_event_id(adev, type);
2181 	RAS_EVENT_LOG(adev, event_id, "Poison is created\n");
2182 
2183 	if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) {
2184 		struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev);
2185 
2186 		atomic_inc(&con->page_retirement_req_cnt);
2187 		atomic_inc(&con->poison_creation_count);
2188 
2189 		wake_up(&con->page_retirement_wq);
2190 	}
2191 }
2192 
2193 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
2194 				struct amdgpu_iv_entry *entry)
2195 {
2196 	struct ras_ih_data *data = &obj->ih_data;
2197 	struct ras_err_data err_data;
2198 	int ret;
2199 
2200 	if (!data->cb)
2201 		return;
2202 
2203 	ret = amdgpu_ras_error_data_init(&err_data);
2204 	if (ret)
2205 		return;
2206 
2207 	/* Let the IP handle its data; maybe we need to get the output
2208 	 * from the callback to update the error type/count, etc.
2209 	 */
2210 	amdgpu_ras_set_fed(obj->adev, true);
2211 	ret = data->cb(obj->adev, &err_data, entry);
2212 	/* A UE will trigger an interrupt, and in that case
2213 	 * we need to do a reset to recover the whole system.
2214 	 * But leave the IP to do that recovery; here we just
2215 	 * dispatch the error.
2216 	 */
2217 	if (ret == AMDGPU_RAS_SUCCESS) {
2218 		/* these counts could be left as 0 if
2219 		 * some blocks do not count the error number
2220 		 */
2221 		obj->err_data.ue_count += err_data.ue_count;
2222 		obj->err_data.ce_count += err_data.ce_count;
2223 		obj->err_data.de_count += err_data.de_count;
2224 	}
2225 
2226 	amdgpu_ras_error_data_fini(&err_data);
2227 }
2228 
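/*
 * Drain the per-object ring buffer filled by amdgpu_ras_interrupt_dispatch().
 * With poison mode supported, UMC entries go to the poison creation handler
 * and other blocks to the poison consumption handler; otherwise UMC entries
 * are handled by amdgpu_ras_interrupt_umc_handler().
 */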
2229 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
2230 {
2231 	struct ras_ih_data *data = &obj->ih_data;
2232 	struct amdgpu_iv_entry entry;
2233 
2234 	while (data->rptr != data->wptr) {
2235 		rmb();
2236 		memcpy(&entry, &data->ring[data->rptr],
2237 				data->element_size);
2238 
2239 		wmb();
2240 		data->rptr = (data->aligned_element_size +
2241 				data->rptr) % data->ring_size;
2242 
2243 		if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
2244 			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2245 				amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
2246 			else
2247 				amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
2248 		} else {
2249 			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2250 				amdgpu_ras_interrupt_umc_handler(obj, &entry);
2251 			else
2252 				dev_warn(obj->adev->dev,
2253 					"No RAS interrupt handler for non-UMC block with poison disabled.\n");
2254 		}
2255 	}
2256 }
2257 
2258 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
2259 {
2260 	struct ras_ih_data *data =
2261 		container_of(work, struct ras_ih_data, ih_work);
2262 	struct ras_manager *obj =
2263 		container_of(data, struct ras_manager, ih_data);
2264 
2265 	amdgpu_ras_interrupt_handler(obj);
2266 }
2267 
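/*
 * Copy the IV entry into the per-object ring buffer and schedule the IH
 * worker; wptr is only updated here and rptr only in the worker, with the
 * barriers keeping the entry data and pointer updates ordered.
 */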
2268 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
2269 		struct ras_dispatch_if *info)
2270 {
2271 	struct ras_manager *obj;
2272 	struct ras_ih_data *data;
2273 
2274 	obj = amdgpu_ras_find_obj(adev, &info->head);
2275 	if (!obj)
2276 		return -EINVAL;
2277 
2278 	data = &obj->ih_data;
2279 
2280 	if (data->inuse == 0)
2281 		return 0;
2282 
2283 	/* Might be overflow... */
2284 	memcpy(&data->ring[data->wptr], info->entry,
2285 			data->element_size);
2286 
2287 	wmb();
2288 	data->wptr = (data->aligned_element_size +
2289 			data->wptr) % data->ring_size;
2290 
2291 	schedule_work(&data->ih_work);
2292 
2293 	return 0;
2294 }
2295 
2296 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
2297 		struct ras_common_if *head)
2298 {
2299 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2300 	struct ras_ih_data *data;
2301 
2302 	if (!obj)
2303 		return -EINVAL;
2304 
2305 	data = &obj->ih_data;
2306 	if (data->inuse == 0)
2307 		return 0;
2308 
2309 	cancel_work_sync(&data->ih_work);
2310 
2311 	kfree(data->ring);
2312 	memset(data, 0, sizeof(*data));
2313 	put_obj(obj);
2314 
2315 	return 0;
2316 }
2317 
2318 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
2319 		struct ras_common_if *head)
2320 {
2321 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2322 	struct ras_ih_data *data;
2323 	struct amdgpu_ras_block_object *ras_obj;
2324 
2325 	if (!obj) {
2326 		/* in case we register the IH before enabling the ras feature */
2327 		obj = amdgpu_ras_create_obj(adev, head);
2328 		if (!obj)
2329 			return -EINVAL;
2330 	} else
2331 		get_obj(obj);
2332 
2333 	ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
2334 
2335 	data = &obj->ih_data;
2336 	/* add the callback, etc. */
2337 	*data = (struct ras_ih_data) {
2338 		.inuse = 0,
2339 		.cb = ras_obj->ras_cb,
2340 		.element_size = sizeof(struct amdgpu_iv_entry),
2341 		.rptr = 0,
2342 		.wptr = 0,
2343 	};
2344 
2345 	INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
2346 
2347 	data->aligned_element_size = ALIGN(data->element_size, 8);
2348 	/* the ring can store 64 iv entries. */
2349 	data->ring_size = 64 * data->aligned_element_size;
2350 	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
2351 	if (!data->ring) {
2352 		put_obj(obj);
2353 		return -ENOMEM;
2354 	}
2355 
2356 	/* IH is ready */
2357 	data->inuse = 1;
2358 
2359 	return 0;
2360 }
2361 
2362 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
2363 {
2364 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2365 	struct ras_manager *obj, *tmp;
2366 
2367 	list_for_each_entry_safe(obj, tmp, &con->head, node) {
2368 		amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
2369 	}
2370 
2371 	return 0;
2372 }
2373 /* ih end */
2374 
2375 /* traverse all IPs except NBIO to query the error counters */
2376 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev, enum ras_event_type type)
2377 {
2378 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2379 	struct ras_manager *obj;
2380 
2381 	if (!adev->ras_enabled || !con)
2382 		return;
2383 
2384 	list_for_each_entry(obj, &con->head, node) {
2385 		struct ras_query_if info = {
2386 			.head = obj->head,
2387 		};
2388 
2389 		/*
2390 		 * The PCIE_BIF IP has a separate isr for the ras controller
2391 		 * interrupt, and the specific ras counter query is done in
2392 		 * that isr. So skip such blocks in the common sync flood
2393 		 * interrupt isr.
2394 		 */
2395 		if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
2396 			continue;
2397 
2398 		/*
2399 		 * This is a workaround for aldebaran: skip sending the msg to
2400 		 * smu to get the ecc_info table, because smu temporarily fails
2401 		 * to handle the request.
2402 		 * It should be removed once smu fixes ecc_info table handling.
2403 		 */
2404 		if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
2405 		    (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2406 		     IP_VERSION(13, 0, 2)))
2407 			continue;
2408 
2409 		amdgpu_ras_query_error_status_with_event(adev, &info, type);
2410 
2411 		if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2412 			    IP_VERSION(11, 0, 2) &&
2413 		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2414 			    IP_VERSION(11, 0, 4) &&
2415 		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2416 			    IP_VERSION(13, 0, 0)) {
2417 			if (amdgpu_ras_reset_error_status(adev, info.head.block))
2418 				dev_warn(adev->dev, "Failed to reset error counter and error status");
2419 		}
2420 	}
2421 }
2422 
2423 /* Parse RdRspStatus and WrRspStatus */
2424 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
2425 					  struct ras_query_if *info)
2426 {
2427 	struct amdgpu_ras_block_object *block_obj;
2428 	/*
2429 	 * Only two blocks need to query the read/write
2430 	 * RspStatus in the current state
2431 	 */
2432 	if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
2433 		(info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
2434 		return;
2435 
2436 	block_obj = amdgpu_ras_get_ras_block(adev,
2437 					info->head.block,
2438 					info->head.sub_block_index);
2439 
2440 	if (!block_obj || !block_obj->hw_ops) {
2441 		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
2442 			     get_ras_block_str(&info->head));
2443 		return;
2444 	}
2445 
2446 	if (block_obj->hw_ops->query_ras_error_status)
2447 		block_obj->hw_ops->query_ras_error_status(adev);
2448 
2449 }
2450 
2451 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
2452 {
2453 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2454 	struct ras_manager *obj;
2455 
2456 	if (!adev->ras_enabled || !con)
2457 		return;
2458 
2459 	list_for_each_entry(obj, &con->head, node) {
2460 		struct ras_query_if info = {
2461 			.head = obj->head,
2462 		};
2463 
2464 		amdgpu_ras_error_status_query(adev, &info);
2465 	}
2466 }
2467 
2468 /* recovery begin */
2469 
2470 /* return 0 on success.
2471  * the caller needs to free bps.
2472  */
2473 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
2474 		struct ras_badpage **bps, unsigned int *count)
2475 {
2476 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2477 	struct ras_err_handler_data *data;
2478 	int i = 0;
2479 	int ret = 0, status;
2480 
2481 	if (!con || !con->eh_data || !bps || !count)
2482 		return -EINVAL;
2483 
2484 	mutex_lock(&con->recovery_lock);
2485 	data = con->eh_data;
2486 	if (!data || data->count == 0) {
2487 		*bps = NULL;
2488 		ret = -EINVAL;
2489 		goto out;
2490 	}
2491 
2492 	*bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
2493 	if (!*bps) {
2494 		ret = -ENOMEM;
2495 		goto out;
2496 	}
2497 
2498 	for (; i < data->count; i++) {
2499 		(*bps)[i] = (struct ras_badpage){
2500 			.bp = data->bps[i].retired_page,
2501 			.size = AMDGPU_GPU_PAGE_SIZE,
2502 			.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
2503 		};
2504 		status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
2505 				data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);
2506 		if (status == -EBUSY)
2507 			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
2508 		else if (status == -ENOENT)
2509 			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
2510 	}
2511 
2512 	*count = data->count;
2513 out:
2514 	mutex_unlock(&con->recovery_lock);
2515 	return ret;
2516 }
2517 
2518 static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
2519 				   struct amdgpu_hive_info *hive, bool status)
2520 {
2521 	struct amdgpu_device *tmp_adev;
2522 
2523 	if (hive) {
2524 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
2525 			amdgpu_ras_set_fed(tmp_adev, status);
2526 	} else {
2527 		amdgpu_ras_set_fed(adev, status);
2528 	}
2529 }
2530 
2531 bool amdgpu_ras_in_recovery(struct amdgpu_device *adev)
2532 {
2533 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2534 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2535 	int hive_ras_recovery = 0;
2536 
2537 	if (hive) {
2538 		hive_ras_recovery = atomic_read(&hive->ras_recovery);
2539 		amdgpu_put_xgmi_hive(hive);
2540 	}
2541 
2542 	if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
2543 		return true;
2544 
2545 	return false;
2546 }
2547 
2548 static enum ras_event_type amdgpu_ras_get_fatal_error_event(struct amdgpu_device *adev)
2549 {
2550 	if (amdgpu_ras_intr_triggered())
2551 		return RAS_EVENT_TYPE_FATAL;
2552 	else
2553 		return RAS_EVENT_TYPE_POISON_CONSUMPTION;
2554 }
2555 
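/*
 * RAS recovery worker: propagate the fatal-error status across the XGMI
 * hive, optionally harvest the error counters of every device in the hive,
 * and then trigger the appropriate GPU reset (full, mode-1 or mode-2)
 * based on the poison mode and gpu_reset_flags.
 */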
2556 static void amdgpu_ras_do_recovery(struct work_struct *work)
2557 {
2558 	struct amdgpu_ras *ras =
2559 		container_of(work, struct amdgpu_ras, recovery_work);
2560 	struct amdgpu_device *remote_adev = NULL;
2561 	struct amdgpu_device *adev = ras->adev;
2562 	struct list_head device_list, *device_list_handle =  NULL;
2563 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2564 	enum ras_event_type type;
2565 
2566 	if (hive) {
2567 		atomic_set(&hive->ras_recovery, 1);
2568 
2569 		/* If any device which is part of the hive received a RAS fatal
2570 		 * error interrupt, set the fatal error status on all of them.
2571 		 * This condition needs a recovery, and the flag will be cleared
2572 		 * as part of that recovery.
2573 		 */
2574 		list_for_each_entry(remote_adev, &hive->device_list,
2575 				    gmc.xgmi.head)
2576 			if (amdgpu_ras_get_fed_status(remote_adev)) {
2577 				amdgpu_ras_set_fed_all(adev, hive, true);
2578 				break;
2579 			}
2580 	}
2581 	if (!ras->disable_ras_err_cnt_harvest) {
2582 
2583 		/* Build list of devices to query RAS related errors */
2584 		if  (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
2585 			device_list_handle = &hive->device_list;
2586 		} else {
2587 			INIT_LIST_HEAD(&device_list);
2588 			list_add_tail(&adev->gmc.xgmi.head, &device_list);
2589 			device_list_handle = &device_list;
2590 		}
2591 
2592 		type = amdgpu_ras_get_fatal_error_event(adev);
2593 		list_for_each_entry(remote_adev,
2594 				device_list_handle, gmc.xgmi.head) {
2595 			amdgpu_ras_query_err_status(remote_adev);
2596 			amdgpu_ras_log_on_err_counter(remote_adev, type);
2597 		}
2598 
2599 	}
2600 
2601 	if (amdgpu_device_should_recover_gpu(ras->adev)) {
2602 		struct amdgpu_reset_context reset_context;
2603 		memset(&reset_context, 0, sizeof(reset_context));
2604 
2605 		reset_context.method = AMD_RESET_METHOD_NONE;
2606 		reset_context.reset_req_dev = adev;
2607 		reset_context.src = AMDGPU_RESET_SRC_RAS;
2608 		set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
2609 
2610 		/* Perform full reset in fatal error mode */
2611 		if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
2612 			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2613 		else {
2614 			clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2615 
2616 			if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
2617 				ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
2618 				reset_context.method = AMD_RESET_METHOD_MODE2;
2619 			}
2620 
2621 			/* A fatal error occurred in poison mode; a mode-1 reset is
2622 			 * used to recover the gpu.
2623 			 */
2624 			if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
2625 				ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2626 				set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2627 
2628 				psp_fatal_error_recovery_quirk(&adev->psp);
2629 			}
2630 		}
2631 
2632 		amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2633 	}
2634 	atomic_set(&ras->in_recovery, 0);
2635 	if (hive) {
2636 		atomic_set(&hive->ras_recovery, 0);
2637 		amdgpu_put_xgmi_hive(hive);
2638 	}
2639 }
2640 
2641 /* alloc/realloc bps array */
2642 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2643 		struct ras_err_handler_data *data, int pages)
2644 {
2645 	unsigned int old_space = data->count + data->space_left;
2646 	unsigned int new_space = old_space + pages;
2647 	unsigned int align_space = ALIGN(new_space, 512);
2648 	void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
2649 
2650 	if (!bps) {
2651 		return -ENOMEM;
2652 	}
2653 
2654 	if (data->bps) {
2655 		memcpy(bps, data->bps,
2656 				data->count * sizeof(*data->bps));
2657 		kfree(data->bps);
2658 	}
2659 
2660 	data->bps = bps;
2661 	data->space_left += align_space - old_space;
2662 	return 0;
2663 }
2664 
2665 /* it deals with vram only. */
2666 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
2667 		struct eeprom_table_record *bps, int pages)
2668 {
2669 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2670 	struct ras_err_handler_data *data;
2671 	int ret = 0;
2672 	uint32_t i;
2673 
2674 	if (!con || !con->eh_data || !bps || pages <= 0)
2675 		return 0;
2676 
2677 	mutex_lock(&con->recovery_lock);
2678 	data = con->eh_data;
2679 	if (!data)
2680 		goto out;
2681 
2682 	for (i = 0; i < pages; i++) {
2683 		if (amdgpu_ras_check_bad_page_unlock(con,
2684 			bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2685 			continue;
2686 
2687 		if (!data->space_left &&
2688 			amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2689 			ret = -ENOMEM;
2690 			goto out;
2691 		}
2692 
2693 		amdgpu_ras_reserve_page(adev, bps[i].retired_page);
2694 
2695 		memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
2696 		data->count++;
2697 		data->space_left--;
2698 	}
2699 out:
2700 	mutex_unlock(&con->recovery_lock);
2701 
2702 	return ret;
2703 }
2704 
2705 /*
2706  * write the error record array to eeprom; the function should be
2707  * protected by recovery_lock
2708  * new_cnt: newly added UE count, excluding reserved bad pages, can be NULL
2709  */
2710 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
2711 		unsigned long *new_cnt)
2712 {
2713 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2714 	struct ras_err_handler_data *data;
2715 	struct amdgpu_ras_eeprom_control *control;
2716 	int save_count;
2717 
2718 	if (!con || !con->eh_data) {
2719 		if (new_cnt)
2720 			*new_cnt = 0;
2721 
2722 		return 0;
2723 	}
2724 
2725 	mutex_lock(&con->recovery_lock);
2726 	control = &con->eeprom_control;
2727 	data = con->eh_data;
2728 	save_count = data->count - control->ras_num_recs;
2729 	mutex_unlock(&con->recovery_lock);
2730 
2731 	if (new_cnt)
2732 		*new_cnt = save_count / adev->umc.retire_unit;
2733 
2734 	/* only new entries are saved */
2735 	if (save_count > 0) {
2736 		if (amdgpu_ras_eeprom_append(control,
2737 					     &data->bps[control->ras_num_recs],
2738 					     save_count)) {
2739 			dev_err(adev->dev, "Failed to save EEPROM table data!");
2740 			return -EIO;
2741 		}
2742 
2743 		dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
2744 	}
2745 
2746 	return 0;
2747 }
2748 
2749 /*
2750  * read error record array in eeprom and reserve enough space for
2751  * storing new bad pages
2752  */
2753 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
2754 {
2755 	struct amdgpu_ras_eeprom_control *control =
2756 		&adev->psp.ras_context.ras->eeprom_control;
2757 	struct eeprom_table_record *bps;
2758 	int ret;
2759 
2760 	/* no bad page record, skip eeprom access */
2761 	if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
2762 		return 0;
2763 
2764 	bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
2765 	if (!bps)
2766 		return -ENOMEM;
2767 
2768 	ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
2769 	if (ret)
2770 		dev_err(adev->dev, "Failed to load EEPROM table records!");
2771 	else
2772 		ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
2773 
2774 	kfree(bps);
2775 	return ret;
2776 }
2777 
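/*
 * Check whether the byte address @addr has already been retired.
 * The caller must hold con->recovery_lock.
 */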
2778 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
2779 				uint64_t addr)
2780 {
2781 	struct ras_err_handler_data *data = con->eh_data;
2782 	int i;
2783 
2784 	addr >>= AMDGPU_GPU_PAGE_SHIFT;
2785 	for (i = 0; i < data->count; i++)
2786 		if (addr == data->bps[i].retired_page)
2787 			return true;
2788 
2789 	return false;
2790 }
2791 
2792 /*
2793  * check if an address belongs to a bad page
2794  *
2795  * Note: this check is only for umc block
2796  */
2797 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
2798 				uint64_t addr)
2799 {
2800 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2801 	bool ret = false;
2802 
2803 	if (!con || !con->eh_data)
2804 		return ret;
2805 
2806 	mutex_lock(&con->recovery_lock);
2807 	ret = amdgpu_ras_check_bad_page_unlock(con, addr);
2808 	mutex_unlock(&con->recovery_lock);
2809 	return ret;
2810 }
2811 
2812 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
2813 					  uint32_t max_count)
2814 {
2815 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2816 
2817 	/*
2818 	 * Justification of value bad_page_cnt_threshold in ras structure
2819 	 *
2820 	 * Generally, 0 <= amdgpu_bad_page_threshold <= max record length
2821 	 * in eeprom, or amdgpu_bad_page_threshold == -2; the two scenarios
2822 	 * are described below.
2823 	 *
2824 	 * Bad page retirement enablement:
2825 	 *    - If amdgpu_bad_page_threshold = -2,
2826 	 *      bad_page_cnt_threshold = typical value by formula.
2827 	 *
2828 	 *    - When the value from user is 0 < amdgpu_bad_page_threshold <
2829 	 *      max record length in eeprom, use it directly.
2830 	 *
2831 	 * Bad page retirement disablement:
2832 	 *    - If amdgpu_bad_page_threshold = 0, bad page retirement
2833 	 *      functionality is disabled, and bad_page_cnt_threshold will
2834 	 *      take no effect.
2835 	 */
2836 
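	/*
	 * In short: a negative setting (e.g. -2) resolves to
	 * min(mc_vram_size / RAS_BAD_PAGE_COVER, max_count), while a
	 * non-negative module parameter is simply clamped to max_count.
	 */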
2837 	if (amdgpu_bad_page_threshold < 0) {
2838 		u64 val = adev->gmc.mc_vram_size;
2839 
2840 		do_div(val, RAS_BAD_PAGE_COVER);
2841 		con->bad_page_cnt_threshold = min(lower_32_bits(val),
2842 						  max_count);
2843 	} else {
2844 		con->bad_page_cnt_threshold = min_t(int, max_count,
2845 						    amdgpu_bad_page_threshold);
2846 	}
2847 }
2848 
2849 int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
2850 		enum amdgpu_ras_block block, uint16_t pasid,
2851 		pasid_notify pasid_fn, void *data, uint32_t reset)
2852 {
2853 	int ret = 0;
2854 	struct ras_poison_msg poison_msg;
2855 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2856 
2857 	memset(&poison_msg, 0, sizeof(poison_msg));
2858 	poison_msg.block = block;
2859 	poison_msg.pasid = pasid;
2860 	poison_msg.reset = reset;
2861 	poison_msg.pasid_fn = pasid_fn;
2862 	poison_msg.data = data;
2863 
2864 	ret = kfifo_put(&con->poison_fifo, poison_msg);
2865 	if (!ret) {
2866 		dev_err(adev->dev, "Poison message fifo is full!\n");
2867 		return -ENOSPC;
2868 	}
2869 
2870 	return 0;
2871 }
2872 
2873 static int amdgpu_ras_get_poison_req(struct amdgpu_device *adev,
2874 		struct ras_poison_msg *poison_msg)
2875 {
2876 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2877 
2878 	return kfifo_get(&con->poison_fifo, poison_msg);
2879 }
2880 
2881 static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
2882 {
2883 	mutex_init(&ecc_log->lock);
2884 
2885 	INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
2886 	ecc_log->de_queried_count = 0;
2887 	ecc_log->prev_de_queried_count = 0;
2888 }
2889 
2890 static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
2891 {
2892 	struct radix_tree_iter iter;
2893 	void __rcu **slot;
2894 	struct ras_ecc_err *ecc_err;
2895 
2896 	mutex_lock(&ecc_log->lock);
2897 	radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) {
2898 		ecc_err = radix_tree_deref_slot(slot);
2899 		kfree(ecc_err->err_pages.pfn);
2900 		kfree(ecc_err);
2901 		radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot);
2902 	}
2903 	mutex_unlock(&ecc_log->lock);
2904 
2905 	mutex_destroy(&ecc_log->lock);
2906 	ecc_log->de_queried_count = 0;
2907 	ecc_log->prev_de_queried_count = 0;
2908 }
2909 
2910 static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con,
2911 				uint32_t delayed_ms)
2912 {
2913 	int ret;
2914 
2915 	mutex_lock(&con->umc_ecc_log.lock);
2916 	ret = radix_tree_tagged(&con->umc_ecc_log.de_page_tree,
2917 			UMC_ECC_NEW_DETECTED_TAG);
2918 	mutex_unlock(&con->umc_ecc_log.lock);
2919 
2920 	if (ret)
2921 		schedule_delayed_work(&con->page_retirement_dwork,
2922 			msecs_to_jiffies(delayed_ms));
2923 
2924 	return ret ? true : false;
2925 }
2926 
2927 static void amdgpu_ras_do_page_retirement(struct work_struct *work)
2928 {
2929 	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
2930 					      page_retirement_dwork.work);
2931 	struct amdgpu_device *adev = con->adev;
2932 	struct ras_err_data err_data;
2933 	unsigned long err_cnt;
2934 
2935 	/* If gpu reset is ongoing, delay retiring the bad pages */
2936 	if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
2937 		amdgpu_ras_schedule_retirement_dwork(con,
2938 				AMDGPU_RAS_RETIRE_PAGE_INTERVAL * 3);
2939 		return;
2940 	}
2941 
2942 	amdgpu_ras_error_data_init(&err_data);
2943 
2944 	amdgpu_umc_handle_bad_pages(adev, &err_data);
2945 	err_cnt = err_data.err_addr_cnt;
2946 
2947 	amdgpu_ras_error_data_fini(&err_data);
2948 
2949 	if (err_cnt && amdgpu_ras_is_rma(adev))
2950 		amdgpu_ras_reset_gpu(adev);
2951 
2952 	amdgpu_ras_schedule_retirement_dwork(con,
2953 			AMDGPU_RAS_RETIRE_PAGE_INTERVAL);
2954 }
2955 
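/*
 * Poll the UMC error status until at least @poison_creation_count newly
 * queried deferred errors have been observed in umc_ecc_log, or until the
 * polling times out; on success schedule the page retirement work.
 */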
2956 static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
2957 				uint32_t poison_creation_count)
2958 {
2959 	int ret = 0;
2960 	struct ras_ecc_log_info *ecc_log;
2961 	struct ras_query_if info;
2962 	uint32_t timeout = 0;
2963 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2964 	uint64_t de_queried_count;
2965 	uint32_t new_detect_count, total_detect_count;
2966 	uint32_t need_query_count = poison_creation_count;
2967 	bool query_data_timeout = false;
2968 	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
2969 
2970 	memset(&info, 0, sizeof(info));
2971 	info.head.block = AMDGPU_RAS_BLOCK__UMC;
2972 
2973 	ecc_log = &ras->umc_ecc_log;
2974 	total_detect_count = 0;
2975 	do {
2976 		ret = amdgpu_ras_query_error_status_with_event(adev, &info, type);
2977 		if (ret)
2978 			return ret;
2979 
2980 		de_queried_count = ecc_log->de_queried_count;
2981 		if (de_queried_count > ecc_log->prev_de_queried_count) {
2982 			new_detect_count = de_queried_count - ecc_log->prev_de_queried_count;
2983 			ecc_log->prev_de_queried_count = de_queried_count;
2984 			timeout = 0;
2985 		} else {
2986 			new_detect_count = 0;
2987 		}
2988 
2989 		if (new_detect_count) {
2990 			total_detect_count += new_detect_count;
2991 		} else {
2992 			if (!timeout && need_query_count)
2993 				timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
2994 
2995 			if (timeout) {
2996 				if (!--timeout) {
2997 					query_data_timeout = true;
2998 					break;
2999 				}
3000 				msleep(1);
3001 			}
3002 		}
3003 	} while (total_detect_count < need_query_count);
3004 
3005 	if (query_data_timeout) {
3006 		dev_warn(adev->dev, "Can't find deferred error! count: %u\n",
3007 			(need_query_count - total_detect_count));
3008 		return -ENOENT;
3009 	}
3010 
3011 	if (total_detect_count)
3012 		schedule_delayed_work(&ras->page_retirement_dwork, 0);
3013 
3014 	return 0;
3015 }
3016 
3017 static void amdgpu_ras_clear_poison_fifo(struct amdgpu_device *adev)
3018 {
3019 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3020 	struct ras_poison_msg msg;
3021 	int ret;
3022 
3023 	do {
3024 		ret = kfifo_get(&con->poison_fifo, &msg);
3025 	} while (ret);
3026 }
3027 
3028 static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
3029 			uint32_t msg_count, uint32_t *gpu_reset)
3030 {
3031 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3032 	uint32_t reset_flags = 0, reset = 0;
3033 	struct ras_poison_msg msg;
3034 	int ret, i;
3035 
3036 	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
3037 
3038 	for (i = 0; i < msg_count; i++) {
3039 		ret = amdgpu_ras_get_poison_req(adev, &msg);
3040 		if (!ret)
3041 			continue;
3042 
3043 		if (msg.pasid_fn)
3044 			msg.pasid_fn(adev, msg.pasid, msg.data);
3045 
3046 		reset_flags |= msg.reset;
3047 	}
3048 
3049 	/* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
3050 	if (reset_flags && !amdgpu_ras_is_rma(adev)) {
3051 		if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
3052 			reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
3053 		else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET)
3054 			reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
3055 		else
3056 			reset = reset_flags;
3057 
3058 		flush_delayed_work(&con->page_retirement_dwork);
3059 
3060 		con->gpu_reset_flags |= reset;
3061 		amdgpu_ras_reset_gpu(adev);
3062 
3063 		*gpu_reset = reset;
3064 
3065 		/* Wait for gpu recovery to complete */
3066 		flush_work(&con->recovery_work);
3067 	}
3068 
3069 	return 0;
3070 }
3071 
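/*
 * Kernel thread that waits for page retirement requests: it first drains
 * pending poison creation requests, then the poison consumption messages
 * from the poison fifo, and clears all pending state when a mode-1 reset
 * is ongoing or has just completed.
 */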
3072 static int amdgpu_ras_page_retirement_thread(void *param)
3073 {
3074 	struct amdgpu_device *adev = (struct amdgpu_device *)param;
3075 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3076 	uint32_t poison_creation_count, msg_count;
3077 	uint32_t gpu_reset;
3078 	int ret;
3079 
3080 	while (!kthread_should_stop()) {
3081 
3082 		wait_event_interruptible(con->page_retirement_wq,
3083 				kthread_should_stop() ||
3084 				atomic_read(&con->page_retirement_req_cnt));
3085 
3086 		if (kthread_should_stop())
3087 			break;
3088 
3089 		gpu_reset = 0;
3090 
3091 		do {
3092 			poison_creation_count = atomic_read(&con->poison_creation_count);
3093 			ret = amdgpu_ras_poison_creation_handler(adev, poison_creation_count);
3094 			if (ret == -EIO)
3095 				break;
3096 
3097 			if (poison_creation_count) {
3098 				atomic_sub(poison_creation_count, &con->poison_creation_count);
3099 				atomic_sub(poison_creation_count, &con->page_retirement_req_cnt);
3100 			}
3101 		} while (atomic_read(&con->poison_creation_count));
3102 
3103 		if (ret != -EIO) {
3104 			msg_count = kfifo_len(&con->poison_fifo);
3105 			if (msg_count) {
3106 				ret = amdgpu_ras_poison_consumption_handler(adev,
3107 						msg_count, &gpu_reset);
3108 				if ((ret != -EIO) &&
3109 				    (gpu_reset != AMDGPU_RAS_GPU_RESET_MODE1_RESET))
3110 					atomic_sub(msg_count, &con->page_retirement_req_cnt);
3111 			}
3112 		}
3113 
3114 		if ((ret == -EIO) || (gpu_reset == AMDGPU_RAS_GPU_RESET_MODE1_RESET)) {
3115 			/* a gpu mode-1 reset is ongoing, or a ras mode-1 reset has just completed */
3116 			/* Clear poison creation request */
3117 			atomic_set(&con->poison_creation_count, 0);
3118 
3119 			/* Clear poison fifo */
3120 			amdgpu_ras_clear_poison_fifo(adev);
3121 
3122 			/* Clear all poison requests */
3123 			atomic_set(&con->page_retirement_req_cnt, 0);
3124 
3125 			if (ret == -EIO) {
3126 				/* Wait for mode-1 reset to complete */
3127 				down_read(&adev->reset_domain->sem);
3128 				up_read(&adev->reset_domain->sem);
3129 			}
3130 
3131 			/* Wake up work to save bad pages to eeprom */
3132 			schedule_delayed_work(&con->page_retirement_dwork, 0);
3133 		} else if (gpu_reset) {
3134 			/* gpu just completed mode-2 reset or other reset */
3135 			/* Clear poison consumption messages cached in fifo */
3136 			msg_count = kfifo_len(&con->poison_fifo);
3137 			if (msg_count) {
3138 				amdgpu_ras_clear_poison_fifo(adev);
3139 				atomic_sub(msg_count, &con->page_retirement_req_cnt);
3140 			}
3141 
3142 			/* Wake up work to save bad pages to eeprom */
3143 			schedule_delayed_work(&con->page_retirement_dwork, 0);
3144 		}
3145 	}
3146 
3147 	return 0;
3148 }
3149 
3150 int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
3151 {
3152 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3153 	int ret;
3154 
3155 	if (!con || amdgpu_sriov_vf(adev))
3156 		return 0;
3157 
3158 	ret = amdgpu_ras_eeprom_init(&con->eeprom_control);
3159 
3160 	if (ret)
3161 		return ret;
3162 
3163 	/* HW not usable */
3164 	if (amdgpu_ras_is_rma(adev))
3165 		return -EHWPOISON;
3166 
3167 	if (con->eeprom_control.ras_num_recs) {
3168 		ret = amdgpu_ras_load_bad_pages(adev);
3169 		if (ret)
3170 			return ret;
3171 
3172 		amdgpu_dpm_send_hbm_bad_pages_num(
3173 			adev, con->eeprom_control.ras_num_recs);
3174 
3175 		if (con->update_channel_flag == true) {
3176 			amdgpu_dpm_send_hbm_bad_channel_flag(
3177 				adev, con->eeprom_control.bad_channel_bitmap);
3178 			con->update_channel_flag = false;
3179 		}
3180 	}
3181 
3182 	return ret;
3183 }
3184 
3185 int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
3186 {
3187 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3188 	struct ras_err_handler_data **data;
3189 	u32  max_eeprom_records_count = 0;
3190 	int ret;
3191 
3192 	if (!con || amdgpu_sriov_vf(adev))
3193 		return 0;
3194 
3195 	/* Allow access to RAS EEPROM via debugfs, when the ASIC
3196 	 * supports RAS and debugfs is enabled, but when
3197 	 * adev->ras_enabled is unset, i.e. when "ras_enable"
3198 	 * module parameter is set to 0.
3199 	 */
3200 	con->adev = adev;
3201 
3202 	if (!adev->ras_enabled)
3203 		return 0;
3204 
3205 	data = &con->eh_data;
3206 	*data = kzalloc(sizeof(**data), GFP_KERNEL);
3207 	if (!*data) {
3208 		ret = -ENOMEM;
3209 		goto out;
3210 	}
3211 
3212 	mutex_init(&con->recovery_lock);
3213 	INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
3214 	atomic_set(&con->in_recovery, 0);
3215 	con->eeprom_control.bad_channel_bitmap = 0;
3216 
3217 	max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
3218 	amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
3219 
3220 	if (init_bp_info) {
3221 		ret = amdgpu_ras_init_badpage_info(adev);
3222 		if (ret)
3223 			goto free;
3224 	}
3225 
3226 	mutex_init(&con->page_rsv_lock);
3227 	INIT_KFIFO(con->poison_fifo);
3228 	mutex_init(&con->page_retirement_lock);
3229 	init_waitqueue_head(&con->page_retirement_wq);
3230 	atomic_set(&con->page_retirement_req_cnt, 0);
3231 	atomic_set(&con->poison_creation_count, 0);
3232 	con->page_retirement_thread =
3233 		kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
3234 	if (IS_ERR(con->page_retirement_thread)) {
3235 		con->page_retirement_thread = NULL;
3236 		dev_warn(adev->dev, "Failed to create umc_page_retirement thread!!!\n");
3237 	}
3238 
3239 	INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement);
3240 	amdgpu_ras_ecc_log_init(&con->umc_ecc_log);
3241 #ifdef CONFIG_X86_MCE_AMD
3242 	if ((adev->asic_type == CHIP_ALDEBARAN) &&
3243 	    (adev->gmc.xgmi.connected_to_cpu))
3244 		amdgpu_register_bad_pages_mca_notifier(adev);
3245 #endif
3246 	return 0;
3247 
3248 free:
3249 	kfree((*data)->bps);
3250 	kfree(*data);
3251 	con->eh_data = NULL;
3252 out:
3253 	dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
3254 
3255 	/*
3256 	 * Except for the error-threshold-exceeded case, failures in this
3257 	 * function do not fail amdgpu driver init.
3258 	 */
3259 	if (!amdgpu_ras_is_rma(adev))
3260 		ret = 0;
3261 	else
3262 		ret = -EINVAL;
3263 
3264 	return ret;
3265 }
3266 
3267 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
3268 {
3269 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3270 	struct ras_err_handler_data *data = con->eh_data;
3271 	int max_flush_timeout = MAX_FLUSH_RETIRE_DWORK_TIMES;
3272 	bool ret;
3273 
3274 	/* recovery_init failed to init it, so fini is useless */
3275 	if (!data)
3276 		return 0;
3277 
3278 	/* Save all cached bad pages to eeprom */
3279 	do {
3280 		flush_delayed_work(&con->page_retirement_dwork);
3281 		ret = amdgpu_ras_schedule_retirement_dwork(con, 0);
3282 	} while (ret && max_flush_timeout--);
3283 
3284 	if (con->page_retirement_thread)
3285 		kthread_stop(con->page_retirement_thread);
3286 
3287 	atomic_set(&con->page_retirement_req_cnt, 0);
3288 	atomic_set(&con->poison_creation_count, 0);
3289 
3290 	mutex_destroy(&con->page_rsv_lock);
3291 
3292 	cancel_work_sync(&con->recovery_work);
3293 
3294 	cancel_delayed_work_sync(&con->page_retirement_dwork);
3295 
3296 	amdgpu_ras_ecc_log_fini(&con->umc_ecc_log);
3297 
3298 	mutex_lock(&con->recovery_lock);
3299 	con->eh_data = NULL;
3300 	kfree(data->bps);
3301 	kfree(data);
3302 	mutex_unlock(&con->recovery_lock);
3303 
3304 	return 0;
3305 }
3306 /* recovery end */
3307 
3308 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
3309 {
3310 	if (amdgpu_sriov_vf(adev)) {
3311 		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3312 		case IP_VERSION(13, 0, 2):
3313 		case IP_VERSION(13, 0, 6):
3314 		case IP_VERSION(13, 0, 14):
3315 			return true;
3316 		default:
3317 			return false;
3318 		}
3319 	}
3320 
3321 	if (adev->asic_type == CHIP_IP_DISCOVERY) {
3322 		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3323 		case IP_VERSION(13, 0, 0):
3324 		case IP_VERSION(13, 0, 6):
3325 		case IP_VERSION(13, 0, 10):
3326 		case IP_VERSION(13, 0, 14):
3327 			return true;
3328 		default:
3329 			return false;
3330 		}
3331 	}
3332 
3333 	return adev->asic_type == CHIP_VEGA10 ||
3334 		adev->asic_type == CHIP_VEGA20 ||
3335 		adev->asic_type == CHIP_ARCTURUS ||
3336 		adev->asic_type == CHIP_ALDEBARAN ||
3337 		adev->asic_type == CHIP_SIENNA_CICHLID;
3338 }
3339 
3340 /*
3341  * this is a workaround for the vega20 workstation sku:
3342  * force enable gfx ras and ignore the vbios gfx ras flag
3343  * because GC EDC cannot be written
3344  */
3345 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
3346 {
3347 	struct atom_context *ctx = adev->mode_info.atom_context;
3348 
3349 	if (!ctx)
3350 		return;
3351 
3352 	if (strnstr(ctx->vbios_pn, "D16406",
3353 		    sizeof(ctx->vbios_pn)) ||
3354 		strnstr(ctx->vbios_pn, "D36002",
3355 			sizeof(ctx->vbios_pn)))
3356 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
3357 }
3358 
3359 /* Query ras capability via the atomfirmware interface */
3360 static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev)
3361 {
3362 	/* mem_ecc cap */
3363 	if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
3364 		dev_info(adev->dev, "MEM ECC is active.\n");
3365 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
3366 					 1 << AMDGPU_RAS_BLOCK__DF);
3367 	} else {
3368 		dev_info(adev->dev, "MEM ECC is not presented.\n");
3369 	}
3370 
3371 	/* sram_ecc cap */
3372 	if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
3373 		dev_info(adev->dev, "SRAM ECC is active.\n");
3374 		if (!amdgpu_sriov_vf(adev))
3375 			adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
3376 						  1 << AMDGPU_RAS_BLOCK__DF);
3377 		else
3378 			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
3379 						 1 << AMDGPU_RAS_BLOCK__SDMA |
3380 						 1 << AMDGPU_RAS_BLOCK__GFX);
3381 
3382 		/*
3383 		 * VCN/JPEG RAS can be supported on both bare metal and
3384 		 * SRIOV environment
3385 		 */
3386 		if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
3387 		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
3388 		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
3389 			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
3390 						 1 << AMDGPU_RAS_BLOCK__JPEG);
3391 		else
3392 			adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
3393 						  1 << AMDGPU_RAS_BLOCK__JPEG);
3394 
3395 		/*
3396 		 * XGMI RAS is not supported if xgmi num physical nodes
3397 		 * is zero
3398 		 */
3399 		if (!adev->gmc.xgmi.num_physical_nodes)
3400 			adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
3401 	} else {
3402 		dev_info(adev->dev, "SRAM ECC is not presented.\n");
3403 	}
3404 }
3405 
3406 /* Query poison mode from umc/df IP callbacks */
3407 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
3408 {
3409 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3410 	bool df_poison, umc_poison;
3411 
3412 	/* poison setting is useless on SRIOV guest */
3413 	if (amdgpu_sriov_vf(adev) || !con)
3414 		return;
3415 
3416 	/* Init poison supported flag, the default value is false */
3417 	if (adev->gmc.xgmi.connected_to_cpu ||
3418 	    adev->gmc.is_app_apu) {
3419 		/* enabled by default when GPU is connected to CPU */
3420 		con->poison_supported = true;
3421 	} else if (adev->df.funcs &&
3422 	    adev->df.funcs->query_ras_poison_mode &&
3423 	    adev->umc.ras &&
3424 	    adev->umc.ras->query_ras_poison_mode) {
3425 		df_poison =
3426 			adev->df.funcs->query_ras_poison_mode(adev);
3427 		umc_poison =
3428 			adev->umc.ras->query_ras_poison_mode(adev);
3429 
3430 		/* Only if poison is set in both DF and UMC can we support it */
3431 		if (df_poison && umc_poison)
3432 			con->poison_supported = true;
3433 		else if (df_poison != umc_poison)
3434 			dev_warn(adev->dev,
3435 				"Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
3436 				df_poison, umc_poison);
3437 	}
3438 }
3439 
3440 /*
3441  * check the hardware's ras ability, which will be saved in hw_supported.
3442  * if the hardware does not support ras, we can skip some ras initialization
3443  * and forbid some ras operations from the IPs.
3444  * if software itself (say, a boot parameter) limits the ras ability, we
3445  * still need to allow the IPs to do some limited operations, like disable.
3446  * in such a case we have to initialize ras as normal, but need to check in
3447  * each function whether the operation is allowed or not.
3448  */
3449 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
3450 {
3451 	adev->ras_hw_enabled = adev->ras_enabled = 0;
3452 
3453 	if (!amdgpu_ras_asic_supported(adev))
3454 		return;
3455 
3456 	/* query ras capability from psp */
3457 	if (amdgpu_psp_get_ras_capability(&adev->psp))
3458 		goto init_ras_enabled_flag;
3459 
3460 	/* query ras capability from the vbios */
3461 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
3462 		amdgpu_ras_query_ras_capablity_from_vbios(adev);
3463 	} else {
3464 		/* the driver only manages the RAS features of a few IP blocks
3465 		 * when the GPU is connected to the CPU through XGMI */
3466 		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
3467 					   1 << AMDGPU_RAS_BLOCK__SDMA |
3468 					   1 << AMDGPU_RAS_BLOCK__MMHUB);
3469 	}
3470 
3471 	/* apply asic specific settings (vega20 only for now) */
3472 	amdgpu_ras_get_quirks(adev);
3473 
3474 	/* query poison mode from umc/df ip callback */
3475 	amdgpu_ras_query_poison_mode(adev);
3476 
3477 init_ras_enabled_flag:
3478 	/* hw_supported needs to be aligned with RAS block mask. */
3479 	adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
3480 
3481 	adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
3482 		adev->ras_hw_enabled & amdgpu_ras_mask;
3483 
3484 	/* aca is disabled by default */
3485 	adev->aca.is_enabled = false;
3486 
3487 	/* the bad page feature is not applicable to specific app platforms */
3488 	if (adev->gmc.is_app_apu &&
3489 	    amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(12, 0, 0))
3490 		amdgpu_bad_page_threshold = 0;
3491 }
3492 
3493 static void amdgpu_ras_counte_dw(struct work_struct *work)
3494 {
3495 	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
3496 					      ras_counte_delay_work.work);
3497 	struct amdgpu_device *adev = con->adev;
3498 	struct drm_device *dev = adev_to_drm(adev);
3499 	unsigned long ce_count, ue_count;
3500 	int res;
3501 
3502 	res = pm_runtime_get_sync(dev->dev);
3503 	if (res < 0)
3504 		goto Out;
3505 
3506 	/* Cache new values.
3507 	 */
3508 	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
3509 		atomic_set(&con->ras_ce_count, ce_count);
3510 		atomic_set(&con->ras_ue_count, ue_count);
3511 	}
3512 
3513 	pm_runtime_mark_last_busy(dev->dev);
3514 Out:
3515 	pm_runtime_put_autosuspend(dev->dev);
3516 }
3517 
3518 static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
3519 {
3520 	return  (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) |
3521 			AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
3522 			AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
3523 			AMDGPU_RAS_ERROR__PARITY;
3524 }
3525 
3526 static void ras_event_mgr_init(struct ras_event_manager *mgr)
3527 {
3528 	struct ras_event_state *event_state;
3529 	int i;
3530 
3531 	memset(mgr, 0, sizeof(*mgr));
3532 	atomic64_set(&mgr->seqno, 0);
3533 
3534 	for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) {
3535 		event_state = &mgr->event_state[i];
3536 		event_state->last_seqno = RAS_EVENT_INVALID_ID;
3537 		atomic64_set(&event_state->count, 0);
3538 	}
3539 }
3540 
3541 static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev)
3542 {
3543 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3544 	struct amdgpu_hive_info *hive;
3545 
3546 	if (!ras)
3547 		return;
3548 
3549 	hive = amdgpu_get_xgmi_hive(adev);
3550 	ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr;
3551 
3552 	/* init event manager with node 0 on xgmi system */
3553 	if (!amdgpu_in_reset(adev)) {
3554 		if (!hive || adev->gmc.xgmi.node_id == 0)
3555 			ras_event_mgr_init(ras->event_mgr);
3556 	}
3557 
3558 	if (hive)
3559 		amdgpu_put_xgmi_hive(hive);
3560 }
3561 
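/* Record how much VRAM should be set aside for RAS use on supported dGPU
 * SKUs; the actual reservation based on reserved_pages_in_bytes is assumed
 * to be done later by the VRAM manager (not shown in this file).
 */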
3562 static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev)
3563 {
3564 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3565 
3566 	if (!con || (adev->flags & AMD_IS_APU))
3567 		return;
3568 
3569 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3570 	case IP_VERSION(13, 0, 2):
3571 	case IP_VERSION(13, 0, 6):
3572 	case IP_VERSION(13, 0, 14):
3573 		con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE;
3574 		break;
3575 	default:
3576 		break;
3577 	}
3578 }
3579 
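/* Top-level RAS software init: allocate the ras context plus one
 * ras_manager slot per RAS/MCA block, probe the hardware and software RAS
 * capability, hook up the nbio RAS object early so fatal error interrupts
 * can be armed as soon as possible, create the RAS fs nodes and set up
 * ACA or MCA bank handling where supported.
 */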
3580 int amdgpu_ras_init(struct amdgpu_device *adev)
3581 {
3582 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3583 	int r;
3584 
3585 	if (con)
3586 		return 0;
3587 
3588 	con = kzalloc(sizeof(*con) +
3589 			sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
3590 			sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
3591 			GFP_KERNEL);
3592 	if (!con)
3593 		return -ENOMEM;
3594 
3595 	con->adev = adev;
3596 	INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
3597 	atomic_set(&con->ras_ce_count, 0);
3598 	atomic_set(&con->ras_ue_count, 0);
3599 
3600 	con->objs = (struct ras_manager *)(con + 1);
3601 
3602 	amdgpu_ras_set_context(adev, con);
3603 
3604 	amdgpu_ras_check_supported(adev);
3605 
3606 	if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
3607 		/* set the gfx block ras context feature for VEGA20 Gaming so that
3608 		 * a ras disable cmd is sent to the ras TA during ras late init.
3609 		 */
3610 		if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
3611 			con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
3612 
3613 			return 0;
3614 		}
3615 
3616 		r = 0;
3617 		goto release_con;
3618 	}
3619 
3620 	con->update_channel_flag = false;
3621 	con->features = 0;
3622 	con->schema = 0;
3623 	INIT_LIST_HEAD(&con->head);
3624 	/* Might need to get this flag from vbios. */
3625 	con->flags = RAS_DEFAULT_FLAGS;
3626 
3627 	/* initialize nbio ras function ahead of any other
3628 	 * ras functions so the hardware fatal error interrupt
3629 	 * can be enabled as early as possible */
3630 	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
3631 	case IP_VERSION(7, 4, 0):
3632 	case IP_VERSION(7, 4, 1):
3633 	case IP_VERSION(7, 4, 4):
3634 		if (!adev->gmc.xgmi.connected_to_cpu)
3635 			adev->nbio.ras = &nbio_v7_4_ras;
3636 		break;
3637 	case IP_VERSION(4, 3, 0):
3638 		if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
3639 			/* unlike other generations of nbio ras,
3640 			 * nbio v4_3 only supports the fatal error interrupt
3641 			 * to inform software that DF is frozen due to a
3642 			 * system fatal error event. The driver should not
3643 			 * enable nbio ras in such a case. Instead,
3644 			 * check DF RAS */
3645 			adev->nbio.ras = &nbio_v4_3_ras;
3646 		break;
3647 	case IP_VERSION(7, 9, 0):
3648 		if (!adev->gmc.is_app_apu)
3649 			adev->nbio.ras = &nbio_v7_9_ras;
3650 		break;
3651 	default:
3652 		/* nbio ras is not available */
3653 		break;
3654 	}
3655 
3656 	/* nbio ras block needs to be enabled ahead of other ras blocks
3657 	 * to handle fatal errors */
3658 	r = amdgpu_nbio_ras_sw_init(adev);
3659 	if (r)
3660 		return r;
3661 
3662 	if (adev->nbio.ras &&
3663 	    adev->nbio.ras->init_ras_controller_interrupt) {
3664 		r = adev->nbio.ras->init_ras_controller_interrupt(adev);
3665 		if (r)
3666 			goto release_con;
3667 	}
3668 
3669 	if (adev->nbio.ras &&
3670 	    adev->nbio.ras->init_ras_err_event_athub_interrupt) {
3671 		r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
3672 		if (r)
3673 			goto release_con;
3674 	}
3675 
3676 	/* Pack socket_id into ras feature mask bits [31:29] */
3677 	if (adev->smuio.funcs &&
3678 	    adev->smuio.funcs->get_socket_id)
3679 		con->features |= ((adev->smuio.funcs->get_socket_id(adev)) <<
3680 					AMDGPU_RAS_FEATURES_SOCKETID_SHIFT);
3681 
3682 	/* Get RAS schema for particular SOC */
3683 	con->schema = amdgpu_get_ras_schema(adev);
3684 
3685 	amdgpu_ras_init_reserved_vram_size(adev);
3686 
3687 	if (amdgpu_ras_fs_init(adev)) {
3688 		r = -EINVAL;
3689 		goto release_con;
3690 	}
3691 
3692 	if (amdgpu_ras_aca_is_supported(adev)) {
3693 		if (amdgpu_aca_is_enabled(adev))
3694 			r = amdgpu_aca_init(adev);
3695 		else
3696 			r = amdgpu_mca_init(adev);
3697 		if (r)
3698 			goto release_con;
3699 	}
3700 
3701 	dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
3702 		 "hardware ability[%x] ras_mask[%x]\n",
3703 		 adev->ras_hw_enabled, adev->ras_enabled);
3704 
3705 	return 0;
3706 release_con:
3707 	amdgpu_ras_set_context(adev, NULL);
3708 	kfree(con);
3709 
3710 	return r;
3711 }
3712 
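/* On CPU-connected XGMI parts and APP APUs the EDC state persists across a
 * warm reset, so stale error counts are queried and cleared during block
 * late init (see amdgpu_persistent_edc_harvesting() below).
 */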
3713 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
3714 {
3715 	if (adev->gmc.xgmi.connected_to_cpu ||
3716 	    adev->gmc.is_app_apu)
3717 		return 1;
3718 	return 0;
3719 }
3720 
3721 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
3722 					struct ras_common_if *ras_block)
3723 {
3724 	struct ras_query_if info = {
3725 		.head = *ras_block,
3726 	};
3727 
3728 	if (!amdgpu_persistent_edc_harvesting_supported(adev))
3729 		return 0;
3730 
3731 	if (amdgpu_ras_query_error_status(adev, &info) != 0)
3732 		DRM_WARN("RAS init harvest failure");
3733 
3734 	if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
3735 		DRM_WARN("RAS init harvest reset failure");
3736 
3737 	return 0;
3738 }
3739 
3740 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
3741 {
3742 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3743 
3744 	if (!con)
3745 		return false;
3746 
3747 	return con->poison_supported;
3748 }
3749 
3750 /* helper function to handle common stuff in ip late init phase */
3751 int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
3752 			 struct ras_common_if *ras_block)
3753 {
3754 	struct amdgpu_ras_block_object *ras_obj = NULL;
3755 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3756 	struct ras_query_if *query_info;
3757 	unsigned long ue_count, ce_count;
3758 	int r;
3759 
3760 	/* disable RAS feature per IP block if it is not supported */
3761 	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
3762 		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
3763 		return 0;
3764 	}
3765 
3766 	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
3767 	if (r) {
3768 		if (adev->in_suspend || amdgpu_in_reset(adev)) {
3769 			/* in the resume phase, if we fail to enable ras,
3770 			 * clean up all ras fs nodes, and disable ras */
3771 			goto cleanup;
3772 		} else
3773 			return r;
3774 	}
3775 
3776 	/* check for errors on ASICs where EDC state persists across a warm reset */
3777 	amdgpu_persistent_edc_harvesting(adev, ras_block);
3778 
3779 	/* in resume phase, no need to create ras fs node */
3780 	if (adev->in_suspend || amdgpu_in_reset(adev))
3781 		return 0;
3782 
3783 	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
3784 	if (ras_obj->ras_cb || (ras_obj->hw_ops &&
3785 	    (ras_obj->hw_ops->query_poison_status ||
3786 	    ras_obj->hw_ops->handle_poison_consumption))) {
3787 		r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
3788 		if (r)
3789 			goto cleanup;
3790 	}
3791 
3792 	if (ras_obj->hw_ops &&
3793 	    (ras_obj->hw_ops->query_ras_error_count ||
3794 	     ras_obj->hw_ops->query_ras_error_status)) {
3795 		r = amdgpu_ras_sysfs_create(adev, ras_block);
3796 		if (r)
3797 			goto interrupt;
3798 
3799 		/* Those are the cached values at init.
3800 		 */
3801 		query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
3802 		if (!query_info)
3803 			return -ENOMEM;
3804 		memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
3805 
3806 		if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
3807 			atomic_set(&con->ras_ce_count, ce_count);
3808 			atomic_set(&con->ras_ue_count, ue_count);
3809 		}
3810 
3811 		kfree(query_info);
3812 	}
3813 
3814 	return 0;
3815 
3816 interrupt:
3817 	if (ras_obj->ras_cb)
3818 		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
3819 cleanup:
3820 	amdgpu_ras_feature_enable(adev, ras_block, 0);
3821 	return r;
3822 }
3823 
3824 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
3825 			 struct ras_common_if *ras_block)
3826 {
3827 	return amdgpu_ras_block_late_init(adev, ras_block);
3828 }
3829 
3830 /* helper function to remove ras fs node and interrupt handler */
3831 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
3832 			  struct ras_common_if *ras_block)
3833 {
3834 	struct amdgpu_ras_block_object *ras_obj;
3835 	if (!ras_block)
3836 		return;
3837 
3838 	amdgpu_ras_sysfs_remove(adev, ras_block);
3839 
3840 	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
3841 	if (ras_obj->ras_cb)
3842 		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
3843 }
3844 
3845 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
3846 			  struct ras_common_if *ras_block)
3847 {
3848 	return amdgpu_ras_block_late_fini(adev, ras_block);
3849 }
3850 
3851 /* do some init work after IP late init, since it depends on that.
3852  * It runs in the resume/gpu reset/boot-up cases.
3853  */
3854 void amdgpu_ras_resume(struct amdgpu_device *adev)
3855 {
3856 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3857 	struct ras_manager *obj, *tmp;
3858 
3859 	if (!adev->ras_enabled || !con) {
3860 		/* clean ras context for VEGA20 Gaming after send ras disable cmd */
3861 		amdgpu_release_ras_context(adev);
3862 
3863 		return;
3864 	}
3865 
3866 	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
3867 		/* Set up all other IPs which are not implemented. There is a
3868 		 * tricky detail: an IP's actual ras error type should be
3869 		 * MULTI_UNCORRECTABLE, but as the driver does not handle it,
3870 		 * ERROR_NONE makes sense anyway.
3871 		 */
3872 		amdgpu_ras_enable_all_features(adev, 1);
3873 
3874 		/* We enable ras on all hw_supported blocks, but a boot
3875 		 * parameter might disable some of them, and one or more IPs may
3876 		 * not be implemented yet. So we disable those on their behalf.
3877 		 */
3878 		list_for_each_entry_safe(obj, tmp, &con->head, node) {
3879 			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
3880 				amdgpu_ras_feature_enable(adev, &obj->head, 0);
3881 				/* there should not be any remaining reference. */
3882 				WARN_ON(alive_obj(obj));
3883 			}
3884 		}
3885 	}
3886 }
3887 
3888 void amdgpu_ras_suspend(struct amdgpu_device *adev)
3889 {
3890 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3891 
3892 	if (!adev->ras_enabled || !con)
3893 		return;
3894 
3895 	amdgpu_ras_disable_all_features(adev, 0);
3896 	/* Make sure all ras objects are disabled. */
3897 	if (AMDGPU_RAS_GET_FEATURES(con->features))
3898 		amdgpu_ras_disable_all_features(adev, 1);
3899 }
3900 
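/* Late init for all registered RAS blocks: initialize the event manager,
 * reset the ACA/MCA banks when coming back from a GPU reset, select the
 * default (non-debug) error query mode on bare metal, and invoke each
 * supported block's ras_late_init callback (or the common default).
 */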
3901 int amdgpu_ras_late_init(struct amdgpu_device *adev)
3902 {
3903 	struct amdgpu_ras_block_list *node, *tmp;
3904 	struct amdgpu_ras_block_object *obj;
3905 	int r;
3906 
3907 	amdgpu_ras_event_mgr_init(adev);
3908 
3909 	if (amdgpu_ras_aca_is_supported(adev)) {
3910 		if (amdgpu_in_reset(adev)) {
3911 			if (amdgpu_aca_is_enabled(adev))
3912 				r = amdgpu_aca_reset(adev);
3913 			else
3914 				r = amdgpu_mca_reset(adev);
3915 			if (r)
3916 				return r;
3917 		}
3918 
3919 		if (!amdgpu_sriov_vf(adev)) {
3920 			if (amdgpu_aca_is_enabled(adev))
3921 				amdgpu_ras_set_aca_debug_mode(adev, false);
3922 			else
3923 				amdgpu_ras_set_mca_debug_mode(adev, false);
3924 		}
3925 	}
3926 
3927 	/* Guest side doesn't need to init the ras feature */
3928 	if (amdgpu_sriov_vf(adev))
3929 		return 0;
3930 
3931 	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
3932 		obj = node->ras_obj;
3933 		if (!obj) {
3934 			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
3935 			continue;
3936 		}
3937 
3938 		if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block))
3939 			continue;
3940 
3941 		if (obj->ras_late_init) {
3942 			r = obj->ras_late_init(adev, &obj->ras_comm);
3943 			if (r) {
3944 				dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
3945 					obj->ras_comm.name, r);
3946 				return r;
3947 			}
3948 		} else
3949 			amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
3950 	}
3951 
3952 	return 0;
3953 }
3954 
3955 /* do some fini work before IP fini, since it depends on the IP blocks still being up */
3956 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
3957 {
3958 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3959 
3960 	if (!adev->ras_enabled || !con)
3961 		return 0;
3962 
3963 
3964 	/* Need to disable ras on all IPs here before ip [hw/sw]fini */
3965 	if (AMDGPU_RAS_GET_FEATURES(con->features))
3966 		amdgpu_ras_disable_all_features(adev, 0);
3967 	amdgpu_ras_recovery_fini(adev);
3968 	return 0;
3969 }
3970 
3971 int amdgpu_ras_fini(struct amdgpu_device *adev)
3972 {
3973 	struct amdgpu_ras_block_list *ras_node, *tmp;
3974 	struct amdgpu_ras_block_object *obj = NULL;
3975 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3976 
3977 	if (!adev->ras_enabled || !con)
3978 		return 0;
3979 
3980 	list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
3981 		if (ras_node->ras_obj) {
3982 			obj = ras_node->ras_obj;
3983 			if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
3984 			    obj->ras_fini)
3985 				obj->ras_fini(adev, &obj->ras_comm);
3986 			else
3987 				amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
3988 		}
3989 
3990 		/* Clear ras blocks from ras_list and free ras block list node */
3991 		list_del(&ras_node->node);
3992 		kfree(ras_node);
3993 	}
3994 
3995 	amdgpu_ras_fs_fini(adev);
3996 	amdgpu_ras_interrupt_remove_all(adev);
3997 
3998 	if (amdgpu_ras_aca_is_supported(adev)) {
3999 		if (amdgpu_aca_is_enabled(adev))
4000 			amdgpu_aca_fini(adev);
4001 		else
4002 			amdgpu_mca_fini(adev);
4003 	}
4004 
4005 	WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");
4006 
4007 	if (AMDGPU_RAS_GET_FEATURES(con->features))
4008 		amdgpu_ras_disable_all_features(adev, 0);
4009 
4010 	cancel_delayed_work_sync(&con->ras_counte_delay_work);
4011 
4012 	amdgpu_ras_set_context(adev, NULL);
4013 	kfree(con);
4014 
4015 	return 0;
4016 }
4017 
4018 bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
4019 {
4020 	struct amdgpu_ras *ras;
4021 
4022 	ras = amdgpu_ras_get_context(adev);
4023 	if (!ras)
4024 		return false;
4025 
4026 	return atomic_read(&ras->fed);
4027 }
4028 
4029 void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
4030 {
4031 	struct amdgpu_ras *ras;
4032 
4033 	ras = amdgpu_ras_get_context(adev);
4034 	if (ras)
4035 		atomic_set(&ras->fed, !!status);
4036 }
4037 
4038 static struct ras_event_manager *__get_ras_event_mgr(struct amdgpu_device *adev)
4039 {
4040 	struct amdgpu_ras *ras;
4041 
4042 	ras = amdgpu_ras_get_context(adev);
4043 	if (!ras)
4044 		return NULL;
4045 
4046 	return ras->event_mgr;
4047 }
4048 
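/* Record a new RAS event of the given type: bump the hive/device wide
 * sequence number, remember it as the type's last_seqno and count the
 * occurrence. Normally reached through the amdgpu_ras_mark_ras_event()
 * wrapper used elsewhere in this file.
 */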
4049 int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type,
4050 				     const void *caller)
4051 {
4052 	struct ras_event_manager *event_mgr;
4053 	struct ras_event_state *event_state;
4054 	int ret = 0;
4055 
4056 	if (type >= RAS_EVENT_TYPE_COUNT) {
4057 		ret = -EINVAL;
4058 		goto out;
4059 	}
4060 
4061 	event_mgr = __get_ras_event_mgr(adev);
4062 	if (!event_mgr) {
4063 		ret = -EINVAL;
4064 		goto out;
4065 	}
4066 
4067 	event_state = &event_mgr->event_state[type];
4068 	event_state->last_seqno = atomic64_inc_return(&event_mgr->seqno);
4069 	atomic64_inc(&event_state->count);
4070 
4071 out:
4072 	if (ret && caller)
4073 		dev_warn(adev->dev, "failed to mark ras event (%d) in %ps, ret:%d\n",
4074 			 (int)type, caller, ret);
4075 
4076 	return ret;
4077 }
4078 
4079 u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type)
4080 {
4081 	struct ras_event_manager *event_mgr;
4082 	u64 id;
4083 
4084 	if (type >= RAS_EVENT_TYPE_COUNT)
4085 		return RAS_EVENT_INVALID_ID;
4086 
4087 	switch (type) {
4088 	case RAS_EVENT_TYPE_FATAL:
4089 	case RAS_EVENT_TYPE_POISON_CREATION:
4090 	case RAS_EVENT_TYPE_POISON_CONSUMPTION:
4091 		event_mgr = __get_ras_event_mgr(adev);
4092 		if (!event_mgr)
4093 			return RAS_EVENT_INVALID_ID;
4094 
4095 		id = event_mgr->event_state[type].last_seqno;
4096 		break;
4097 	case RAS_EVENT_TYPE_INVALID:
4098 	default:
4099 		id = RAS_EVENT_INVALID_ID;
4100 		break;
4101 	}
4102 
4103 	return id;
4104 }
4105 
4106 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
4107 {
4108 	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
4109 		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4110 		enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
4111 		u64 event_id;
4112 
4113 		if (amdgpu_ras_mark_ras_event(adev, type))
4114 			return;
4115 
4116 		event_id = amdgpu_ras_acquire_event_id(adev, type);
4117 
4118 		RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error"
4119 			      " (ERREVENT_ATHUB_INTERRUPT) detected!\n");
4120 
4121 		amdgpu_ras_set_fed(adev, true);
4122 		ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
4123 		amdgpu_ras_reset_gpu(adev);
4124 	}
4125 }
4126 
4127 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
4128 {
4129 	if (adev->asic_type == CHIP_VEGA20 &&
4130 	    adev->pm.fw_version <= 0x283400) {
4131 		return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
4132 				amdgpu_ras_intr_triggered();
4133 	}
4134 
4135 	return false;
4136 }
4137 
4138 void amdgpu_release_ras_context(struct amdgpu_device *adev)
4139 {
4140 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4141 
4142 	if (!con)
4143 		return;
4144 
4145 	if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
4146 		con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
4147 		amdgpu_ras_set_context(adev, NULL);
4148 		kfree(con);
4149 	}
4150 }
4151 
4152 #ifdef CONFIG_X86_MCE_AMD
4153 static struct amdgpu_device *find_adev(uint32_t node_id)
4154 {
4155 	int i;
4156 	struct amdgpu_device *adev = NULL;
4157 
4158 	for (i = 0; i < mce_adev_list.num_gpu; i++) {
4159 		adev = mce_adev_list.devs[i];
4160 
4161 		if (adev && adev->gmc.xgmi.connected_to_cpu &&
4162 		    adev->gmc.xgmi.physical_node_id == node_id)
4163 			break;
4164 		adev = NULL;
4165 	}
4166 
4167 	return adev;
4168 }
4169 
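/* Helpers to decode fields from the MCA_IPID value carried in the MCE
 * record: the GPU id (offset by GPU_ID_OFFSET), the UMC instance and the
 * channel index, using the bit positions encoded in the shifts below.
 */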
4170 #define GET_MCA_IPID_GPUID(m)	(((m) >> 44) & 0xF)
4171 #define GET_UMC_INST(m)		(((m) >> 21) & 0x7)
4172 #define GET_CHAN_INDEX(m)	((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
4173 #define GPU_ID_OFFSET		8
4174 
4175 static int amdgpu_bad_page_notifier(struct notifier_block *nb,
4176 				    unsigned long val, void *data)
4177 {
4178 	struct mce *m = (struct mce *)data;
4179 	struct amdgpu_device *adev = NULL;
4180 	uint32_t gpu_id = 0;
4181 	uint32_t umc_inst = 0, ch_inst = 0;
4182 
4183 	/*
4184 	 * If the error was generated in UMC_V2, which belongs to GPU UMCs,
4185 	 * and error occurred in DramECC (Extended error code = 0) then only
4186 	 * process the error, else bail out.
4187 	 */
4188 	if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
4189 		    (XEC(m->status, 0x3f) == 0x0)))
4190 		return NOTIFY_DONE;
4191 
4192 	/*
4193 	 * If it is correctable error, return.
4194 	 */
4195 	if (mce_is_correctable(m))
4196 		return NOTIFY_OK;
4197 
4198 	/*
4199 	 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
4200 	 */
4201 	gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
4202 
4203 	adev = find_adev(gpu_id);
4204 	if (!adev) {
4205 		DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
4206 								gpu_id);
4207 		return NOTIFY_DONE;
4208 	}
4209 
4210 	/*
4211 	 * If it is uncorrectable error, then find out UMC instance and
4212 	 * channel index.
4213 	 */
4214 	umc_inst = GET_UMC_INST(m->ipid);
4215 	ch_inst = GET_CHAN_INDEX(m->ipid);
4216 
4217 	dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
4218 			     umc_inst, ch_inst);
4219 
4220 	if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
4221 		return NOTIFY_OK;
4222 	else
4223 		return NOTIFY_DONE;
4224 }
4225 
4226 static struct notifier_block amdgpu_bad_page_nb = {
4227 	.notifier_call  = amdgpu_bad_page_notifier,
4228 	.priority       = MCE_PRIO_UC,
4229 };
4230 
4231 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
4232 {
4233 	/*
4234 	 * Add the adev to the mce_adev_list.
4235 	 * During mode2 reset, amdgpu device is temporarily
4236 	 * removed from the mgpu_info list which can cause
4237 	 * page retirement to fail.
4238 	 * Use this list instead of mgpu_info to find the amdgpu
4239 	 * device on which the UMC error was reported.
4240 	 */
4241 	mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
4242 
4243 	/*
4244 	 * Register the x86 notifier only once
4245 	 * with MCE subsystem.
4246 	 */
4247 	if (notifier_registered == false) {
4248 		mce_register_decode_chain(&amdgpu_bad_page_nb);
4249 		notifier_registered = true;
4250 	}
4251 }
4252 #endif
4253 
4254 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
4255 {
4256 	if (!adev)
4257 		return NULL;
4258 
4259 	return adev->psp.ras_context.ras;
4260 }
4261 
4262 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
4263 {
4264 	if (!adev)
4265 		return -EINVAL;
4266 
4267 	adev->psp.ras_context.ras = ras_con;
4268 	return 0;
4269 }
4270 
4271 /* check if ras is supported on block, say, sdma, gfx */
4272 int amdgpu_ras_is_supported(struct amdgpu_device *adev,
4273 		unsigned int block)
4274 {
4275 	int ret = 0;
4276 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4277 
4278 	if (block >= AMDGPU_RAS_BLOCK_COUNT)
4279 		return 0;
4280 
4281 	ret = ras && (adev->ras_enabled & (1 << block));
4282 
4283 	/* For the special asic with mem ecc enabled but sram ecc
4284 	 * not enabled, even if the ras block is not marked as supported
4285 	 * in .ras_enabled, it can still be considered to support the ras
4286 	 * function as long as the asic supports poison mode and the
4287 	 * ras block has a ras configuration.
4288 	 */
4289 	if (!ret &&
4290 	    (block == AMDGPU_RAS_BLOCK__GFX ||
4291 	     block == AMDGPU_RAS_BLOCK__SDMA ||
4292 	     block == AMDGPU_RAS_BLOCK__VCN ||
4293 	     block == AMDGPU_RAS_BLOCK__JPEG) &&
4294 		(amdgpu_ras_mask & (1 << block)) &&
4295 	    amdgpu_ras_is_poison_mode_supported(adev) &&
4296 	    amdgpu_ras_get_ras_block(adev, block, 0))
4297 		ret = 1;
4298 
4299 	return ret;
4300 }
4301 
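/* Schedule RAS recovery (GPU reset) on the reset domain. When the device is
 * in RMA state, force a mode1 reset. On an XGMI hive, skip scheduling if
 * another GPU has already started recovery for the whole hive.
 */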
4302 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
4303 {
4304 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4305 
4306 	/* a mode1 reset is the only option when the device is in RMA status */
4307 	if (amdgpu_ras_is_rma(adev)) {
4308 		ras->gpu_reset_flags = 0;
4309 		ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
4310 	}
4311 
4312 	if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) {
4313 		struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
4314 		int hive_ras_recovery = 0;
4315 
4316 		if (hive) {
4317 			hive_ras_recovery = atomic_read(&hive->ras_recovery);
4318 			amdgpu_put_xgmi_hive(hive);
4319 		}
4320 		/* In the case of multiple GPUs, after a GPU has started
4321 		 * resetting all GPUs on the hive, the other GPUs do not need to
4322 		 * trigger GPU reset again.
4323 		 */
4324 		if (!hive_ras_recovery)
4325 			amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
4326 		else
4327 			atomic_set(&ras->in_recovery, 0);
4328 	} else {
4329 		flush_work(&ras->recovery_work);
4330 		amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
4331 	}
4332 
4333 	return 0;
4334 }
4335 
4336 int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
4337 {
4338 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4339 	int ret = 0;
4340 
4341 	if (con) {
4342 		ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
4343 		if (!ret)
4344 			con->is_aca_debug_mode = enable;
4345 	}
4346 
4347 	return ret;
4348 }
4349 
4350 int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable)
4351 {
4352 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4353 	int ret = 0;
4354 
4355 	if (con) {
4356 		if (amdgpu_aca_is_enabled(adev))
4357 			ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
4358 		else
4359 			ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
4360 		if (!ret)
4361 			con->is_aca_debug_mode = enable;
4362 	}
4363 
4364 	return ret;
4365 }
4366 
4367 bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev)
4368 {
4369 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4370 	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
4371 	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
4372 
4373 	if (!con)
4374 		return false;
4375 
4376 	if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) ||
4377 	    (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode))
4378 		return con->is_aca_debug_mode;
4379 	else
4380 		return true;
4381 }
4382 
4383 bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
4384 				     unsigned int *error_query_mode)
4385 {
4386 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4387 	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
4388 	const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
4389 
4390 	if (!con) {
4391 		*error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
4392 		return false;
4393 	}
4394 
4395 	if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode))
4396 		*error_query_mode =
4397 			(con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
4398 	else
4399 		*error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
4400 
4401 	return true;
4402 }
4403 
4404 /* Register each ip ras block into amdgpu ras */
4405 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
4406 		struct amdgpu_ras_block_object *ras_block_obj)
4407 {
4408 	struct amdgpu_ras_block_list *ras_node;
4409 	if (!adev || !ras_block_obj)
4410 		return -EINVAL;
4411 
4412 	ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
4413 	if (!ras_node)
4414 		return -ENOMEM;
4415 
4416 	INIT_LIST_HEAD(&ras_node->node);
4417 	ras_node->ras_obj = ras_block_obj;
4418 	list_add_tail(&ras_node->node, &adev->ras_list);
4419 
4420 	return 0;
4421 }
4422 
4423 void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
4424 {
4425 	if (!err_type_name)
4426 		return;
4427 
4428 	switch (err_type) {
4429 	case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
4430 		sprintf(err_type_name, "correctable");
4431 		break;
4432 	case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
4433 		sprintf(err_type_name, "uncorrectable");
4434 		break;
4435 	default:
4436 		sprintf(err_type_name, "unknown");
4437 		break;
4438 	}
4439 }
4440 
4441 bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
4442 					 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
4443 					 uint32_t instance,
4444 					 uint32_t *memory_id)
4445 {
4446 	uint32_t err_status_lo_data, err_status_lo_offset;
4447 
4448 	if (!reg_entry)
4449 		return false;
4450 
4451 	err_status_lo_offset =
4452 		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
4453 					    reg_entry->seg_lo, reg_entry->reg_lo);
4454 	err_status_lo_data = RREG32(err_status_lo_offset);
4455 
4456 	if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
4457 	    !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
4458 		return false;
4459 
4460 	*memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);
4461 
4462 	return true;
4463 }
4464 
4465 bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
4466 				       const struct amdgpu_ras_err_status_reg_entry *reg_entry,
4467 				       uint32_t instance,
4468 				       unsigned long *err_cnt)
4469 {
4470 	uint32_t err_status_hi_data, err_status_hi_offset;
4471 
4472 	if (!reg_entry)
4473 		return false;
4474 
4475 	err_status_hi_offset =
4476 		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
4477 					    reg_entry->seg_hi, reg_entry->reg_hi);
4478 	err_status_hi_data = RREG32(err_status_hi_offset);
4479 
4480 	if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
4481 	    !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
4482 		/* keep the check here in case we need to refer to the result later */
4483 		dev_dbg(adev->dev, "Invalid err_info field\n");
4484 
4485 	/* read err count */
4486 	*err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);
4487 
4488 	return true;
4489 }
4490 
4491 void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
4492 					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
4493 					   uint32_t reg_list_size,
4494 					   const struct amdgpu_ras_memory_id_entry *mem_list,
4495 					   uint32_t mem_list_size,
4496 					   uint32_t instance,
4497 					   uint32_t err_type,
4498 					   unsigned long *err_count)
4499 {
4500 	uint32_t memory_id;
4501 	unsigned long err_cnt;
4502 	char err_type_name[16];
4503 	uint32_t i, j;
4504 
4505 	for (i = 0; i < reg_list_size; i++) {
4506 		/* query memory_id from err_status_lo */
4507 		if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
4508 							 instance, &memory_id))
4509 			continue;
4510 
4511 		/* query err_cnt from err_status_hi */
4512 		if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
4513 						       instance, &err_cnt) ||
4514 		    !err_cnt)
4515 			continue;
4516 
4517 		*err_count += err_cnt;
4518 
4519 		/* log the errors */
4520 		amdgpu_ras_get_error_type_name(err_type, err_type_name);
4521 		if (!mem_list) {
4522 			/* memory_list is not supported */
4523 			dev_info(adev->dev,
4524 				 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
4525 				 err_cnt, err_type_name,
4526 				 reg_list[i].block_name,
4527 				 instance, memory_id);
4528 		} else {
4529 			for (j = 0; j < mem_list_size; j++) {
4530 				if (memory_id == mem_list[j].memory_id) {
4531 					dev_info(adev->dev,
4532 						 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
4533 						 err_cnt, err_type_name,
4534 						 reg_list[i].block_name,
4535 						 instance, mem_list[j].name);
4536 					break;
4537 				}
4538 			}
4539 		}
4540 	}
4541 }
4542 
4543 void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
4544 					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
4545 					   uint32_t reg_list_size,
4546 					   uint32_t instance)
4547 {
4548 	uint32_t err_status_lo_offset, err_status_hi_offset;
4549 	uint32_t i;
4550 
4551 	for (i = 0; i < reg_list_size; i++) {
4552 		err_status_lo_offset =
4553 			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
4554 						    reg_list[i].seg_lo, reg_list[i].reg_lo);
4555 		err_status_hi_offset =
4556 			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
4557 						    reg_list[i].seg_hi, reg_list[i].reg_hi);
4558 		WREG32(err_status_lo_offset, 0);
4559 		WREG32(err_status_hi_offset, 0);
4560 	}
4561 }
4562 
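/* ras_err_data keeps per-(socket, die) error statistics as a sorted list of
 * ras_err_node entries. A minimal usage sketch (the mcm_info contents are
 * assumed to be filled in by the caller):
 *
 *	struct ras_err_data err_data;
 *
 *	amdgpu_ras_error_data_init(&err_data);
 *	amdgpu_ras_error_statistic_ue_count(&err_data, &mcm_info, 1);
 *	amdgpu_ras_error_data_fini(&err_data);
 */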
4563 int amdgpu_ras_error_data_init(struct ras_err_data *err_data)
4564 {
4565 	memset(err_data, 0, sizeof(*err_data));
4566 
4567 	INIT_LIST_HEAD(&err_data->err_node_list);
4568 
4569 	return 0;
4570 }
4571 
4572 static void amdgpu_ras_error_node_release(struct ras_err_node *err_node)
4573 {
4574 	if (!err_node)
4575 		return;
4576 
4577 	list_del(&err_node->node);
4578 	kvfree(err_node);
4579 }
4580 
4581 void amdgpu_ras_error_data_fini(struct ras_err_data *err_data)
4582 {
4583 	struct ras_err_node *err_node, *tmp;
4584 
4585 	list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
4586 		amdgpu_ras_error_node_release(err_node);
4587 }
4588 
4589 static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data,
4590 							     struct amdgpu_smuio_mcm_config_info *mcm_info)
4591 {
4592 	struct ras_err_node *err_node;
4593 	struct amdgpu_smuio_mcm_config_info *ref_id;
4594 
4595 	if (!err_data || !mcm_info)
4596 		return NULL;
4597 
4598 	for_each_ras_error(err_node, err_data) {
4599 		ref_id = &err_node->err_info.mcm_info;
4600 
4601 		if (mcm_info->socket_id == ref_id->socket_id &&
4602 		    mcm_info->die_id == ref_id->die_id)
4603 			return err_node;
4604 	}
4605 
4606 	return NULL;
4607 }
4608 
4609 static struct ras_err_node *amdgpu_ras_error_node_new(void)
4610 {
4611 	struct ras_err_node *err_node;
4612 
4613 	err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL);
4614 	if (!err_node)
4615 		return NULL;
4616 
4617 	INIT_LIST_HEAD(&err_node->node);
4618 
4619 	return err_node;
4620 }
4621 
4622 static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
4623 {
4624 	struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
4625 	struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
4626 	struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
4627 	struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;
4628 
4629 	if (unlikely(infoa->socket_id != infob->socket_id))
4630 		return infoa->socket_id - infob->socket_id;
4631 
4632 	return infoa->die_id - infob->die_id;
4635 }
4636 
4637 static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
4638 				struct amdgpu_smuio_mcm_config_info *mcm_info)
4639 {
4640 	struct ras_err_node *err_node;
4641 
4642 	err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info);
4643 	if (err_node)
4644 		return &err_node->err_info;
4645 
4646 	err_node = amdgpu_ras_error_node_new();
4647 	if (!err_node)
4648 		return NULL;
4649 
4650 	memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
4651 
4652 	err_data->err_list_count++;
4653 	list_add_tail(&err_node->node, &err_data->err_node_list);
4654 	list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
4655 
4656 	return &err_node->err_info;
4657 }
4658 
4659 int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
4660 					struct amdgpu_smuio_mcm_config_info *mcm_info,
4661 					u64 count)
4662 {
4663 	struct ras_err_info *err_info;
4664 
4665 	if (!err_data || !mcm_info)
4666 		return -EINVAL;
4667 
4668 	if (!count)
4669 		return 0;
4670 
4671 	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4672 	if (!err_info)
4673 		return -EINVAL;
4674 
4675 	err_info->ue_count += count;
4676 	err_data->ue_count += count;
4677 
4678 	return 0;
4679 }
4680 
4681 int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
4682 					struct amdgpu_smuio_mcm_config_info *mcm_info,
4683 					u64 count)
4684 {
4685 	struct ras_err_info *err_info;
4686 
4687 	if (!err_data || !mcm_info)
4688 		return -EINVAL;
4689 
4690 	if (!count)
4691 		return 0;
4692 
4693 	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4694 	if (!err_info)
4695 		return -EINVAL;
4696 
4697 	err_info->ce_count += count;
4698 	err_data->ce_count += count;
4699 
4700 	return 0;
4701 }
4702 
4703 int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
4704 					struct amdgpu_smuio_mcm_config_info *mcm_info,
4705 					u64 count)
4706 {
4707 	struct ras_err_info *err_info;
4708 
4709 	if (!err_data || !mcm_info)
4710 		return -EINVAL;
4711 
4712 	if (!count)
4713 		return 0;
4714 
4715 	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4716 	if (!err_info)
4717 		return -EINVAL;
4718 
4719 	err_info->de_count += count;
4720 	err_data->de_count += count;
4721 
4722 	return 0;
4723 }
4724 
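/* Boot-time error reporting: the boot status is polled from
 * MP0_SMN_C2PMSG_92 and the boot error details are read from
 * MP0_SMN_C2PMSG_126 through the extended SMN addressing used by
 * aqua_vanjaram, then decoded per failure type.
 */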
4725 #define mmMP0_SMN_C2PMSG_92	0x1609C
4726 #define mmMP0_SMN_C2PMSG_126	0x160BE
4727 static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
4728 						 u32 instance)
4729 {
4730 	u32 socket_id, aid_id, hbm_id;
4731 	u32 fw_status;
4732 	u32 boot_error;
4733 	u64 reg_addr;
4734 
4735 	/* The pattern for smn addressing in other SOCs could be different from
4736 	 * the one for aqua_vanjaram. We should revisit the code if the pattern
4737 	 * changes. In such a case, replace the aqua_vanjaram implementation
4738 	 * with a more common helper */
4739 	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
4740 		   aqua_vanjaram_encode_ext_smn_addressing(instance);
4741 	fw_status = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4742 
4743 	reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
4744 		   aqua_vanjaram_encode_ext_smn_addressing(instance);
4745 	boot_error = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4746 
4747 	socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
4748 	aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error);
4749 	hbm_id = ((1 == AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error)) ? 0 : 1);
4750 
4751 	if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
4752 		dev_info(adev->dev,
4753 			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, memory training failed\n",
4754 			 socket_id, aid_id, hbm_id, fw_status);
4755 
4756 	if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
4757 		dev_info(adev->dev,
4758 			 "socket: %d, aid: %d, fw_status: 0x%x, firmware load failed at boot time\n",
4759 			 socket_id, aid_id, fw_status);
4760 
4761 	if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
4762 		dev_info(adev->dev,
4763 			 "socket: %d, aid: %d, fw_status: 0x%x, wafl link training failed\n",
4764 			 socket_id, aid_id, fw_status);
4765 
4766 	if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
4767 		dev_info(adev->dev,
4768 			 "socket: %d, aid: %d, fw_status: 0x%x, xgmi link training failed\n",
4769 			 socket_id, aid_id, fw_status);
4770 
4771 	if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
4772 		dev_info(adev->dev,
4773 			 "socket: %d, aid: %d, fw_status: 0x%x, usr cp link training failed\n",
4774 			 socket_id, aid_id, fw_status);
4775 
4776 	if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
4777 		dev_info(adev->dev,
4778 			 "socket: %d, aid: %d, fw_status: 0x%x, usr dp link training failed\n",
4779 			 socket_id, aid_id, fw_status);
4780 
4781 	if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
4782 		dev_info(adev->dev,
4783 			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm memory test failed\n",
4784 			 socket_id, aid_id, hbm_id, fw_status);
4785 
4786 	if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
4787 		dev_info(adev->dev,
4788 			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n",
4789 			 socket_id, aid_id, hbm_id, fw_status);
4790 
4791 	if (AMDGPU_RAS_GPU_ERR_DATA_ABORT(boot_error))
4792 		dev_info(adev->dev,
4793 			 "socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
4794 			 socket_id, aid_id, fw_status);
4795 
4796 	if (AMDGPU_RAS_GPU_ERR_UNKNOWN(boot_error))
4797 		dev_info(adev->dev,
4798 			 "socket: %d, aid: %d, fw_status: 0x%x, unknown boot time errors\n",
4799 			 socket_id, aid_id, fw_status);
4800 }
4801 
4802 static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev,
4803 					   u32 instance)
4804 {
4805 	u64 reg_addr;
4806 	u32 reg_data;
4807 	int retry_loop;
4808 
4809 	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
4810 		   aqua_vanjaram_encode_ext_smn_addressing(instance);
4811 
4812 	for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
4813 		reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4814 		if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS)
4815 			return false;
4816 		else
4817 			msleep(1);
4818 	}
4819 
4820 	return true;
4821 }
4822 
4823 void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
4824 {
4825 	u32 i;
4826 
4827 	for (i = 0; i < num_instances; i++) {
4828 		if (amdgpu_ras_boot_error_detected(adev, i))
4829 			amdgpu_ras_boot_time_error_reporting(adev, i);
4830 	}
4831 }
4832 
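/* Reserve the VRAM page containing @pfn with the VRAM manager if it is not
 * reserved already, so it is kept out of normal allocations.
 */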
4833 int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
4834 {
4835 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4836 	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
4837 	uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
4838 	int ret = 0;
4839 
4840 	mutex_lock(&con->page_rsv_lock);
4841 	ret = amdgpu_vram_mgr_query_page_status(mgr, start);
4842 	if (ret == -ENOENT)
4843 		ret = amdgpu_vram_mgr_reserve_range(mgr, start, AMDGPU_GPU_PAGE_SIZE);
4844 	mutex_unlock(&con->page_rsv_lock);
4845 
4846 	return ret;
4847 }
4848 
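/* printf-style logging helper that prefixes the message with the RAS event
 * id when it is valid (e.g. "{42}..."); typically reached through the
 * RAS_EVENT_LOG() macro used elsewhere in this file.
 */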
4849 void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
4850 				const char *fmt, ...)
4851 {
4852 	struct va_format vaf;
4853 	va_list args;
4854 
4855 	va_start(args, fmt);
4856 	vaf.fmt = fmt;
4857 	vaf.va = &args;
4858 
4859 	if (RAS_EVENT_ID_IS_VALID(event_id))
4860 		dev_printk(KERN_INFO, adev->dev, "{%llu}%pV", event_id, &vaf);
4861 	else
4862 		dev_printk(KERN_INFO, adev->dev, "%pV", &vaf);
4863 
4864 	va_end(args);
4865 }
4866 
4867 bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
4868 {
4869 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4870 
4871 	if (!con)
4872 		return false;
4873 
4874 	return con->is_rma;
4875 }
4876