1 /*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 *
23 */
24 #include <linux/debugfs.h>
25 #include <linux/list.h>
26 #include <linux/module.h>
27 #include <linux/uaccess.h>
28 #include <linux/reboot.h>
29 #include <linux/syscalls.h>
30 #include <linux/pm_runtime.h>
31 #include <linux/list_sort.h>
32
33 #include "amdgpu.h"
34 #include "amdgpu_ras.h"
35 #include "amdgpu_atomfirmware.h"
36 #include "amdgpu_xgmi.h"
37 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
38 #include "nbio_v4_3.h"
39 #include "nbif_v6_3_1.h"
40 #include "nbio_v7_9.h"
41 #include "atom.h"
42 #include "amdgpu_reset.h"
43 #include "amdgpu_psp.h"
44 #include "amdgpu_ras_mgr.h"
45
46 #ifdef CONFIG_X86_MCE_AMD
47 #include <asm/mce.h>
48
49 static bool notifier_registered;
50 #endif
51 static const char *RAS_FS_NAME = "ras";
52
53 const char *ras_error_string[] = {
54 "none",
55 "parity",
56 "single_correctable",
57 "multi_uncorrectable",
58 "poison",
59 };
60
61 const char *ras_block_string[] = {
62 "umc",
63 "sdma",
64 "gfx",
65 "mmhub",
66 "athub",
67 "pcie_bif",
68 "hdp",
69 "xgmi_wafl",
70 "df",
71 "smn",
72 "sem",
73 "mp0",
74 "mp1",
75 "fuse",
76 "mca",
77 "vcn",
78 "jpeg",
79 "ih",
80 "mpio",
81 "mmsch",
82 };
83
84 const char *ras_mca_block_string[] = {
85 "mca_mp0",
86 "mca_mp1",
87 "mca_mpio",
88 "mca_iohc",
89 };
90
91 struct amdgpu_ras_block_list {
92 /* ras block link */
93 struct list_head node;
94
95 struct amdgpu_ras_block_object *ras_obj;
96 };
97
const char *get_ras_block_str(struct ras_common_if *ras_block)
99 {
100 if (!ras_block)
101 return "NULL";
102
103 if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
104 ras_block->block >= ARRAY_SIZE(ras_block_string))
105 return "OUT OF RANGE";
106
107 if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
108 return ras_mca_block_string[ras_block->sub_block_index];
109
110 return ras_block_string[ras_block->block];
111 }
112
113 #define ras_block_str(_BLOCK_) \
114 (((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")
115
116 #define ras_err_str(i) (ras_error_string[ffs(i)])
117
118 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
119
120 /* inject address is 52 bits */
121 #define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52)
122
123 /* typical ECC bad page rate is 1 bad page per 100MB VRAM */
124 #define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL)
125
126 #define MAX_UMC_POISON_POLLING_TIME_ASYNC 10
127
128 #define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100 //ms
129
130 #define MAX_FLUSH_RETIRE_DWORK_TIMES 100
131
132 #define BYPASS_ALLOCATED_ADDRESS 0x0
133 #define BYPASS_INITIALIZATION_ADDRESS 0x1
134
135 enum amdgpu_ras_retire_page_reservation {
136 AMDGPU_RAS_RETIRE_PAGE_RESERVED,
137 AMDGPU_RAS_RETIRE_PAGE_PENDING,
138 AMDGPU_RAS_RETIRE_PAGE_FAULT,
139 };
140
141 atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
142
143 static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
144 uint64_t addr);
145 static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
146 uint64_t addr);
147
148 static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev);
149 static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev);
150
151 #ifdef CONFIG_X86_MCE_AMD
152 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
153 static void
154 amdgpu_unregister_bad_pages_mca_notifier(struct amdgpu_device *adev);
155 struct mce_notifier_adev_list {
156 struct amdgpu_device *devs[MAX_GPU_INSTANCE];
157 int num_gpu;
158 };
159 static struct mce_notifier_adev_list mce_adev_list;
160 #endif
161
void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
163 {
164 if (adev && amdgpu_ras_get_context(adev))
165 amdgpu_ras_get_context(adev)->error_query_ready = ready;
166 }
167
static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
169 {
170 if (adev && amdgpu_ras_get_context(adev))
171 return amdgpu_ras_get_context(adev)->error_query_ready;
172
173 return false;
174 }
175
static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
177 {
178 struct ras_err_data err_data;
179 struct eeprom_table_record err_rec;
180 int ret;
181
182 ret = amdgpu_ras_check_bad_page(adev, address);
183 if (ret == -EINVAL) {
184 dev_warn(adev->dev,
185 "RAS WARN: input address 0x%llx is invalid.\n",
186 address);
187 return -EINVAL;
188 } else if (ret == 1) {
189 dev_warn(adev->dev,
190 "RAS WARN: 0x%llx has already been marked as bad page!\n",
191 address);
192 return 0;
193 }
194
195 ret = amdgpu_ras_error_data_init(&err_data);
196 if (ret)
197 return ret;
198
199 memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
200 err_data.err_addr = &err_rec;
201 amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);
202
203 if (amdgpu_bad_page_threshold != 0) {
204 amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
205 err_data.err_addr_cnt, false);
206 amdgpu_ras_save_bad_pages(adev, NULL);
207 }
208
209 amdgpu_ras_error_data_fini(&err_data);
210
211 dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
212 dev_warn(adev->dev, "Clear EEPROM:\n");
213 dev_warn(adev->dev, " echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
214
215 return 0;
216 }
217
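/*
 * Check whether an address is acceptable for RAS error injection.  With
 * BYPASS_ALLOCATED_ADDRESS the check rejects pages that belong to a process
 * other than the caller; with BYPASS_INITIALIZATION_ADDRESS it rejects pages
 * that belong to the task that initialized RAS.  Returns 0 if the address is
 * acceptable and a negative error code otherwise.
 */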
static int amdgpu_check_address_validity(struct amdgpu_device *adev,
219 uint64_t address, uint64_t flags)
220 {
221 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
222 struct amdgpu_vram_block_info blk_info;
223 uint64_t page_pfns[32] = {0};
224 int i, ret, count;
225 bool hit = false;
226
227 if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0))
228 return 0;
229
230 if (amdgpu_sriov_vf(adev)) {
231 if (amdgpu_virt_check_vf_critical_region(adev, address, &hit))
232 return -EPERM;
233 return hit ? -EACCES : 0;
234 }
235
236 if ((address >= adev->gmc.mc_vram_size) ||
237 (address >= RAS_UMC_INJECT_ADDR_LIMIT))
238 return -EFAULT;
239
240 if (amdgpu_uniras_enabled(adev))
241 count = amdgpu_ras_mgr_lookup_bad_pages_in_a_row(adev, address,
242 page_pfns, ARRAY_SIZE(page_pfns));
243 else
244 count = amdgpu_umc_lookup_bad_pages_in_a_row(adev,
245 address, page_pfns, ARRAY_SIZE(page_pfns));
246
247 if (count <= 0)
248 return -EPERM;
249
250 for (i = 0; i < count; i++) {
251 memset(&blk_info, 0, sizeof(blk_info));
252 ret = amdgpu_vram_mgr_query_address_block_info(&adev->mman.vram_mgr,
253 page_pfns[i] << AMDGPU_GPU_PAGE_SHIFT, &blk_info);
254 if (!ret) {
			/* The input address that needs to be checked may be allocated by
			 * the calling process itself, so it is necessary to exclude the
			 * calling process from this check.
258 */
259 if ((flags == BYPASS_ALLOCATED_ADDRESS) &&
260 ((blk_info.task.pid != task_pid_nr(current)) ||
261 strncmp(blk_info.task.comm, current->comm, TASK_COMM_LEN)))
262 return -EACCES;
263 else if ((flags == BYPASS_INITIALIZATION_ADDRESS) &&
264 (blk_info.task.pid == con->init_task_pid) &&
265 !strncmp(blk_info.task.comm, con->init_task_comm, TASK_COMM_LEN))
266 return -EACCES;
267 }
268 }
269
270 return 0;
271 }
272
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
274 size_t size, loff_t *pos)
275 {
276 struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
277 struct ras_query_if info = {
278 .head = obj->head,
279 };
280 ssize_t s;
281 char val[128];
282
283 if (amdgpu_ras_query_error_status(obj->adev, &info))
284 return -EINVAL;
285
286 /* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
287 if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
288 amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
289 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
290 dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
291 }
292
293 s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
294 "ue", info.ue_count,
295 "ce", info.ce_count);
296 if (*pos >= s)
297 return 0;
298
299 s -= *pos;
300 s = min_t(u64, s, size);
301
302
303 if (copy_to_user(buf, &val[*pos], s))
304 return -EINVAL;
305
306 *pos += s;
307
308 return s;
309 }
310
311 static const struct file_operations amdgpu_ras_debugfs_ops = {
312 .owner = THIS_MODULE,
313 .read = amdgpu_ras_debugfs_read,
314 .write = NULL,
315 .llseek = default_llseek
316 };
317
static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
319 {
320 int i;
321
322 for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
323 *block_id = i;
324 if (strcmp(name, ras_block_string[i]) == 0)
325 return 0;
326 }
327 return -EINVAL;
328 }
329
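/*
 * Parse a command written to the ras_ctrl debugfs file.  The accepted text
 * forms are (numeric fields may be decimal or 0x-prefixed hex):
 *   disable <block>
 *   enable <block> <error>
 *   inject <block> <error> <sub_block> <address> <value> [instance_mask]
 *   retire_page <address>
 *   check_address <address> <flags>
 * Input that is not a recognized ASCII command is treated as a raw
 * struct ras_debug_if copied from userspace.
 */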
static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
331 const char __user *buf, size_t size,
332 loff_t *pos, struct ras_debug_if *data)
333 {
334 ssize_t s = min_t(u64, 64, size);
335 char str[65];
336 char block_name[33];
337 char err[9] = "ue";
338 int op = -1;
339 int block_id;
340 uint32_t sub_block;
341 u64 address, value;
342 /* default value is 0 if the mask is not set by user */
343 u32 instance_mask = 0;
344
345 if (*pos)
346 return -EINVAL;
347 *pos = size;
348
349 memset(str, 0, sizeof(str));
350 memset(data, 0, sizeof(*data));
351
352 if (copy_from_user(str, buf, s))
353 return -EINVAL;
354
355 if (sscanf(str, "disable %32s", block_name) == 1)
356 op = 0;
357 else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
358 op = 1;
359 else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
360 op = 2;
361 else if (strstr(str, "retire_page") != NULL)
362 op = 3;
363 else if (strstr(str, "check_address") != NULL)
364 op = 4;
365 else if (str[0] && str[1] && str[2] && str[3])
		/* ascii string, but the command did not match. */
367 return -EINVAL;
368
369 if (op != -1) {
370 if (op == 3) {
371 if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
372 sscanf(str, "%*s %llu", &address) != 1)
373 return -EINVAL;
374
375 data->op = op;
376 data->inject.address = address;
377
378 return 0;
379 } else if (op == 4) {
380 if (sscanf(str, "%*s 0x%llx 0x%llx", &address, &value) != 2 &&
381 sscanf(str, "%*s %llu %llu", &address, &value) != 2)
382 return -EINVAL;
383
384 data->op = op;
385 data->inject.address = address;
386 data->inject.value = value;
387 return 0;
388 }
389
390 if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
391 return -EINVAL;
392
393 data->head.block = block_id;
394 /* only ue, ce and poison errors are supported */
395 if (!memcmp("ue", err, 2))
396 data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
397 else if (!memcmp("ce", err, 2))
398 data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
399 else if (!memcmp("poison", err, 6))
400 data->head.type = AMDGPU_RAS_ERROR__POISON;
401 else
402 return -EINVAL;
403
404 data->op = op;
405
406 if (op == 2) {
407 if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
408 &sub_block, &address, &value, &instance_mask) != 4 &&
409 sscanf(str, "%*s %*s %*s %u %llu %llu %u",
410 &sub_block, &address, &value, &instance_mask) != 4 &&
411 sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
412 &sub_block, &address, &value) != 3 &&
413 sscanf(str, "%*s %*s %*s %u %llu %llu",
414 &sub_block, &address, &value) != 3)
415 return -EINVAL;
416 data->head.sub_block_index = sub_block;
417 data->inject.address = address;
418 data->inject.value = value;
419 data->inject.instance_mask = instance_mask;
420 }
421 } else {
422 if (size < sizeof(*data))
423 return -EINVAL;
424
425 if (copy_from_user(data, buf, sizeof(*data)))
426 return -EINVAL;
427 }
428
429 return 0;
430 }
431
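/*
 * Sanitize the user supplied instance mask for an injection request: drop it
 * when there is only one XCC instance, and clear any bits beyond the number
 * of GFX, SDMA or VCN/JPEG instances of the target block.
 */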
static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
433 struct ras_debug_if *data)
434 {
435 int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
436 uint32_t mask, inst_mask = data->inject.instance_mask;
437
438 /* no need to set instance mask if there is only one instance */
439 if (num_xcc <= 1 && inst_mask) {
440 data->inject.instance_mask = 0;
441 dev_dbg(adev->dev,
442 "RAS inject mask(0x%x) isn't supported and force it to 0.\n",
443 inst_mask);
444
445 return;
446 }
447
448 switch (data->head.block) {
449 case AMDGPU_RAS_BLOCK__GFX:
450 mask = GENMASK(num_xcc - 1, 0);
451 break;
452 case AMDGPU_RAS_BLOCK__SDMA:
453 mask = GENMASK(adev->sdma.num_instances - 1, 0);
454 break;
455 case AMDGPU_RAS_BLOCK__VCN:
456 case AMDGPU_RAS_BLOCK__JPEG:
457 mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
458 break;
459 default:
460 mask = inst_mask;
461 break;
462 }
463
464 /* remove invalid bits in instance mask */
465 data->inject.instance_mask &= mask;
466 if (inst_mask != data->inject.instance_mask)
467 dev_dbg(adev->dev,
468 "Adjust RAS inject mask 0x%x to 0x%x\n",
469 inst_mask, data->inject.instance_mask);
470 }
471
472 /**
473 * DOC: AMDGPU RAS debugfs control interface
474 *
475 * The control interface accepts struct ras_debug_if which has two members.
476 *
477 * First member: ras_debug_if::head or ras_debug_if::inject.
478 *
479 * head is used to indicate which IP block will be under control.
480 *
481 * head has four members, they are block, type, sub_block_index, name.
482 * block: which IP will be under control.
483 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, e.g. GFX, SDMA.
485 * name: the name of IP.
486 *
 * inject has three members in addition to head: address, value and mask.
 * As their names indicate, the inject operation writes the
 * value to the address.
490 *
491 * The second member: struct ras_debug_if::op.
492 * It has three kinds of operations.
493 *
494 * - 0: disable RAS on the block. Take ::head as its data.
495 * - 1: enable RAS on the block. Take ::head as its data.
496 * - 2: inject errors on the block. Take ::inject as its data.
497 *
498 * How to use the interface?
499 *
500 * In a program
501 *
502 * Copy the struct ras_debug_if in your code and initialize it.
503 * Write the struct to the control interface.
504 *
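 * For example, a minimal sketch (op 2 is "inject"; it assumes the definitions
 * of struct ras_debug_if and the AMDGPU_RAS_* enums are copied from
 * amdgpu_ras.h, and that debugfs is mounted at the usual location):
 *
 * .. code-block:: c
 *
 *    #include <fcntl.h>
 *    #include <string.h>
 *    #include <unistd.h>
 *
 *    int inject_umc_ue(void)
 *    {
 *            struct ras_debug_if data;
 *            ssize_t n;
 *            int fd;
 *
 *            memset(&data, 0, sizeof(data));
 *            data.op = 2;
 *            data.inject.head.block = AMDGPU_RAS_BLOCK__UMC;
 *            data.inject.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *
 *            fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *            if (fd < 0)
 *                    return -1;
 *            n = write(fd, &data, sizeof(data));
 *            close(fd);
 *            return n == (ssize_t)sizeof(data) ? 0 : -1;
 *    }
 *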
505 * From shell
506 *
507 * .. code-block:: bash
508 *
509 * echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
510 * echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
511 * echo "inject <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
512 *
 * Where N is the card which you want to affect.
514 *
515 * "disable" requires only the block.
516 * "enable" requires the block and error type.
517 * "inject" requires the block, error type, address, and value.
518 *
519 * The block is one of: umc, sdma, gfx, etc.
520 * see ras_block_string[] for details
521 *
 * The error type is one of: ue, ce and poison, where
523 * ue is multi-uncorrectable
524 * ce is single-correctable
525 * poison is poison
526 *
 * The sub-block is the sub-block index; pass 0 if there is no sub-block.
528 * The address and value are hexadecimal numbers, leading 0x is optional.
 * The mask is the instance mask; it is optional, and the default value is 0x1.
530 *
531 * For instance,
532 *
533 * .. code-block:: bash
534 *
535 * echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
536 * echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
537 * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
538 *
539 * How to check the result of the operation?
540 *
541 * To check disable/enable, see "ras" features at,
542 * /sys/class/drm/card[0/1/2...]/device/ras/features
543 *
544 * To check inject, see the corresponding error count at,
545 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
546 *
547 * .. note::
548 * Operations are only allowed on blocks which are supported.
549 * Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
550 * to see which blocks support RAS on a particular asic.
551 *
552 */
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
554 const char __user *buf,
555 size_t size, loff_t *pos)
556 {
557 struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
558 struct ras_debug_if data;
559 int ret = 0;
560
561 if (!amdgpu_ras_get_error_query_ready(adev)) {
562 dev_warn(adev->dev, "RAS WARN: error injection "
563 "currently inaccessible\n");
564 return size;
565 }
566
567 ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
568 if (ret)
569 return ret;
570
571 if (data.op == 3) {
572 ret = amdgpu_reserve_page_direct(adev, data.inject.address);
573 if (!ret)
574 return size;
575 else
576 return ret;
577 } else if (data.op == 4) {
578 ret = amdgpu_check_address_validity(adev, data.inject.address, data.inject.value);
579 return ret ? ret : size;
580 }
581
582 if (!amdgpu_ras_is_supported(adev, data.head.block))
583 return -EINVAL;
584
585 switch (data.op) {
586 case 0:
587 ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
588 break;
589 case 1:
590 ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
591 break;
592 case 2:
593 /* umc ce/ue error injection for a bad page is not allowed */
594 if (data.head.block == AMDGPU_RAS_BLOCK__UMC)
595 ret = amdgpu_ras_check_bad_page(adev, data.inject.address);
596 if (ret == -EINVAL) {
597 dev_warn(adev->dev, "RAS WARN: input address 0x%llx is invalid.",
598 data.inject.address);
599 break;
600 } else if (ret == 1) {
601 dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
602 data.inject.address);
603 break;
604 }
605
606 amdgpu_ras_instance_mask_check(adev, &data);
607
		/* data.inject.address is an offset instead of an absolute gpu address */
609 ret = amdgpu_ras_error_inject(adev, &data.inject);
610 break;
611 default:
612 ret = -EINVAL;
613 break;
614 }
615
616 if (ret)
617 return ret;
618
619 return size;
620 }
621
622 static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev);
623
624 /**
625 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
626 *
627 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which experienced ECC errors in vram. This interface provides
629 * a way to reset the EEPROM, e.g., after testing error injection.
630 *
631 * Usage:
632 *
633 * .. code-block:: bash
634 *
635 * echo 1 > ../ras/ras_eeprom_reset
636 *
637 * will reset EEPROM table to 0 entries.
638 *
639 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
641 const char __user *buf,
642 size_t size, loff_t *pos)
643 {
644 struct amdgpu_device *adev =
645 (struct amdgpu_device *)file_inode(f)->i_private;
646 int ret;
647
648 if (amdgpu_uniras_enabled(adev)) {
649 ret = amdgpu_uniras_clear_badpages_info(adev);
650 return ret ? ret : size;
651 }
652
653 ret = amdgpu_ras_eeprom_reset_table(
654 &(amdgpu_ras_get_context(adev)->eeprom_control));
655
656 if (!ret) {
657 /* Something was written to EEPROM.
658 */
659 amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
660 return size;
661 } else {
662 return ret;
663 }
664 }
665
666 static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
667 .owner = THIS_MODULE,
668 .read = NULL,
669 .write = amdgpu_ras_debugfs_ctrl_write,
670 .llseek = default_llseek
671 };
672
673 static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
674 .owner = THIS_MODULE,
675 .read = NULL,
676 .write = amdgpu_ras_debugfs_eeprom_write,
677 .llseek = default_llseek
678 };
679
680 /**
681 * DOC: AMDGPU RAS sysfs Error Count Interface
682 *
683 * It allows the user to read the error count for each IP block on the gpu through
684 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
685 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
687 * (ce) error counts.
688 *
 * The format of one line is as follows:
690 *
691 * [ce|ue]: count
692 *
693 * Example:
694 *
695 * .. code-block:: bash
696 *
697 * ue: 0
698 * ce: 1
699 *
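 * A minimal userspace sketch that parses this output (it assumes card0 and
 * the umc block; adjust the path for other cards or blocks):
 *
 * .. code-block:: c
 *
 *    #include <stdio.h>
 *
 *    int read_umc_err_count(unsigned long *ue, unsigned long *ce)
 *    {
 *            FILE *f = fopen("/sys/class/drm/card0/device/ras/umc_err_count", "r");
 *
 *            if (!f)
 *                    return -1;
 *            if (fscanf(f, "ue: %lu ce: %lu", ue, ce) != 2) {
 *                    fclose(f);
 *                    return -1;
 *            }
 *            fclose(f);
 *            return 0;
 *    }
 *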
700 */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
702 struct device_attribute *attr, char *buf)
703 {
704 struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
705 struct ras_query_if info = {
706 .head = obj->head,
707 };
708
709 if (!amdgpu_ras_get_error_query_ready(obj->adev))
710 return sysfs_emit(buf, "Query currently inaccessible\n");
711
712 if (amdgpu_ras_query_error_status(obj->adev, &info))
713 return -EINVAL;
714
715 if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
716 amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
717 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
718 dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
719 }
720
721 if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
722 return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
723 "ce", info.ce_count, "de", info.de_count);
724 else
725 return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
726 "ce", info.ce_count);
727 }
728
729 /* obj begin */
730
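/*
 * ras_manager objects are reference counted through their "use" field:
 * get_obj() takes a reference, put_obj() drops one, and when the count
 * reaches zero the object is removed from the context list and its error
 * data is released.
 */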
731 #define get_obj(obj) do { (obj)->use++; } while (0)
732 #define alive_obj(obj) ((obj)->use)
733
static inline void put_obj(struct ras_manager *obj)
735 {
736 if (obj && (--obj->use == 0)) {
737 list_del(&obj->node);
738 amdgpu_ras_error_data_fini(&obj->err_data);
739 }
740
741 if (obj && (obj->use < 0))
742 DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", get_ras_block_str(&obj->head));
743 }
744
745 /* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
747 struct ras_common_if *head)
748 {
749 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
750 struct ras_manager *obj;
751
752 if (!adev->ras_enabled || !con)
753 return NULL;
754
755 if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
756 return NULL;
757
758 if (head->block == AMDGPU_RAS_BLOCK__MCA) {
759 if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
760 return NULL;
761
762 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
763 } else
764 obj = &con->objs[head->block];
765
	/* already exists. return obj? */
767 if (alive_obj(obj))
768 return NULL;
769
770 if (amdgpu_ras_error_data_init(&obj->err_data))
771 return NULL;
772
773 obj->head = *head;
774 obj->adev = adev;
775 list_add(&obj->node, &con->head);
776 get_obj(obj);
777
778 return obj;
779 }
780
781 /* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
783 struct ras_common_if *head)
784 {
785 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
786 struct ras_manager *obj;
787 int i;
788
789 if (!adev->ras_enabled || !con)
790 return NULL;
791
792 if (head) {
793 if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
794 return NULL;
795
796 if (head->block == AMDGPU_RAS_BLOCK__MCA) {
797 if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
798 return NULL;
799
800 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
801 } else
802 obj = &con->objs[head->block];
803
804 if (alive_obj(obj))
805 return obj;
806 } else {
807 for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
808 obj = &con->objs[i];
809 if (alive_obj(obj))
810 return obj;
811 }
812 }
813
814 return NULL;
815 }
816 /* obj end */
817
818 /* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
820 struct ras_common_if *head)
821 {
822 return adev->ras_hw_enabled & BIT(head->block);
823 }
824
static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
826 struct ras_common_if *head)
827 {
828 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
829
830 return con->features & BIT(head->block);
831 }
832
833 /*
834 * if obj is not created, then create one.
835 * set feature enable flag.
836 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
838 struct ras_common_if *head, int enable)
839 {
840 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
841 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
842
	/* If the hardware does not support ras, then do not create the obj.
	 * But if the hardware supports ras, we can create the obj.
	 * The ras framework checks con->hw_supported to see if it needs to do
	 * the corresponding initialization.
	 * The IP checks con->support to see if it needs to disable ras.
848 */
849 if (!amdgpu_ras_is_feature_allowed(adev, head))
850 return 0;
851
852 if (enable) {
853 if (!obj) {
854 obj = amdgpu_ras_create_obj(adev, head);
855 if (!obj)
856 return -EINVAL;
857 } else {
858 /* In case we create obj somewhere else */
859 get_obj(obj);
860 }
861 con->features |= BIT(head->block);
862 } else {
863 if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
864 con->features &= ~BIT(head->block);
865 put_obj(obj);
866 }
867 }
868
869 return 0;
870 }
871
872 /* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
874 struct ras_common_if *head, bool enable)
875 {
876 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
877 union ta_ras_cmd_input *info;
878 int ret;
879
880 if (!con)
881 return -EINVAL;
882
	/* For non-gfx ip, do not enable ras feature if it is not allowed.
	 * For gfx ip, regardless of feature support status, force issuing
	 * enable or disable ras feature commands.
	 */
886 if (head->block != AMDGPU_RAS_BLOCK__GFX &&
887 !amdgpu_ras_is_feature_allowed(adev, head))
888 return 0;
889
890 /* Only enable gfx ras feature from host side */
891 if (head->block == AMDGPU_RAS_BLOCK__GFX &&
892 !amdgpu_sriov_vf(adev) &&
893 !amdgpu_ras_intr_triggered()) {
894 info = kzalloc_obj(union ta_ras_cmd_input, GFP_KERNEL);
895 if (!info)
896 return -ENOMEM;
897
898 if (!enable) {
899 info->disable_features = (struct ta_ras_disable_features_input) {
900 .block_id = amdgpu_ras_block_to_ta(head->block),
901 .error_type = amdgpu_ras_error_to_ta(head->type),
902 };
903 } else {
904 info->enable_features = (struct ta_ras_enable_features_input) {
905 .block_id = amdgpu_ras_block_to_ta(head->block),
906 .error_type = amdgpu_ras_error_to_ta(head->type),
907 };
908 }
909
910 ret = psp_ras_enable_features(&adev->psp, info, enable);
911 if (ret) {
912 dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
913 enable ? "enable":"disable",
914 get_ras_block_str(head),
915 amdgpu_ras_is_poison_mode_supported(adev), ret);
916 kfree(info);
917 return ret;
918 }
919
920 kfree(info);
921 }
922
923 /* setup the obj */
924 __amdgpu_ras_feature_enable(adev, head, enable);
925
926 return 0;
927 }
928
929 /* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
931 struct ras_common_if *head, bool enable)
932 {
933 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
934 int ret;
935
936 if (!con)
937 return -EINVAL;
938
939 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
940 if (enable) {
			/* There is no harm in issuing a ras TA cmd regardless of
			 * the current ras state.
			 * If current state == target state, it will do nothing.
			 * But sometimes it requests the driver to reset and repost
			 * with error code -EAGAIN.
946 */
947 ret = amdgpu_ras_feature_enable(adev, head, 1);
			/* With an old ras TA, we might fail to enable ras.
			 * Log it and just set up the object.
			 * TODO: remove this WA in the future.
951 */
952 if (ret == -EINVAL) {
953 ret = __amdgpu_ras_feature_enable(adev, head, 1);
954 if (!ret)
955 dev_info(adev->dev,
956 "RAS INFO: %s setup object\n",
957 get_ras_block_str(head));
958 }
959 } else {
960 /* setup the object then issue a ras TA disable cmd.*/
961 ret = __amdgpu_ras_feature_enable(adev, head, 1);
962 if (ret)
963 return ret;
964
965 /* gfx block ras disable cmd must send to ras-ta */
966 if (head->block == AMDGPU_RAS_BLOCK__GFX)
967 con->features |= BIT(head->block);
968
969 ret = amdgpu_ras_feature_enable(adev, head, 0);
970
971 /* clean gfx block ras features flag */
972 if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
973 con->features &= ~BIT(head->block);
974 }
975 } else
976 ret = amdgpu_ras_feature_enable(adev, head, enable);
977
978 return ret;
979 }
980
static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
982 bool bypass)
983 {
984 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
985 struct ras_manager *obj, *tmp;
986
987 list_for_each_entry_safe(obj, tmp, &con->head, node) {
988 /* bypass psp.
989 * aka just release the obj and corresponding flags
990 */
991 if (bypass) {
992 if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
993 break;
994 } else {
995 if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
996 break;
997 }
998 }
999
1000 return con->features;
1001 }
1002
static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
1004 bool bypass)
1005 {
1006 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1007 int i;
1008 const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;
1009
1010 for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
1011 struct ras_common_if head = {
1012 .block = i,
1013 .type = default_ras_type,
1014 .sub_block_index = 0,
1015 };
1016
1017 if (i == AMDGPU_RAS_BLOCK__MCA)
1018 continue;
1019
1020 if (bypass) {
1021 /*
			 * bypass psp; the vbios enables ras for us,
1023 * so just create the obj
1024 */
1025 if (__amdgpu_ras_feature_enable(adev, &head, 1))
1026 break;
1027 } else {
1028 if (amdgpu_ras_feature_enable(adev, &head, 1))
1029 break;
1030 }
1031 }
1032
1033 for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
1034 struct ras_common_if head = {
1035 .block = AMDGPU_RAS_BLOCK__MCA,
1036 .type = default_ras_type,
1037 .sub_block_index = i,
1038 };
1039
1040 if (bypass) {
1041 /*
			 * bypass psp; the vbios enables ras for us,
1043 * so just create the obj
1044 */
1045 if (__amdgpu_ras_feature_enable(adev, &head, 1))
1046 break;
1047 } else {
1048 if (amdgpu_ras_feature_enable(adev, &head, 1))
1049 break;
1050 }
1051 }
1052
1053 return con->features;
1054 }
1055 /* feature ctl end */
1056
static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
1058 enum amdgpu_ras_block block)
1059 {
1060 if (!block_obj)
1061 return -EINVAL;
1062
1063 if (block_obj->ras_comm.block == block)
1064 return 0;
1065
1066 return -EINVAL;
1067 }
1068
static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
1070 enum amdgpu_ras_block block, uint32_t sub_block_index)
1071 {
1072 struct amdgpu_ras_block_list *node, *tmp;
1073 struct amdgpu_ras_block_object *obj;
1074
1075 if (block >= AMDGPU_RAS_BLOCK__LAST)
1076 return NULL;
1077
1078 list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
1079 if (!node->ras_obj) {
1080 dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
1081 continue;
1082 }
1083
1084 obj = node->ras_obj;
1085 if (obj->ras_block_match) {
1086 if (obj->ras_block_match(obj, block, sub_block_index) == 0)
1087 return obj;
1088 } else {
1089 if (amdgpu_ras_block_match_default(obj, block) == 0)
1090 return obj;
1091 }
1092 }
1093
1094 return NULL;
1095 }
1096
static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
1098 {
1099 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
1100 int ret = 0;
1101
1102 /*
	 * choose the right query method according to
	 * whether the smu supports querying error information
1105 */
1106 ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
1107 if (ret == -EOPNOTSUPP) {
1108 if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1109 adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
1110 adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
1111
1112 /* umc query_ras_error_address is also responsible for clearing
1113 * error status
1114 */
1115 if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1116 adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
1117 adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
1118 } else if (!ret) {
1119 if (adev->umc.ras &&
1120 adev->umc.ras->ecc_info_query_ras_error_count)
1121 adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);
1122
1123 if (adev->umc.ras &&
1124 adev->umc.ras->ecc_info_query_ras_error_address)
1125 adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
1126 }
1127 }
1128
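/*
 * Log per-socket/per-die error totals for one block: first the newly
 * detected errors from this query, then the accumulated totals tracked by
 * the ras manager.  Correctable error logging can be suppressed via
 * debug_disable_ce_logs.
 */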
static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
1130 struct ras_manager *ras_mgr,
1131 struct ras_err_data *err_data,
1132 struct ras_query_context *qctx,
1133 const char *blk_name,
1134 bool is_ue,
1135 bool is_de)
1136 {
1137 struct amdgpu_smuio_mcm_config_info *mcm_info;
1138 struct ras_err_node *err_node;
1139 struct ras_err_info *err_info;
1140 u64 event_id = qctx->evid.event_id;
1141
1142 if (is_ue) {
1143 for_each_ras_error(err_node, err_data) {
1144 err_info = &err_node->err_info;
1145 mcm_info = &err_info->mcm_info;
1146 if (err_info->ue_count) {
1147 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1148 "%lld new uncorrectable hardware errors detected in %s block\n",
1149 mcm_info->socket_id,
1150 mcm_info->die_id,
1151 err_info->ue_count,
1152 blk_name);
1153 }
1154 }
1155
1156 for_each_ras_error(err_node, &ras_mgr->err_data) {
1157 err_info = &err_node->err_info;
1158 mcm_info = &err_info->mcm_info;
1159 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1160 "%lld uncorrectable hardware errors detected in total in %s block\n",
1161 mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
1162 }
1163
1164 } else {
1165 if (is_de) {
1166 for_each_ras_error(err_node, err_data) {
1167 err_info = &err_node->err_info;
1168 mcm_info = &err_info->mcm_info;
1169 if (err_info->de_count) {
1170 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1171 "%lld new deferred hardware errors detected in %s block\n",
1172 mcm_info->socket_id,
1173 mcm_info->die_id,
1174 err_info->de_count,
1175 blk_name);
1176 }
1177 }
1178
1179 for_each_ras_error(err_node, &ras_mgr->err_data) {
1180 err_info = &err_node->err_info;
1181 mcm_info = &err_info->mcm_info;
1182 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1183 "%lld deferred hardware errors detected in total in %s block\n",
1184 mcm_info->socket_id, mcm_info->die_id,
1185 err_info->de_count, blk_name);
1186 }
1187 } else {
1188 if (adev->debug_disable_ce_logs)
1189 return;
1190
1191 for_each_ras_error(err_node, err_data) {
1192 err_info = &err_node->err_info;
1193 mcm_info = &err_info->mcm_info;
1194 if (err_info->ce_count) {
1195 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1196 "%lld new correctable hardware errors detected in %s block\n",
1197 mcm_info->socket_id,
1198 mcm_info->die_id,
1199 err_info->ce_count,
1200 blk_name);
1201 }
1202 }
1203
1204 for_each_ras_error(err_node, &ras_mgr->err_data) {
1205 err_info = &err_node->err_info;
1206 mcm_info = &err_info->mcm_info;
1207 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1208 "%lld correctable hardware errors detected in total in %s block\n",
1209 mcm_info->socket_id, mcm_info->die_id,
1210 err_info->ce_count, blk_name);
1211 }
1212 }
1213 }
1214 }
1215
static inline bool err_data_has_source_info(struct ras_err_data *data)
1217 {
1218 return !list_empty(&data->err_node_list);
1219 }
1220
static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
1222 struct ras_query_if *query_if,
1223 struct ras_err_data *err_data,
1224 struct ras_query_context *qctx)
1225 {
1226 struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
1227 const char *blk_name = get_ras_block_str(&query_if->head);
1228 u64 event_id = qctx->evid.event_id;
1229
1230 if (err_data->ce_count) {
1231 if (err_data_has_source_info(err_data)) {
1232 amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1233 blk_name, false, false);
1234 } else if (!adev->aid_mask &&
1235 adev->smuio.funcs &&
1236 adev->smuio.funcs->get_socket_id &&
1237 adev->smuio.funcs->get_die_id) {
1238 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1239 "%ld correctable hardware errors "
1240 "detected in %s block\n",
1241 adev->smuio.funcs->get_socket_id(adev),
1242 adev->smuio.funcs->get_die_id(adev),
1243 ras_mgr->err_data.ce_count,
1244 blk_name);
1245 } else {
1246 RAS_EVENT_LOG(adev, event_id, "%ld correctable hardware errors "
1247 "detected in %s block\n",
1248 ras_mgr->err_data.ce_count,
1249 blk_name);
1250 }
1251 }
1252
1253 if (err_data->ue_count) {
1254 if (err_data_has_source_info(err_data)) {
1255 amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1256 blk_name, true, false);
1257 } else if (!adev->aid_mask &&
1258 adev->smuio.funcs &&
1259 adev->smuio.funcs->get_socket_id &&
1260 adev->smuio.funcs->get_die_id) {
1261 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1262 "%ld uncorrectable hardware errors "
1263 "detected in %s block\n",
1264 adev->smuio.funcs->get_socket_id(adev),
1265 adev->smuio.funcs->get_die_id(adev),
1266 ras_mgr->err_data.ue_count,
1267 blk_name);
1268 } else {
1269 RAS_EVENT_LOG(adev, event_id, "%ld uncorrectable hardware errors "
1270 "detected in %s block\n",
1271 ras_mgr->err_data.ue_count,
1272 blk_name);
1273 }
1274 }
1275
1276 if (err_data->de_count) {
1277 if (err_data_has_source_info(err_data)) {
1278 amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1279 blk_name, false, true);
1280 } else if (!adev->aid_mask &&
1281 adev->smuio.funcs &&
1282 adev->smuio.funcs->get_socket_id &&
1283 adev->smuio.funcs->get_die_id) {
1284 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1285 "%ld deferred hardware errors "
1286 "detected in %s block\n",
1287 adev->smuio.funcs->get_socket_id(adev),
1288 adev->smuio.funcs->get_die_id(adev),
1289 ras_mgr->err_data.de_count,
1290 blk_name);
1291 } else {
1292 RAS_EVENT_LOG(adev, event_id, "%ld deferred hardware errors "
1293 "detected in %s block\n",
1294 ras_mgr->err_data.de_count,
1295 blk_name);
1296 }
1297 }
1298 }
1299
static void amdgpu_ras_virt_error_generate_report(struct amdgpu_device *adev,
1301 struct ras_query_if *query_if,
1302 struct ras_err_data *err_data,
1303 struct ras_query_context *qctx)
1304 {
1305 unsigned long new_ue, new_ce, new_de;
1306 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &query_if->head);
1307 const char *blk_name = get_ras_block_str(&query_if->head);
1308 u64 event_id = qctx->evid.event_id;
1309
1310 new_ce = err_data->ce_count - obj->err_data.ce_count;
1311 new_ue = err_data->ue_count - obj->err_data.ue_count;
1312 new_de = err_data->de_count - obj->err_data.de_count;
1313
1314 if (new_ce) {
1315 RAS_EVENT_LOG(adev, event_id, "%lu correctable hardware errors "
1316 "detected in %s block\n",
1317 new_ce,
1318 blk_name);
1319 }
1320
1321 if (new_ue) {
1322 RAS_EVENT_LOG(adev, event_id, "%lu uncorrectable hardware errors "
1323 "detected in %s block\n",
1324 new_ue,
1325 blk_name);
1326 }
1327
1328 if (new_de) {
1329 RAS_EVENT_LOG(adev, event_id, "%lu deferred hardware errors "
1330 "detected in %s block\n",
1331 new_de,
1332 blk_name);
1333 }
1334 }
1335
static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
1337 {
1338 struct ras_err_node *err_node;
1339 struct ras_err_info *err_info;
1340
1341 if (err_data_has_source_info(err_data)) {
1342 for_each_ras_error(err_node, err_data) {
1343 err_info = &err_node->err_info;
1344 amdgpu_ras_error_statistic_de_count(&obj->err_data,
1345 &err_info->mcm_info, err_info->de_count);
1346 amdgpu_ras_error_statistic_ce_count(&obj->err_data,
1347 &err_info->mcm_info, err_info->ce_count);
1348 amdgpu_ras_error_statistic_ue_count(&obj->err_data,
1349 &err_info->mcm_info, err_info->ue_count);
1350 }
1351 } else {
		/* for the legacy asic path which doesn't have error source info */
1353 obj->err_data.ue_count += err_data->ue_count;
1354 obj->err_data.ce_count += err_data->ce_count;
1355 obj->err_data.de_count += err_data->de_count;
1356 }
1357 }
1358
static void amdgpu_ras_mgr_virt_error_data_statistics_update(struct ras_manager *obj,
1360 struct ras_err_data *err_data)
1361 {
1362 /* Host reports absolute counts */
1363 obj->err_data.ue_count = err_data->ue_count;
1364 obj->err_data.ce_count = err_data->ce_count;
1365 obj->err_data.de_count = err_data->de_count;
1366 }
1367
static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
1369 {
1370 struct ras_common_if head;
1371
1372 memset(&head, 0, sizeof(head));
1373 head.block = blk;
1374
1375 return amdgpu_ras_find_obj(adev, &head);
1376 }
1377
int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
1379 const struct aca_info *aca_info, void *data)
1380 {
1381 struct ras_manager *obj;
1382
1383 /* in resume phase, no need to create aca fs node */
1384 if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
1385 return 0;
1386
1387 obj = get_ras_manager(adev, blk);
1388 if (!obj)
1389 return -EINVAL;
1390
1391 return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
1392 }
1393
int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
1395 {
1396 struct ras_manager *obj;
1397
1398 obj = get_ras_manager(adev, blk);
1399 if (!obj)
1400 return -EINVAL;
1401
1402 amdgpu_aca_remove_handle(&obj->aca_handle);
1403
1404 return 0;
1405 }
1406
static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
1408 enum aca_error_type type, struct ras_err_data *err_data,
1409 struct ras_query_context *qctx)
1410 {
1411 struct ras_manager *obj;
1412
1413 obj = get_ras_manager(adev, blk);
1414 if (!obj)
1415 return -EINVAL;
1416
1417 return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data, qctx);
1418 }
1419
ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
1421 struct aca_handle *handle, char *buf, void *data)
1422 {
1423 struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
1424 struct ras_query_if info = {
1425 .head = obj->head,
1426 };
1427
1428 if (!amdgpu_ras_get_error_query_ready(obj->adev))
1429 return sysfs_emit(buf, "Query currently inaccessible\n");
1430
1431 if (amdgpu_ras_query_error_status(obj->adev, &info))
1432 return -EINVAL;
1433
1434 return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
1435 "ce", info.ce_count, "de", info.de_count);
1436 }
1437
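/*
 * Dispatch an error status query according to the query mode: forward it to
 * the host for SR-IOV virtual counters, read the hardware registers (or the
 * UMC ECC info) directly, or collect the counts from the ACA/MCA banks.
 */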
static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
1439 struct ras_query_if *info,
1440 struct ras_err_data *err_data,
1441 struct ras_query_context *qctx,
1442 unsigned int error_query_mode)
1443 {
1444 enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
1445 struct amdgpu_ras_block_object *block_obj = NULL;
1446 int ret;
1447
1448 if (blk == AMDGPU_RAS_BLOCK_COUNT)
1449 return -EINVAL;
1450
1451 if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
1452 return -EINVAL;
1453
1454 if (error_query_mode == AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
1455 return amdgpu_virt_req_ras_err_count(adev, blk, err_data);
1456 } else if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
1457 if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
1458 amdgpu_ras_get_ecc_info(adev, err_data);
1459 } else {
1460 block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
1461 if (!block_obj || !block_obj->hw_ops) {
1462 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1463 get_ras_block_str(&info->head));
1464 return -EINVAL;
1465 }
1466
1467 if (block_obj->hw_ops->query_ras_error_count)
1468 block_obj->hw_ops->query_ras_error_count(adev, err_data);
1469
1470 if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
1471 (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
1472 (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
1473 if (block_obj->hw_ops->query_ras_error_status)
1474 block_obj->hw_ops->query_ras_error_status(adev);
1475 }
1476 }
1477 } else {
1478 if (amdgpu_aca_is_enabled(adev)) {
1479 ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data, qctx);
1480 if (ret)
1481 return ret;
1482
1483 ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data, qctx);
1484 if (ret)
1485 return ret;
1486
1487 ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data, qctx);
1488 if (ret)
1489 return ret;
1490 } else {
1491 /* FIXME: add code to check return value later */
1492 amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx);
1493 amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data, qctx);
1494 }
1495 }
1496
1497 return 0;
1498 }
1499
1500 /* query/inject/cure begin */
static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev,
1502 struct ras_query_if *info,
1503 enum ras_event_type type)
1504 {
1505 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1506 struct ras_err_data err_data;
1507 struct ras_query_context qctx;
1508 unsigned int error_query_mode;
1509 int ret;
1510
1511 if (!obj)
1512 return -EINVAL;
1513
1514 ret = amdgpu_ras_error_data_init(&err_data);
1515 if (ret)
1516 return ret;
1517
1518 if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
1519 return -EINVAL;
1520
1521 memset(&qctx, 0, sizeof(qctx));
1522 qctx.evid.type = type;
1523 qctx.evid.event_id = amdgpu_ras_acquire_event_id(adev, type);
1524
1525 if (!down_read_trylock(&adev->reset_domain->sem)) {
1526 ret = -EIO;
1527 goto out_fini_err_data;
1528 }
1529
1530 ret = amdgpu_ras_query_error_status_helper(adev, info,
1531 &err_data,
1532 &qctx,
1533 error_query_mode);
1534 up_read(&adev->reset_domain->sem);
1535 if (ret)
1536 goto out_fini_err_data;
1537
1538 if (error_query_mode != AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
1539 amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
1540 amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
1541 } else {
		/* The host provides absolute error counts. First generate the report
		 * using the previous VF internal count against the new host count.
		 * Then update the VF internal count.
1545 */
1546 amdgpu_ras_virt_error_generate_report(adev, info, &err_data, &qctx);
1547 amdgpu_ras_mgr_virt_error_data_statistics_update(obj, &err_data);
1548 }
1549
1550 info->ue_count = obj->err_data.ue_count;
1551 info->ce_count = obj->err_data.ce_count;
1552 info->de_count = obj->err_data.de_count;
1553
1554 out_fini_err_data:
1555 amdgpu_ras_error_data_fini(&err_data);
1556
1557 return ret;
1558 }
1559
static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev)
1561 {
1562 struct ras_cmd_dev_handle req = {0};
1563 int ret;
1564
1565 ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__CLEAR_BAD_PAGE_INFO,
1566 &req, sizeof(req), NULL, 0);
1567 if (ret) {
1568 dev_err(adev->dev, "Failed to clear bad pages info, ret: %d\n", ret);
1569 return ret;
1570 }
1571
1572 return 0;
1573 }
1574
static int amdgpu_uniras_query_block_ecc(struct amdgpu_device *adev,
1576 struct ras_query_if *info)
1577 {
1578 struct ras_cmd_block_ecc_info_req req = {0};
1579 struct ras_cmd_block_ecc_info_rsp rsp = {0};
1580 int ret;
1581
1582 if (!info)
1583 return -EINVAL;
1584
1585 req.block_id = info->head.block;
1586 req.subblock_id = info->head.sub_block_index;
1587
1588 ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BLOCK_ECC_STATUS,
1589 &req, sizeof(req), &rsp, sizeof(rsp));
1590 if (!ret) {
1591 info->ce_count = rsp.ce_count;
1592 info->ue_count = rsp.ue_count;
1593 info->de_count = rsp.de_count;
1594 }
1595
1596 return ret;
1597 }
1598
int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
1600 {
1601 if (amdgpu_uniras_enabled(adev))
1602 return amdgpu_uniras_query_block_ecc(adev, info);
1603 else
1604 return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
1605 }
1606
int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
1608 enum amdgpu_ras_block block)
1609 {
1610 struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1611 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
1612 const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
1613
1614 if (!block_obj || !block_obj->hw_ops) {
1615 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1616 ras_block_str(block));
1617 return -EOPNOTSUPP;
1618 }
1619
1620 if (!amdgpu_ras_is_supported(adev, block) ||
1621 !amdgpu_ras_get_aca_debug_mode(adev))
1622 return -EOPNOTSUPP;
1623
1624 if (amdgpu_sriov_vf(adev))
1625 return -EOPNOTSUPP;
1626
1627 /* skip ras error reset in gpu reset */
1628 if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) &&
1629 ((smu_funcs && smu_funcs->set_debug_mode) ||
1630 (mca_funcs && mca_funcs->mca_set_debug_mode)))
1631 return -EOPNOTSUPP;
1632
1633 if (block_obj->hw_ops->reset_ras_error_count)
1634 block_obj->hw_ops->reset_ras_error_count(adev);
1635
1636 return 0;
1637 }
1638
int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
1640 enum amdgpu_ras_block block)
1641 {
1642 struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1643
1644 if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
1645 return 0;
1646
1647 if ((block == AMDGPU_RAS_BLOCK__GFX) ||
1648 (block == AMDGPU_RAS_BLOCK__MMHUB)) {
1649 if (block_obj->hw_ops->reset_ras_error_status)
1650 block_obj->hw_ops->reset_ras_error_status(adev);
1651 }
1652
1653 return 0;
1654 }
1655
static int amdgpu_uniras_error_inject(struct amdgpu_device *adev,
1657 struct ras_inject_if *info)
1658 {
1659 struct ras_cmd_inject_error_req inject_req;
1660 struct ras_cmd_inject_error_rsp rsp;
1661
1662 if (!info)
1663 return -EINVAL;
1664
1665 memset(&inject_req, 0, sizeof(inject_req));
1666 inject_req.block_id = info->head.block;
1667 inject_req.subblock_id = info->head.sub_block_index;
1668 inject_req.address = info->address;
1669 inject_req.error_type = info->head.type;
1670 inject_req.instance_mask = info->instance_mask;
1671 inject_req.method = info->value;
1672
1673 return amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__INJECT_ERROR,
1674 &inject_req, sizeof(inject_req), &rsp, sizeof(rsp));
1675 }
1676
1677 /* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
1679 struct ras_inject_if *info)
1680 {
1681 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1682 struct ta_ras_trigger_error_input block_info = {
1683 .block_id = amdgpu_ras_block_to_ta(info->head.block),
1684 .inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
1685 .sub_block_index = info->head.sub_block_index,
1686 .address = info->address,
1687 .value = info->value,
1688 };
1689 int ret = -EINVAL;
1690 struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
1691 info->head.block,
1692 info->head.sub_block_index);
1693
1694 if (amdgpu_uniras_enabled(adev))
1695 return amdgpu_uniras_error_inject(adev, info);
1696
1697 /* inject on guest isn't allowed, return success directly */
1698 if (amdgpu_sriov_vf(adev))
1699 return 0;
1700
1701 if (!obj)
1702 return -EINVAL;
1703
1704 if (!block_obj || !block_obj->hw_ops) {
1705 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1706 get_ras_block_str(&info->head));
1707 return -EINVAL;
1708 }
1709
1710 /* Calculate XGMI relative offset */
1711 if (adev->gmc.xgmi.num_physical_nodes > 1 &&
1712 info->head.block != AMDGPU_RAS_BLOCK__GFX) {
1713 block_info.address =
1714 amdgpu_xgmi_get_relative_phy_addr(adev,
1715 block_info.address);
1716 }
1717
1718 if (block_obj->hw_ops->ras_error_inject) {
1719 if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
1720 ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
1721 else /* Special ras_error_inject is defined (e.g: xgmi) */
1722 ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
1723 info->instance_mask);
1724 } else {
1725 /* default path */
1726 ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
1727 }
1728
1729 if (ret)
1730 dev_err(adev->dev, "ras inject %s failed %d\n",
1731 get_ras_block_str(&info->head), ret);
1732
1733 return ret;
1734 }
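/*
 * Usage sketch (illustrative only, not part of the driver flow): a caller
 * such as the ras_ctrl debugfs handler is expected to fill a struct
 * ras_inject_if before calling amdgpu_ras_error_inject(). The field names
 * match the uses above; the values below are arbitrary examples.
 *
 *	struct ras_inject_if info = {
 *		.head = {
 *			.block = AMDGPU_RAS_BLOCK__UMC,
 *			.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
 *			.sub_block_index = 0,
 *		},
 *		.address = 0x1000,
 *		.value = 0,
 *		.instance_mask = 0x1,
 *	};
 *
 *	ret = amdgpu_ras_error_inject(adev, &info);
 */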
1735
1736 /**
1737 * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
1738 * @adev: pointer to AMD GPU device
1739 * @ce_count: pointer to an integer to be set to the count of correctable errors.
1740 * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
1741 * @query_info: pointer to ras_query_if
1742 *
1743 * Return 0 on a successful query, or when @query_info is NULL and nothing is
1744 * done; otherwise return an error on failure
1745 */
1746 static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
1747 unsigned long *ce_count,
1748 unsigned long *ue_count,
1749 struct ras_query_if *query_info)
1750 {
1751 int ret;
1752
1753 if (!query_info)
1754 /* do nothing if query_info is not specified */
1755 return 0;
1756
1757 ret = amdgpu_ras_query_error_status(adev, query_info);
1758 if (ret)
1759 return ret;
1760
1761 *ce_count += query_info->ce_count;
1762 *ue_count += query_info->ue_count;
1763
1764 /* some hardware/IP supports read to clear,
1765 * so there is no need to explicitly reset the error status after the query call */
1766 if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
1767 amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
1768 if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
1769 dev_warn(adev->dev,
1770 "Failed to reset error counter and error status\n");
1771 }
1772
1773 return 0;
1774 }
1775
1776 /**
1777 * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
1778 * @adev: pointer to AMD GPU device
1779 * @ce_count: pointer to an integer to be set to the count of correctable errors.
1780 * @ue_count: pointer to an integer to be set to the count of uncorrectable
1781 * errors.
1782 * @query_info: pointer to ras_query_if if the query request is only for a
1783 * specific ip block; if it is NULL, then the query request is for
1784 * all the ip blocks that support querying ras error counters/status
1785 *
1786 * If @ce_count or @ue_count is set, count and return the corresponding
1787 * error counts through those integer pointers. Return 0 if the device
1788 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
1789 */
1790 int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
1791 unsigned long *ce_count,
1792 unsigned long *ue_count,
1793 struct ras_query_if *query_info)
1794 {
1795 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1796 struct ras_manager *obj;
1797 unsigned long ce, ue;
1798 int ret = 0;
1799
1800 if (!adev->ras_enabled || !con)
1801 return -EOPNOTSUPP;
1802
1803 /* Don't count since no reporting.
1804 */
1805 if (!ce_count && !ue_count)
1806 return 0;
1807
1808 ce = 0;
1809 ue = 0;
1810 if (!query_info) {
1811 /* query all the ip blocks that support ras query interface */
1812 list_for_each_entry(obj, &con->head, node) {
1813 struct ras_query_if info = {
1814 .head = obj->head,
1815 };
1816
1817 ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
1818 }
1819 } else {
1820 /* query specific ip block */
1821 ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
1822 }
1823
1824 if (ret)
1825 return ret;
1826
1827 if (ce_count)
1828 *ce_count = ce;
1829
1830 if (ue_count)
1831 *ue_count = ue;
1832
1833 return 0;
1834 }
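/*
 * Minimal usage sketch for the query above, assuming a caller that only
 * wants the totals across all RAS-capable IP blocks (passing a NULL
 * query_info queries every registered block):
 *
 *	unsigned long ce = 0, ue = 0;
 *
 *	if (!amdgpu_ras_query_error_count(adev, &ce, &ue, NULL))
 *		dev_info(adev->dev, "ce: %lu, ue: %lu\n", ce, ue);
 */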
1835 /* query/inject/cure end */
1836
1837
1838 /* sysfs begin */
1839
1840 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1841 struct ras_badpage *bps, uint32_t count, uint32_t start);
1842 static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev,
1843 struct ras_badpage *bps, uint32_t count, uint32_t start);
1844
1845 static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
1846 {
1847 switch (flags) {
1848 case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
1849 return "R";
1850 case AMDGPU_RAS_RETIRE_PAGE_PENDING:
1851 return "P";
1852 case AMDGPU_RAS_RETIRE_PAGE_FAULT:
1853 default:
1854 return "F";
1855 }
1856 }
1857
1858 /**
1859 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
1860 *
1861 * It allows the user to read the bad pages of VRAM on the gpu through
1862 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
1863 *
1864 * It outputs multiple lines, and each line stands for one gpu page.
1865 *
1866 * The format of one line is below:
1867 * gpu pfn : gpu page size : flags
1868 *
1869 * gpu pfn and gpu page size are printed in hex format.
1870 * flags is one of the characters below:
1871 *
1872 * R: reserved, this gpu page is reserved and cannot be used.
1873 *
1874 * P: pending for reserve, this gpu page is marked as bad and will be reserved
1875 * in the next window of page_reserve.
1876 *
1877 * F: unable to reserve, this gpu page cannot be reserved for some reason.
1878 *
1879 * Examples:
1880 *
1881 * .. code-block:: bash
1882 *
1883 * 0x00000001 : 0x00001000 : R
1884 * 0x00000002 : 0x00001000 : P
1885 *
1886 */
1887
1888 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
1889 struct kobject *kobj, const struct bin_attribute *attr,
1890 char *buf, loff_t ppos, size_t count)
1891 {
1892 struct amdgpu_ras *con =
1893 container_of(attr, struct amdgpu_ras, badpages_attr);
1894 struct amdgpu_device *adev = con->adev;
1895 const unsigned int element_size =
1896 sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
1897 unsigned int start = div64_ul(ppos + element_size - 1, element_size);
1898 unsigned int end = div64_ul(ppos + count - 1, element_size);
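/*
 * Each retired page is emitted as one fixed-size text element, so the
 * byte-based (ppos, count) window is converted into a range [start, end)
 * of whole elements; only complete lines are returned.
 */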
1899 ssize_t s = 0;
1900 struct ras_badpage *bps = NULL;
1901 int bps_count = 0, i, status;
1902 uint64_t address;
1903
1904 memset(buf, 0, count);
1905
1906 bps_count = end - start;
1907 bps = kmalloc_objs(*bps, bps_count, GFP_KERNEL);
1908 if (!bps)
1909 return 0;
1910
1911 memset(bps, 0, sizeof(*bps) * bps_count);
1912
1913 if (amdgpu_uniras_enabled(adev))
1914 bps_count = amdgpu_uniras_badpages_read(adev, bps, bps_count, start);
1915 else
1916 bps_count = amdgpu_ras_badpages_read(adev, bps, bps_count, start);
1917
1918 if (bps_count <= 0) {
1919 kfree(bps);
1920 return 0;
1921 }
1922
1923 for (i = 0; i < bps_count; i++) {
1924 address = ((uint64_t)bps[i].bp) << AMDGPU_GPU_PAGE_SHIFT;
1925
1926 bps[i].size = AMDGPU_GPU_PAGE_SIZE;
1927
1928 status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
1929 address);
1930 if (status == -EBUSY)
1931 bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
1932 else if (status == -ENOENT)
1933 bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
1934 else
1935 bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED;
1936
1937 if ((bps[i].flags != AMDGPU_RAS_RETIRE_PAGE_RESERVED) &&
1938 amdgpu_ras_check_critical_address(adev, address))
1939 bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED;
1940
1941 s += scnprintf(&buf[s], element_size + 1,
1942 "0x%08x : 0x%08x : %1s\n",
1943 bps[i].bp,
1944 bps[i].size,
1945 amdgpu_ras_badpage_flags_str(bps[i].flags));
1946 }
1947
1948 kfree(bps);
1949
1950 return s;
1951 }
1952
1953 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
1954 struct device_attribute *attr, char *buf)
1955 {
1956 struct amdgpu_ras *con =
1957 container_of(attr, struct amdgpu_ras, features_attr);
1958
1959 return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
1960 }
1961
1962 static bool amdgpu_ras_get_version_info(struct amdgpu_device *adev, u32 *major,
1963 u32 *minor, u32 *rev)
1964 {
1965 int i;
1966
1967 if (!adev || !major || !minor || !rev || !amdgpu_uniras_enabled(adev))
1968 return false;
1969
1970 for (i = 0; i < adev->num_ip_blocks; i++) {
1971 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_RAS) {
1972 *major = adev->ip_blocks[i].version->major;
1973 *minor = adev->ip_blocks[i].version->minor;
1974 *rev = adev->ip_blocks[i].version->rev;
1975 return true;
1976 }
1977 }
1978
1979 return false;
1980 }
1981
1982 static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
1983 struct device_attribute *attr, char *buf)
1984 {
1985 struct amdgpu_ras *con =
1986 container_of(attr, struct amdgpu_ras, version_attr);
1987 u32 major, minor, rev;
1988 ssize_t size = 0;
1989
1990 size += sysfs_emit_at(buf, size, "table version: 0x%x\n",
1991 con->eeprom_control.tbl_hdr.version);
1992
1993 if (amdgpu_ras_get_version_info(con->adev, &major, &minor, &rev))
1994 size += sysfs_emit_at(buf, size, "ras version: %u.%u.%u\n",
1995 major, minor, rev);
1996
1997 return size;
1998 }
1999
2000 static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
2001 struct device_attribute *attr, char *buf)
2002 {
2003 struct amdgpu_ras *con =
2004 container_of(attr, struct amdgpu_ras, schema_attr);
2005 return sysfs_emit(buf, "schema: 0x%x\n", con->schema);
2006 }
2007
2008 static struct {
2009 enum ras_event_type type;
2010 const char *name;
2011 } dump_event[] = {
2012 {RAS_EVENT_TYPE_FATAL, "Fatal Error"},
2013 {RAS_EVENT_TYPE_POISON_CREATION, "Poison Creation"},
2014 {RAS_EVENT_TYPE_POISON_CONSUMPTION, "Poison Consumption"},
2015 };
2016
2017 static ssize_t amdgpu_ras_sysfs_event_state_show(struct device *dev,
2018 struct device_attribute *attr, char *buf)
2019 {
2020 struct amdgpu_ras *con =
2021 container_of(attr, struct amdgpu_ras, event_state_attr);
2022 struct ras_event_manager *event_mgr = con->event_mgr;
2023 struct ras_event_state *event_state;
2024 int i, size = 0;
2025
2026 if (!event_mgr)
2027 return -EINVAL;
2028
2029 size += sysfs_emit_at(buf, size, "current seqno: %llu\n", atomic64_read(&event_mgr->seqno));
2030 for (i = 0; i < ARRAY_SIZE(dump_event); i++) {
2031 event_state = &event_mgr->event_state[dump_event[i].type];
2032 size += sysfs_emit_at(buf, size, "%s: count:%llu, last_seqno:%llu\n",
2033 dump_event[i].name,
2034 atomic64_read(&event_state->count),
2035 event_state->last_seqno);
2036 }
2037
2038 return (ssize_t)size;
2039 }
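/*
 * The resulting sysfs content looks like the following (values are
 * illustrative only):
 *
 *	current seqno: 12
 *	Fatal Error: count:0, last_seqno:0
 *	Poison Creation: count:2, last_seqno:11
 *	Poison Consumption: count:1, last_seqno:12
 */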
2040
2041 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
2042 {
2043 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2044
2045 if (adev->dev->kobj.sd)
2046 sysfs_remove_file_from_group(&adev->dev->kobj,
2047 &con->badpages_attr.attr,
2048 RAS_FS_NAME);
2049 }
2050
2051 static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev)
2052 {
2053 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2054 struct attribute *attrs[] = {
2055 &con->features_attr.attr,
2056 &con->version_attr.attr,
2057 &con->schema_attr.attr,
2058 &con->event_state_attr.attr,
2059 NULL
2060 };
2061 struct attribute_group group = {
2062 .name = RAS_FS_NAME,
2063 .attrs = attrs,
2064 };
2065
2066 if (adev->dev->kobj.sd)
2067 sysfs_remove_group(&adev->dev->kobj, &group);
2068
2069 return 0;
2070 }
2071
2072 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
2073 struct ras_common_if *head)
2074 {
2075 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2076
2077 if (amdgpu_aca_is_enabled(adev))
2078 return 0;
2079
2080 if (!obj || obj->attr_inuse)
2081 return -EINVAL;
2082
2083 if (amdgpu_sriov_vf(adev) && !amdgpu_virt_ras_telemetry_block_en(adev, head->block))
2084 return 0;
2085
2086 get_obj(obj);
2087
2088 snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
2089 "%s_err_count", head->name);
2090
2091 obj->sysfs_attr = (struct device_attribute){
2092 .attr = {
2093 .name = obj->fs_data.sysfs_name,
2094 .mode = S_IRUGO,
2095 },
2096 .show = amdgpu_ras_sysfs_read,
2097 };
2098 sysfs_attr_init(&obj->sysfs_attr.attr);
2099
2100 if (sysfs_add_file_to_group(&adev->dev->kobj,
2101 &obj->sysfs_attr.attr,
2102 RAS_FS_NAME)) {
2103 put_obj(obj);
2104 return -EINVAL;
2105 }
2106
2107 obj->attr_inuse = 1;
2108
2109 return 0;
2110 }
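/*
 * Illustrative sketch of how an IP block is expected to expose and tear
 * down its per-block error counter node (the head below is an example;
 * in practice it comes from the block's ras_comm):
 *
 *	struct ras_common_if head = {
 *		.block = AMDGPU_RAS_BLOCK__GFX,
 *		.name = "gfx",
 *	};
 *
 *	r = amdgpu_ras_sysfs_create(adev, &head);
 *	...
 *	r = amdgpu_ras_sysfs_remove(adev, &head);
 */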
2111
2112 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
2113 struct ras_common_if *head)
2114 {
2115 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2116
2117 if (amdgpu_aca_is_enabled(adev))
2118 return 0;
2119
2120 if (!obj || !obj->attr_inuse)
2121 return -EINVAL;
2122
2123 if (adev->dev->kobj.sd)
2124 sysfs_remove_file_from_group(&adev->dev->kobj,
2125 &obj->sysfs_attr.attr,
2126 RAS_FS_NAME);
2127 obj->attr_inuse = 0;
2128 put_obj(obj);
2129
2130 return 0;
2131 }
2132
2133 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
2134 {
2135 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2136 struct ras_manager *obj, *tmp;
2137
2138 list_for_each_entry_safe(obj, tmp, &con->head, node) {
2139 amdgpu_ras_sysfs_remove(adev, &obj->head);
2140 }
2141
2142 if (amdgpu_bad_page_threshold != 0)
2143 amdgpu_ras_sysfs_remove_bad_page_node(adev);
2144
2145 amdgpu_ras_sysfs_remove_dev_attr_node(adev);
2146
2147 return 0;
2148 }
2149 /* sysfs end */
2150
2151 /**
2152 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
2153 *
2154 * Normally when there is an uncorrectable error, the driver will reset
2155 * the GPU to recover. However, for unrecoverable errors the driver also
2156 * provides an interface to reboot the system automatically when such an
2157 * error occurs.
2158 *
2159 * The following file in debugfs provides that interface:
2160 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
2161 *
2162 * Usage:
2163 *
2164 * .. code-block:: bash
2165 *
2166 * echo true > .../ras/auto_reboot
2167 *
2168 */
2169 /* debugfs begin */
2170 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
2171 {
2172 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2173 struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
2174 struct drm_minor *minor = adev_to_drm(adev)->primary;
2175 struct dentry *dir;
2176
2177 dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
2178 debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
2179 &amdgpu_ras_debugfs_ctrl_ops);
2180 debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
2181 &amdgpu_ras_debugfs_eeprom_ops);
2182 debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
2183 &con->bad_page_cnt_threshold);
2184 debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
2185 debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
2186 debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
2187 debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
2188 &amdgpu_ras_debugfs_eeprom_size_ops);
2189 con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
2190 S_IRUGO, dir, adev,
2191 &amdgpu_ras_debugfs_eeprom_table_ops);
2192 amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
2193
2194 /*
2195 * After one uncorrectable error happens, GPU recovery is usually
2196 * scheduled. But because GPU recovery is known to sometimes fail to
2197 * bring the GPU back, the interface below gives the user a direct way
2198 * to reboot the system automatically when an ERREVENT_ATHUB_INTERRUPT
2199 * is generated. In that case the normal GPU recovery routine will
2200 * never be called.
2201 */
2202 debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
2203
2204 /*
2205 * The user can set this to skip clearing the hardware error count
2206 * registers of RAS IPs during ras recovery.
2207 */
2208 debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
2209 &con->disable_ras_err_cnt_harvest);
2210 return dir;
2211 }
2212
2213 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
2214 struct ras_fs_if *head,
2215 struct dentry *dir)
2216 {
2217 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
2218
2219 if (!obj || !dir)
2220 return;
2221
2222 get_obj(obj);
2223
2224 memcpy(obj->fs_data.debugfs_name,
2225 head->debugfs_name,
2226 sizeof(obj->fs_data.debugfs_name));
2227
2228 debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
2229 obj, &amdgpu_ras_debugfs_ops);
2230 }
2231
2232 static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev)
2233 {
2234 bool ret;
2235
2236 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2237 case IP_VERSION(13, 0, 6):
2238 case IP_VERSION(13, 0, 12):
2239 case IP_VERSION(13, 0, 14):
2240 ret = true;
2241 break;
2242 default:
2243 ret = false;
2244 break;
2245 }
2246
2247 return ret;
2248 }
2249
2250 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
2251 {
2252 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2253 struct dentry *dir;
2254 struct ras_manager *obj;
2255 struct ras_fs_if fs_info;
2256
2257 /*
2258 * this is not called in the resume path, so there is no need to check
2259 * the suspend and gpu reset status
2260 */
2261 if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
2262 return;
2263
2264 dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
2265
2266 list_for_each_entry(obj, &con->head, node) {
2267 if (amdgpu_ras_is_supported(adev, obj->head.block) &&
2268 (obj->attr_inuse == 1)) {
2269 sprintf(fs_info.debugfs_name, "%s_err_inject",
2270 get_ras_block_str(&obj->head));
2271 fs_info.head = obj->head;
2272 amdgpu_ras_debugfs_create(adev, &fs_info, dir);
2273 }
2274 }
2275
2276 if (amdgpu_ras_aca_is_supported(adev)) {
2277 if (amdgpu_aca_is_enabled(adev))
2278 amdgpu_aca_smu_debugfs_init(adev, dir);
2279 else
2280 amdgpu_mca_smu_debugfs_init(adev, dir);
2281 }
2282 }
2283
2284 /* debugfs end */
2285
2286 /* ras fs */
2287 static const BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
2288 amdgpu_ras_sysfs_badpages_read, NULL, 0);
2289 static DEVICE_ATTR(features, S_IRUGO,
2290 amdgpu_ras_sysfs_features_read, NULL);
2291 static DEVICE_ATTR(version, 0444,
2292 amdgpu_ras_sysfs_version_show, NULL);
2293 static DEVICE_ATTR(schema, 0444,
2294 amdgpu_ras_sysfs_schema_show, NULL);
2295 static DEVICE_ATTR(event_state, 0444,
2296 amdgpu_ras_sysfs_event_state_show, NULL);
2297 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
2298 {
2299 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2300 struct attribute_group group = {
2301 .name = RAS_FS_NAME,
2302 };
2303 struct attribute *attrs[] = {
2304 &con->features_attr.attr,
2305 &con->version_attr.attr,
2306 &con->schema_attr.attr,
2307 &con->event_state_attr.attr,
2308 NULL
2309 };
2310 const struct bin_attribute *bin_attrs[] = {
2311 NULL,
2312 NULL,
2313 };
2314 int r;
2315
2316 group.attrs = attrs;
2317
2318 /* add features entry */
2319 con->features_attr = dev_attr_features;
2320 sysfs_attr_init(attrs[0]);
2321
2322 /* add version entry */
2323 con->version_attr = dev_attr_version;
2324 sysfs_attr_init(attrs[1]);
2325
2326 /* add schema entry */
2327 con->schema_attr = dev_attr_schema;
2328 sysfs_attr_init(attrs[2]);
2329
2330 /* add event_state entry */
2331 con->event_state_attr = dev_attr_event_state;
2332 sysfs_attr_init(attrs[3]);
2333
2334 if (amdgpu_bad_page_threshold != 0) {
2335 /* add bad_page_features entry */
2336 con->badpages_attr = bin_attr_gpu_vram_bad_pages;
2337 sysfs_bin_attr_init(&con->badpages_attr);
2338 bin_attrs[0] = &con->badpages_attr;
2339 group.bin_attrs = bin_attrs;
2340 }
2341
2342 r = sysfs_create_group(&adev->dev->kobj, &group);
2343 if (r)
2344 dev_err(adev->dev, "Failed to create RAS sysfs group!");
2345
2346 return 0;
2347 }
2348
2349 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
2350 {
2351 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2352 struct ras_manager *con_obj, *ip_obj, *tmp;
2353
2354 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2355 list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
2356 ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
2357 if (ip_obj)
2358 put_obj(ip_obj);
2359 }
2360 }
2361
2362 amdgpu_ras_sysfs_remove_all(adev);
2363 return 0;
2364 }
2365 /* ras fs end */
2366
2367 /* ih begin */
2368
2369 /* For hardware that cannot enable the bif ring for both the ras_controller_irq
2370 * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
2371 * register to check whether the interrupt has triggered, and properly
2372 * ack the interrupt if it is there
2373 */
2374 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
2375 {
2376 /* Fatal error events are handled on host side */
2377 if (amdgpu_sriov_vf(adev))
2378 return;
2379 /*
2380 * If the current interrupt is caused by a non-fatal RAS error, skip
2381 * check for fatal error. For fatal errors, FED status of all devices
2382 * in XGMI hive gets set when the first device gets fatal error
2383 * interrupt. The error gets propagated to other devices as well, so
2384 * make sure to ack the interrupt regardless of FED status.
2385 */
2386 if (!amdgpu_ras_get_fed_status(adev) &&
2387 amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY))
2388 return;
2389
2390 if (amdgpu_uniras_enabled(adev)) {
2391 amdgpu_ras_mgr_handle_fatal_interrupt(adev, NULL);
2392 return;
2393 }
2394
2395 if (adev->nbio.ras &&
2396 adev->nbio.ras->handle_ras_controller_intr_no_bifring)
2397 adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
2398
2399 if (adev->nbio.ras &&
2400 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
2401 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
2402 }
2403
2404 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
2405 struct amdgpu_iv_entry *entry)
2406 {
2407 bool poison_stat = false;
2408 struct amdgpu_device *adev = obj->adev;
2409 struct amdgpu_ras_block_object *block_obj =
2410 amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
2411 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2412 enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
2413 u64 event_id;
2414 int ret;
2415
2416 if (!block_obj || !con)
2417 return;
2418
2419 ret = amdgpu_ras_mark_ras_event(adev, type);
2420 if (ret)
2421 return;
2422
2423 amdgpu_ras_set_err_poison(adev, block_obj->ras_comm.block);
2424 /* both query_poison_status and handle_poison_consumption are optional,
2425 * but at least one of them should be implemented if we need a poison
2426 * consumption handler
2427 */
2428 if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
2429 poison_stat = block_obj->hw_ops->query_poison_status(adev);
2430 if (!poison_stat) {
2431 /* Not poison consumption interrupt, no need to handle it */
2432 dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
2433 block_obj->ras_comm.name);
2434
2435 return;
2436 }
2437 }
2438
2439 amdgpu_umc_poison_handler(adev, obj->head.block, 0);
2440
2441 if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
2442 poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
2443
2444 /* gpu reset is the fallback for failed and default cases.
2445 * For the RMA case, amdgpu_umc_poison_handler will handle the gpu reset.
2446 */
2447 if (poison_stat && !amdgpu_ras_is_rma(adev)) {
2448 event_id = amdgpu_ras_acquire_event_id(adev, type);
2449 RAS_EVENT_LOG(adev, event_id,
2450 "GPU reset for %s RAS poison consumption is issued!\n",
2451 block_obj->ras_comm.name);
2452 amdgpu_ras_reset_gpu(adev);
2453 }
2454
2455 if (!poison_stat)
2456 amdgpu_gfx_poison_consumption_handler(adev, entry);
2457 }
2458
2459 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
2460 struct amdgpu_iv_entry *entry)
2461 {
2462 struct amdgpu_device *adev = obj->adev;
2463 enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
2464 u64 event_id;
2465 int ret;
2466
2467 ret = amdgpu_ras_mark_ras_event(adev, type);
2468 if (ret)
2469 return;
2470
2471 event_id = amdgpu_ras_acquire_event_id(adev, type);
2472 RAS_EVENT_LOG(adev, event_id, "Poison is created\n");
2473
2474 if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) {
2475 struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev);
2476
2477 atomic_inc(&con->page_retirement_req_cnt);
2478 atomic_inc(&con->poison_creation_count);
2479
2480 wake_up(&con->page_retirement_wq);
2481 }
2482 }
2483
2484 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
2485 struct amdgpu_iv_entry *entry)
2486 {
2487 struct ras_ih_data *data = &obj->ih_data;
2488 struct ras_err_data err_data;
2489 int ret;
2490
2491 if (!data->cb)
2492 return;
2493
2494 ret = amdgpu_ras_error_data_init(&err_data);
2495 if (ret)
2496 return;
2497
2498 /* Let the IP handle its data; maybe we need to get the output
2499 * from the callback to update the error type/count, etc.
2500 */
2501 amdgpu_ras_set_fed(obj->adev, true);
2502 ret = data->cb(obj->adev, &err_data, entry);
2503 /* a ue will trigger an interrupt, and in that case
2504 * we need to do a reset to recover the whole system.
2505 * But leave it to the IP to do that recovery; here we just
2506 * dispatch the error.
2507 */
2508 if (ret == AMDGPU_RAS_SUCCESS) {
2509 /* these counts could be left as 0 if
2510 * some blocks do not count error numbers
2511 */
2512 obj->err_data.ue_count += err_data.ue_count;
2513 obj->err_data.ce_count += err_data.ce_count;
2514 obj->err_data.de_count += err_data.de_count;
2515 }
2516
2517 amdgpu_ras_error_data_fini(&err_data);
2518 }
2519
2520 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
2521 {
2522 struct ras_ih_data *data = &obj->ih_data;
2523 struct amdgpu_iv_entry entry;
2524
2525 while (data->rptr != data->wptr) {
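/*
 * Pairs with the wmb() in amdgpu_ras_interrupt_dispatch(): the ring
 * entry is only read after the wptr update that published it has been
 * observed by the loop condition above.
 */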
2526 rmb();
2527 memcpy(&entry, &data->ring[data->rptr],
2528 data->element_size);
2529
2530 wmb();
2531 data->rptr = (data->aligned_element_size +
2532 data->rptr) % data->ring_size;
2533
2534 if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
2535 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2536 amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
2537 else
2538 amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
2539 } else {
2540 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2541 amdgpu_ras_interrupt_umc_handler(obj, &entry);
2542 else
2543 dev_warn(obj->adev->dev,
2544 "No RAS interrupt handler for non-UMC block with poison disabled.\n");
2545 }
2546 }
2547 }
2548
2549 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
2550 {
2551 struct ras_ih_data *data =
2552 container_of(work, struct ras_ih_data, ih_work);
2553 struct ras_manager *obj =
2554 container_of(data, struct ras_manager, ih_data);
2555
2556 amdgpu_ras_interrupt_handler(obj);
2557 }
2558
2559 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
2560 struct ras_dispatch_if *info)
2561 {
2562 struct ras_manager *obj;
2563 struct ras_ih_data *data;
2564
2565 if (amdgpu_uniras_enabled(adev)) {
2566 struct ras_ih_info ih_info;
2567
2568 memset(&ih_info, 0, sizeof(ih_info));
2569 ih_info.block = info->head.block;
2570 memcpy(&ih_info.iv_entry, info->entry, sizeof(struct amdgpu_iv_entry));
2571
2572 return amdgpu_ras_mgr_handle_controller_interrupt(adev, &ih_info);
2573 }
2574
2575 obj = amdgpu_ras_find_obj(adev, &info->head);
2576 if (!obj)
2577 return -EINVAL;
2578
2579 data = &obj->ih_data;
2580
2581 if (data->inuse == 0)
2582 return 0;
2583
2584 /* Might be overflow... */
2585 memcpy(&data->ring[data->wptr], info->entry,
2586 data->element_size);
2587
2588 wmb();
2589 data->wptr = (data->aligned_element_size +
2590 data->wptr) % data->ring_size;
2591
2592 schedule_work(&data->ih_work);
2593
2594 return 0;
2595 }
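/*
 * Rough sketch of the expected call site (an IP block's IRQ handler);
 * ras_if and entry come from the IP block itself:
 *
 *	struct ras_dispatch_if ih_data = {
 *		.head = *ras_if,
 *		.entry = entry,
 *	};
 *
 *	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
 */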
2596
2597 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
2598 struct ras_common_if *head)
2599 {
2600 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2601 struct ras_ih_data *data;
2602
2603 if (!obj)
2604 return -EINVAL;
2605
2606 data = &obj->ih_data;
2607 if (data->inuse == 0)
2608 return 0;
2609
2610 cancel_work_sync(&data->ih_work);
2611
2612 kfree(data->ring);
2613 memset(data, 0, sizeof(*data));
2614 put_obj(obj);
2615
2616 return 0;
2617 }
2618
2619 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
2620 struct ras_common_if *head)
2621 {
2622 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2623 struct ras_ih_data *data;
2624 struct amdgpu_ras_block_object *ras_obj;
2625
2626 if (!obj) {
2627 /* in case we register the IH before enabling the ras feature */
2628 obj = amdgpu_ras_create_obj(adev, head);
2629 if (!obj)
2630 return -EINVAL;
2631 } else
2632 get_obj(obj);
2633
2634 ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
2635
2636 data = &obj->ih_data;
2637 /* add the callback, etc. */
2638 *data = (struct ras_ih_data) {
2639 .inuse = 0,
2640 .cb = ras_obj->ras_cb,
2641 .element_size = sizeof(struct amdgpu_iv_entry),
2642 .rptr = 0,
2643 .wptr = 0,
2644 };
2645
2646 INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
2647
2648 data->aligned_element_size = ALIGN(data->element_size, 8);
2649 /* the ring can store 64 iv entries. */
2650 data->ring_size = 64 * data->aligned_element_size;
2651 data->ring = kmalloc(data->ring_size, GFP_KERNEL);
2652 if (!data->ring) {
2653 put_obj(obj);
2654 return -ENOMEM;
2655 }
2656
2657 /* IH is ready */
2658 data->inuse = 1;
2659
2660 return 0;
2661 }
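/*
 * Sketch of the expected pairing from a block's late init/fini path
 * (ras_obj here stands for the IP's amdgpu_ras_block_object):
 *
 *	if (ras_obj->ras_cb)
 *		r = amdgpu_ras_interrupt_add_handler(adev, &ras_obj->ras_comm);
 *	...
 *	amdgpu_ras_interrupt_remove_handler(adev, &ras_obj->ras_comm);
 */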
2662
2663 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
2664 {
2665 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2666 struct ras_manager *obj, *tmp;
2667
2668 list_for_each_entry_safe(obj, tmp, &con->head, node) {
2669 amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
2670 }
2671
2672 return 0;
2673 }
2674 /* ih end */
2675
2676 /* traverse all IPs except NBIO to query error counters */
2677 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev, enum ras_event_type type)
2678 {
2679 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2680 struct ras_manager *obj;
2681
2682 if (!adev->ras_enabled || !con)
2683 return;
2684
2685 list_for_each_entry(obj, &con->head, node) {
2686 struct ras_query_if info = {
2687 .head = obj->head,
2688 };
2689
2690 /*
2691 * The PCIE_BIF IP has a separate isr for the ras controller
2692 * interrupt, and the specific ras counter query is
2693 * done in that isr. So skip this block in the common
2694 * sync flood interrupt isr path.
2695 */
2696 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
2697 continue;
2698
2699 /*
2700 * this is a workaround for aldebaran: skip sending the msg to
2701 * the smu to get the ecc_info table, because the smu currently
2702 * fails to handle that request.
2703 * should be removed once the smu fix for the ecc_info table lands.
2704 */
2705 if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
2706 (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2707 IP_VERSION(13, 0, 2)))
2708 continue;
2709
2710 amdgpu_ras_query_error_status_with_event(adev, &info, type);
2711
2712 if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2713 IP_VERSION(11, 0, 2) &&
2714 amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2715 IP_VERSION(11, 0, 4) &&
2716 amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2717 IP_VERSION(13, 0, 0)) {
2718 if (amdgpu_ras_reset_error_status(adev, info.head.block))
2719 dev_warn(adev->dev, "Failed to reset error counter and error status");
2720 }
2721 }
2722 }
2723
2724 /* Parse RdRspStatus and WrRspStatus */
2725 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
2726 struct ras_query_if *info)
2727 {
2728 struct amdgpu_ras_block_object *block_obj;
2729 /*
2730 * Only two blocks need to query the read/write
2731 * RspStatus at the current state
2732 */
2733 if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
2734 (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
2735 return;
2736
2737 block_obj = amdgpu_ras_get_ras_block(adev,
2738 info->head.block,
2739 info->head.sub_block_index);
2740
2741 if (!block_obj || !block_obj->hw_ops) {
2742 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
2743 get_ras_block_str(&info->head));
2744 return;
2745 }
2746
2747 if (block_obj->hw_ops->query_ras_error_status)
2748 block_obj->hw_ops->query_ras_error_status(adev);
2749
2750 }
2751
2752 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
2753 {
2754 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2755 struct ras_manager *obj;
2756
2757 if (!adev->ras_enabled || !con)
2758 return;
2759
2760 list_for_each_entry(obj, &con->head, node) {
2761 struct ras_query_if info = {
2762 .head = obj->head,
2763 };
2764
2765 amdgpu_ras_error_status_query(adev, &info);
2766 }
2767 }
2768
2769 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
2770 struct ras_badpage *bps, uint32_t count, uint32_t start)
2771 {
2772 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2773 struct ras_err_handler_data *data;
2774 int r = 0;
2775 uint32_t i;
2776
2777 if (!con || !con->eh_data || !bps || !count)
2778 return -EINVAL;
2779
2780 mutex_lock(&con->recovery_lock);
2781 data = con->eh_data;
2782 if (start < data->count) {
2783 for (i = start; i < data->count; i++) {
2784 if (!data->bps[i].ts)
2785 continue;
2786
2787 /* U64_MAX is used to mark the record as invalid */
2788 if (data->bps[i].retired_page == U64_MAX)
2789 continue;
2790
2791 bps[r].bp = data->bps[i].retired_page;
2792 r++;
2793 if (r >= count)
2794 break;
2795 }
2796 }
2797 mutex_unlock(&con->recovery_lock);
2798
2799 return r;
2800 }
2801
2802 static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev,
2803 struct ras_badpage *bps, uint32_t count, uint32_t start)
2804 {
2805 struct ras_cmd_bad_pages_info_req cmd_input;
2806 struct ras_cmd_bad_pages_info_rsp *output;
2807 uint32_t group, start_group, end_group;
2808 uint32_t pos, pos_in_group;
2809 int r = 0, i;
2810
2811 if (!bps || !count)
2812 return -EINVAL;
2813
2814 output = kmalloc_obj(*output, GFP_KERNEL);
2815 if (!output)
2816 return -ENOMEM;
2817
2818 memset(&cmd_input, 0, sizeof(cmd_input));
2819
2820 start_group = start / RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
2821 end_group = (start + count + RAS_CMD_MAX_BAD_PAGES_PER_GROUP - 1) /
2822 RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
2823
2824 pos = start;
2825 for (group = start_group; group < end_group; group++) {
2826 memset(output, 0, sizeof(*output));
2827 cmd_input.group_index = group;
2828 if (amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BAD_PAGES,
2829 &cmd_input, sizeof(cmd_input), output, sizeof(*output)))
2830 goto out;
2831
2832 if (pos >= output->bp_total_cnt)
2833 goto out;
2834
2835 pos_in_group = pos - group * RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
2836 for (i = pos_in_group; i < output->bp_in_group; i++, pos++) {
2837 if (!output->records[i].ts)
2838 continue;
2839
2840 bps[r].bp = output->records[i].retired_page;
2841 r++;
2842 if (r >= count)
2843 goto out;
2844 }
2845 }
2846
2847 out:
2848 kfree(output);
2849 return r;
2850 }
2851
2852 static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
2853 struct amdgpu_hive_info *hive, bool status)
2854 {
2855 struct amdgpu_device *tmp_adev;
2856
2857 if (hive) {
2858 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
2859 amdgpu_ras_set_fed(tmp_adev, status);
2860 } else {
2861 amdgpu_ras_set_fed(adev, status);
2862 }
2863 }
2864
2865 bool amdgpu_ras_in_recovery(struct amdgpu_device *adev)
2866 {
2867 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2868 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2869 int hive_ras_recovery = 0;
2870
2871 if (hive) {
2872 hive_ras_recovery = atomic_read(&hive->ras_recovery);
2873 amdgpu_put_xgmi_hive(hive);
2874 }
2875
2876 if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
2877 return true;
2878
2879 return false;
2880 }
2881
2882 static enum ras_event_type amdgpu_ras_get_fatal_error_event(struct amdgpu_device *adev)
2883 {
2884 if (amdgpu_ras_intr_triggered())
2885 return RAS_EVENT_TYPE_FATAL;
2886 else
2887 return RAS_EVENT_TYPE_POISON_CONSUMPTION;
2888 }
2889
2890 static void amdgpu_ras_do_recovery(struct work_struct *work)
2891 {
2892 struct amdgpu_ras *ras =
2893 container_of(work, struct amdgpu_ras, recovery_work);
2894 struct amdgpu_device *remote_adev = NULL;
2895 struct amdgpu_device *adev = ras->adev;
2896 struct list_head device_list, *device_list_handle = NULL;
2897 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2898 unsigned int error_query_mode;
2899 enum ras_event_type type;
2900
2901 if (hive) {
2902 atomic_set(&hive->ras_recovery, 1);
2903
2904 /* If any device that is part of the hive received a RAS fatal
2905 * error interrupt, set the fatal error status on all of them. This
2906 * condition requires a recovery, and the flag will be cleared
2907 * as part of that recovery.
2908 */
2909 list_for_each_entry(remote_adev, &hive->device_list,
2910 gmc.xgmi.head)
2911 if (amdgpu_ras_get_fed_status(remote_adev)) {
2912 amdgpu_ras_set_fed_all(adev, hive, true);
2913 break;
2914 }
2915 }
2916 if (!ras->disable_ras_err_cnt_harvest) {
2917
2918 /* Build list of devices to query RAS related errors */
2919 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
2920 device_list_handle = &hive->device_list;
2921 } else {
2922 INIT_LIST_HEAD(&device_list);
2923 list_add_tail(&adev->gmc.xgmi.head, &device_list);
2924 device_list_handle = &device_list;
2925 }
2926
2927 if (amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) {
2928 if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY) {
2929 /* wait 500ms to ensure pmfw has finished polling the mca bank info */
2930 msleep(500);
2931 }
2932 }
2933
2934 type = amdgpu_ras_get_fatal_error_event(adev);
2935 list_for_each_entry(remote_adev,
2936 device_list_handle, gmc.xgmi.head) {
2937 if (amdgpu_uniras_enabled(remote_adev)) {
2938 amdgpu_ras_mgr_update_ras_ecc(remote_adev);
2939 } else {
2940 amdgpu_ras_query_err_status(remote_adev);
2941 amdgpu_ras_log_on_err_counter(remote_adev, type);
2942 }
2943 }
2944
2945 }
2946
2947 if (amdgpu_device_should_recover_gpu(ras->adev)) {
2948 struct amdgpu_reset_context reset_context;
2949 memset(&reset_context, 0, sizeof(reset_context));
2950
2951 reset_context.method = AMD_RESET_METHOD_NONE;
2952 reset_context.reset_req_dev = adev;
2953 reset_context.src = AMDGPU_RESET_SRC_RAS;
2954 set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
2955
2956 /* Perform full reset in fatal error mode */
2957 if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
2958 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2959 else {
2960 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2961
2962 if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
2963 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
2964 reset_context.method = AMD_RESET_METHOD_MODE2;
2965 }
2966
2967 /* If a fatal error occurs in poison mode, mode1 reset is used to
2968 * recover the gpu.
2969 */
2970 if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
2971 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2972 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2973
2974 psp_fatal_error_recovery_quirk(&adev->psp);
2975 }
2976 }
2977
2978 amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2979 }
2980 atomic_set(&ras->in_recovery, 0);
2981 if (hive) {
2982 atomic_set(&hive->ras_recovery, 0);
2983 amdgpu_put_xgmi_hive(hive);
2984 }
2985 }
2986
2987 /* alloc/realloc bps array */
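/*
 * Worked example of the sizing below (illustrative numbers): with
 * data->count + data->space_left == 100 and pages == 300, new_space is
 * 400, align_space rounds up to 512, and space_left grows by 412.
 */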
2988 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2989 struct ras_err_handler_data *data, int pages)
2990 {
2991 unsigned int old_space = data->count + data->space_left;
2992 unsigned int new_space = old_space + pages;
2993 unsigned int align_space = ALIGN(new_space, 512);
2994 void *bps = kmalloc_objs(*data->bps, align_space, GFP_KERNEL);
2995
2996 if (!bps) {
2997 return -ENOMEM;
2998 }
2999
3000 if (data->bps) {
3001 memcpy(bps, data->bps,
3002 data->count * sizeof(*data->bps));
3003 kfree(data->bps);
3004 }
3005
3006 data->bps = bps;
3007 data->space_left += align_space - old_space;
3008 return 0;
3009 }
3010
3011 static int amdgpu_ras_mca2pa_by_idx(struct amdgpu_device *adev,
3012 struct eeprom_table_record *bps,
3013 struct ras_err_data *err_data)
3014 {
3015 struct ta_ras_query_address_input addr_in;
3016 uint32_t socket = 0;
3017 int ret = 0;
3018
3019 if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
3020 socket = adev->smuio.funcs->get_socket_id(adev);
3021
3022 /* reinit err_data */
3023 err_data->err_addr_cnt = 0;
3024 err_data->err_addr_len = adev->umc.retire_unit;
3025
3026 memset(&addr_in, 0, sizeof(addr_in));
3027 addr_in.ma.err_addr = bps->address;
3028 addr_in.ma.socket_id = socket;
3029 addr_in.ma.ch_inst = bps->mem_channel;
3030 if (!amdgpu_ras_smu_eeprom_supported(adev)) {
3031 /* tell RAS TA the node instance is not used */
3032 addr_in.ma.node_inst = TA_RAS_INV_NODE;
3033 } else {
3034 addr_in.ma.umc_inst = bps->mcumc_id;
3035 addr_in.ma.node_inst = bps->cu;
3036 }
3037
3038 if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
3039 ret = adev->umc.ras->convert_ras_err_addr(adev, err_data,
3040 &addr_in, NULL, false);
3041
3042 return ret;
3043 }
3044
3045 static int amdgpu_ras_mca2pa(struct amdgpu_device *adev,
3046 struct eeprom_table_record *bps,
3047 struct ras_err_data *err_data)
3048 {
3049 struct ta_ras_query_address_input addr_in;
3050 uint32_t die_id, socket = 0;
3051
3052 if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
3053 socket = adev->smuio.funcs->get_socket_id(adev);
3054
3055 /* although the die id is derived from the PA in nps1 mode, the id is
3056 * suitable for any nps mode
3057 */
3058 if (adev->umc.ras && adev->umc.ras->get_die_id_from_pa)
3059 die_id = adev->umc.ras->get_die_id_from_pa(adev, bps->address,
3060 bps->retired_page << AMDGPU_GPU_PAGE_SHIFT);
3061 else
3062 return -EINVAL;
3063
3064 /* reinit err_data */
3065 err_data->err_addr_cnt = 0;
3066 err_data->err_addr_len = adev->umc.retire_unit;
3067
3068 memset(&addr_in, 0, sizeof(addr_in));
3069 addr_in.ma.err_addr = bps->address;
3070 addr_in.ma.ch_inst = bps->mem_channel;
3071 addr_in.ma.umc_inst = bps->mcumc_id;
3072 addr_in.ma.node_inst = die_id;
3073 addr_in.ma.socket_id = socket;
3074
3075 if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
3076 return adev->umc.ras->convert_ras_err_addr(adev, err_data,
3077 &addr_in, NULL, false);
3078 else
3079 return -EINVAL;
3080 }
3081
3082 static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev,
3083 struct eeprom_table_record *bps, int count)
3084 {
3085 int j;
3086 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3087 struct ras_err_handler_data *data = con->eh_data;
3088
3089 for (j = 0; j < count; j++) {
3090 if (!data->space_left &&
3091 amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
3092 return -ENOMEM;
3093 }
3094
3095 if (amdgpu_ras_check_bad_page_unlock(con,
3096 bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT)) {
3097 /* set to U64_MAX to mark it as invalid */
3098 data->bps[data->count].retired_page = U64_MAX;
3099 data->count++;
3100 data->space_left--;
3101 continue;
3102 }
3103
3104 amdgpu_ras_reserve_page(adev, bps[j].retired_page);
3105
3106 memcpy(&data->bps[data->count], &(bps[j]),
3107 sizeof(struct eeprom_table_record));
3108 data->count++;
3109 data->space_left--;
3110 con->bad_page_num++;
3111 }
3112
3113 return 0;
3114 }
3115
3116 static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev,
3117 struct eeprom_table_record *bps, struct ras_err_data *err_data,
3118 enum amdgpu_memory_partition nps)
3119 {
3120 int i = 0;
3121 enum amdgpu_memory_partition save_nps;
3122
3123 save_nps = (bps[0].retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
3124
3125 /* old asics only have the pa in eeprom */
3126 if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
3127 memcpy(err_data->err_addr, bps,
3128 sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
3129 goto out;
3130 }
3131
3132 for (i = 0; i < adev->umc.retire_unit; i++)
3133 bps[i].retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
3134
3135 if (save_nps) {
3136 if (save_nps == nps) {
3137 if (amdgpu_umc_pages_in_a_row(adev, err_data,
3138 bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT))
3139 return -EINVAL;
3140 for (i = 0; i < adev->umc.retire_unit; i++) {
3141 err_data->err_addr[i].address = bps[0].address;
3142 err_data->err_addr[i].mem_channel = bps[0].mem_channel;
3143 err_data->err_addr[i].bank = bps[0].bank;
3144 err_data->err_addr[i].err_type = bps[0].err_type;
3145 err_data->err_addr[i].mcumc_id = bps[0].mcumc_id;
3146 }
3147 } else {
3148 if (amdgpu_ras_mca2pa_by_idx(adev, &bps[0], err_data))
3149 return -EINVAL;
3150 }
3151 } else {
3152 if (bps[0].address == 0) {
3153 /* for some old eeprom data, the mca address is not stored,
3154 * so calculate it from the pa
3155 */
3156 if (amdgpu_umc_pa2mca(adev, bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT,
3157 &(bps[0].address), AMDGPU_NPS1_PARTITION_MODE))
3158 return -EINVAL;
3159 }
3160
3161 if (amdgpu_ras_mca2pa(adev, &bps[0], err_data)) {
3162 if (nps == AMDGPU_NPS1_PARTITION_MODE)
3163 memcpy(err_data->err_addr, bps,
3164 sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
3165 else
3166 return -EOPNOTSUPP;
3167 }
3168 }
3169
3170 out:
3171 return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr, adev->umc.retire_unit);
3172 }
3173
3174 static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
3175 struct eeprom_table_record *bps, struct ras_err_data *err_data,
3176 enum amdgpu_memory_partition nps)
3177 {
3178 int i = 0;
3179 enum amdgpu_memory_partition save_nps;
3180
3181 if (!amdgpu_ras_smu_eeprom_supported(adev)) {
3182 save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
3183 bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
3184 } else {
3185 /* if pmfw manages the eeprom, save_nps is not stored on the eeprom,
3186 * so we should always convert the mca address into a physical address;
3187 * make save_nps different from nps to force that conversion
3188 */
3189 save_nps = nps + 1;
3190 }
3191
3192 if (save_nps == nps) {
3193 if (amdgpu_umc_pages_in_a_row(adev, err_data,
3194 bps->retired_page << AMDGPU_GPU_PAGE_SHIFT))
3195 return -EINVAL;
3196 for (i = 0; i < adev->umc.retire_unit; i++) {
3197 err_data->err_addr[i].address = bps->address;
3198 err_data->err_addr[i].mem_channel = bps->mem_channel;
3199 err_data->err_addr[i].bank = bps->bank;
3200 err_data->err_addr[i].err_type = bps->err_type;
3201 err_data->err_addr[i].mcumc_id = bps->mcumc_id;
3202 }
3203 } else {
3204 if (bps->address) {
3205 if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
3206 return -EINVAL;
3207 } else {
3208 /* for some old eeprom data, the mca address is not stored,
3209 * so calculate it from the pa
3210 */
3211 if (amdgpu_umc_pa2mca(adev, bps->retired_page << AMDGPU_GPU_PAGE_SHIFT,
3212 &(bps->address), AMDGPU_NPS1_PARTITION_MODE))
3213 return -EINVAL;
3214
3215 if (amdgpu_ras_mca2pa(adev, bps, err_data))
3216 return -EOPNOTSUPP;
3217 }
3218 }
3219
3220 return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr,
3221 adev->umc.retire_unit);
3222 }
3223
3224 /* it deals with vram only. */
3225 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
3226 struct eeprom_table_record *bps, int pages, bool from_rom)
3227 {
3228 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3229 struct ras_err_data err_data;
3230 struct amdgpu_ras_eeprom_control *control =
3231 &adev->psp.ras_context.ras->eeprom_control;
3232 enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
3233 int ret = 0;
3234 uint32_t i = 0;
3235
3236 if (!con || !con->eh_data || !bps || pages <= 0)
3237 return 0;
3238
3239 if (from_rom) {
3240 err_data.err_addr =
3241 kzalloc_objs(struct eeprom_table_record,
3242 adev->umc.retire_unit, GFP_KERNEL);
3243 if (!err_data.err_addr) {
3244 dev_warn(adev->dev, "Failed to alloc UMC error address record in mca2pa conversion!\n");
3245 return -ENOMEM;
3246 }
3247
3248 if (adev->gmc.gmc_funcs->query_mem_partition_mode)
3249 nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
3250 }
3251
3252 mutex_lock(&con->recovery_lock);
3253
3254 if (from_rom) {
3255 /* there are no pa recs in V3, so skip pa recs processing */
3256 if ((control->tbl_hdr.version < RAS_TABLE_VER_V3) &&
3257 !amdgpu_ras_smu_eeprom_supported(adev)) {
3258 for (i = 0; i < pages; i++) {
3259 if (control->ras_num_recs - i >= adev->umc.retire_unit) {
3260 if ((bps[i].address == bps[i + 1].address) &&
3261 (bps[i].mem_channel == bps[i + 1].mem_channel)) {
3262 /* deal with retire_unit records at a time */
3263 ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
3264 &bps[i], &err_data, nps);
3265 i += (adev->umc.retire_unit - 1);
3266 } else {
3267 break;
3268 }
3269 } else {
3270 break;
3271 }
3272 }
3273 }
3274 for (; i < pages; i++) {
3275 ret = __amdgpu_ras_convert_rec_from_rom(adev,
3276 &bps[i], &err_data, nps);
3277 }
3278
3279 con->eh_data->count_saved = con->eh_data->count;
3280 } else {
3281 ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
3282 }
3283
3284 if (from_rom)
3285 kfree(err_data.err_addr);
3286 mutex_unlock(&con->recovery_lock);
3287
3288 return ret;
3289 }
3290
3291 /*
3292 * write the error record array to eeprom; the function should be
3293 * protected by recovery_lock
3294 * new_cnt: newly added UE count, excluding reserved bad pages, can be NULL
3295 */
3296 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
3297 unsigned long *new_cnt)
3298 {
3299 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3300 struct ras_err_handler_data *data;
3301 struct amdgpu_ras_eeprom_control *control;
3302 int save_count, unit_num, i;
3303
3304 if (!con || !con->eh_data) {
3305 if (new_cnt)
3306 *new_cnt = 0;
3307
3308 return 0;
3309 }
3310
3311 if (!con->eeprom_control.is_eeprom_valid) {
3312 dev_warn(adev->dev,
3313 "Failed to save EEPROM table data because of EEPROM data corruption!");
3314 if (new_cnt)
3315 *new_cnt = 0;
3316
3317 return 0;
3318 }
3319
3320 mutex_lock(&con->recovery_lock);
3321 control = &con->eeprom_control;
3322 data = con->eh_data;
3323 if (amdgpu_ras_smu_eeprom_supported(adev))
3324 unit_num = control->ras_num_recs -
3325 control->ras_num_recs_old;
3326 else
3327 unit_num = data->count / adev->umc.retire_unit -
3328 control->ras_num_recs;
3329
3330 save_count = con->bad_page_num - control->ras_num_bad_pages;
3331 mutex_unlock(&con->recovery_lock);
3332
3333 if (new_cnt)
3334 *new_cnt = unit_num;
3335
3336 /* only new entries are saved */
3337 if (unit_num && save_count) {
3338 /* old asics only save the pa to eeprom, as before */
3339 if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
3340 if (amdgpu_ras_eeprom_append(control,
3341 &data->bps[data->count_saved], unit_num)) {
3342 dev_err(adev->dev, "Failed to save EEPROM table data!");
3343 return -EIO;
3344 }
3345 } else {
3346 for (i = 0; i < unit_num; i++) {
3347 if (amdgpu_ras_eeprom_append(control,
3348 &data->bps[data->count_saved +
3349 i * adev->umc.retire_unit], 1)) {
3350 dev_err(adev->dev, "Failed to save EEPROM table data!");
3351 return -EIO;
3352 }
3353 }
3354 }
3355
3356 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
3357 data->count_saved = data->count;
3358 }
3359
3360 return 0;
3361 }
3362
3363 /*
3364 * read error record array in eeprom and reserve enough space for
3365 * storing new bad pages
3366 */
3367 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
3368 {
3369 struct amdgpu_ras_eeprom_control *control =
3370 &adev->psp.ras_context.ras->eeprom_control;
3371 struct eeprom_table_record *bps;
3372 int ret, i = 0;
3373
3374 /* no bad page record, skip eeprom access */
3375 if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
3376 return 0;
3377
3378 bps = kzalloc_objs(*bps, control->ras_num_recs, GFP_KERNEL);
3379 if (!bps)
3380 return -ENOMEM;
3381
3382 ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
3383 if (ret) {
3384 dev_err(adev->dev, "Failed to load EEPROM table records!");
3385 } else {
3386 if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
3387 /* In V3 there are no pa recs, but some cases (when address == 0) may be parsed
3388 * as pa recs, so add a version check to avoid that.
3389 */
3390 if ((control->tbl_hdr.version < RAS_TABLE_VER_V3) &&
3391 !amdgpu_ras_smu_eeprom_supported(adev)) {
3392 for (i = 0; i < control->ras_num_recs; i++) {
3393 if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
3394 if ((bps[i].address == bps[i + 1].address) &&
3395 (bps[i].mem_channel == bps[i + 1].mem_channel)) {
3396 control->ras_num_pa_recs += adev->umc.retire_unit;
3397 i += (adev->umc.retire_unit - 1);
3398 } else {
3399 control->ras_num_mca_recs +=
3400 (control->ras_num_recs - i);
3401 break;
3402 }
3403 } else {
3404 control->ras_num_mca_recs += (control->ras_num_recs - i);
3405 break;
3406 }
3407 }
3408 } else {
3409 control->ras_num_mca_recs = control->ras_num_recs;
3410 }
3411 }
3412
3413 ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
3414 if (ret)
3415 goto out;
3416
3417 ret = amdgpu_ras_eeprom_check(control);
3418 if (ret)
3419 goto out;
3420
3421 /* HW not usable */
3422 if (amdgpu_ras_is_rma(adev))
3423 ret = -EHWPOISON;
3424 }
3425
3426 out:
3427 kfree(bps);
3428 return ret;
3429 }
3430
3431 static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
3432 uint64_t addr)
3433 {
3434 struct ras_err_handler_data *data = con->eh_data;
3435 struct amdgpu_device *adev = con->adev;
3436 int i;
3437
3438 if ((addr >= adev->gmc.mc_vram_size &&
3439 adev->gmc.mc_vram_size) ||
3440 (addr >= RAS_UMC_INJECT_ADDR_LIMIT))
3441 return -EINVAL;
3442
3443 addr >>= AMDGPU_GPU_PAGE_SHIFT;
3444 for (i = 0; i < data->count; i++)
3445 if (addr == data->bps[i].retired_page)
3446 return 1;
3447
3448 return 0;
3449 }
3450
3451 /*
3452 * check if an address belongs to bad page
3453 *
3454 * Note: this check is only for umc block
3455 */
3456 static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
3457 uint64_t addr)
3458 {
3459 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3460 int ret = 0;
3461
3462 if (!con || !con->eh_data)
3463 return ret;
3464
3465 mutex_lock(&con->recovery_lock);
3466 ret = amdgpu_ras_check_bad_page_unlock(con, addr);
3467 mutex_unlock(&con->recovery_lock);
3468 return ret;
3469 }
3470
3471 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
3472 uint32_t max_count)
3473 {
3474 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3475
3476 /*
3477 * amdgpu_bad_page_threshold is used to config
3478 * the threshold for the number of bad pages.
3479 * -1: Threshold is set to default value
3480 * Driver will issue a warning message when threshold is reached
3481 * and continue runtime services.
3482 * 0: Disable bad page retirement
3483 * Driver will not retire bad pages
3484 * which is intended for debugging purpose.
3485 * -2: Threshold is determined by a formula
3486 * that assumes 1 bad page per 100M of local memory.
3487 	 * Driver will continue runtime services when the threshold is reached.
3488 * 0 < threshold < max number of bad page records in EEPROM,
3489 * A user-defined threshold is set
3490 * Driver will halt runtime services when this custom threshold is reached.
3491 */
3492 if (amdgpu_bad_page_threshold == -2) {
3493 u64 val = adev->gmc.mc_vram_size;
3494
3495 do_div(val, RAS_BAD_PAGE_COVER);
3496 con->bad_page_cnt_threshold = min(lower_32_bits(val),
3497 max_count);
3498 } else if (amdgpu_bad_page_threshold == -1) {
3499 con->bad_page_cnt_threshold = ((con->reserved_pages_in_bytes) >> 21) << 4;
3500 } else {
3501 con->bad_page_cnt_threshold = min_t(int, max_count,
3502 amdgpu_bad_page_threshold);
3503 }
3504 }
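/*
 * Worked examples for the threshold modes documented above (illustrative
 * sizes only):
 *
 *	amdgpu_bad_page_threshold == -2, 64 GiB VRAM:
 *		bad_page_cnt_threshold = min(64 GiB / 100 MiB, max_count)
 *				       = min(655, max_count)
 *	amdgpu_bad_page_threshold == -1, 16 MiB reserved:
 *		bad_page_cnt_threshold = (16 MiB >> 21) << 4 = 128
 *	amdgpu_bad_page_threshold == 100:
 *		bad_page_cnt_threshold = min(max_count, 100)
 */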
3505
3506 int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
3507 enum amdgpu_ras_block block, uint16_t pasid,
3508 pasid_notify pasid_fn, void *data, uint32_t reset)
3509 {
3510 int ret = 0;
3511 struct ras_poison_msg poison_msg;
3512 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3513
3514 memset(&poison_msg, 0, sizeof(poison_msg));
3515 poison_msg.block = block;
3516 poison_msg.pasid = pasid;
3517 poison_msg.reset = reset;
3518 poison_msg.pasid_fn = pasid_fn;
3519 poison_msg.data = data;
3520
3521 ret = kfifo_put(&con->poison_fifo, poison_msg);
3522 if (!ret) {
3523 dev_err(adev->dev, "Poison message fifo is full!\n");
3524 return -ENOSPC;
3525 }
3526
3527 return 0;
3528 }
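/*
 * Example usage (an illustrative sketch; the block, pasid and reset mode
 * shown are assumptions, not requirements of this helper): a poison
 * consumption path can queue a message with
 *
 *	amdgpu_ras_put_poison_req(adev, AMDGPU_RAS_BLOCK__GFX, pasid,
 *				  NULL, NULL,
 *				  AMDGPU_RAS_GPU_RESET_MODE2_RESET);
 *
 * The message is later drained by amdgpu_ras_get_poison_req() from the page
 * retirement thread.
 */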
3529
3530 static int amdgpu_ras_get_poison_req(struct amdgpu_device *adev,
3531 struct ras_poison_msg *poison_msg)
3532 {
3533 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3534
3535 return kfifo_get(&con->poison_fifo, poison_msg);
3536 }
3537
3538 static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
3539 {
3540 mutex_init(&ecc_log->lock);
3541
3542 INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
3543 ecc_log->de_queried_count = 0;
3544 ecc_log->consumption_q_count = 0;
3545 }
3546
3547 static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
3548 {
3549 struct radix_tree_iter iter;
3550 void __rcu **slot;
3551 struct ras_ecc_err *ecc_err;
3552
3553 mutex_lock(&ecc_log->lock);
3554 radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) {
3555 ecc_err = radix_tree_deref_slot(slot);
3556 kfree(ecc_err->err_pages.pfn);
3557 kfree(ecc_err);
3558 radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot);
3559 }
3560 mutex_unlock(&ecc_log->lock);
3561
3562 mutex_destroy(&ecc_log->lock);
3563 ecc_log->de_queried_count = 0;
3564 ecc_log->consumption_q_count = 0;
3565 }
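/*
 * Sketch of the producer side of the tree torn down above (illustrative;
 * the pfn and ecc_err objects are placeholders): a UMC handler that logs a
 * new deferred-error page would typically do
 *
 *	mutex_lock(&ecc_log->lock);
 *	if (!radix_tree_insert(&ecc_log->de_page_tree, pfn, ecc_err))
 *		radix_tree_tag_set(&ecc_log->de_page_tree, pfn,
 *				   UMC_ECC_NEW_DETECTED_TAG);
 *	mutex_unlock(&ecc_log->lock);
 *
 * which is what amdgpu_ras_schedule_retirement_dwork() later polls for via
 * radix_tree_tagged().
 */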
3566
3567 static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con,
3568 uint32_t delayed_ms)
3569 {
3570 int ret;
3571
3572 mutex_lock(&con->umc_ecc_log.lock);
3573 ret = radix_tree_tagged(&con->umc_ecc_log.de_page_tree,
3574 UMC_ECC_NEW_DETECTED_TAG);
3575 mutex_unlock(&con->umc_ecc_log.lock);
3576
3577 if (ret)
3578 schedule_delayed_work(&con->page_retirement_dwork,
3579 msecs_to_jiffies(delayed_ms));
3580
3581 return ret ? true : false;
3582 }
3583
3584 static void amdgpu_ras_do_page_retirement(struct work_struct *work)
3585 {
3586 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
3587 page_retirement_dwork.work);
3588 struct amdgpu_device *adev = con->adev;
3589 struct ras_err_data err_data;
3590
3591 /* If gpu reset is ongoing, delay retiring the bad pages */
3592 if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
3593 amdgpu_ras_schedule_retirement_dwork(con,
3594 AMDGPU_RAS_RETIRE_PAGE_INTERVAL * 3);
3595 return;
3596 }
3597
3598 amdgpu_ras_error_data_init(&err_data);
3599
3600 amdgpu_umc_handle_bad_pages(adev, &err_data);
3601
3602 amdgpu_ras_error_data_fini(&err_data);
3603
3604 amdgpu_ras_schedule_retirement_dwork(con,
3605 AMDGPU_RAS_RETIRE_PAGE_INTERVAL);
3606 }
3607
3608 static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
3609 uint32_t poison_creation_count)
3610 {
3611 int ret = 0;
3612 struct ras_ecc_log_info *ecc_log;
3613 struct ras_query_if info;
3614 u32 timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
3615 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3616 u64 de_queried_count;
3617 u64 consumption_q_count;
3618 enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
3619
3620 memset(&info, 0, sizeof(info));
3621 info.head.block = AMDGPU_RAS_BLOCK__UMC;
3622
3623 ecc_log = &ras->umc_ecc_log;
3624 ecc_log->de_queried_count = 0;
3625 ecc_log->consumption_q_count = 0;
3626
3627 do {
3628 ret = amdgpu_ras_query_error_status_with_event(adev, &info, type);
3629 if (ret)
3630 return ret;
3631
3632 de_queried_count = ecc_log->de_queried_count;
3633 consumption_q_count = ecc_log->consumption_q_count;
3634
3635 if (de_queried_count && consumption_q_count)
3636 break;
3637
3638 msleep(100);
3639 } while (--timeout);
3640
3641 if (de_queried_count)
3642 schedule_delayed_work(&ras->page_retirement_dwork, 0);
3643
3644 if (amdgpu_ras_is_rma(adev) && atomic_cmpxchg(&ras->rma_in_recovery, 0, 1) == 0)
3645 amdgpu_ras_reset_gpu(adev);
3646
3647 return 0;
3648 }
3649
3650 static void amdgpu_ras_clear_poison_fifo(struct amdgpu_device *adev)
3651 {
3652 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3653 struct ras_poison_msg msg;
3654 int ret;
3655
3656 do {
3657 ret = kfifo_get(&con->poison_fifo, &msg);
3658 } while (ret);
3659 }
3660
3661 static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
3662 uint32_t msg_count, uint32_t *gpu_reset)
3663 {
3664 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3665 uint32_t reset_flags = 0, reset = 0;
3666 struct ras_poison_msg msg;
3667 int ret, i;
3668
3669 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
3670
3671 for (i = 0; i < msg_count; i++) {
3672 ret = amdgpu_ras_get_poison_req(adev, &msg);
3673 if (!ret)
3674 continue;
3675
3676 if (msg.pasid_fn)
3677 msg.pasid_fn(adev, msg.pasid, msg.data);
3678
3679 reset_flags |= msg.reset;
3680 }
3681
3682 /*
3683 	 * Try to ensure the poison creation handler has completed first,
3684 	 * so rma is set if the bad page count exceeds the threshold.
3685 */
3686 flush_delayed_work(&con->page_retirement_dwork);
3687
3688 /* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
3689 if (reset_flags && !amdgpu_ras_is_rma(adev)) {
3690 if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
3691 reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
3692 else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET)
3693 reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
3694 else
3695 reset = reset_flags;
3696
3697 con->gpu_reset_flags |= reset;
3698 amdgpu_ras_reset_gpu(adev);
3699
3700 *gpu_reset = reset;
3701
3702 /* Wait for gpu recovery to complete */
3703 flush_work(&con->recovery_work);
3704 }
3705
3706 return 0;
3707 }
3708
3709 static int amdgpu_ras_page_retirement_thread(void *param)
3710 {
3711 struct amdgpu_device *adev = (struct amdgpu_device *)param;
3712 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3713 uint32_t poison_creation_count, msg_count;
3714 uint32_t gpu_reset;
3715 int ret;
3716
3717 while (!kthread_should_stop()) {
3718
3719 wait_event_interruptible(con->page_retirement_wq,
3720 kthread_should_stop() ||
3721 atomic_read(&con->page_retirement_req_cnt));
3722
3723 if (kthread_should_stop())
3724 break;
3725
3726 mutex_lock(&con->poison_lock);
3727 gpu_reset = 0;
3728
3729 do {
3730 poison_creation_count = atomic_read(&con->poison_creation_count);
3731 ret = amdgpu_ras_poison_creation_handler(adev, poison_creation_count);
3732 if (ret == -EIO)
3733 break;
3734
3735 if (poison_creation_count) {
3736 atomic_sub(poison_creation_count, &con->poison_creation_count);
3737 atomic_sub(poison_creation_count, &con->page_retirement_req_cnt);
3738 }
3739 } while (atomic_read(&con->poison_creation_count) &&
3740 !atomic_read(&con->poison_consumption_count));
3741
3742 if (ret != -EIO) {
3743 msg_count = kfifo_len(&con->poison_fifo);
3744 if (msg_count) {
3745 ret = amdgpu_ras_poison_consumption_handler(adev,
3746 msg_count, &gpu_reset);
3747 if ((ret != -EIO) &&
3748 (gpu_reset != AMDGPU_RAS_GPU_RESET_MODE1_RESET))
3749 atomic_sub(msg_count, &con->page_retirement_req_cnt);
3750 }
3751 }
3752
3753 if ((ret == -EIO) || (gpu_reset == AMDGPU_RAS_GPU_RESET_MODE1_RESET)) {
3754 			/* gpu mode-1 reset is ongoing, or a ras mode-1 reset has just completed */
3755 /* Clear poison creation request */
3756 atomic_set(&con->poison_creation_count, 0);
3757 atomic_set(&con->poison_consumption_count, 0);
3758
3759 /* Clear poison fifo */
3760 amdgpu_ras_clear_poison_fifo(adev);
3761
3762 /* Clear all poison requests */
3763 atomic_set(&con->page_retirement_req_cnt, 0);
3764
3765 if (ret == -EIO) {
3766 /* Wait for mode-1 reset to complete */
3767 down_read(&adev->reset_domain->sem);
3768 up_read(&adev->reset_domain->sem);
3769 }
3770
3771 /* Wake up work to save bad pages to eeprom */
3772 schedule_delayed_work(&con->page_retirement_dwork, 0);
3773 } else if (gpu_reset) {
3774 /* gpu just completed mode-2 reset or other reset */
3775 /* Clear poison consumption messages cached in fifo */
3776 msg_count = kfifo_len(&con->poison_fifo);
3777 if (msg_count) {
3778 amdgpu_ras_clear_poison_fifo(adev);
3779 atomic_sub(msg_count, &con->page_retirement_req_cnt);
3780 }
3781
3782 atomic_set(&con->poison_consumption_count, 0);
3783
3784 /* Wake up work to save bad pages to eeprom */
3785 schedule_delayed_work(&con->page_retirement_dwork, 0);
3786 }
3787 mutex_unlock(&con->poison_lock);
3788 }
3789
3790 return 0;
3791 }
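/*
 * Sketch of the producer side this thread waits on (illustrative; the real
 * interrupt handlers live in the UMC/IP code): a poison creation interrupt
 * would typically bump the counters and wake the queue, e.g.
 *
 *	atomic_inc(&con->poison_creation_count);
 *	atomic_inc(&con->page_retirement_req_cnt);
 *	wake_up(&con->page_retirement_wq);
 */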
3792
3793 int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
3794 {
3795 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3796 struct amdgpu_ras_eeprom_control *control;
3797 int ret;
3798
3799 if (!con || amdgpu_sriov_vf(adev))
3800 return 0;
3801
3802 if (amdgpu_uniras_enabled(adev))
3803 return 0;
3804
3805 control = &con->eeprom_control;
3806 con->ras_smu_drv = amdgpu_dpm_get_ras_smu_driver(adev);
3807
3808 ret = amdgpu_ras_eeprom_init(control);
3809 control->is_eeprom_valid = !ret;
3810
3811 if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
3812 control->ras_num_pa_recs = control->ras_num_recs;
3813
3814 if (adev->umc.ras &&
3815 adev->umc.ras->get_retire_flip_bits)
3816 adev->umc.ras->get_retire_flip_bits(adev);
3817
3818 if (control->ras_num_recs && control->is_eeprom_valid) {
3819 ret = amdgpu_ras_load_bad_pages(adev);
3820 if (ret) {
3821 control->is_eeprom_valid = false;
3822 return 0;
3823 }
3824
3825 amdgpu_dpm_send_hbm_bad_pages_num(
3826 adev, control->ras_num_bad_pages);
3827
3828 if (con->update_channel_flag == true) {
3829 amdgpu_dpm_send_hbm_bad_channel_flag(
3830 adev, control->bad_channel_bitmap);
3831 con->update_channel_flag = false;
3832 }
3833
3834 /* The format action is only applied to new ASICs */
3835 if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) >= 12 &&
3836 control->tbl_hdr.version < RAS_TABLE_VER_V3)
3837 if (!amdgpu_ras_eeprom_reset_table(control))
3838 if (amdgpu_ras_save_bad_pages(adev, NULL))
3839 dev_warn(adev->dev, "Failed to format RAS EEPROM data in V3 version!\n");
3840 }
3841
3842 return 0;
3843 }
3844
3845 int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
3846 {
3847 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3848 struct ras_err_handler_data **data;
3849 u32 max_eeprom_records_count = 0;
3850 int ret;
3851
3852 if (!con || amdgpu_sriov_vf(adev))
3853 return 0;
3854
3855 /* Allow access to RAS EEPROM via debugfs, when the ASIC
3856 * supports RAS and debugfs is enabled, but when
3857 * adev->ras_enabled is unset, i.e. when "ras_enable"
3858 * module parameter is set to 0.
3859 */
3860 con->adev = adev;
3861
3862 if (!adev->ras_enabled)
3863 return 0;
3864
3865 data = &con->eh_data;
3866 *data = kzalloc_obj(**data, GFP_KERNEL);
3867 if (!*data) {
3868 ret = -ENOMEM;
3869 goto out;
3870 }
3871
3872 mutex_init(&con->recovery_lock);
3873 mutex_init(&con->poison_lock);
3874 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
3875 atomic_set(&con->in_recovery, 0);
3876 atomic_set(&con->rma_in_recovery, 0);
3877 con->eeprom_control.bad_channel_bitmap = 0;
3878
3879 max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
3880 amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
3881
3882 if (init_bp_info) {
3883 ret = amdgpu_ras_init_badpage_info(adev);
3884 if (ret)
3885 goto free;
3886 }
3887
3888 mutex_init(&con->page_rsv_lock);
3889 INIT_KFIFO(con->poison_fifo);
3890 mutex_init(&con->page_retirement_lock);
3891 init_waitqueue_head(&con->page_retirement_wq);
3892 atomic_set(&con->page_retirement_req_cnt, 0);
3893 atomic_set(&con->poison_creation_count, 0);
3894 atomic_set(&con->poison_consumption_count, 0);
3895 con->page_retirement_thread =
3896 kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
3897 if (IS_ERR(con->page_retirement_thread)) {
3898 con->page_retirement_thread = NULL;
3899 dev_warn(adev->dev, "Failed to create umc_page_retirement thread!!!\n");
3900 }
3901
3902 INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement);
3903 amdgpu_ras_ecc_log_init(&con->umc_ecc_log);
3904 #ifdef CONFIG_X86_MCE_AMD
3905 if ((adev->asic_type == CHIP_ALDEBARAN) &&
3906 (adev->gmc.xgmi.connected_to_cpu))
3907 amdgpu_register_bad_pages_mca_notifier(adev);
3908 #endif
3909 return 0;
3910
3911 free:
3912 kfree((*data)->bps);
3913 kfree(*data);
3914 con->eh_data = NULL;
3915 out:
3916 dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
3917
3918 /*
3919 	 * Except for the error-threshold-exceeded case, other failures in this
3920 	 * function do not fail amdgpu driver init.
3921 */
3922 if (!amdgpu_ras_is_rma(adev))
3923 ret = 0;
3924 else
3925 ret = -EINVAL;
3926
3927 return ret;
3928 }
3929
3930 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
3931 {
3932 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3933 struct ras_err_handler_data *data = con->eh_data;
3934 int max_flush_timeout = MAX_FLUSH_RETIRE_DWORK_TIMES;
3935 bool ret;
3936
3937 /* recovery_init failed to init it, fini is useless */
3938 if (!data)
3939 return 0;
3940
3941 /* Save all cached bad pages to eeprom */
3942 do {
3943 flush_delayed_work(&con->page_retirement_dwork);
3944 ret = amdgpu_ras_schedule_retirement_dwork(con, 0);
3945 } while (ret && max_flush_timeout--);
3946
3947 if (con->page_retirement_thread)
3948 kthread_stop(con->page_retirement_thread);
3949
3950 atomic_set(&con->page_retirement_req_cnt, 0);
3951 atomic_set(&con->poison_creation_count, 0);
3952
3953 mutex_destroy(&con->page_rsv_lock);
3954
3955 cancel_work_sync(&con->recovery_work);
3956
3957 cancel_delayed_work_sync(&con->page_retirement_dwork);
3958
3959 amdgpu_ras_ecc_log_fini(&con->umc_ecc_log);
3960
3961 mutex_lock(&con->recovery_lock);
3962 con->eh_data = NULL;
3963 kfree(data->bps);
3964 kfree(data);
3965 mutex_unlock(&con->recovery_lock);
3966
3967 amdgpu_ras_critical_region_init(adev);
3968 #ifdef CONFIG_X86_MCE_AMD
3969 amdgpu_unregister_bad_pages_mca_notifier(adev);
3970 #endif
3971 return 0;
3972 }
3973 /* recovery end */
3974
3975 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
3976 {
3977 if (amdgpu_sriov_vf(adev)) {
3978 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3979 case IP_VERSION(13, 0, 2):
3980 case IP_VERSION(13, 0, 6):
3981 case IP_VERSION(13, 0, 12):
3982 case IP_VERSION(13, 0, 14):
3983 return true;
3984 default:
3985 return false;
3986 }
3987 }
3988
3989 if (adev->asic_type == CHIP_IP_DISCOVERY) {
3990 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3991 case IP_VERSION(13, 0, 0):
3992 case IP_VERSION(13, 0, 6):
3993 case IP_VERSION(13, 0, 10):
3994 case IP_VERSION(13, 0, 12):
3995 case IP_VERSION(13, 0, 14):
3996 case IP_VERSION(14, 0, 3):
3997 return true;
3998 default:
3999 return false;
4000 }
4001 }
4002
4003 return adev->asic_type == CHIP_VEGA10 ||
4004 adev->asic_type == CHIP_VEGA20 ||
4005 adev->asic_type == CHIP_ARCTURUS ||
4006 adev->asic_type == CHIP_ALDEBARAN ||
4007 adev->asic_type == CHIP_SIENNA_CICHLID;
4008 }
4009
4010 /*
4011  * This is a workaround for the Vega20 workstation SKU:
4012  * force enable gfx ras and ignore the vbios gfx ras flag,
4013  * because GC EDC cannot be written.
4014 */
4015 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
4016 {
4017 struct atom_context *ctx = adev->mode_info.atom_context;
4018
4019 if (!ctx)
4020 return;
4021
4022 if (strnstr(ctx->vbios_pn, "D16406",
4023 sizeof(ctx->vbios_pn)) ||
4024 strnstr(ctx->vbios_pn, "D36002",
4025 sizeof(ctx->vbios_pn)))
4026 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
4027 }
4028
4029 /* Query ras capability via atomfirmware interface */
4030 static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev)
4031 {
4032 /* mem_ecc cap */
4033 if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
4034 dev_info(adev->dev, "MEM ECC is active.\n");
4035 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
4036 1 << AMDGPU_RAS_BLOCK__DF);
4037 } else {
4038 		dev_info(adev->dev, "MEM ECC is not present.\n");
4039 }
4040
4041 /* sram_ecc cap */
4042 if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
4043 dev_info(adev->dev, "SRAM ECC is active.\n");
4044 if (!amdgpu_sriov_vf(adev))
4045 adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
4046 1 << AMDGPU_RAS_BLOCK__DF);
4047 else
4048 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
4049 1 << AMDGPU_RAS_BLOCK__SDMA |
4050 1 << AMDGPU_RAS_BLOCK__GFX);
4051
4052 /*
4053 * VCN/JPEG RAS can be supported on both bare metal and
4054 * SRIOV environment
4055 */
4056 if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
4057 amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
4058 amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3) ||
4059 amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(5, 0, 1))
4060 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
4061 1 << AMDGPU_RAS_BLOCK__JPEG);
4062 else
4063 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
4064 1 << AMDGPU_RAS_BLOCK__JPEG);
4065
4066 /*
4067 * XGMI RAS is not supported if xgmi num physical nodes
4068 * is zero
4069 */
4070 if (!adev->gmc.xgmi.num_physical_nodes)
4071 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
4072 } else {
4073 		dev_info(adev->dev, "SRAM ECC is not present.\n");
4074 }
4075 }
4076
4077 /* Query poison mode from umc/df IP callbacks */
4078 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
4079 {
4080 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4081 bool df_poison, umc_poison;
4082
4083 /* poison setting is useless on SRIOV guest */
4084 if (amdgpu_sriov_vf(adev) || !con)
4085 return;
4086
4087 /* Init poison supported flag, the default value is false */
4088 if (adev->gmc.xgmi.connected_to_cpu ||
4089 adev->gmc.is_app_apu) {
4090 /* enabled by default when GPU is connected to CPU */
4091 con->poison_supported = true;
4092 } else if (adev->df.funcs &&
4093 adev->df.funcs->query_ras_poison_mode &&
4094 adev->umc.ras &&
4095 adev->umc.ras->query_ras_poison_mode) {
4096 df_poison =
4097 adev->df.funcs->query_ras_poison_mode(adev);
4098 umc_poison =
4099 adev->umc.ras->query_ras_poison_mode(adev);
4100
4101 		/* Only if poison is set in both DF and UMC can we support it */
4102 if (df_poison && umc_poison)
4103 con->poison_supported = true;
4104 else if (df_poison != umc_poison)
4105 dev_warn(adev->dev,
4106 "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
4107 df_poison, umc_poison);
4108 }
4109 }
4110
4111 /*
4112  * Check the hardware's ras ability, which is saved in hw_supported.
4113  * If the hardware does not support ras, we can skip some ras initialization
4114  * and forbid some ras operations from the IPs.
4115  * If software itself (say, a boot parameter) limits the ras ability, we still
4116  * need to allow IPs to do some limited operations, like disable. In that
4117  * case, we have to initialize ras as normal, but check in each function
4118  * whether the operation is allowed.
4119 */
4120 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
4121 {
4122 adev->ras_hw_enabled = adev->ras_enabled = 0;
4123
4124 if (!amdgpu_ras_asic_supported(adev))
4125 return;
4126
4127 if (amdgpu_sriov_vf(adev)) {
4128 if (amdgpu_virt_get_ras_capability(adev))
4129 goto init_ras_enabled_flag;
4130 }
4131
4132 /* query ras capability from psp */
4133 if (amdgpu_psp_get_ras_capability(&adev->psp))
4134 goto init_ras_enabled_flag;
4135
4136 	/* query ras capability from vbios */
4137 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
4138 amdgpu_ras_query_ras_capablity_from_vbios(adev);
4139 } else {
4140 		/* driver only manages the RAS feature of a few IP blocks
4141 		 * when the GPU is connected to the CPU through XGMI */
4142 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
4143 1 << AMDGPU_RAS_BLOCK__SDMA |
4144 1 << AMDGPU_RAS_BLOCK__MMHUB);
4145 }
4146
4147 /* apply asic specific settings (vega20 only for now) */
4148 amdgpu_ras_get_quirks(adev);
4149
4150 /* query poison mode from umc/df ip callback */
4151 amdgpu_ras_query_poison_mode(adev);
4152
4153 init_ras_enabled_flag:
4154 /* hw_supported needs to be aligned with RAS block mask. */
4155 adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
4156
4157 adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
4158 adev->ras_hw_enabled & amdgpu_ras_mask;
4159
4160 /* aca is disabled by default except for psp v13_0_6/v13_0_12/v13_0_14 */
4161 if (!amdgpu_sriov_vf(adev)) {
4162 adev->aca.is_enabled =
4163 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
4164 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
4165 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14));
4166 }
4167
4168 /* bad page feature is not applicable to specific app platform */
4169 if (adev->gmc.is_app_apu &&
4170 amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(12, 0, 0))
4171 amdgpu_bad_page_threshold = 0;
4172 }
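/*
 * Worked example of the flag math above (illustrative values): if the vbios
 * query leaves ras_hw_enabled with, say, the UMC, DF, GFX and SDMA bits set
 * and the amdgpu_ras_mask module parameter is 0x1 (UMC only), then
 *
 *	adev->ras_enabled = ras_hw_enabled & amdgpu_ras_mask = UMC only
 *
 * while amdgpu_ras_enable == 0 forces ras_enabled to 0 regardless of the
 * hardware capability.
 */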
4173
4174 static void amdgpu_ras_counte_dw(struct work_struct *work)
4175 {
4176 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
4177 ras_counte_delay_work.work);
4178 struct amdgpu_device *adev = con->adev;
4179 struct drm_device *dev = adev_to_drm(adev);
4180 unsigned long ce_count, ue_count;
4181 int res;
4182
4183 res = pm_runtime_get_sync(dev->dev);
4184 if (res < 0)
4185 goto Out;
4186
4187 /* Cache new values.
4188 */
4189 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
4190 atomic_set(&con->ras_ce_count, ce_count);
4191 atomic_set(&con->ras_ue_count, ue_count);
4192 }
4193
4194 Out:
4195 pm_runtime_put_autosuspend(dev->dev);
4196 }
4197
4198 static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
4199 {
4200 	return (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) |
4201 AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
4202 AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
4203 AMDGPU_RAS_ERROR__PARITY;
4204 }
4205
4206 static void ras_event_mgr_init(struct ras_event_manager *mgr)
4207 {
4208 struct ras_event_state *event_state;
4209 int i;
4210
4211 memset(mgr, 0, sizeof(*mgr));
4212 atomic64_set(&mgr->seqno, 0);
4213
4214 for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) {
4215 event_state = &mgr->event_state[i];
4216 event_state->last_seqno = RAS_EVENT_INVALID_ID;
4217 atomic64_set(&event_state->count, 0);
4218 }
4219 }
4220
4221 static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev)
4222 {
4223 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4224 struct amdgpu_hive_info *hive;
4225
4226 if (!ras)
4227 return;
4228
4229 hive = amdgpu_get_xgmi_hive(adev);
4230 ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr;
4231
4232 /* init event manager with node 0 on xgmi system */
4233 if (!amdgpu_reset_in_recovery(adev)) {
4234 if (!hive || adev->gmc.xgmi.node_id == 0)
4235 ras_event_mgr_init(ras->event_mgr);
4236 }
4237
4238 if (hive)
4239 amdgpu_put_xgmi_hive(hive);
4240 }
4241
4242 static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev)
4243 {
4244 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4245
4246 if (!con || (adev->flags & AMD_IS_APU))
4247 return;
4248
4249 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
4250 case IP_VERSION(13, 0, 2):
4251 case IP_VERSION(13, 0, 6):
4252 case IP_VERSION(13, 0, 12):
4253 con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT;
4254 break;
4255 case IP_VERSION(13, 0, 14):
4256 con->reserved_pages_in_bytes = (AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT << 1);
4257 break;
4258 default:
4259 break;
4260 }
4261 }
4262
4263 int amdgpu_ras_init(struct amdgpu_device *adev)
4264 {
4265 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4266 int r;
4267
4268 if (con)
4269 return 0;
4270
4271 con = kzalloc(sizeof(*con) +
4272 sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
4273 sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
4274 GFP_KERNEL);
4275 if (!con)
4276 return -ENOMEM;
4277
4278 con->adev = adev;
4279 INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
4280 atomic_set(&con->ras_ce_count, 0);
4281 atomic_set(&con->ras_ue_count, 0);
4282
4283 con->objs = (struct ras_manager *)(con + 1);
4284
4285 amdgpu_ras_set_context(adev, con);
4286
4287 amdgpu_ras_check_supported(adev);
4288
4289 if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
4290 /* set gfx block ras context feature for VEGA20 Gaming
4291 * send ras disable cmd to ras ta during ras late init.
4292 */
4293 if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
4294 con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
4295
4296 return 0;
4297 }
4298
4299 r = 0;
4300 goto release_con;
4301 }
4302
4303 con->update_channel_flag = false;
4304 con->features = 0;
4305 con->schema = 0;
4306 INIT_LIST_HEAD(&con->head);
4307 	/* Might need to get this flag from vbios. */
4308 con->flags = RAS_DEFAULT_FLAGS;
4309
4310 /* initialize nbio ras function ahead of any other
4311 * ras functions so hardware fatal error interrupt
4312 * can be enabled as early as possible */
4313 switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
4314 case IP_VERSION(7, 4, 0):
4315 case IP_VERSION(7, 4, 1):
4316 case IP_VERSION(7, 4, 4):
4317 if (!adev->gmc.xgmi.connected_to_cpu)
4318 adev->nbio.ras = &nbio_v7_4_ras;
4319 break;
4320 case IP_VERSION(4, 3, 0):
4321 if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
4322 			/* unlike other generations of nbio ras,
4323 			 * nbio v4_3 only supports a fatal error interrupt
4324 			 * to inform software that DF is frozen due to a
4325 			 * system fatal error event. driver should not
4326 			 * enable nbio ras in such case. Instead,
4327 			 * check DF RAS */
4328 adev->nbio.ras = &nbio_v4_3_ras;
4329 break;
4330 case IP_VERSION(6, 3, 1):
4331 if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
4332 			/* unlike other generations of nbio ras,
4333 			 * nbif v6_3_1 only supports a fatal error interrupt
4334 			 * to inform software that DF is frozen due to a
4335 			 * system fatal error event. driver should not
4336 			 * enable nbio ras in such case. Instead,
4337 			 * check DF RAS
4338 */
4339 adev->nbio.ras = &nbif_v6_3_1_ras;
4340 break;
4341 case IP_VERSION(7, 9, 0):
4342 case IP_VERSION(7, 9, 1):
4343 if (!adev->gmc.is_app_apu)
4344 adev->nbio.ras = &nbio_v7_9_ras;
4345 break;
4346 default:
4347 /* nbio ras is not available */
4348 break;
4349 }
4350
4351 /* nbio ras block needs to be enabled ahead of other ras blocks
4352 * to handle fatal error */
4353 r = amdgpu_nbio_ras_sw_init(adev);
4354 if (r)
4355 goto release_con;
4356
4357 if (adev->nbio.ras &&
4358 adev->nbio.ras->init_ras_controller_interrupt) {
4359 r = adev->nbio.ras->init_ras_controller_interrupt(adev);
4360 if (r)
4361 goto release_con;
4362 }
4363
4364 if (adev->nbio.ras &&
4365 adev->nbio.ras->init_ras_err_event_athub_interrupt) {
4366 r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
4367 if (r)
4368 goto release_con;
4369 }
4370
4371 	/* Pack socket_id into ras feature mask bits[31:29] */
4372 if (adev->smuio.funcs &&
4373 adev->smuio.funcs->get_socket_id)
4374 con->features |= ((adev->smuio.funcs->get_socket_id(adev)) <<
4375 AMDGPU_RAS_FEATURES_SOCKETID_SHIFT);
4376
4377 /* Get RAS schema for particular SOC */
4378 con->schema = amdgpu_get_ras_schema(adev);
4379
4380 amdgpu_ras_init_reserved_vram_size(adev);
4381
4382 if (amdgpu_ras_fs_init(adev)) {
4383 r = -EINVAL;
4384 goto release_con;
4385 }
4386
4387 if (amdgpu_ras_aca_is_supported(adev)) {
4388 if (amdgpu_aca_is_enabled(adev))
4389 r = amdgpu_aca_init(adev);
4390 else
4391 r = amdgpu_mca_init(adev);
4392 if (r)
4393 goto release_con;
4394 }
4395
4396 con->init_task_pid = task_pid_nr(current);
4397 get_task_comm(con->init_task_comm, current);
4398
4399 mutex_init(&con->critical_region_lock);
4400 INIT_LIST_HEAD(&con->critical_region_head);
4401
4402 dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
4403 "hardware ability[%x] ras_mask[%x]\n",
4404 adev->ras_hw_enabled, adev->ras_enabled);
4405
4406 return 0;
4407 release_con:
4408 amdgpu_ras_set_context(adev, NULL);
4409 kfree(con);
4410
4411 return r;
4412 }
4413
4414 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
4415 {
4416 if (adev->gmc.xgmi.connected_to_cpu ||
4417 adev->gmc.is_app_apu)
4418 return 1;
4419 return 0;
4420 }
4421
4422 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
4423 struct ras_common_if *ras_block)
4424 {
4425 struct ras_query_if info = {
4426 .head = *ras_block,
4427 };
4428
4429 if (!amdgpu_persistent_edc_harvesting_supported(adev))
4430 return 0;
4431
4432 if (amdgpu_ras_query_error_status(adev, &info) != 0)
4433 drm_warn(adev_to_drm(adev), "RAS init query failure");
4434
4435 if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
4436 drm_warn(adev_to_drm(adev), "RAS init harvest reset failure");
4437
4438 return 0;
4439 }
4440
4441 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
4442 {
4443 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4444
4445 if (!con)
4446 return false;
4447
4448 return con->poison_supported;
4449 }
4450
4451 /* helper function to handle common stuff in ip late init phase */
4452 int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
4453 struct ras_common_if *ras_block)
4454 {
4455 struct amdgpu_ras_block_object *ras_obj = NULL;
4456 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4457 struct ras_query_if *query_info;
4458 unsigned long ue_count, ce_count;
4459 int r;
4460
4461 /* disable RAS feature per IP block if it is not supported */
4462 if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
4463 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
4464 return 0;
4465 }
4466
4467 r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
4468 if (r) {
4469 if (adev->in_suspend || amdgpu_reset_in_recovery(adev)) {
4470 			/* in resume phase, if we fail to enable ras,
4471 			 * clean up all ras fs nodes, and disable ras */
4472 goto cleanup;
4473 } else
4474 return r;
4475 }
4476
4477 	/* check for errors on warm reset on ASICs that support persistent EDC harvesting */
4478 amdgpu_persistent_edc_harvesting(adev, ras_block);
4479
4480 /* in resume phase, no need to create ras fs node */
4481 if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
4482 return 0;
4483
4484 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
4485 if (ras_obj->ras_cb || (ras_obj->hw_ops &&
4486 (ras_obj->hw_ops->query_poison_status ||
4487 ras_obj->hw_ops->handle_poison_consumption))) {
4488 r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
4489 if (r)
4490 goto cleanup;
4491 }
4492
4493 if (ras_obj->hw_ops &&
4494 (ras_obj->hw_ops->query_ras_error_count ||
4495 ras_obj->hw_ops->query_ras_error_status)) {
4496 r = amdgpu_ras_sysfs_create(adev, ras_block);
4497 if (r)
4498 goto interrupt;
4499
4500 /* Those are the cached values at init.
4501 */
4502 query_info = kzalloc_obj(*query_info, GFP_KERNEL);
4503 if (!query_info)
4504 return -ENOMEM;
4505 memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
4506
4507 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
4508 atomic_set(&con->ras_ce_count, ce_count);
4509 atomic_set(&con->ras_ue_count, ue_count);
4510 }
4511
4512 kfree(query_info);
4513 }
4514
4515 return 0;
4516
4517 interrupt:
4518 if (ras_obj->ras_cb)
4519 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
4520 cleanup:
4521 amdgpu_ras_feature_enable(adev, ras_block, 0);
4522 return r;
4523 }
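/*
 * Typical use from an IP block (an illustrative sketch; my_block_ras_late_init
 * and my_block_specific_setup are hypothetical names, not functions in this
 * driver): a block that needs its own ras_late_init hook usually just wraps
 * this helper,
 *
 *	static int my_block_ras_late_init(struct amdgpu_device *adev,
 *					  struct ras_common_if *ras_block)
 *	{
 *		int r = amdgpu_ras_block_late_init(adev, ras_block);
 *
 *		if (r)
 *			return r;
 *
 *		return my_block_specific_setup(adev);
 *	}
 */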
4524
4525 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
4526 struct ras_common_if *ras_block)
4527 {
4528 return amdgpu_ras_block_late_init(adev, ras_block);
4529 }
4530
4531 /* helper function to remove ras fs node and interrupt handler */
4532 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
4533 struct ras_common_if *ras_block)
4534 {
4535 struct amdgpu_ras_block_object *ras_obj;
4536 if (!ras_block)
4537 return;
4538
4539 amdgpu_ras_sysfs_remove(adev, ras_block);
4540
4541 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
4542 if (ras_obj->ras_cb)
4543 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
4544 }
4545
4546 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
4547 struct ras_common_if *ras_block)
4548 {
4549 return amdgpu_ras_block_late_fini(adev, ras_block);
4550 }
4551
4552 /* Do some init work after IP late init, as a dependence.
4553  * It runs in the resume, gpu reset and boot-up cases.
4554 */
4555 void amdgpu_ras_resume(struct amdgpu_device *adev)
4556 {
4557 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4558 struct ras_manager *obj, *tmp;
4559
4560 if (!adev->ras_enabled || !con) {
4561 		/* clean ras context for VEGA20 Gaming after sending ras disable cmd */
4562 amdgpu_release_ras_context(adev);
4563
4564 return;
4565 }
4566
4567 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
4568 		/* Set up all other IPs which are not implemented. The tricky
4569 		 * part is that an IP's actual ras error type should be
4570 		 * MULTI_UNCORRECTABLE, but since the driver does not handle it,
4571 		 * ERROR_NONE makes sense anyway.
4572 */
4573 amdgpu_ras_enable_all_features(adev, 1);
4574
4575 		/* We enable ras on all hw_supported blocks, but the boot
4576 		 * parameter might disable some of them, and one or more IPs
4577 		 * may not be implemented yet. So we disable those on their behalf.
4578 */
4579 list_for_each_entry_safe(obj, tmp, &con->head, node) {
4580 if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
4581 amdgpu_ras_feature_enable(adev, &obj->head, 0);
4582 				/* there should not be any reference. */
4583 WARN_ON(alive_obj(obj));
4584 }
4585 }
4586 }
4587 }
4588
4589 void amdgpu_ras_suspend(struct amdgpu_device *adev)
4590 {
4591 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4592
4593 if (!adev->ras_enabled || !con)
4594 return;
4595
4596 amdgpu_ras_disable_all_features(adev, 0);
4597 /* Make sure all ras objects are disabled. */
4598 if (AMDGPU_RAS_GET_FEATURES(con->features))
4599 amdgpu_ras_disable_all_features(adev, 1);
4600 }
4601
4602 int amdgpu_ras_late_init(struct amdgpu_device *adev)
4603 {
4604 struct amdgpu_ras_block_list *node, *tmp;
4605 struct amdgpu_ras_block_object *obj;
4606 int r;
4607
4608 amdgpu_ras_event_mgr_init(adev);
4609
4610 if (amdgpu_ras_aca_is_supported(adev)) {
4611 if (amdgpu_reset_in_recovery(adev)) {
4612 if (amdgpu_aca_is_enabled(adev))
4613 r = amdgpu_aca_reset(adev);
4614 else
4615 r = amdgpu_mca_reset(adev);
4616 if (r)
4617 return r;
4618 }
4619
4620 if (!amdgpu_sriov_vf(adev)) {
4621 if (amdgpu_aca_is_enabled(adev))
4622 amdgpu_ras_set_aca_debug_mode(adev, false);
4623 else
4624 amdgpu_ras_set_mca_debug_mode(adev, false);
4625 }
4626 }
4627
4628 	/* Guest side doesn't need to init the ras feature */
4629 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_ras_telemetry_en(adev))
4630 return 0;
4631
4632 list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
4633 obj = node->ras_obj;
4634 if (!obj) {
4635 dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
4636 continue;
4637 }
4638
4639 if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block))
4640 continue;
4641
4642 if (obj->ras_late_init) {
4643 r = obj->ras_late_init(adev, &obj->ras_comm);
4644 if (r) {
4645 dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
4646 obj->ras_comm.name, r);
4647 return r;
4648 }
4649 } else
4650 amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
4651 }
4652
4653 amdgpu_ras_check_bad_page_status(adev);
4654
4655 return 0;
4656 }
4657
4658 /* do some fini work before IP fini, as a dependence */
4659 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
4660 {
4661 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4662
4663 if (!adev->ras_enabled || !con)
4664 return 0;
4665
4666
4667 	/* Need to disable ras on all IPs here before ip [hw/sw]fini */
4668 if (AMDGPU_RAS_GET_FEATURES(con->features))
4669 amdgpu_ras_disable_all_features(adev, 0);
4670 amdgpu_ras_recovery_fini(adev);
4671 return 0;
4672 }
4673
4674 int amdgpu_ras_fini(struct amdgpu_device *adev)
4675 {
4676 struct amdgpu_ras_block_list *ras_node, *tmp;
4677 struct amdgpu_ras_block_object *obj = NULL;
4678 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4679
4680 if (!adev->ras_enabled || !con)
4681 return 0;
4682
4683 amdgpu_ras_critical_region_fini(adev);
4684 mutex_destroy(&con->critical_region_lock);
4685
4686 list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
4687 if (ras_node->ras_obj) {
4688 obj = ras_node->ras_obj;
4689 if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
4690 obj->ras_fini)
4691 obj->ras_fini(adev, &obj->ras_comm);
4692 else
4693 amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
4694 }
4695
4696 /* Clear ras blocks from ras_list and free ras block list node */
4697 list_del(&ras_node->node);
4698 kfree(ras_node);
4699 }
4700
4701 amdgpu_ras_fs_fini(adev);
4702 amdgpu_ras_interrupt_remove_all(adev);
4703
4704 if (amdgpu_ras_aca_is_supported(adev)) {
4705 if (amdgpu_aca_is_enabled(adev))
4706 amdgpu_aca_fini(adev);
4707 else
4708 amdgpu_mca_fini(adev);
4709 }
4710
4711 WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");
4712
4713 if (AMDGPU_RAS_GET_FEATURES(con->features))
4714 amdgpu_ras_disable_all_features(adev, 0);
4715
4716 cancel_delayed_work_sync(&con->ras_counte_delay_work);
4717
4718 amdgpu_ras_set_context(adev, NULL);
4719 kfree(con);
4720
4721 return 0;
4722 }
4723
4724 bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
4725 {
4726 struct amdgpu_ras *ras;
4727
4728 ras = amdgpu_ras_get_context(adev);
4729 if (!ras)
4730 return false;
4731
4732 return test_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
4733 }
4734
4735 void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
4736 {
4737 struct amdgpu_ras *ras;
4738
4739 ras = amdgpu_ras_get_context(adev);
4740 if (ras) {
4741 if (status)
4742 set_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
4743 else
4744 clear_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
4745 }
4746 }
4747
4748 void amdgpu_ras_clear_err_state(struct amdgpu_device *adev)
4749 {
4750 struct amdgpu_ras *ras;
4751
4752 ras = amdgpu_ras_get_context(adev);
4753 if (ras) {
4754 ras->ras_err_state = 0;
4755 ras->gpu_reset_flags = 0;
4756 }
4757 }
4758
4759 void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
4760 enum amdgpu_ras_block block)
4761 {
4762 struct amdgpu_ras *ras;
4763
4764 ras = amdgpu_ras_get_context(adev);
4765 if (ras)
4766 set_bit(block, &ras->ras_err_state);
4767 }
4768
4769 bool amdgpu_ras_is_err_state(struct amdgpu_device *adev, int block)
4770 {
4771 struct amdgpu_ras *ras;
4772
4773 ras = amdgpu_ras_get_context(adev);
4774 if (ras) {
4775 if (block == AMDGPU_RAS_BLOCK__ANY)
4776 return (ras->ras_err_state != 0);
4777 else
4778 return test_bit(block, &ras->ras_err_state) ||
4779 test_bit(AMDGPU_RAS_BLOCK__LAST,
4780 &ras->ras_err_state);
4781 }
4782
4783 return false;
4784 }
4785
4786 static struct ras_event_manager *__get_ras_event_mgr(struct amdgpu_device *adev)
4787 {
4788 struct amdgpu_ras *ras;
4789
4790 ras = amdgpu_ras_get_context(adev);
4791 if (!ras)
4792 return NULL;
4793
4794 return ras->event_mgr;
4795 }
4796
4797 int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type,
4798 const void *caller)
4799 {
4800 struct ras_event_manager *event_mgr;
4801 struct ras_event_state *event_state;
4802 int ret = 0;
4803
4804 if (amdgpu_uniras_enabled(adev))
4805 return 0;
4806
4807 if (type >= RAS_EVENT_TYPE_COUNT) {
4808 ret = -EINVAL;
4809 goto out;
4810 }
4811
4812 event_mgr = __get_ras_event_mgr(adev);
4813 if (!event_mgr) {
4814 ret = -EINVAL;
4815 goto out;
4816 }
4817
4818 event_state = &event_mgr->event_state[type];
4819 event_state->last_seqno = atomic64_inc_return(&event_mgr->seqno);
4820 atomic64_inc(&event_state->count);
4821
4822 out:
4823 if (ret && caller)
4824 dev_warn(adev->dev, "failed mark ras event (%d) in %ps, ret:%d\n",
4825 (int)type, caller, ret);
4826
4827 return ret;
4828 }
4829
4830 u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type)
4831 {
4832 struct ras_event_manager *event_mgr;
4833 u64 id;
4834
4835 if (type >= RAS_EVENT_TYPE_COUNT)
4836 return RAS_EVENT_INVALID_ID;
4837
4838 switch (type) {
4839 case RAS_EVENT_TYPE_FATAL:
4840 case RAS_EVENT_TYPE_POISON_CREATION:
4841 case RAS_EVENT_TYPE_POISON_CONSUMPTION:
4842 event_mgr = __get_ras_event_mgr(adev);
4843 if (!event_mgr)
4844 return RAS_EVENT_INVALID_ID;
4845
4846 id = event_mgr->event_state[type].last_seqno;
4847 break;
4848 case RAS_EVENT_TYPE_INVALID:
4849 default:
4850 id = RAS_EVENT_INVALID_ID;
4851 break;
4852 }
4853
4854 return id;
4855 }
4856
4857 int amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
4858 {
4859 if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
4860 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4861 enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
4862 u64 event_id = RAS_EVENT_INVALID_ID;
4863
4864 if (amdgpu_uniras_enabled(adev))
4865 return 0;
4866
4867 if (!amdgpu_ras_mark_ras_event(adev, type))
4868 event_id = amdgpu_ras_acquire_event_id(adev, type);
4869
4870 RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error"
4871 "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
4872
4873 amdgpu_ras_set_fed(adev, true);
4874 ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
4875 amdgpu_ras_reset_gpu(adev);
4876 }
4877
4878 return -EBUSY;
4879 }
4880
4881 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
4882 {
4883 if (adev->asic_type == CHIP_VEGA20 &&
4884 adev->pm.fw_version <= 0x283400) {
4885 return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
4886 amdgpu_ras_intr_triggered();
4887 }
4888
4889 return false;
4890 }
4891
4892 void amdgpu_release_ras_context(struct amdgpu_device *adev)
4893 {
4894 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4895
4896 if (!con)
4897 return;
4898
4899 if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
4900 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
4901 amdgpu_ras_set_context(adev, NULL);
4902 kfree(con);
4903 }
4904 }
4905
4906 #ifdef CONFIG_X86_MCE_AMD
4907 static struct amdgpu_device *find_adev(uint32_t node_id)
4908 {
4909 int i;
4910 struct amdgpu_device *adev = NULL;
4911
4912 for (i = 0; i < mce_adev_list.num_gpu; i++) {
4913 adev = mce_adev_list.devs[i];
4914
4915 if (adev && adev->gmc.xgmi.connected_to_cpu &&
4916 adev->gmc.xgmi.physical_node_id == node_id)
4917 break;
4918 adev = NULL;
4919 }
4920
4921 return adev;
4922 }
4923
4924 #define GET_MCA_IPID_GPUID(m) (((m) >> 44) & 0xF)
4925 #define GET_UMC_INST(m) (((m) >> 21) & 0x7)
4926 #define GET_CHAN_INDEX(m) ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
4927 #define GPU_ID_OFFSET 8
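/*
 * Worked decode of the MCA_IPID fields above (illustrative register value):
 * the GPU id comes from bits [47:44], the UMC instance from bits [23:21] and
 * the channel index from bits [13:12] plus bit 20.  For example, an ipid of
 * 0x0000962000451000 gives
 *
 *	GET_MCA_IPID_GPUID(ipid) = 0x9, so gpu_id = 0x9 - GPU_ID_OFFSET = 1
 *	GET_UMC_INST(ipid)       = ((ipid >> 21) & 0x7) = 2
 *	GET_CHAN_INDEX(ipid)     = (((ipid >> 12) & 0x3) | ((ipid >> 18) & 0x4)) = 1
 */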
4928
4929 static int amdgpu_bad_page_notifier(struct notifier_block *nb,
4930 unsigned long val, void *data)
4931 {
4932 struct mce *m = (struct mce *)data;
4933 struct amdgpu_device *adev = NULL;
4934 uint32_t gpu_id = 0;
4935 uint32_t umc_inst = 0, ch_inst = 0;
4936
4937 /*
4938 * If the error was generated in UMC_V2, which belongs to GPU UMCs,
4939 	 * and the error occurred in DramECC (Extended error code = 0), then
4940 	 * process the error; otherwise bail out.
4941 */
4942 if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
4943 (XEC(m->status, 0x3f) == 0x0)))
4944 return NOTIFY_DONE;
4945
4946 /*
4947 * If it is correctable error, return.
4948 */
4949 if (mce_is_correctable(m))
4950 return NOTIFY_OK;
4951
4952 /*
4953 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
4954 */
4955 gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
4956
4957 adev = find_adev(gpu_id);
4958 if (!adev) {
4959 DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
4960 gpu_id);
4961 return NOTIFY_DONE;
4962 }
4963
4964 /*
4965 * If it is uncorrectable error, then find out UMC instance and
4966 * channel index.
4967 */
4968 umc_inst = GET_UMC_INST(m->ipid);
4969 ch_inst = GET_CHAN_INDEX(m->ipid);
4970
4971 dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
4972 umc_inst, ch_inst);
4973
4974 if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
4975 return NOTIFY_OK;
4976 else
4977 return NOTIFY_DONE;
4978 }
4979
4980 static struct notifier_block amdgpu_bad_page_nb = {
4981 .notifier_call = amdgpu_bad_page_notifier,
4982 .priority = MCE_PRIO_UC,
4983 };
4984
4985 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
4986 {
4987 /*
4988 * Add the adev to the mce_adev_list.
4989 * During mode2 reset, amdgpu device is temporarily
4990 * removed from the mgpu_info list which can cause
4991 * page retirement to fail.
4992 * Use this list instead of mgpu_info to find the amdgpu
4993 * device on which the UMC error was reported.
4994 */
4995 mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
4996
4997 /*
4998 * Register the x86 notifier only once
4999 * with MCE subsystem.
5000 */
5001 if (notifier_registered == false) {
5002 mce_register_decode_chain(&amdgpu_bad_page_nb);
5003 notifier_registered = true;
5004 }
5005 }
5006 static void amdgpu_unregister_bad_pages_mca_notifier(struct amdgpu_device *adev)
5007 {
5008 int i, j;
5009
5010 if (!notifier_registered && !mce_adev_list.num_gpu)
5011 return;
5012 for (i = 0, j = 0; i < mce_adev_list.num_gpu; i++) {
5013 if (mce_adev_list.devs[i] == adev)
5014 mce_adev_list.devs[i] = NULL;
5015 if (!mce_adev_list.devs[i])
5016 ++j;
5017 }
5018
5019 if (j == mce_adev_list.num_gpu) {
5020 mce_adev_list.num_gpu = 0;
5021 /* Unregister x86 notifier with MCE subsystem. */
5022 if (notifier_registered) {
5023 mce_unregister_decode_chain(&amdgpu_bad_page_nb);
5024 notifier_registered = false;
5025 }
5026 }
5027 }
5028 #endif
5029
5030 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
5031 {
5032 if (!adev)
5033 return NULL;
5034
5035 return adev->psp.ras_context.ras;
5036 }
5037
5038 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
5039 {
5040 if (!adev)
5041 return -EINVAL;
5042
5043 adev->psp.ras_context.ras = ras_con;
5044 return 0;
5045 }
5046
5047 /* check if ras is supported on block, say, sdma, gfx */
5048 int amdgpu_ras_is_supported(struct amdgpu_device *adev,
5049 unsigned int block)
5050 {
5051 int ret = 0;
5052 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5053
5054 if (block >= AMDGPU_RAS_BLOCK_COUNT)
5055 return 0;
5056
5057 ret = ras && (adev->ras_enabled & (1 << block));
5058
5059 	/* For the special asic with mem ecc enabled but sram ecc
5060 	 * not enabled, even if the ras block is not reported in
5061 	 * .ras_enabled, the block can still be considered to support
5062 	 * the ras function as long as the asic supports poison mode
5063 	 * and the ras block has a ras configuration.
5064 */
5065 if (!ret &&
5066 (block == AMDGPU_RAS_BLOCK__GFX ||
5067 block == AMDGPU_RAS_BLOCK__SDMA ||
5068 block == AMDGPU_RAS_BLOCK__VCN ||
5069 block == AMDGPU_RAS_BLOCK__JPEG) &&
5070 (amdgpu_ras_mask & (1 << block)) &&
5071 amdgpu_ras_is_poison_mode_supported(adev) &&
5072 amdgpu_ras_get_ras_block(adev, block, 0))
5073 ret = 1;
5074
5075 return ret;
5076 }
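/*
 * Example check (illustrative): callers typically gate block specific work
 * on this helper, e.g.
 *
 *	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
 *		return 0;
 */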
5077
5078 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
5079 {
5080 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5081
5082 /* mode1 is the only selection for RMA status */
5083 if (amdgpu_ras_is_rma(adev)) {
5084 ras->gpu_reset_flags = 0;
5085 ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
5086 }
5087
5088 if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) {
5089 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
5090 int hive_ras_recovery = 0;
5091
5092 if (hive) {
5093 hive_ras_recovery = atomic_read(&hive->ras_recovery);
5094 amdgpu_put_xgmi_hive(hive);
5095 }
5096 /* In the case of multiple GPUs, after a GPU has started
5097 * resetting all GPUs on hive, other GPUs do not need to
5098 * trigger GPU reset again.
5099 */
5100 if (!hive_ras_recovery)
5101 amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
5102 else
5103 atomic_set(&ras->in_recovery, 0);
5104 } else {
5105 flush_work(&ras->recovery_work);
5106 amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
5107 }
5108
5109 return 0;
5110 }
5111
5112 int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
5113 {
5114 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5115 int ret = 0;
5116
5117 if (con) {
5118 ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
5119 if (!ret)
5120 con->is_aca_debug_mode = enable;
5121 }
5122
5123 return ret;
5124 }
5125
5126 int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable)
5127 {
5128 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5129 int ret = 0;
5130
5131 if (con) {
5132 if (amdgpu_aca_is_enabled(adev))
5133 ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
5134 else
5135 ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
5136 if (!ret)
5137 con->is_aca_debug_mode = enable;
5138 }
5139
5140 return ret;
5141 }
5142
5143 bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev)
5144 {
5145 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5146 const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
5147 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
5148
5149 if (!con)
5150 return false;
5151
5152 if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) ||
5153 (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode))
5154 return con->is_aca_debug_mode;
5155 else
5156 return true;
5157 }
5158
5159 bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
5160 unsigned int *error_query_mode)
5161 {
5162 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5163 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
5164 const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
5165
5166 if (!con) {
5167 *error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
5168 return false;
5169 }
5170
5171 if (amdgpu_sriov_vf(adev)) {
5172 *error_query_mode = AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY;
5173 } else if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) {
5174 *error_query_mode =
5175 (con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
5176 } else {
5177 *error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
5178 }
5179
5180 return true;
5181 }
5182
5183 /* Register each ip ras block into amdgpu ras */
amdgpu_ras_register_ras_block(struct amdgpu_device * adev,struct amdgpu_ras_block_object * ras_block_obj)5184 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
5185 struct amdgpu_ras_block_object *ras_block_obj)
5186 {
5187 struct amdgpu_ras_block_list *ras_node;
5188 if (!adev || !ras_block_obj)
5189 return -EINVAL;
5190
5191 ras_node = kzalloc_obj(*ras_node, GFP_KERNEL);
5192 if (!ras_node)
5193 return -ENOMEM;
5194
5195 INIT_LIST_HEAD(&ras_node->node);
5196 ras_node->ras_obj = ras_block_obj;
5197 list_add_tail(&ras_node->node, &adev->ras_list);
5198
5199 return 0;
5200 }
5201
amdgpu_ras_get_error_type_name(uint32_t err_type,char * err_type_name)5202 void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
5203 {
5204 if (!err_type_name)
5205 return;
5206
5207 switch (err_type) {
5208 case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
5209 sprintf(err_type_name, "correctable");
5210 break;
5211 case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
5212 sprintf(err_type_name, "uncorrectable");
5213 break;
5214 default:
5215 sprintf(err_type_name, "unknown");
5216 break;
5217 }
5218 }
5219
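/* Read the per-instance err_status_lo register described by @reg_entry and
 * extract the memory_id field; returns false when the entry requires a valid
 * flag and the hardware did not set it.
 */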
bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
					 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
					 uint32_t instance,
					 uint32_t *memory_id)
{
	uint32_t err_status_lo_data, err_status_lo_offset;

	if (!reg_entry)
		return false;

	err_status_lo_offset =
		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
					    reg_entry->seg_lo, reg_entry->reg_lo);
	err_status_lo_data = RREG32(err_status_lo_offset);

	if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
	    !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
		return false;

	*memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);

	return true;
}

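/* Read the per-instance err_status_hi register described by @reg_entry and
 * extract the error count; an invalid err_info field is only reported via
 * dev_dbg and does not fail the query.
 */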
bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
				       const struct amdgpu_ras_err_status_reg_entry *reg_entry,
				       uint32_t instance,
				       unsigned long *err_cnt)
{
	uint32_t err_status_hi_data, err_status_hi_offset;

	if (!reg_entry)
		return false;

	err_status_hi_offset =
		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
					    reg_entry->seg_hi, reg_entry->reg_hi);
	err_status_hi_data = RREG32(err_status_hi_offset);

	if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
	    !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
		/* keep the check here in case we need to refer to the result later */
		dev_dbg(adev->dev, "Invalid err_info field\n");

	/* read err count */
	*err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);

	return true;
}

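/* Walk @reg_list for one block instance, accumulate the per-register error
 * counts into @err_count and log each non-zero hit; when @mem_list is
 * provided, the memory_id is translated into a human readable memory block
 * name.
 */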
void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
					   uint32_t reg_list_size,
					   const struct amdgpu_ras_memory_id_entry *mem_list,
					   uint32_t mem_list_size,
					   uint32_t instance,
					   uint32_t err_type,
					   unsigned long *err_count)
{
	uint32_t memory_id;
	unsigned long err_cnt;
	char err_type_name[16];
	uint32_t i, j;

	for (i = 0; i < reg_list_size; i++) {
		/* query memory_id from err_status_lo */
		if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
							 instance, &memory_id))
			continue;

		/* query err_cnt from err_status_hi */
		if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
						       instance, &err_cnt) ||
		    !err_cnt)
			continue;

		*err_count += err_cnt;

		/* log the errors */
		amdgpu_ras_get_error_type_name(err_type, err_type_name);
		if (!mem_list) {
			/* memory_list is not supported */
			dev_info(adev->dev,
				 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
				 err_cnt, err_type_name,
				 reg_list[i].block_name,
				 instance, memory_id);
		} else {
			for (j = 0; j < mem_list_size; j++) {
				if (memory_id == mem_list[j].memory_id) {
					dev_info(adev->dev,
						 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
						 err_cnt, err_type_name,
						 reg_list[i].block_name,
						 instance, mem_list[j].name);
					break;
				}
			}
		}
	}
}

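/* Clear the err_status_lo/hi registers of every entry in @reg_list for the
 * given block instance so stale counts are not reported again.
 */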
void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
					   uint32_t reg_list_size,
					   uint32_t instance)
{
	uint32_t err_status_lo_offset, err_status_hi_offset;
	uint32_t i;

	for (i = 0; i < reg_list_size; i++) {
		err_status_lo_offset =
			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
						    reg_list[i].seg_lo, reg_list[i].reg_lo);
		err_status_hi_offset =
			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
						    reg_list[i].seg_hi, reg_list[i].reg_hi);
		WREG32(err_status_lo_offset, 0);
		WREG32(err_status_hi_offset, 0);
	}
}

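/* A ras_err_data aggregates per-(socket, die) error statistics as a list of
 * ras_err_node entries; the helpers below initialize, look up and tear down
 * that list.
 */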
int amdgpu_ras_error_data_init(struct ras_err_data *err_data)
{
	memset(err_data, 0, sizeof(*err_data));

	INIT_LIST_HEAD(&err_data->err_node_list);

	return 0;
}

static void amdgpu_ras_error_node_release(struct ras_err_node *err_node)
{
	if (!err_node)
		return;

	list_del(&err_node->node);
	kvfree(err_node);
}

void amdgpu_ras_error_data_fini(struct ras_err_data *err_data)
{
	struct ras_err_node *err_node, *tmp;

	list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
		amdgpu_ras_error_node_release(err_node);
}

static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data,
							     struct amdgpu_smuio_mcm_config_info *mcm_info)
{
	struct ras_err_node *err_node;
	struct amdgpu_smuio_mcm_config_info *ref_id;

	if (!err_data || !mcm_info)
		return NULL;

	for_each_ras_error(err_node, err_data) {
		ref_id = &err_node->err_info.mcm_info;

		if (mcm_info->socket_id == ref_id->socket_id &&
		    mcm_info->die_id == ref_id->die_id)
			return err_node;
	}

	return NULL;
}

static struct ras_err_node *amdgpu_ras_error_node_new(void)
{
	struct ras_err_node *err_node;

	err_node = kvzalloc_obj(*err_node, GFP_KERNEL);
	if (!err_node)
		return NULL;

	INIT_LIST_HEAD(&err_node->node);

	return err_node;
}

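/* list_sort() comparator: order error nodes by socket_id first, then die_id. */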
static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
{
	struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
	struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
	struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
	struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;

	if (unlikely(infoa->socket_id != infob->socket_id))
		return infoa->socket_id - infob->socket_id;
	else
		return infoa->die_id - infob->die_id;

	return 0;
}

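/* Find the per-(socket, die) error info matching @mcm_info, creating and
 * inserting a new node (and re-sorting the list) when none exists yet;
 * returns NULL on allocation failure.
 */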
static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
						      struct amdgpu_smuio_mcm_config_info *mcm_info)
{
	struct ras_err_node *err_node;

	err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info);
	if (err_node)
		return &err_node->err_info;

	err_node = amdgpu_ras_error_node_new();
	if (!err_node)
		return NULL;

	memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));

	err_data->err_list_count++;
	list_add_tail(&err_node->node, &err_data->err_node_list);
	list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);

	return &err_node->err_info;
}

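/* The three helpers below add uncorrectable (ue), correctable (ce) and
 * deferred (de) error counts to both the matching per-(socket, die) info and
 * the overall totals in @err_data; a zero count is a no-op.
 */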
int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
					struct amdgpu_smuio_mcm_config_info *mcm_info,
					u64 count)
{
	struct ras_err_info *err_info;

	if (!err_data || !mcm_info)
		return -EINVAL;

	if (!count)
		return 0;

	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
	if (!err_info)
		return -EINVAL;

	err_info->ue_count += count;
	err_data->ue_count += count;

	return 0;
}

int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
					struct amdgpu_smuio_mcm_config_info *mcm_info,
					u64 count)
{
	struct ras_err_info *err_info;

	if (!err_data || !mcm_info)
		return -EINVAL;

	if (!count)
		return 0;

	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
	if (!err_info)
		return -EINVAL;

	err_info->ce_count += count;
	err_data->ce_count += count;

	return 0;
}

int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
					struct amdgpu_smuio_mcm_config_info *mcm_info,
					u64 count)
{
	struct ras_err_info *err_info;

	if (!err_data || !mcm_info)
		return -EINVAL;

	if (!count)
		return 0;

	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
	if (!err_info)
		return -EINVAL;

	err_info->de_count += count;
	err_data->de_count += count;

	return 0;
}

#define mmMP0_SMN_C2PMSG_92	0x1609C
#define mmMP0_SMN_C2PMSG_126	0x160BE
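/* Decode the boot error word read from MP0 C2PMSG_126 of one instance and
 * report each failure category (memory training, firmware load, link
 * training, HBM tests, data abort, generic) together with the socket/aid/hbm
 * ids and the firmware status from C2PMSG_92.
 */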
static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
						 u32 instance)
{
	u32 socket_id, aid_id, hbm_id;
	u32 fw_status;
	u32 boot_error;
	u64 reg_addr;

	/* The pattern for smn addressing in other SOC could be different from
	 * the one for aqua_vanjaram. We should revisit the code if the pattern
	 * is changed. In such case, replace the aqua_vanjaram implementation
	 * with more common helper */
	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
		   aqua_vanjaram_encode_ext_smn_addressing(instance);
	fw_status = amdgpu_device_indirect_rreg_ext(adev, reg_addr);

	reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
		   aqua_vanjaram_encode_ext_smn_addressing(instance);
	boot_error = amdgpu_device_indirect_rreg_ext(adev, reg_addr);

	socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
	aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error);
	hbm_id = ((1 == AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error)) ? 0 : 1);

	if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, memory training failed\n",
			 socket_id, aid_id, hbm_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, fw_status: 0x%x, firmware load failed at boot time\n",
			 socket_id, aid_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, fw_status: 0x%x, wafl link training failed\n",
			 socket_id, aid_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, fw_status: 0x%x, xgmi link training failed\n",
			 socket_id, aid_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, fw_status: 0x%x, usr cp link training failed\n",
			 socket_id, aid_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, fw_status: 0x%x, usr dp link training failed\n",
			 socket_id, aid_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm memory test failed\n",
			 socket_id, aid_id, hbm_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n",
			 socket_id, aid_id, hbm_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_DATA_ABORT(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
			 socket_id, aid_id, fw_status);

	if (AMDGPU_RAS_GPU_ERR_GENERIC(boot_error))
		dev_info(adev->dev,
			 "socket: %d, aid: %d, fw_status: 0x%x, Boot Controller Generic Error\n",
			 socket_id, aid_id, fw_status);
}

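/* Poll the C2PMSG_92 boot status of one instance; returns true (boot error
 * detected) if it never reaches the steady state within the polling limit.
 */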
static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev,
					   u32 instance)
{
	u64 reg_addr;
	u32 reg_data;
	int retry_loop;

	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
		   aqua_vanjaram_encode_ext_smn_addressing(instance);

	for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
		reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
		if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS)
			return false;
		else
			msleep(1);
	}

	return true;
}

void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
{
	u32 i;

	for (i = 0; i < num_instances; i++) {
		if (amdgpu_ras_boot_error_detected(adev, i))
			amdgpu_ras_boot_time_error_reporting(adev, i);
	}
}

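/* Reserve one bad VRAM page in the VRAM manager so it is never handed out
 * again; addresses inside a registered critical region are skipped, and only
 * pages not yet known to the VRAM manager (-ENOENT) are newly reserved.
 */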
int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
	int ret = 0;

	if (amdgpu_ras_check_critical_address(adev, start))
		return 0;

	mutex_lock(&con->page_rsv_lock);
	ret = amdgpu_vram_mgr_query_page_status(mgr, start);
	if (ret == -ENOENT)
		ret = amdgpu_vram_mgr_reserve_range(mgr, start, AMDGPU_GPU_PAGE_SIZE);
	mutex_unlock(&con->page_rsv_lock);

	return ret;
}

void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
				const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (RAS_EVENT_ID_IS_VALID(event_id))
		dev_printk(KERN_INFO, adev->dev, "{%llu}%pV", event_id, &vaf);
	else
		dev_printk(KERN_INFO, adev->dev, "%pV", &vaf);

	va_end(args);
}

bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (amdgpu_uniras_enabled(adev))
		return amdgpu_ras_mgr_is_rma(adev);

	if (!con)
		return false;

	return con->is_rma;
}

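/* Record the VRAM ranges backing @bo as critical regions: every buddy block
 * of the bo's VRAM resource is added to the critical region list so that bad
 * page reservation skips addresses inside it. A bo that has already been
 * recorded is ignored.
 */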
int amdgpu_ras_add_critical_region(struct amdgpu_device *adev,
				   struct amdgpu_bo *bo)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct amdgpu_vram_mgr_resource *vres;
	struct ras_critical_region *region;
	struct drm_buddy_block *block;
	int ret = 0;

	if (!bo || !bo->tbo.resource)
		return -EINVAL;

	vres = to_amdgpu_vram_mgr_resource(bo->tbo.resource);

	mutex_lock(&con->critical_region_lock);

	/* Check if the bo had been recorded */
	list_for_each_entry(region, &con->critical_region_head, node)
		if (region->bo == bo)
			goto out;

	/* Record new critical amdgpu bo */
	list_for_each_entry(block, &vres->blocks, link) {
		region = kzalloc_obj(*region, GFP_KERNEL);
		if (!region) {
			ret = -ENOMEM;
			goto out;
		}
		region->bo = bo;
		region->start = amdgpu_vram_mgr_block_start(block);
		region->size = amdgpu_vram_mgr_block_size(block);
		list_add_tail(&region->node, &con->critical_region_head);
	}

out:
	mutex_unlock(&con->critical_region_lock);

	return ret;
}

static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev)
{
	amdgpu_ras_add_critical_region(adev, adev->mman.fw_reserved_memory);
}

static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_critical_region *region, *tmp;

	mutex_lock(&con->critical_region_lock);
	list_for_each_entry_safe(region, tmp, &con->critical_region_head, node) {
		list_del(&region->node);
		kfree(region);
	}
	mutex_unlock(&con->critical_region_lock);
}

bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_critical_region *region;
	bool ret = false;

	mutex_lock(&con->critical_region_lock);
	list_for_each_entry(region, &con->critical_region_head, node) {
		if ((region->start <= addr) &&
		    (addr < (region->start + region->size))) {
			ret = true;
			break;
		}
	}
	mutex_unlock(&con->critical_region_lock);

	return ret;
}

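/* Reset hooks: when the unified RAS manager is enabled, notify it before and
 * after a GPU reset for every device in the reset list.
 */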
void amdgpu_ras_pre_reset(struct amdgpu_device *adev,
			  struct list_head *device_list)
{
	struct amdgpu_device *tmp_adev = NULL;

	list_for_each_entry(tmp_adev, device_list, reset_list) {
		if (amdgpu_uniras_enabled(tmp_adev))
			amdgpu_ras_mgr_pre_reset(tmp_adev);
	}
}

void amdgpu_ras_post_reset(struct amdgpu_device *adev,
			   struct list_head *device_list)
{
	struct amdgpu_device *tmp_adev = NULL;

	list_for_each_entry(tmp_adev, device_list, reset_list) {
		if (amdgpu_uniras_enabled(tmp_adev))
			amdgpu_ras_mgr_post_reset(tmp_adev);
	}
}