Lines Matching refs:con

143 static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
221 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_check_address_validity() local
259 (blk_info.task.pid == con->init_task_pid) && in amdgpu_check_address_validity()
260 !strncmp(blk_info.task.comm, con->init_task_comm, TASK_COMM_LEN)) in amdgpu_check_address_validity()
744 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_create_obj() local
747 if (!adev->ras_enabled || !con) in amdgpu_ras_create_obj()
757 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; in amdgpu_ras_create_obj()
759 obj = &con->objs[head->block]; in amdgpu_ras_create_obj()
770 list_add(&obj->node, &con->head); in amdgpu_ras_create_obj()
780 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_find_obj() local
784 if (!adev->ras_enabled || !con) in amdgpu_ras_find_obj()
795 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; in amdgpu_ras_find_obj()
797 obj = &con->objs[head->block]; in amdgpu_ras_find_obj()
803 obj = &con->objs[i]; in amdgpu_ras_find_obj()
823 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_is_feature_enabled() local
825 return con->features & BIT(head->block); in amdgpu_ras_is_feature_enabled()
835 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in __amdgpu_ras_feature_enable() local
856 con->features |= BIT(head->block); in __amdgpu_ras_feature_enable()
859 con->features &= ~BIT(head->block); in __amdgpu_ras_feature_enable()
871 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_feature_enable() local
875 if (!con) in amdgpu_ras_feature_enable()
928 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_feature_enable_on_boot() local
931 if (!con) in amdgpu_ras_feature_enable_on_boot()
934 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { in amdgpu_ras_feature_enable_on_boot()
962 con->features |= BIT(head->block); in amdgpu_ras_feature_enable_on_boot()
968 con->features &= ~BIT(head->block); in amdgpu_ras_feature_enable_on_boot()
979 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_disable_all_features() local
982 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_disable_all_features()
995 return con->features; in amdgpu_ras_disable_all_features()
1001 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_enable_all_features() local
1048 return con->features; in amdgpu_ras_enable_all_features()
1790 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_error_count() local
1795 if (!adev->ras_enabled || !con) in amdgpu_ras_query_error_count()
1807 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_query_error_count()
1887 struct amdgpu_ras *con = in amdgpu_ras_sysfs_badpages_read() local
1889 struct amdgpu_device *adev = con->adev; in amdgpu_ras_sysfs_badpages_read()
1949 struct amdgpu_ras *con = in amdgpu_ras_sysfs_features_read() local
1952 return sysfs_emit(buf, "feature mask: 0x%x\n", con->features); in amdgpu_ras_sysfs_features_read()
1978 struct amdgpu_ras *con = in amdgpu_ras_sysfs_version_show() local
1984 con->eeprom_control.tbl_hdr.version); in amdgpu_ras_sysfs_version_show()
1986 if (amdgpu_ras_get_version_info(con->adev, &major, &minor, &rev)) in amdgpu_ras_sysfs_version_show()
1996 struct amdgpu_ras *con = in amdgpu_ras_sysfs_schema_show() local
1998 return sysfs_emit(buf, "schema: 0x%x\n", con->schema); in amdgpu_ras_sysfs_schema_show()
2013 struct amdgpu_ras *con = in amdgpu_ras_sysfs_event_state_show() local
2015 struct ras_event_manager *event_mgr = con->event_mgr; in amdgpu_ras_sysfs_event_state_show()
2036 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_bad_page_node() local
2040 &con->badpages_attr.attr, in amdgpu_ras_sysfs_remove_bad_page_node()
2046 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_dev_attr_node() local
2048 &con->features_attr.attr, in amdgpu_ras_sysfs_remove_dev_attr_node()
2049 &con->version_attr.attr, in amdgpu_ras_sysfs_remove_dev_attr_node()
2050 &con->schema_attr.attr, in amdgpu_ras_sysfs_remove_dev_attr_node()
2051 &con->event_state_attr.attr, in amdgpu_ras_sysfs_remove_dev_attr_node()
2128 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_all() local
2131 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_sysfs_remove_all()
2165 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_debugfs_create_ctrl_node() local
2166 struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control; in amdgpu_ras_debugfs_create_ctrl_node()
2176 &con->bad_page_cnt_threshold); in amdgpu_ras_debugfs_create_ctrl_node()
2182 con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table", in amdgpu_ras_debugfs_create_ctrl_node()
2185 amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control); in amdgpu_ras_debugfs_create_ctrl_node()
2195 debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot); in amdgpu_ras_debugfs_create_ctrl_node()
2202 &con->disable_ras_err_cnt_harvest); in amdgpu_ras_debugfs_create_ctrl_node()
2245 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_debugfs_create_all() local
2254 if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con) in amdgpu_ras_debugfs_create_all()
2259 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_debugfs_create_all()
2292 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fs_init() local
2297 &con->features_attr.attr, in amdgpu_ras_fs_init()
2298 &con->version_attr.attr, in amdgpu_ras_fs_init()
2299 &con->schema_attr.attr, in amdgpu_ras_fs_init()
2300 &con->event_state_attr.attr, in amdgpu_ras_fs_init()
2312 con->features_attr = dev_attr_features; in amdgpu_ras_fs_init()
2316 con->version_attr = dev_attr_version; in amdgpu_ras_fs_init()
2320 con->schema_attr = dev_attr_schema; in amdgpu_ras_fs_init()
2324 con->event_state_attr = dev_attr_event_state; in amdgpu_ras_fs_init()
2329 con->badpages_attr = bin_attr_gpu_vram_bad_pages; in amdgpu_ras_fs_init()
2330 sysfs_bin_attr_init(&con->badpages_attr); in amdgpu_ras_fs_init()
2331 bin_attrs[0] = &con->badpages_attr; in amdgpu_ras_fs_init()
2344 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fs_fini() local
2348 list_for_each_entry_safe(con_obj, tmp, &con->head, node) { in amdgpu_ras_fs_fini()
2404 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_interrupt_poison_consumption_handler() local
2409 if (!block_obj || !con) in amdgpu_ras_interrupt_poison_consumption_handler()
2468 struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev); in amdgpu_ras_interrupt_poison_creation_handler() local
2470 atomic_inc(&con->page_retirement_req_cnt); in amdgpu_ras_interrupt_poison_creation_handler()
2471 atomic_inc(&con->poison_creation_count); in amdgpu_ras_interrupt_poison_creation_handler()
2473 wake_up(&con->page_retirement_wq); in amdgpu_ras_interrupt_poison_creation_handler()
2658 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_interrupt_remove_all() local
2661 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_interrupt_remove_all()
2672 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_log_on_err_counter() local
2675 if (!adev->ras_enabled || !con) in amdgpu_ras_log_on_err_counter()
2678 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_log_on_err_counter()
2747 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_err_status() local
2750 if (!adev->ras_enabled || !con) in amdgpu_ras_query_err_status()
2753 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_query_err_status()
2765 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_badpages_read() local
2770 if (!con || !con->eh_data || !bps || !count) in amdgpu_ras_badpages_read()
2773 mutex_lock(&con->recovery_lock); in amdgpu_ras_badpages_read()
2774 data = con->eh_data; in amdgpu_ras_badpages_read()
2786 mutex_unlock(&con->recovery_lock); in amdgpu_ras_badpages_read()
3075 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in __amdgpu_ras_restore_bad_pages() local
3076 struct ras_err_handler_data *data = con->eh_data; in __amdgpu_ras_restore_bad_pages()
3079 if (amdgpu_ras_check_bad_page_unlock(con, in __amdgpu_ras_restore_bad_pages()
3097 con->bad_page_num++; in __amdgpu_ras_restore_bad_pages()
3215 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_add_bad_pages() local
3223 if (!con || !con->eh_data || !bps || pages <= 0) in amdgpu_ras_add_bad_pages()
3239 mutex_lock(&con->recovery_lock); in amdgpu_ras_add_bad_pages()
3253 con->bad_page_num -= adev->umc.retire_unit; in amdgpu_ras_add_bad_pages()
3267 con->bad_page_num -= adev->umc.retire_unit; in amdgpu_ras_add_bad_pages()
3270 con->eh_data->count_saved = con->eh_data->count; in amdgpu_ras_add_bad_pages()
3277 mutex_unlock(&con->recovery_lock); in amdgpu_ras_add_bad_pages()
3290 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_save_bad_pages() local
3295 if (!con || !con->eh_data) { in amdgpu_ras_save_bad_pages()
3302 if (!con->eeprom_control.is_eeprom_valid) { in amdgpu_ras_save_bad_pages()
3311 mutex_lock(&con->recovery_lock); in amdgpu_ras_save_bad_pages()
3312 control = &con->eeprom_control; in amdgpu_ras_save_bad_pages()
3313 data = con->eh_data; in amdgpu_ras_save_bad_pages()
3321 save_count = con->bad_page_num - control->ras_num_bad_pages; in amdgpu_ras_save_bad_pages()
3322 mutex_unlock(&con->recovery_lock); in amdgpu_ras_save_bad_pages()
3422 static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, in amdgpu_ras_check_bad_page_unlock() argument
3425 struct ras_err_handler_data *data = con->eh_data; in amdgpu_ras_check_bad_page_unlock()
3426 struct amdgpu_device *adev = con->adev; in amdgpu_ras_check_bad_page_unlock()
3450 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_check_bad_page() local
3453 if (!con || !con->eh_data) in amdgpu_ras_check_bad_page()
3456 mutex_lock(&con->recovery_lock); in amdgpu_ras_check_bad_page()
3457 ret = amdgpu_ras_check_bad_page_unlock(con, addr); in amdgpu_ras_check_bad_page()
3458 mutex_unlock(&con->recovery_lock); in amdgpu_ras_check_bad_page()
3465 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_validate_threshold() local
3487 con->bad_page_cnt_threshold = min(lower_32_bits(val), in amdgpu_ras_validate_threshold()
3490 con->bad_page_cnt_threshold = ((con->reserved_pages_in_bytes) >> 21) << 4; in amdgpu_ras_validate_threshold()
3492 con->bad_page_cnt_threshold = min_t(int, max_count, in amdgpu_ras_validate_threshold()
3503 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_put_poison_req() local
3512 ret = kfifo_put(&con->poison_fifo, poison_msg); in amdgpu_ras_put_poison_req()
3524 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_get_poison_req() local
3526 return kfifo_get(&con->poison_fifo, poison_msg); in amdgpu_ras_get_poison_req()
3558 static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con, in amdgpu_ras_schedule_retirement_dwork() argument
3563 mutex_lock(&con->umc_ecc_log.lock); in amdgpu_ras_schedule_retirement_dwork()
3564 ret = radix_tree_tagged(&con->umc_ecc_log.de_page_tree, in amdgpu_ras_schedule_retirement_dwork()
3566 mutex_unlock(&con->umc_ecc_log.lock); in amdgpu_ras_schedule_retirement_dwork()
3569 schedule_delayed_work(&con->page_retirement_dwork, in amdgpu_ras_schedule_retirement_dwork()
3577 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, in amdgpu_ras_do_page_retirement() local
3579 struct amdgpu_device *adev = con->adev; in amdgpu_ras_do_page_retirement()
3584 amdgpu_ras_schedule_retirement_dwork(con, in amdgpu_ras_do_page_retirement()
3595 amdgpu_ras_schedule_retirement_dwork(con, in amdgpu_ras_do_page_retirement()
3643 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_clear_poison_fifo() local
3648 ret = kfifo_get(&con->poison_fifo, &msg); in amdgpu_ras_clear_poison_fifo()
3655 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_poison_consumption_handler() local
3677 flush_delayed_work(&con->page_retirement_dwork); in amdgpu_ras_poison_consumption_handler()
3688 con->gpu_reset_flags |= reset; in amdgpu_ras_poison_consumption_handler()
3694 flush_work(&con->recovery_work); in amdgpu_ras_poison_consumption_handler()
3703 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_page_retirement_thread() local
3710 wait_event_interruptible(con->page_retirement_wq, in amdgpu_ras_page_retirement_thread()
3712 atomic_read(&con->page_retirement_req_cnt)); in amdgpu_ras_page_retirement_thread()
3717 mutex_lock(&con->poison_lock); in amdgpu_ras_page_retirement_thread()
3721 poison_creation_count = atomic_read(&con->poison_creation_count); in amdgpu_ras_page_retirement_thread()
3727 atomic_sub(poison_creation_count, &con->poison_creation_count); in amdgpu_ras_page_retirement_thread()
3728 atomic_sub(poison_creation_count, &con->page_retirement_req_cnt); in amdgpu_ras_page_retirement_thread()
3730 } while (atomic_read(&con->poison_creation_count) && in amdgpu_ras_page_retirement_thread()
3731 !atomic_read(&con->poison_consumption_count)); in amdgpu_ras_page_retirement_thread()
3734 msg_count = kfifo_len(&con->poison_fifo); in amdgpu_ras_page_retirement_thread()
3740 atomic_sub(msg_count, &con->page_retirement_req_cnt); in amdgpu_ras_page_retirement_thread()
3747 atomic_set(&con->poison_creation_count, 0); in amdgpu_ras_page_retirement_thread()
3748 atomic_set(&con->poison_consumption_count, 0); in amdgpu_ras_page_retirement_thread()
3754 atomic_set(&con->page_retirement_req_cnt, 0); in amdgpu_ras_page_retirement_thread()
3763 schedule_delayed_work(&con->page_retirement_dwork, 0); in amdgpu_ras_page_retirement_thread()
3767 msg_count = kfifo_len(&con->poison_fifo); in amdgpu_ras_page_retirement_thread()
3770 atomic_sub(msg_count, &con->page_retirement_req_cnt); in amdgpu_ras_page_retirement_thread()
3773 atomic_set(&con->poison_consumption_count, 0); in amdgpu_ras_page_retirement_thread()
3776 schedule_delayed_work(&con->page_retirement_dwork, 0); in amdgpu_ras_page_retirement_thread()
3778 mutex_unlock(&con->poison_lock); in amdgpu_ras_page_retirement_thread()
3786 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_init_badpage_info() local
3790 if (!con || amdgpu_sriov_vf(adev)) in amdgpu_ras_init_badpage_info()
3796 control = &con->eeprom_control; in amdgpu_ras_init_badpage_info()
3797 con->ras_smu_drv = amdgpu_dpm_get_ras_smu_driver(adev); in amdgpu_ras_init_badpage_info()
3819 if (con->update_channel_flag == true) { in amdgpu_ras_init_badpage_info()
3822 con->update_channel_flag = false; in amdgpu_ras_init_badpage_info()
3838 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_recovery_init() local
3843 if (!con || amdgpu_sriov_vf(adev)) in amdgpu_ras_recovery_init()
3851 con->adev = adev; in amdgpu_ras_recovery_init()
3856 data = &con->eh_data; in amdgpu_ras_recovery_init()
3863 mutex_init(&con->recovery_lock); in amdgpu_ras_recovery_init()
3864 mutex_init(&con->poison_lock); in amdgpu_ras_recovery_init()
3865 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); in amdgpu_ras_recovery_init()
3866 atomic_set(&con->in_recovery, 0); in amdgpu_ras_recovery_init()
3867 atomic_set(&con->rma_in_recovery, 0); in amdgpu_ras_recovery_init()
3868 con->eeprom_control.bad_channel_bitmap = 0; in amdgpu_ras_recovery_init()
3870 max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control); in amdgpu_ras_recovery_init()
3879 mutex_init(&con->page_rsv_lock); in amdgpu_ras_recovery_init()
3880 INIT_KFIFO(con->poison_fifo); in amdgpu_ras_recovery_init()
3881 mutex_init(&con->page_retirement_lock); in amdgpu_ras_recovery_init()
3882 init_waitqueue_head(&con->page_retirement_wq); in amdgpu_ras_recovery_init()
3883 atomic_set(&con->page_retirement_req_cnt, 0); in amdgpu_ras_recovery_init()
3884 atomic_set(&con->poison_creation_count, 0); in amdgpu_ras_recovery_init()
3885 atomic_set(&con->poison_consumption_count, 0); in amdgpu_ras_recovery_init()
3886 con->page_retirement_thread = in amdgpu_ras_recovery_init()
3888 if (IS_ERR(con->page_retirement_thread)) { in amdgpu_ras_recovery_init()
3889 con->page_retirement_thread = NULL; in amdgpu_ras_recovery_init()
3893 INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement); in amdgpu_ras_recovery_init()
3894 amdgpu_ras_ecc_log_init(&con->umc_ecc_log); in amdgpu_ras_recovery_init()
3905 con->eh_data = NULL; in amdgpu_ras_recovery_init()
3923 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_recovery_fini() local
3924 struct ras_err_handler_data *data = con->eh_data; in amdgpu_ras_recovery_fini()
3934 flush_delayed_work(&con->page_retirement_dwork); in amdgpu_ras_recovery_fini()
3935 ret = amdgpu_ras_schedule_retirement_dwork(con, 0); in amdgpu_ras_recovery_fini()
3938 if (con->page_retirement_thread) in amdgpu_ras_recovery_fini()
3939 kthread_stop(con->page_retirement_thread); in amdgpu_ras_recovery_fini()
3941 atomic_set(&con->page_retirement_req_cnt, 0); in amdgpu_ras_recovery_fini()
3942 atomic_set(&con->poison_creation_count, 0); in amdgpu_ras_recovery_fini()
3944 mutex_destroy(&con->page_rsv_lock); in amdgpu_ras_recovery_fini()
3946 cancel_work_sync(&con->recovery_work); in amdgpu_ras_recovery_fini()
3948 cancel_delayed_work_sync(&con->page_retirement_dwork); in amdgpu_ras_recovery_fini()
3950 amdgpu_ras_ecc_log_fini(&con->umc_ecc_log); in amdgpu_ras_recovery_fini()
3952 mutex_lock(&con->recovery_lock); in amdgpu_ras_recovery_fini()
3953 con->eh_data = NULL; in amdgpu_ras_recovery_fini()
3956 mutex_unlock(&con->recovery_lock); in amdgpu_ras_recovery_fini()
4071 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_poison_mode() local
4075 if (amdgpu_sriov_vf(adev) || !con) in amdgpu_ras_query_poison_mode()
4082 con->poison_supported = true; in amdgpu_ras_query_poison_mode()
4094 con->poison_supported = true; in amdgpu_ras_query_poison_mode()
4167 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, in amdgpu_ras_counte_dw() local
4169 struct amdgpu_device *adev = con->adev; in amdgpu_ras_counte_dw()
4181 atomic_set(&con->ras_ce_count, ce_count); in amdgpu_ras_counte_dw()
4182 atomic_set(&con->ras_ue_count, ue_count); in amdgpu_ras_counte_dw()
4235 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_init_reserved_vram_size() local
4237 if (!con || (adev->flags & AMD_IS_APU)) in amdgpu_ras_init_reserved_vram_size()
4244 con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT; in amdgpu_ras_init_reserved_vram_size()
4247 con->reserved_pages_in_bytes = (AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT << 1); in amdgpu_ras_init_reserved_vram_size()
4256 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_init() local
4259 if (con) in amdgpu_ras_init()
4262 con = kzalloc(sizeof(*con) + in amdgpu_ras_init()
4266 if (!con) in amdgpu_ras_init()
4269 con->adev = adev; in amdgpu_ras_init()
4270 INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw); in amdgpu_ras_init()
4271 atomic_set(&con->ras_ce_count, 0); in amdgpu_ras_init()
4272 atomic_set(&con->ras_ue_count, 0); in amdgpu_ras_init()
4274 con->objs = (struct ras_manager *)(con + 1); in amdgpu_ras_init()
4276 amdgpu_ras_set_context(adev, con); in amdgpu_ras_init()
4285 con->features |= BIT(AMDGPU_RAS_BLOCK__GFX); in amdgpu_ras_init()
4294 con->update_channel_flag = false; in amdgpu_ras_init()
4295 con->features = 0; in amdgpu_ras_init()
4296 con->schema = 0; in amdgpu_ras_init()
4297 INIT_LIST_HEAD(&con->head); in amdgpu_ras_init()
4299 con->flags = RAS_DEFAULT_FLAGS; in amdgpu_ras_init()
4365 con->features |= ((adev->smuio.funcs->get_socket_id(adev)) << in amdgpu_ras_init()
4369 con->schema = amdgpu_get_ras_schema(adev); in amdgpu_ras_init()
4387 con->init_task_pid = task_pid_nr(current); in amdgpu_ras_init()
4388 get_task_comm(con->init_task_comm, current); in amdgpu_ras_init()
4390 mutex_init(&con->critical_region_lock); in amdgpu_ras_init()
4391 INIT_LIST_HEAD(&con->critical_region_head); in amdgpu_ras_init()
4400 kfree(con); in amdgpu_ras_init()
4434 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_is_poison_mode_supported() local
4436 if (!con) in amdgpu_ras_is_poison_mode_supported()
4439 return con->poison_supported; in amdgpu_ras_is_poison_mode_supported()
4447 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_block_late_init() local
4499 atomic_set(&con->ras_ce_count, ce_count); in amdgpu_ras_block_late_init()
4500 atomic_set(&con->ras_ue_count, ue_count); in amdgpu_ras_block_late_init()
4548 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_resume() local
4551 if (!adev->ras_enabled || !con) { in amdgpu_ras_resume()
4558 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { in amdgpu_ras_resume()
4570 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_resume()
4582 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_suspend() local
4584 if (!adev->ras_enabled || !con) in amdgpu_ras_suspend()
4589 if (AMDGPU_RAS_GET_FEATURES(con->features)) in amdgpu_ras_suspend()
4650 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_pre_fini() local
4652 if (!adev->ras_enabled || !con) in amdgpu_ras_pre_fini()
4657 if (AMDGPU_RAS_GET_FEATURES(con->features)) in amdgpu_ras_pre_fini()
4667 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fini() local
4669 if (!adev->ras_enabled || !con) in amdgpu_ras_fini()
4673 mutex_destroy(&con->critical_region_lock); in amdgpu_ras_fini()
4700 WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared"); in amdgpu_ras_fini()
4702 if (AMDGPU_RAS_GET_FEATURES(con->features)) in amdgpu_ras_fini()
4705 cancel_delayed_work_sync(&con->ras_counte_delay_work); in amdgpu_ras_fini()
4708 kfree(con); in amdgpu_ras_fini()
4883 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_release_ras_context() local
4885 if (!con) in amdgpu_release_ras_context()
4888 if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) { in amdgpu_release_ras_context()
4889 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX); in amdgpu_release_ras_context()
4891 kfree(con); in amdgpu_release_ras_context()
5103 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_set_mca_debug_mode() local
5106 if (con) { in amdgpu_ras_set_mca_debug_mode()
5109 con->is_aca_debug_mode = enable; in amdgpu_ras_set_mca_debug_mode()
5117 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_set_aca_debug_mode() local
5120 if (con) { in amdgpu_ras_set_aca_debug_mode()
5126 con->is_aca_debug_mode = enable; in amdgpu_ras_set_aca_debug_mode()
5134 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_get_aca_debug_mode() local
5138 if (!con) in amdgpu_ras_get_aca_debug_mode()
5143 return con->is_aca_debug_mode; in amdgpu_ras_get_aca_debug_mode()
5151 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_get_error_query_mode() local
5155 if (!con) { in amdgpu_ras_get_error_query_mode()
5164 (con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY; in amdgpu_ras_get_error_query_mode()
5603 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_reserve_page() local
5611 mutex_lock(&con->page_rsv_lock); in amdgpu_ras_reserve_page()
5615 mutex_unlock(&con->page_rsv_lock); in amdgpu_ras_reserve_page()
5640 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_is_rma() local
5645 if (!con) in amdgpu_ras_is_rma()
5648 return con->is_rma; in amdgpu_ras_is_rma()
5654 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_add_critical_region() local
5665 mutex_lock(&con->critical_region_lock); in amdgpu_ras_add_critical_region()
5668 list_for_each_entry(region, &con->critical_region_head, node) in amdgpu_ras_add_critical_region()
5682 list_add_tail(&region->node, &con->critical_region_head); in amdgpu_ras_add_critical_region()
5686 mutex_unlock(&con->critical_region_lock); in amdgpu_ras_add_critical_region()
5698 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_critical_region_fini() local
5701 mutex_lock(&con->critical_region_lock); in amdgpu_ras_critical_region_fini()
5702 list_for_each_entry_safe(region, tmp, &con->critical_region_head, node) { in amdgpu_ras_critical_region_fini()
5706 mutex_unlock(&con->critical_region_lock); in amdgpu_ras_critical_region_fini()
5711 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_check_critical_address() local
5715 mutex_lock(&con->critical_region_lock); in amdgpu_ras_check_critical_address()
5716 list_for_each_entry(region, &con->critical_region_head, node) { in amdgpu_ras_check_critical_address()
5723 mutex_unlock(&con->critical_region_lock); in amdgpu_ras_check_critical_address()
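
Nearly every match above follows the same shape: the per-device RAS context is fetched with amdgpu_ras_get_context(adev), callers bail out early when !adev->ras_enabled or the context pointer is NULL, per-block state hangs off con->head and is walked with list_for_each_entry(), and bad-page bookkeeping (con->eh_data, con->bad_page_num) is only touched under con->recovery_lock. The standalone C sketch below models that access pattern with simplified, hypothetical stand-in types (ras_ctx, ras_obj, device); it is illustrative only and is not the amdgpu implementation.

	/*
	 * Minimal userspace model of the pattern shared by the matches above:
	 * one RAS context ("con") hangs off the device, callers re-fetch it,
	 * guard against it being absent, and walk its object list or mutate
	 * bad-page state under the corresponding lock.  All types here are
	 * simplified stand-ins, not the kernel structures.
	 */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct ras_obj {                       /* stand-in for a per-block object   */
		int block;
		struct ras_obj *next;
	};

	struct ras_ctx {                       /* stand-in for struct amdgpu_ras    */
		uint32_t features;             /* per-block feature bitmask         */
		struct ras_obj *head;          /* list of enabled block objects     */
		pthread_mutex_t recovery_lock; /* guards bad-page bookkeeping       */
		int bad_page_num;
	};

	struct device {                        /* stand-in for struct amdgpu_device */
		bool ras_enabled;
		struct ras_ctx *ras_ctx;
	};

	/* Mirrors the amdgpu_ras_get_context(adev) idiom seen in every match. */
	static struct ras_ctx *get_ras_context(struct device *dev)
	{
		return dev->ras_ctx;
	}

	/* Typical "query" path: guard first, then walk the per-block list. */
	static int count_enabled_blocks(struct device *dev)
	{
		struct ras_ctx *con = get_ras_context(dev);
		struct ras_obj *obj;
		int n = 0;

		if (!dev->ras_enabled || !con)   /* same guard as the kernel code */
			return -1;

		for (obj = con->head; obj; obj = obj->next)
			n++;
		return n;
	}

	/* Typical "recovery" path: mutate bad-page state under recovery_lock. */
	static void add_bad_page(struct device *dev)
	{
		struct ras_ctx *con = get_ras_context(dev);

		if (!con)
			return;

		pthread_mutex_lock(&con->recovery_lock);
		con->bad_page_num++;
		pthread_mutex_unlock(&con->recovery_lock);
	}

	int main(void)
	{
		struct ras_ctx con = { .features = 1u << 0, .head = NULL,
				       .recovery_lock = PTHREAD_MUTEX_INITIALIZER,
				       .bad_page_num = 0 };
		struct device dev = { .ras_enabled = true, .ras_ctx = &con };

		add_bad_page(&dev);
		printf("blocks: %d, bad pages: %d\n",
		       count_enabled_blocks(&dev), con.bad_page_num);
		return 0;
	}

Compile with "cc model.c -lpthread" to try it; the point is only the shape of the code (context fetch, NULL/enable guard, locked mutation), which is the pattern the cross-references above repeat throughout amdgpu_ras.c.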