Lines matching full:con in drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c

138 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
670 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_create_obj() local
673 if (!adev->ras_enabled || !con) in amdgpu_ras_create_obj()
683 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; in amdgpu_ras_create_obj()
685 obj = &con->objs[head->block]; in amdgpu_ras_create_obj()
696 list_add(&obj->node, &con->head); in amdgpu_ras_create_obj()
706 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_find_obj() local
710 if (!adev->ras_enabled || !con) in amdgpu_ras_find_obj()
721 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; in amdgpu_ras_find_obj()
723 obj = &con->objs[head->block]; in amdgpu_ras_find_obj()
729 obj = &con->objs[i]; in amdgpu_ras_find_obj()
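
The two lookups above, amdgpu_ras_create_obj() and amdgpu_ras_find_obj(), share one indexing scheme: per-block manager objects live in a flat con->objs[] array, with sub-block entries appended after AMDGPU_RAS_BLOCK__LAST. A minimal user-space sketch of that layout, assuming a fixed sub-block count; every name below is a stand-in, not the real amdgpu definition:

#include <stdio.h>

enum { BLOCK_UMC, BLOCK_GFX, BLOCK_LAST };      /* stand-in for AMDGPU_RAS_BLOCK__* */
#define SUB_BLOCKS 4                            /* hypothetical sub-block slot count */

struct ras_obj { int in_use; };

/* Flat array: one slot per block, then SUB_BLOCKS extra slots, mirroring
 * objs[BLOCK_LAST + sub_block_index] in the matches above. */
static struct ras_obj objs[BLOCK_LAST + SUB_BLOCKS];

static struct ras_obj *get_obj(int block, int sub_block_index)
{
        if (sub_block_index > 0)                /* sub-block: appended region */
                return &objs[BLOCK_LAST + sub_block_index];
        return &objs[block];                    /* whole-block entry */
}

int main(void)
{
        printf("gfx slot:   %td\n", get_obj(BLOCK_GFX, 0) - objs);
        printf("sub-1 slot: %td\n", get_obj(BLOCK_UMC, 1) - objs);
        return 0;
}
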
749 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_is_feature_enabled() local
751 return con->features & BIT(head->block); in amdgpu_ras_is_feature_enabled()
761 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in __amdgpu_ras_feature_enable() local
766 * The RAS framework checks con->hw_supported to see if it needs to do in __amdgpu_ras_feature_enable()
768 * IP checks con->support to see if it needs to disable RAS. in __amdgpu_ras_feature_enable()
782 con->features |= BIT(head->block); in __amdgpu_ras_feature_enable()
785 con->features &= ~BIT(head->block); in __amdgpu_ras_feature_enable()
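
Lines 751, 782 and 785 show the feature-state bookkeeping in full: a single con->features word carries one bit per RAS block, set on enable and cleared on disable. A runnable sketch of that bitmask pattern, with invented type and block names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum ras_block { BLOCK_UMC, BLOCK_GFX };        /* stand-ins */

struct ras_ctx { uint32_t features; };

/* Mirrors: return con->features & BIT(head->block); */
static bool feature_enabled(struct ras_ctx *con, enum ras_block b)
{
        return con->features & (1u << b);
}

/* Mirrors: con->features |= BIT(...) on enable, &= ~BIT(...) on disable. */
static void feature_set(struct ras_ctx *con, enum ras_block b, bool enable)
{
        if (enable)
                con->features |= 1u << b;
        else
                con->features &= ~(1u << b);
}

int main(void)
{
        struct ras_ctx con = { 0 };

        feature_set(&con, BLOCK_GFX, true);
        printf("feature mask: 0x%x, gfx: %d\n",
               con.features, feature_enabled(&con, BLOCK_GFX));
        feature_set(&con, BLOCK_GFX, false);
        printf("feature mask: 0x%x\n", con.features);
        return 0;
}
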
797 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_feature_enable() local
801 if (!con) in amdgpu_ras_feature_enable()
854 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_feature_enable_on_boot() local
857 if (!con) in amdgpu_ras_feature_enable_on_boot()
860 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { in amdgpu_ras_feature_enable_on_boot()
888 con->features |= BIT(head->block); in amdgpu_ras_feature_enable_on_boot()
894 con->features &= ~BIT(head->block); in amdgpu_ras_feature_enable_on_boot()
905 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_disable_all_features() local
908 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_disable_all_features()
921 return con->features; in amdgpu_ras_disable_all_features()
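
amdgpu_ras_disable_all_features() walks con->head with list_for_each_entry_safe(), which is required because the loop body may remove and free the node it is standing on. A small self-contained sketch of the save-the-next-pointer idiom behind it, simplified to a singly-linked list; all names are invented:

#include <stdio.h>
#include <stdlib.h>

struct node {
        int block;
        struct node *next;
};

/* Safe-iteration idiom: cache pos->next before the body may free pos,
 * the role list_for_each_entry_safe() plays in the matches above. */
#define for_each_safe(pos, tmp, head) \
        for (pos = (head); pos && ((tmp = pos->next), 1); pos = tmp)

int main(void)
{
        struct node *head = NULL, *pos, *tmp;

        for (int i = 0; i < 3; i++) {           /* build a small list */
                struct node *n = malloc(sizeof(*n));
                n->block = i;
                n->next = head;
                head = n;
        }

        /* "Disable all features": tear every node down while walking. */
        for_each_safe(pos, tmp, head) {
                printf("disabling block %d\n", pos->block);
                free(pos);                      /* safe: tmp already saved */
        }
        head = NULL;
        return 0;
}
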
927 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_enable_all_features() local
974 return con->features; in amdgpu_ras_enable_all_features()
1644 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_error_count() local
1649 if (!adev->ras_enabled || !con) in amdgpu_ras_query_error_count()
1661 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_query_error_count()
1739 struct amdgpu_ras *con = in amdgpu_ras_sysfs_badpages_read() local
1741 struct amdgpu_device *adev = con->adev; in amdgpu_ras_sysfs_badpages_read()
1770 struct amdgpu_ras *con = in amdgpu_ras_sysfs_features_read() local
1773 return sysfs_emit(buf, "feature mask: 0x%x\n", con->features); in amdgpu_ras_sysfs_features_read()
1779 struct amdgpu_ras *con = in amdgpu_ras_sysfs_version_show() local
1781 return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version); in amdgpu_ras_sysfs_version_show()
1787 struct amdgpu_ras *con = in amdgpu_ras_sysfs_schema_show() local
1789 return sysfs_emit(buf, "schema: 0x%x\n", con->schema); in amdgpu_ras_sysfs_schema_show()
1804 struct amdgpu_ras *con = in amdgpu_ras_sysfs_event_state_show() local
1806 struct ras_event_manager *event_mgr = con->event_mgr; in amdgpu_ras_sysfs_event_state_show()
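
The sysfs show callbacks above (features, version, schema, event_state, and the badpages blob) all recover the RAS context from an attribute embedded inside it; the search wraps their initializers onto unshown lines, but the use of con->adev on line 1741 implies the context is derived from the attribute pointer, presumably via the usual container_of() idiom. A self-contained sketch of that idiom, with hypothetical names:

#include <stddef.h>
#include <stdio.h>

/* Same shape as the kernel's container_of(), minus its type checking. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct attr { const char *name; };

struct ras_ctx {
        unsigned int features;
        struct attr features_attr;      /* embedded, like con->features_attr */
};

/* A show() callback receives only the attribute pointer... */
static unsigned int show_features(struct attr *a)
{
        /* ...and walks back to the enclosing context. */
        struct ras_ctx *con = container_of(a, struct ras_ctx, features_attr);
        return con->features;
}

int main(void)
{
        struct ras_ctx con = { .features = 0x5, .features_attr = { "features" } };
        printf("feature mask: 0x%x\n", show_features(&con.features_attr));
        return 0;
}
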
1827 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_bad_page_node() local
1831 &con->badpages_attr.attr, in amdgpu_ras_sysfs_remove_bad_page_node()
1837 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_dev_attr_node() local
1839 &con->features_attr.attr, in amdgpu_ras_sysfs_remove_dev_attr_node()
1840 &con->version_attr.attr, in amdgpu_ras_sysfs_remove_dev_attr_node()
1841 &con->schema_attr.attr, in amdgpu_ras_sysfs_remove_dev_attr_node()
1842 &con->event_state_attr.attr, in amdgpu_ras_sysfs_remove_dev_attr_node()
1916 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_sysfs_remove_all() local
1919 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_sysfs_remove_all()
1953 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_debugfs_create_ctrl_node() local
1954 struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control; in amdgpu_ras_debugfs_create_ctrl_node()
1964 &con->bad_page_cnt_threshold); in amdgpu_ras_debugfs_create_ctrl_node()
1970 con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table", in amdgpu_ras_debugfs_create_ctrl_node()
1973 amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control); in amdgpu_ras_debugfs_create_ctrl_node()
1983 debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot); in amdgpu_ras_debugfs_create_ctrl_node()
1990 &con->disable_ras_err_cnt_harvest); in amdgpu_ras_debugfs_create_ctrl_node()
2033 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_debugfs_create_all() local
2042 if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con) in amdgpu_ras_debugfs_create_all()
2047 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_debugfs_create_all()
2080 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fs_init() local
2085 &con->features_attr.attr, in amdgpu_ras_fs_init()
2086 &con->version_attr.attr, in amdgpu_ras_fs_init()
2087 &con->schema_attr.attr, in amdgpu_ras_fs_init()
2088 &con->event_state_attr.attr, in amdgpu_ras_fs_init()
2100 con->features_attr = dev_attr_features; in amdgpu_ras_fs_init()
2104 con->version_attr = dev_attr_version; in amdgpu_ras_fs_init()
2108 con->schema_attr = dev_attr_schema; in amdgpu_ras_fs_init()
2112 con->event_state_attr = dev_attr_event_state; in amdgpu_ras_fs_init()
2118 con->badpages_attr = bin_attr_gpu_vram_bad_pages; in amdgpu_ras_fs_init()
2119 bin_attrs[0] = &con->badpages_attr; in amdgpu_ras_fs_init()
2133 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fs_fini() local
2137 list_for_each_entry_safe(con_obj, tmp, &con->head, node) { in amdgpu_ras_fs_fini()
2188 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_interrupt_poison_consumption_handler() local
2193 if (!block_obj || !con) in amdgpu_ras_interrupt_poison_consumption_handler()
2252 struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev); in amdgpu_ras_interrupt_poison_creation_handler() local
2254 atomic_inc(&con->page_retirement_req_cnt); in amdgpu_ras_interrupt_poison_creation_handler()
2255 atomic_inc(&con->poison_creation_count); in amdgpu_ras_interrupt_poison_creation_handler()
2257 wake_up(&con->page_retirement_wq); in amdgpu_ras_interrupt_poison_creation_handler()
2432 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_interrupt_remove_all() local
2435 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_interrupt_remove_all()
2446 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_log_on_err_counter() local
2449 if (!adev->ras_enabled || !con) in amdgpu_ras_log_on_err_counter()
2452 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_log_on_err_counter()
2521 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_err_status() local
2524 if (!adev->ras_enabled || !con) in amdgpu_ras_query_err_status()
2527 list_for_each_entry(obj, &con->head, node) { in amdgpu_ras_query_err_status()
2544 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_badpages_read() local
2549 if (!con || !con->eh_data || !bps || !count) in amdgpu_ras_badpages_read()
2552 mutex_lock(&con->recovery_lock); in amdgpu_ras_badpages_read()
2553 data = con->eh_data; in amdgpu_ras_badpages_read()
2582 mutex_unlock(&con->recovery_lock); in amdgpu_ras_badpages_read()
2803 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_add_bad_pages() local
2814 if (!con || !con->eh_data || !bps || pages <= 0) in amdgpu_ras_add_bad_pages()
2833 mutex_lock(&con->recovery_lock); in amdgpu_ras_add_bad_pages()
2834 data = con->eh_data; in amdgpu_ras_add_bad_pages()
2908 if (amdgpu_ras_check_bad_page_unlock(con, in amdgpu_ras_add_bad_pages()
2931 mutex_unlock(&con->recovery_lock); in amdgpu_ras_add_bad_pages()
2944 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_save_bad_pages() local
2949 if (!con || !con->eh_data) { in amdgpu_ras_save_bad_pages()
2956 mutex_lock(&con->recovery_lock); in amdgpu_ras_save_bad_pages()
2957 control = &con->eeprom_control; in amdgpu_ras_save_bad_pages()
2958 data = con->eh_data; in amdgpu_ras_save_bad_pages()
2961 mutex_unlock(&con->recovery_lock); in amdgpu_ras_save_bad_pages()
3043 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, in amdgpu_ras_check_bad_page_unlock() argument
3046 struct ras_err_handler_data *data = con->eh_data; in amdgpu_ras_check_bad_page_unlock()
3065 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_check_bad_page() local
3068 if (!con || !con->eh_data) in amdgpu_ras_check_bad_page()
3071 mutex_lock(&con->recovery_lock); in amdgpu_ras_check_bad_page()
3072 ret = amdgpu_ras_check_bad_page_unlock(con, addr); in amdgpu_ras_check_bad_page()
3073 mutex_unlock(&con->recovery_lock); in amdgpu_ras_check_bad_page()
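
Lines 3043-3073 show a locking convention worth naming: amdgpu_ras_check_bad_page_unlock() assumes the caller already holds recovery_lock, and amdgpu_ras_check_bad_page() is the public wrapper that takes and drops the lock around it. A pthread sketch of the same split, with invented types:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct bad_page_table {
        pthread_mutex_t lock;
        unsigned long pages[8];
        int count;
};

/* "_unlock" suffix convention: caller must already hold tbl->lock. */
static bool check_bad_page_unlock(struct bad_page_table *tbl, unsigned long addr)
{
        for (int i = 0; i < tbl->count; i++)
                if (tbl->pages[i] == addr)
                        return true;
        return false;
}

/* Public wrapper: takes the lock, delegates, releases. */
static bool check_bad_page(struct bad_page_table *tbl, unsigned long addr)
{
        pthread_mutex_lock(&tbl->lock);
        bool hit = check_bad_page_unlock(tbl, addr);
        pthread_mutex_unlock(&tbl->lock);
        return hit;
}

int main(void)
{
        struct bad_page_table tbl = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .pages = { 0x1000 }, .count = 1,
        };
        printf("0x1000 bad? %d\n", check_bad_page(&tbl, 0x1000));
        printf("0x2000 bad? %d\n", check_bad_page(&tbl, 0x2000));
        return 0;
}
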
3080 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_validate_threshold() local
3106 con->bad_page_cnt_threshold = min(lower_32_bits(val), in amdgpu_ras_validate_threshold()
3109 con->bad_page_cnt_threshold = min_t(int, max_count, in amdgpu_ras_validate_threshold()
3120 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_put_poison_req() local
3129 ret = kfifo_put(&con->poison_fifo, poison_msg); in amdgpu_ras_put_poison_req()
3141 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_get_poison_req() local
3143 return kfifo_get(&con->poison_fifo, poison_msg); in amdgpu_ras_get_poison_req()
3175 static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con, in amdgpu_ras_schedule_retirement_dwork() argument
3180 mutex_lock(&con->umc_ecc_log.lock); in amdgpu_ras_schedule_retirement_dwork()
3181 ret = radix_tree_tagged(&con->umc_ecc_log.de_page_tree, in amdgpu_ras_schedule_retirement_dwork()
3183 mutex_unlock(&con->umc_ecc_log.lock); in amdgpu_ras_schedule_retirement_dwork()
3186 schedule_delayed_work(&con->page_retirement_dwork, in amdgpu_ras_schedule_retirement_dwork()
3194 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, in amdgpu_ras_do_page_retirement() local
3196 struct amdgpu_device *adev = con->adev; in amdgpu_ras_do_page_retirement()
3202 amdgpu_ras_schedule_retirement_dwork(con, in amdgpu_ras_do_page_retirement()
3217 amdgpu_ras_schedule_retirement_dwork(con, in amdgpu_ras_do_page_retirement()
3284 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_clear_poison_fifo() local
3289 ret = kfifo_get(&con->poison_fifo, &msg); in amdgpu_ras_clear_poison_fifo()
3296 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_poison_consumption_handler() local
3323 flush_delayed_work(&con->page_retirement_dwork); in amdgpu_ras_poison_consumption_handler()
3325 con->gpu_reset_flags |= reset; in amdgpu_ras_poison_consumption_handler()
3331 flush_work(&con->recovery_work); in amdgpu_ras_poison_consumption_handler()
3340 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_page_retirement_thread() local
3347 wait_event_interruptible(con->page_retirement_wq, in amdgpu_ras_page_retirement_thread()
3349 atomic_read(&con->page_retirement_req_cnt)); in amdgpu_ras_page_retirement_thread()
3357 poison_creation_count = atomic_read(&con->poison_creation_count); in amdgpu_ras_page_retirement_thread()
3363 atomic_sub(poison_creation_count, &con->poison_creation_count); in amdgpu_ras_page_retirement_thread()
3364 atomic_sub(poison_creation_count, &con->page_retirement_req_cnt); in amdgpu_ras_page_retirement_thread()
3366 } while (atomic_read(&con->poison_creation_count)); in amdgpu_ras_page_retirement_thread()
3369 msg_count = kfifo_len(&con->poison_fifo); in amdgpu_ras_page_retirement_thread()
3375 atomic_sub(msg_count, &con->page_retirement_req_cnt); in amdgpu_ras_page_retirement_thread()
3382 atomic_set(&con->poison_creation_count, 0); in amdgpu_ras_page_retirement_thread()
3388 atomic_set(&con->page_retirement_req_cnt, 0); in amdgpu_ras_page_retirement_thread()
3397 schedule_delayed_work(&con->page_retirement_dwork, 0); in amdgpu_ras_page_retirement_thread()
3401 msg_count = kfifo_len(&con->poison_fifo); in amdgpu_ras_page_retirement_thread()
3404 atomic_sub(msg_count, &con->page_retirement_req_cnt); in amdgpu_ras_page_retirement_thread()
3408 schedule_delayed_work(&con->page_retirement_dwork, 0); in amdgpu_ras_page_retirement_thread()
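
Lines 2252-2257 (the producer) and 3340-3408 (the consumer) outline the page-retirement pipeline: interrupt-side code pushes poison messages into con->poison_fifo, bumps atomic request counters, and wakes a dedicated kthread that drains both. A compressed user-space analog, using C11 atomics, a mutex/condvar pair in place of the wait queue, and a trivial ring buffer in place of the kfifo; everything here is a stand-in:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define FIFO_SZ 16

static int fifo[FIFO_SZ];               /* stands in for con->poison_fifo */
static int head, tail;
static atomic_int req_cnt;              /* con->page_retirement_req_cnt */
static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;    /* page_retirement_wq */

/* Producer: the poison-creation interrupt path. */
static void poison_creation(int msg)
{
        pthread_mutex_lock(&mtx);
        fifo[head++ % FIFO_SZ] = msg;   /* kfifo_put() */
        atomic_fetch_add(&req_cnt, 1);  /* atomic_inc(&...req_cnt) */
        pthread_cond_signal(&wq);       /* wake_up(&...wq) */
        pthread_mutex_unlock(&mtx);
}

/* Consumer: the page-retirement thread body. */
static void *retirement_thread(void *arg)
{
        (void)arg;
        for (int handled = 0; handled < 3; handled++) {
                pthread_mutex_lock(&mtx);
                while (atomic_load(&req_cnt) == 0)      /* wait_event_interruptible */
                        pthread_cond_wait(&wq, &mtx);
                int msg = fifo[tail++ % FIFO_SZ];       /* kfifo_get() */
                atomic_fetch_sub(&req_cnt, 1);          /* atomic_sub(...) */
                pthread_mutex_unlock(&mtx);
                printf("retiring page for msg %d\n", msg);
        }
        return NULL;
}

int main(void)
{
        pthread_t thr;

        pthread_create(&thr, NULL, retirement_thread, NULL);
        for (int i = 1; i <= 3; i++)
                poison_creation(i);
        pthread_join(thr, NULL);
        return 0;
}

The real thread additionally drains poison_creation_count in an inner loop and kicks page_retirement_dwork when messages remain; the sketch keeps only the wake-and-drain skeleton.
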
3417 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_init_badpage_info() local
3421 if (!con || amdgpu_sriov_vf(adev)) in amdgpu_ras_init_badpage_info()
3424 control = &con->eeprom_control; in amdgpu_ras_init_badpage_info()
3445 if (con->update_channel_flag == true) { in amdgpu_ras_init_badpage_info()
3448 con->update_channel_flag = false; in amdgpu_ras_init_badpage_info()
3457 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_recovery_init() local
3462 if (!con || amdgpu_sriov_vf(adev)) in amdgpu_ras_recovery_init()
3470 con->adev = adev; in amdgpu_ras_recovery_init()
3475 data = &con->eh_data; in amdgpu_ras_recovery_init()
3482 mutex_init(&con->recovery_lock); in amdgpu_ras_recovery_init()
3483 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); in amdgpu_ras_recovery_init()
3484 atomic_set(&con->in_recovery, 0); in amdgpu_ras_recovery_init()
3485 con->eeprom_control.bad_channel_bitmap = 0; in amdgpu_ras_recovery_init()
3487 max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control); in amdgpu_ras_recovery_init()
3496 mutex_init(&con->page_rsv_lock); in amdgpu_ras_recovery_init()
3497 INIT_KFIFO(con->poison_fifo); in amdgpu_ras_recovery_init()
3498 mutex_init(&con->page_retirement_lock); in amdgpu_ras_recovery_init()
3499 init_waitqueue_head(&con->page_retirement_wq); in amdgpu_ras_recovery_init()
3500 atomic_set(&con->page_retirement_req_cnt, 0); in amdgpu_ras_recovery_init()
3501 atomic_set(&con->poison_creation_count, 0); in amdgpu_ras_recovery_init()
3502 con->page_retirement_thread = in amdgpu_ras_recovery_init()
3504 if (IS_ERR(con->page_retirement_thread)) { in amdgpu_ras_recovery_init()
3505 con->page_retirement_thread = NULL; in amdgpu_ras_recovery_init()
3509 INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement); in amdgpu_ras_recovery_init()
3510 amdgpu_ras_ecc_log_init(&con->umc_ecc_log); in amdgpu_ras_recovery_init()
3521 con->eh_data = NULL; in amdgpu_ras_recovery_init()
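
amdgpu_ras_recovery_init() strings the recovery machinery together in a fixed order: recovery_lock and the recovery work item, the EEPROM control, the page-reservation lock, the poison FIFO, the wait queue and counters, and finally the retirement kthread, with con->eh_data cleared again on the failure path (line 3521). A generic sketch of that init-then-unwind shape, assuming the conventional goto-based cleanup of kernel C; all names are invented:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct recovery_ctx {
        pthread_mutex_t recovery_lock;
        int *eh_data;                   /* stands in for con->eh_data */
        pthread_t thread;
        int thread_started;
};

static void *worker(void *arg) { (void)arg; return NULL; }

static int recovery_init(struct recovery_ctx *con, int fail_thread)
{
        pthread_mutex_init(&con->recovery_lock, NULL);

        con->eh_data = calloc(1, sizeof(*con->eh_data));
        if (!con->eh_data)
                goto free_lock;

        /* Last step: spawn the worker (the kthread in the real code). */
        if (fail_thread || pthread_create(&con->thread, NULL, worker, NULL))
                goto free_data;
        con->thread_started = 1;
        return 0;

free_data:                              /* unwind in reverse order of setup */
        free(con->eh_data);
        con->eh_data = NULL;            /* mirrors con->eh_data = NULL above */
free_lock:
        pthread_mutex_destroy(&con->recovery_lock);
        return -1;
}

int main(void)
{
        struct recovery_ctx ok = { 0 }, bad = { 0 };

        printf("init ok:   %d\n", recovery_init(&ok, 0));
        if (ok.thread_started)
                pthread_join(ok.thread, NULL);
        printf("init fail: %d\n", recovery_init(&bad, 1));
        return 0;
}
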
3539 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_recovery_fini() local
3540 struct ras_err_handler_data *data = con->eh_data; in amdgpu_ras_recovery_fini()
3550 flush_delayed_work(&con->page_retirement_dwork); in amdgpu_ras_recovery_fini()
3551 ret = amdgpu_ras_schedule_retirement_dwork(con, 0); in amdgpu_ras_recovery_fini()
3554 if (con->page_retirement_thread) in amdgpu_ras_recovery_fini()
3555 kthread_stop(con->page_retirement_thread); in amdgpu_ras_recovery_fini()
3557 atomic_set(&con->page_retirement_req_cnt, 0); in amdgpu_ras_recovery_fini()
3558 atomic_set(&con->poison_creation_count, 0); in amdgpu_ras_recovery_fini()
3560 mutex_destroy(&con->page_rsv_lock); in amdgpu_ras_recovery_fini()
3562 cancel_work_sync(&con->recovery_work); in amdgpu_ras_recovery_fini()
3564 cancel_delayed_work_sync(&con->page_retirement_dwork); in amdgpu_ras_recovery_fini()
3566 amdgpu_ras_ecc_log_fini(&con->umc_ecc_log); in amdgpu_ras_recovery_fini()
3568 mutex_lock(&con->recovery_lock); in amdgpu_ras_recovery_fini()
3569 con->eh_data = NULL; in amdgpu_ras_recovery_fini()
3572 mutex_unlock(&con->recovery_lock); in amdgpu_ras_recovery_fini()
3682 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_query_poison_mode() local
3686 if (amdgpu_sriov_vf(adev) || !con) in amdgpu_ras_query_poison_mode()
3693 con->poison_supported = true; in amdgpu_ras_query_poison_mode()
3705 con->poison_supported = true; in amdgpu_ras_query_poison_mode()
3773 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, in amdgpu_ras_counte_dw() local
3775 struct amdgpu_device *adev = con->adev; in amdgpu_ras_counte_dw()
3787 atomic_set(&con->ras_ce_count, ce_count); in amdgpu_ras_counte_dw()
3788 atomic_set(&con->ras_ue_count, ue_count); in amdgpu_ras_counte_dw()
3842 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_init_reserved_vram_size() local
3844 if (!con || (adev->flags & AMD_IS_APU)) in amdgpu_ras_init_reserved_vram_size()
3852 con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE; in amdgpu_ras_init_reserved_vram_size()
3861 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_init() local
3864 if (con) in amdgpu_ras_init()
3867 con = kzalloc(sizeof(*con) + in amdgpu_ras_init()
3871 if (!con) in amdgpu_ras_init()
3874 con->adev = adev; in amdgpu_ras_init()
3875 INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw); in amdgpu_ras_init()
3876 atomic_set(&con->ras_ce_count, 0); in amdgpu_ras_init()
3877 atomic_set(&con->ras_ue_count, 0); in amdgpu_ras_init()
3879 con->objs = (struct ras_manager *)(con + 1); in amdgpu_ras_init()
3881 amdgpu_ras_set_context(adev, con); in amdgpu_ras_init()
3890 con->features |= BIT(AMDGPU_RAS_BLOCK__GFX); in amdgpu_ras_init()
3899 con->update_channel_flag = false; in amdgpu_ras_init()
3900 con->features = 0; in amdgpu_ras_init()
3901 con->schema = 0; in amdgpu_ras_init()
3902 INIT_LIST_HEAD(&con->head); in amdgpu_ras_init()
3904 con->flags = RAS_DEFAULT_FLAGS; in amdgpu_ras_init()
3970 con->features |= ((adev->smuio.funcs->get_socket_id(adev)) << in amdgpu_ras_init()
3974 con->schema = amdgpu_get_ras_schema(adev); in amdgpu_ras_init()
3999 kfree(con); in amdgpu_ras_init()
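
Lines 3867 and 3879 show a single-allocation idiom: the context and its objs[] array come from one kzalloc() of sizeof(*con) plus the array size, and con->objs is then pointed just past the struct at (con + 1). A user-space sketch of that layout; the element type and count are placeholders:

#include <stdio.h>
#include <stdlib.h>

struct ras_manager { int block; };

struct ras_ctx {
        unsigned int features;
        struct ras_manager *objs;       /* points into the same allocation */
};

#define NOBJS 8

int main(void)
{
        /* One zeroed allocation for the struct plus its trailing array,
         * mirroring kzalloc(sizeof(*con) + obj_count * sizeof(*objs)). */
        struct ras_ctx *con = calloc(1, sizeof(*con) +
                                        NOBJS * sizeof(struct ras_manager));
        if (!con)
                return 1;

        con->objs = (struct ras_manager *)(con + 1);    /* array starts right after the struct */
        con->objs[3].block = 42;

        printf("objs starts %zu bytes into the block\n",
               (size_t)((char *)con->objs - (char *)con));
        printf("objs[3].block = %d\n", con->objs[3].block);
        free(con);
        return 0;
}
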
4033 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_is_poison_mode_supported() local
4035 if (!con) in amdgpu_ras_is_poison_mode_supported()
4038 return con->poison_supported; in amdgpu_ras_is_poison_mode_supported()
4046 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_block_late_init() local
4098 atomic_set(&con->ras_ce_count, ce_count); in amdgpu_ras_block_late_init()
4099 atomic_set(&con->ras_ue_count, ue_count); in amdgpu_ras_block_late_init()
4147 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_resume() local
4150 if (!adev->ras_enabled || !con) { in amdgpu_ras_resume()
4157 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { in amdgpu_ras_resume()
4169 list_for_each_entry_safe(obj, tmp, &con->head, node) { in amdgpu_ras_resume()
4181 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_suspend() local
4183 if (!adev->ras_enabled || !con) in amdgpu_ras_suspend()
4188 if (AMDGPU_RAS_GET_FEATURES(con->features)) in amdgpu_ras_suspend()
4249 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_pre_fini() local
4251 if (!adev->ras_enabled || !con) in amdgpu_ras_pre_fini()
4256 if (AMDGPU_RAS_GET_FEATURES(con->features)) in amdgpu_ras_pre_fini()
4266 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_fini() local
4268 if (!adev->ras_enabled || !con) in amdgpu_ras_fini()
4296 WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared"); in amdgpu_ras_fini()
4298 if (AMDGPU_RAS_GET_FEATURES(con->features)) in amdgpu_ras_fini()
4301 cancel_delayed_work_sync(&con->ras_counte_delay_work); in amdgpu_ras_fini()
4304 kfree(con); in amdgpu_ras_fini()
4471 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_release_ras_context() local
4473 if (!con) in amdgpu_release_ras_context()
4476 if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) { in amdgpu_release_ras_context()
4477 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX); in amdgpu_release_ras_context()
4479 kfree(con); in amdgpu_release_ras_context()
4669 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_set_mca_debug_mode() local
4672 if (con) { in amdgpu_ras_set_mca_debug_mode()
4675 con->is_aca_debug_mode = enable; in amdgpu_ras_set_mca_debug_mode()
4683 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_set_aca_debug_mode() local
4686 if (con) { in amdgpu_ras_set_aca_debug_mode()
4692 con->is_aca_debug_mode = enable; in amdgpu_ras_set_aca_debug_mode()
4700 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_get_aca_debug_mode() local
4704 if (!con) in amdgpu_ras_get_aca_debug_mode()
4709 return con->is_aca_debug_mode; in amdgpu_ras_get_aca_debug_mode()
4717 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_get_error_query_mode() local
4721 if (!con) { in amdgpu_ras_get_error_query_mode()
4730 (con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY; in amdgpu_ras_get_error_query_mode()
5169 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_reserve_page() local
5174 mutex_lock(&con->page_rsv_lock); in amdgpu_ras_reserve_page()
5178 mutex_unlock(&con->page_rsv_lock); in amdgpu_ras_reserve_page()
5203 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); in amdgpu_ras_is_rma() local
5205 if (!con) in amdgpu_ras_is_rma()
5208 return con->is_rma; in amdgpu_ras_is_rma()
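
The accessors that close the listing, amdgpu_ras_is_poison_mode_supported(), amdgpu_ras_get_aca_debug_mode() and amdgpu_ras_is_rma(), share one shape: fetch the context, return a safe default when it is absent, otherwise return the cached flag. A final small sketch of that guarded-getter shape, with invented names:

#include <stdbool.h>
#include <stdio.h>

struct ras_ctx { bool poison_supported; };

struct device { struct ras_ctx *ras; };         /* context may be NULL */

/* Mirrors: if (!con) return false; return con->poison_supported; */
static bool poison_mode_supported(struct device *dev)
{
        struct ras_ctx *con = dev->ras;

        if (!con)
                return false;   /* safe default: RAS was never initialized */
        return con->poison_supported;
}

int main(void)
{
        struct ras_ctx ctx = { .poison_supported = true };
        struct device with = { .ras = &ctx }, without = { .ras = NULL };

        printf("with ctx:    %d\n", poison_mode_supported(&with));
        printf("without ctx: %d\n", poison_mode_supported(&without));
        return 0;
}
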