Lines matching defs:tmp_adev. Every match below comes from the GPU reset/recovery paths, where tmp_adev is the local cursor used to walk a hive's devices or the per-recovery reset list.
5094 struct amdgpu_device *tmp_adev = reset_context->reset_req_dev;
5147 dev_info(tmp_adev->dev, "Dumping IP State\n");
5149 for (i = 0; i < tmp_adev->num_ip_blocks; i++)
5150 if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
5151 tmp_adev->ip_blocks[i].version->funcs
5152 ->dump_ip_state((void *)&tmp_adev->ip_blocks[i]);
5153 dev_info(tmp_adev->dev, "Dumping IP State Completed\n");
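The matches at 5147-5153 bracket an IP-state dump with dev_info() markers and show the driver's optional-callback dispatch over IP blocks. A minimal sketch of that idiom; ip_funcs, ip_block, and dev_sketch are hypothetical reduced stand-ins, not the real amdgpu structures:

/* Hypothetical reduced stand-ins for the amdgpu IP-block plumbing. */
struct ip_funcs {
        void (*dump_ip_state)(void *handle);    /* optional per-IP hook */
};
struct ip_version { const struct ip_funcs *funcs; };
struct ip_block   { const struct ip_version *version; };

struct dev_sketch {
        int num_ip_blocks;
        struct ip_block ip_blocks[8];
};

/* Walk every IP block and call the hook only where the IP version
 * implements it; versions without dump_ip_state are skipped, which is
 * what makes the callback optional. */
static void dump_all_ip_state(struct dev_sketch *adev)
{
        int i;

        for (i = 0; i < adev->num_ip_blocks; i++)
                if (adev->ip_blocks[i].version->funcs->dump_ip_state)
                        adev->ip_blocks[i].version->funcs
                                ->dump_ip_state(&adev->ip_blocks[i]);
}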
5172 struct amdgpu_device *tmp_adev;
5192 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5193 amdgpu_set_init_level(tmp_adev, init_level);
5196 amdgpu_reset_set_dpc_status(tmp_adev, false);
5197 amdgpu_ras_clear_err_state(tmp_adev);
5198 r = amdgpu_device_asic_init(tmp_adev);
5200 dev_warn(tmp_adev->dev, "asic atom init failed!");
5202 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
5204 r = amdgpu_device_ip_resume_phase1(tmp_adev);
5208 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
5211 amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job);
5215 tmp_adev->dev,
5217 amdgpu_inc_vram_lost(tmp_adev);
5220 r = amdgpu_device_fw_loading(tmp_adev);
5225 tmp_adev->xcp_mgr);
5229 r = amdgpu_device_ip_resume_phase2(tmp_adev);
5233 amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
5235 r = amdgpu_device_ip_resume_phase3(tmp_adev);
5240 amdgpu_device_fill_reset_magic(tmp_adev);
5246 amdgpu_register_gpu_instance(tmp_adev);
5249 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5250 amdgpu_xgmi_add_device(tmp_adev);
5252 r = amdgpu_device_ip_late_init(tmp_adev);
5256 r = amdgpu_userq_post_reset(tmp_adev, vram_lost);
5260 drm_client_dev_resume(adev_to_drm(tmp_adev));
5272 if (!amdgpu_ras_is_rma(tmp_adev)) {
5274 amdgpu_ras_resume(tmp_adev);
5282 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5284 reset_context->hive, tmp_adev);
5291 amdgpu_set_init_level(tmp_adev,
5293 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5294 r = amdgpu_ib_ring_tests(tmp_adev);
5296 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5303 tmp_adev->asic_reset_res = r;
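The run from 5172 to 5303 is a single list_for_each_entry() pass that brings every device on the reset list back up after the ASIC reset: atom re-init, IP resume phases 1 through 3 with firmware loading in between, VRAM-loss accounting, late init, and finally IB ring tests, with the first error stashed in asic_reset_res (5303). A compressed, hypothetical sketch of that control flow; dev_sketch and the phase helpers are reduced stand-ins for the real amdgpu symbols:

#include <linux/list.h>

/* Reduced stand-in; the real struct amdgpu_device is far larger. */
struct dev_sketch {
        struct list_head reset_list;    /* node on the per-recovery list */
        int asic_reset_res;             /* first error seen for this device */
};

/* Placeholders standing in for the resume-phase and firmware-load calls. */
static int resume_phase1(struct dev_sketch *d) { return 0; }
static int fw_load(struct dev_sketch *d)       { return 0; }
static int resume_phase2(struct dev_sketch *d) { return 0; }
static int resume_phase3(struct dev_sketch *d) { return 0; }

static int reinit_after_reset(struct list_head *device_list)
{
        struct dev_sketch *tmp_adev;
        int r = 0;

        list_for_each_entry(tmp_adev, device_list, reset_list) {
                /* First failure skips the remaining phases for this
                 * device and is remembered in asic_reset_res. */
                r = resume_phase1(tmp_adev);
                if (!r)
                        r = fw_load(tmp_adev);
                if (!r)
                        r = resume_phase2(tmp_adev);
                if (!r)
                        r = resume_phase3(tmp_adev);
                if (r)
                        tmp_adev->asic_reset_res = r;
        }
        return r;
}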
5313 struct amdgpu_device *tmp_adev = NULL;
5318 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5322 r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
5339 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5341 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5343 &tmp_adev->xgmi_reset_work))
5346 r = amdgpu_asic_reset(tmp_adev);
5349 dev_err(tmp_adev->dev,
5351 r, adev_to_drm(tmp_adev)->unique);
5358 list_for_each_entry(tmp_adev, device_list_handle,
5360 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5361 flush_work(&tmp_adev->xgmi_reset_work);
5362 r = tmp_adev->asic_reset_res;
5371 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5372 amdgpu_ras_reset_error_count(tmp_adev,
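Matches 5313-5372 are the ASIC-reset step itself. 5322 first tries a reset-handler-driven reset through the first list entry; failing that, for XGMI hives (num_physical_nodes > 1) each node's reset is queued on its xgmi_reset_work so all nodes drop into reset together, and a second pass flushes the work items and collects the per-device results. A hedged sketch of that fan-out/fan-in shape; dev_sketch and serial_reset() are reduced stand-ins, and it assumes each xgmi_reset_work was set up with INIT_WORK() during device init:

#include <linux/list.h>
#include <linux/workqueue.h>

struct dev_sketch {
        struct list_head reset_list;
        struct work_struct xgmi_reset_work; /* runs the per-node reset */
        int asic_reset_res;                 /* filled in by the work fn */
        int num_physical_nodes;             /* >1 means XGMI hive */
};

static int serial_reset(struct dev_sketch *d) { return 0; }

static int reset_all(struct list_head *device_list)
{
        struct dev_sketch *tmp_adev;
        int r = 0;

        /* Fan out: queue every hive node's reset so they run together;
         * a lone device is simply reset inline. */
        list_for_each_entry(tmp_adev, device_list, reset_list) {
                if (tmp_adev->num_physical_nodes > 1)
                        schedule_work(&tmp_adev->xgmi_reset_work);
                else
                        r = serial_reset(tmp_adev);
        }

        /* Fan in: wait for each work item and pick up its result. */
        list_for_each_entry(tmp_adev, device_list, reset_list) {
                if (tmp_adev->num_physical_nodes > 1) {
                        flush_work(&tmp_adev->xgmi_reset_work);
                        r = tmp_adev->asic_reset_res;
                }
        }
        return r;    /* last error observed, 0 if every node came back */
}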
5497 struct amdgpu_device *tmp_adev;
5500 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5501 ret |= amdgpu_device_bus_status_check(tmp_adev);
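The short loop at 5497-5501 OR-accumulates a per-device bus status check, so a dead bus on any one hive member fails the check for the whole list. The idiom in isolation; bus_ok() is a hypothetical stand-in returning 0 for healthy:

#include <linux/list.h>

struct dev_sketch { struct list_head reset_list; };

static int bus_ok(struct dev_sketch *d) { return 0; }

static int check_all_buses(struct list_head *device_list)
{
        struct dev_sketch *tmp_adev;
        int ret = 0;

        /* ret stays 0 only if every device passes; any nonzero
         * status sticks for the rest of the walk. */
        list_for_each_entry(tmp_adev, device_list, reset_list)
                ret |= bus_ok(tmp_adev);
        return ret;
}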
5511 struct amdgpu_device *tmp_adev = NULL;
5519 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5520 list_add_tail(&tmp_adev->reset_list, device_list);
5522 tmp_adev->shutdown = true;
5524 tmp_adev->pcie_reset_ctx.in_link_reset = true;
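Matches 5511-5524 build the flat reset list that the loops above iterate: each device hangs off the hive permanently via gmc.xgmi.head and is linked onto a transient per-recovery list through its reset_list node, with shutdown and in_link_reset flagged on the way. A sketch of that two-list arrangement; dev_sketch and its field names are hypothetical reductions:

#include <linux/list.h>
#include <linux/types.h>

struct dev_sketch {
        struct list_head xgmi_head;     /* permanent hive membership */
        struct list_head reset_list;    /* transient: this recovery only */
        bool shutdown;
        bool in_link_reset;
};

static void build_reset_list(struct list_head *hive_devices,
                             struct list_head *device_list)
{
        struct dev_sketch *tmp_adev;

        list_for_each_entry(tmp_adev, hive_devices, xgmi_head) {
                list_add_tail(&tmp_adev->reset_list, device_list);
                tmp_adev->shutdown = true;          /* see match at 5522 */
                tmp_adev->in_link_reset = true;     /* link-reset case, 5524 */
        }
}

Keeping the recovery list separate from the hive list is what lets a single recovery span several devices without disturbing the hive topology.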
5536 struct amdgpu_device *tmp_adev = NULL;
5540 tmp_adev =
5542 amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5548 struct amdgpu_device *tmp_adev = NULL;
5552 tmp_adev =
5554 amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
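Matches 5536-5554 take and release the reset domain lock through the first list entry only: hive members share one reset domain, so a single lock serializes recovery for all of them. The bracket in isolation; dev_sketch, lock_domain(), and unlock_domain() are hypothetical stand-ins:

#include <linux/list.h>

struct reset_domain;    /* opaque here */
struct dev_sketch {
        struct list_head reset_list;
        struct reset_domain *reset_domain;  /* shared by the whole hive */
};

static void lock_domain(struct reset_domain *d)   { }
static void unlock_domain(struct reset_domain *d) { }

static void lock_recovery(struct list_head *device_list)
{
        /* One lock covers every device: grab it via the first entry. */
        struct dev_sketch *tmp_adev =
                list_first_entry(device_list, struct dev_sketch, reset_list);

        lock_domain(tmp_adev->reset_domain);
}

static void unlock_recovery(struct list_head *device_list)
{
        struct dev_sketch *tmp_adev =
                list_first_entry(device_list, struct dev_sketch, reset_list);

        unlock_domain(tmp_adev->reset_domain);
}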
5564 struct amdgpu_device *tmp_adev = NULL;
5568 list_for_each_entry(tmp_adev, device_list, reset_list) {
5569 amdgpu_device_set_mp1_state(tmp_adev);
5581 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5582 tmp_adev->pcie_reset_ctx.audio_suspended = true;
5584 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5586 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5588 amdgpu_amdkfd_pre_reset(tmp_adev, reset_context);
5594 amdgpu_unregister_gpu_instance(tmp_adev);
5596 drm_client_dev_suspend(adev_to_drm(tmp_adev));
5600 amdgpu_device_ip_need_full_reset(tmp_adev))
5601 amdgpu_ras_suspend(tmp_adev);
5603 amdgpu_userq_pre_reset(tmp_adev);
5606 struct amdgpu_ring *ring = tmp_adev->rings[i];
5616 atomic_inc(&tmp_adev->gpu_reset_counter);
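Matches 5564-5616 quiesce every device before the reset: MP1 state is set, display audio is suspended with the outcome recorded per device, RAS error queries are disabled, delayed init work is cancelled, KFD and user queues are notified, and gpu_reset_counter is bumped. The detail worth noting is the recorded suspend, sketched below with hypothetical reduced types; it keeps the later unwind from resuming audio the driver never suspended:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/types.h>

struct dev_sketch {
        struct list_head reset_list;
        bool audio_suspended;           /* only resume what we suspended */
        atomic_t gpu_reset_counter;     /* observed by submitters */
};

static bool suspend_audio(struct dev_sketch *d) { return true; }

static void quiesce_all(struct list_head *device_list)
{
        struct dev_sketch *tmp_adev;

        list_for_each_entry(tmp_adev, device_list, reset_list) {
                /* Record success so the unwind stays asymmetric-safe. */
                if (suspend_audio(tmp_adev))
                        tmp_adev->audio_suspended = true;

                atomic_inc(&tmp_adev->gpu_reset_counter);
        }
}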
5624 struct amdgpu_device *tmp_adev = NULL;
5629 list_for_each_entry(tmp_adev, device_list, reset_list) {
5630 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5633 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5634 r, adev_to_drm(tmp_adev)->unique);
5635 tmp_adev->asic_reset_res = r;
5666 list_for_each_entry(tmp_adev, device_list, reset_list) {
5673 amdgpu_device_stop_pending_resets(tmp_adev);
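Matches 5624-5673 run the per-device pre-reset hook. A failure is logged and parked in asic_reset_res rather than aborting the walk (5633-5635), so the later pass at 5666-5673 still cancels pending resets on every device. The capture-and-continue shape, with hypothetical stand-ins:

#include <linux/list.h>

struct dev_sketch {
        struct list_head reset_list;
        int asic_reset_res;
};

static int pre_asic_reset(struct dev_sketch *d)        { return 0; }
static void stop_pending_resets(struct dev_sketch *d)  { }

static void prepare_all(struct list_head *device_list)
{
        struct dev_sketch *tmp_adev;
        int r;

        list_for_each_entry(tmp_adev, device_list, reset_list) {
                r = pre_asic_reset(tmp_adev);
                if (r)
                        tmp_adev->asic_reset_res = r;  /* remember, keep going */
        }

        /* Every device gets this, even ones whose pre-reset failed. */
        list_for_each_entry(tmp_adev, device_list, reset_list)
                stop_pending_resets(tmp_adev);
}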
5683 struct amdgpu_device *tmp_adev = NULL;
5687 list_for_each_entry(tmp_adev, device_list, reset_list) {
5690 struct amdgpu_ring *ring = tmp_adev->rings[i];
5698 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
5699 drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5701 if (tmp_adev->asic_reset_res) {
5707 !amdgpu_ras_eeprom_check_err_threshold(tmp_adev))
5709 tmp_adev->dev,
5712 &tmp_adev->gpu_reset_counter),
5713 tmp_adev->asic_reset_res);
5714 amdgpu_vf_error_put(tmp_adev,
5716 tmp_adev->asic_reset_res);
5718 r = tmp_adev->asic_reset_res;
5719 tmp_adev->asic_reset_res = 0;
5721 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n",
5722 atomic_read(&tmp_adev->gpu_reset_counter));
5723 if (amdgpu_acpi_smart_shift_update(tmp_adev,
5725 dev_warn(tmp_adev->dev,
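Matches 5683-5725 report the outcome once the schedulers are back: per device, a nonzero asic_reset_res selects the failure path (a VF error record, and on bare metal a dev_err keyed to gpu_reset_counter unless the RAS error threshold check says otherwise), a zero one logs success, and the field is cleared after being consumed so the next recovery starts clean. A sketch of the consume-and-clear idiom, with hypothetical reduced types:

#include <linux/atomic.h>
#include <linux/list.h>

struct dev_sketch {
        struct list_head reset_list;
        int asic_reset_res;
        atomic_t gpu_reset_counter;
};

static int report_all(struct list_head *device_list)
{
        struct dev_sketch *tmp_adev;
        int r = 0;

        list_for_each_entry(tmp_adev, device_list, reset_list) {
                if (tmp_adev->asic_reset_res) {
                        /* Propagate the per-device error, then zero it
                         * so a later recovery starts from a clean slate. */
                        r = tmp_adev->asic_reset_res;
                        tmp_adev->asic_reset_res = 0;
                }
                /* On success the matches log gpu_reset_counter instead. */
        }
        return r;
}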
5737 struct amdgpu_device *tmp_adev = NULL;
5739 list_for_each_entry(tmp_adev, device_list, reset_list) {
5741 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5742 amdgpu_amdkfd_post_reset(tmp_adev);
5750 if (tmp_adev->pcie_reset_ctx.audio_suspended)
5751 amdgpu_device_resume_display_audio(tmp_adev);
5753 amdgpu_device_unset_mp1_state(tmp_adev);
5755 amdgpu_ras_set_error_query_ready(tmp_adev, true);
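Matches 5737-5755 are the unwind mirror of the quiesce loop at 5564 onward: KFD is notified post-reset (skipped under SR-IOV or an emergency restart), audio is resumed only where audio_suspended was recorded, MP1 state is unset, and RAS error queries are re-enabled. Sketched minimally with hypothetical stand-ins:

#include <linux/list.h>
#include <linux/types.h>

struct dev_sketch {
        struct list_head reset_list;
        bool audio_suspended;
};

static void resume_audio(struct dev_sketch *d) { }

static void unwind_all(struct list_head *device_list)
{
        struct dev_sketch *tmp_adev;

        list_for_each_entry(tmp_adev, device_list, reset_list) {
                /* Resume only what the quiesce pass actually suspended. */
                if (tmp_adev->audio_suspended)
                        resume_audio(tmp_adev);
        }
}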
6350 struct amdgpu_device *tmp_adev;
6365 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
6366 list_add_tail(&tmp_adev->reset_list, &device_list);
6413 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
6414 tmp_adev->pcie_reset_ctx.in_link_reset = true;
6428 list_for_each_entry(tmp_adev, &device_list, reset_list)
6429 amdgpu_device_unset_mp1_state(tmp_adev);
6455 struct amdgpu_device *tmp_adev = NULL;
6468 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
6469 tmp_adev->pcie_reset_ctx.in_link_reset = false;
6470 list_add_tail(&tmp_adev->reset_list, &device_list);
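The matches from 6350 onward sit in the PCI error handlers: when a link error is detected, every hive member gets in_link_reset set before recovery starts (6413-6414), and the slot-reset path clears it again while rebuilding the reset list (6468-6470). The set/clear bracket across the hive, with hypothetical reduced types:

#include <linux/list.h>
#include <linux/types.h>

struct dev_sketch {
        struct list_head xgmi_head;     /* hive membership */
        struct list_head reset_list;    /* this recovery's list */
        bool in_link_reset;
};

/* error_detected path: mark the whole hive as in link reset. */
static void mark_link_reset(struct list_head *hive_devices)
{
        struct dev_sketch *tmp_adev;

        list_for_each_entry(tmp_adev, hive_devices, xgmi_head)
                tmp_adev->in_link_reset = true;
}

/* slot_reset path: clear the flag and rebuild the reset list. */
static void finish_link_reset(struct list_head *hive_devices,
                              struct list_head *device_list)
{
        struct dev_sketch *tmp_adev;

        list_for_each_entry(tmp_adev, hive_devices, xgmi_head) {
                tmp_adev->in_link_reset = false;
                list_add_tail(&tmp_adev->reset_list, device_list);
        }
}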