
37 #include <linux/pci-p2pdma.h>
38 #include <linux/apple-gmux.h>
87 #include <asm/intel-family.h>
102 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
153 #define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMD_IP_BLOCK_TYPE_NUM - 1, 0)
187 return (adev->init_lvl->hwini_ip_block_mask & (1U << block)) != 0; in amdgpu_ip_member_of_hwini()
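A quick aside on the mask arithmetic: GENMASK(AMD_IP_BLOCK_TYPE_NUM - 1, 0) sets one bit per IP block type, and membership is the plain bit test seen in amdgpu_ip_member_of_hwini() above. A self-contained sketch of the same arithmetic (the 32-bit GENMASK is re-derived here and the block count is an assumed placeholder, not the driver's value):

#include <stdio.h>
#include <stdint.h>

/* 32-bit equivalent of the kernel's GENMASK(): set bits l..h inclusive. */
#define GENMASK32(h, l) ((~0U >> (31 - (h))) & (~0U << (l)))

#define DEMO_IP_BLOCK_TYPE_NUM 12      /* placeholder count, not AMD's */
#define DEMO_IP_BLK_MASK_ALL GENMASK32(DEMO_IP_BLOCK_TYPE_NUM - 1, 0)

static int block_in_mask(uint32_t mask, unsigned int block)
{
        return (mask & (1U << block)) != 0;
}

int main(void)
{
        printf("all-blocks mask: 0x%08x\n", DEMO_IP_BLK_MASK_ALL); /* 0x00000fff */
        printf("block 3 enabled: %d\n", block_in_mask(DEMO_IP_BLK_MASK_ALL, 3));
        return 0;
}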
195 adev->init_lvl = &amdgpu_init_minimal_xgmi; in amdgpu_set_init_level()
198 adev->init_lvl = &amdgpu_init_recovery; in amdgpu_set_init_level()
203 adev->init_lvl = &amdgpu_init_default; in amdgpu_set_init_level()
239 ret = sysfs_create_file(&adev->dev->kobj, in amdgpu_device_attr_sysfs_init()
248 sysfs_remove_file(&adev->dev->kobj, in amdgpu_device_attr_sysfs_fini()
283 return -EINVAL; in amdgpu_sysfs_reg_state_get()
299 ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state); in amdgpu_reg_state_sysfs_init()
308 sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state); in amdgpu_reg_state_sysfs_fini()
315 if (ip_block->version->funcs->suspend) { in amdgpu_ip_block_suspend()
316 r = ip_block->version->funcs->suspend(ip_block); in amdgpu_ip_block_suspend()
318 dev_err(ip_block->adev->dev, in amdgpu_ip_block_suspend()
320 ip_block->version->funcs->name, r); in amdgpu_ip_block_suspend()
325 ip_block->status.hw = false; in amdgpu_ip_block_suspend()
333 if (ip_block->version->funcs->resume) { in amdgpu_ip_block_resume()
334 r = ip_block->version->funcs->resume(ip_block); in amdgpu_ip_block_resume()
336 dev_err(ip_block->adev->dev, in amdgpu_ip_block_resume()
338 ip_block->version->funcs->name, r); in amdgpu_ip_block_resume()
343 ip_block->status.hw = true; in amdgpu_ip_block_resume()
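Both wrappers above share one optional-callback shape: call the hook only if the IP block provides it, log a failure with the block's name, and flip the cached hardware state on success. The pattern in isolation, with simplified stand-in types (not the driver's own definitions):

#include <linux/printk.h>
#include <linux/types.h>

struct demo_funcs {
        const char *name;
        int (*suspend)(void *handle);   /* optional hook */
};

struct demo_ip_block {
        const struct demo_funcs *funcs;
        bool hw;                        /* cached "hardware is up" state */
};

static int demo_ip_block_suspend(struct demo_ip_block *blk, void *handle)
{
        int r;

        if (blk->funcs->suspend) {
                r = blk->funcs->suspend(handle);
                if (r) {
                        pr_err("suspend of IP block <%s> failed %d\n",
                               blk->funcs->name, r);
                        return r;
                }
        }
        blk->hw = false;                /* block is now quiesced */
        return 0;
}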
357 * - "cem" - PCIE CEM card
358 * - "oam" - Open Compute Accelerator Module
359 * - "unknown" - Not known
372 if (adev->smuio.funcs && adev->smuio.funcs->get_pkg_type) in amdgpu_device_get_board_info()
373 pkg_type = adev->smuio.funcs->get_pkg_type(adev); in amdgpu_device_get_board_info()
404 if (adev->flags & AMD_IS_APU) in amdgpu_board_attrs_is_visible()
407 return attr->mode; in amdgpu_board_attrs_is_visible()
418 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
427 if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid()) in amdgpu_device_supports_px()
433 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
445 if (adev->has_pr3 || in amdgpu_device_supports_boco()
446 ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid())) in amdgpu_device_supports_boco()
452 * amdgpu_device_supports_baco - Does the device support BACO
470 adev->pm.rpm_mode = AMDGPU_RUNPM_NONE; in amdgpu_device_detect_runtime_pm_mode()
476 adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO; in amdgpu_device_detect_runtime_pm_mode()
477 dev_info(adev->dev, "Forcing BAMACO for runtime pm\n"); in amdgpu_device_detect_runtime_pm_mode()
479 adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; in amdgpu_device_detect_runtime_pm_mode()
480 dev_info(adev->dev, "Requested mode BAMACO not available, falling back to BACO\n"); in amdgpu_device_detect_runtime_pm_mode()
485 adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; in amdgpu_device_detect_runtime_pm_mode()
486 dev_info(adev->dev, "Forcing BACO for runtime pm\n"); in amdgpu_device_detect_runtime_pm_mode()
489 case -1: in amdgpu_device_detect_runtime_pm_mode()
490 case -2: in amdgpu_device_detect_runtime_pm_mode()
493 adev->pm.rpm_mode = AMDGPU_RUNPM_PX; in amdgpu_device_detect_runtime_pm_mode()
494 dev_info(adev->dev, "Using ATPX for runtime pm\n"); in amdgpu_device_detect_runtime_pm_mode()
497 adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO; in amdgpu_device_detect_runtime_pm_mode()
498 dev_info(adev->dev, "Using BOCO for runtime pm\n"); in amdgpu_device_detect_runtime_pm_mode()
503 switch (adev->asic_type) { in amdgpu_device_detect_runtime_pm_mode()
510 if (!adev->gmc.noretry && !amdgpu_passthrough(adev)) in amdgpu_device_detect_runtime_pm_mode()
511 adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; in amdgpu_device_detect_runtime_pm_mode()
516 adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; in amdgpu_device_detect_runtime_pm_mode()
520 if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) { in amdgpu_device_detect_runtime_pm_mode()
522 adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO; in amdgpu_device_detect_runtime_pm_mode()
523 dev_info(adev->dev, "Using BAMACO for runtime pm\n"); in amdgpu_device_detect_runtime_pm_mode()
525 dev_info(adev->dev, "Using BACO for runtime pm\n"); in amdgpu_device_detect_runtime_pm_mode()
531 dev_info(adev->dev, "runtime pm is manually disabled\n"); in amdgpu_device_detect_runtime_pm_mode()
538 if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) in amdgpu_device_detect_runtime_pm_mode()
539 dev_info(adev->dev, "Runtime PM not available\n"); in amdgpu_device_detect_runtime_pm_mode()
542 * amdgpu_device_supports_smart_shift - Is the device dGPU with
561 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
567 * @write: true - write to vram, otherwise - read from vram
583 spin_lock_irqsave(&adev->mmio_idx_lock, flags); in amdgpu_device_mm_access()
598 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); in amdgpu_device_mm_access()
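MM_INDEX/MM_DATA is a banked-window scheme: under mmio_idx_lock the index register selects a VRAM dword (a high bit marking an aperture-relative address) and the data register transfers it. One dword read in outline (the register offsets and flag bit here are illustrative placeholders):

#include <linux/io.h>
#include <linux/spinlock.h>

#define DEMO_MM_INDEX 0x0               /* placeholder register offsets */
#define DEMO_MM_DATA  0x4

static u32 demo_mm_read_dword(void __iomem *mmio, spinlock_t *lock, u32 byte_off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(lock, flags);
        writel(byte_off | 0x80000000, mmio + DEMO_MM_INDEX); /* select location */
        val = readl(mmio + DEMO_MM_DATA);                    /* move the dword */
        spin_unlock_irqrestore(lock, flags);
        return val;
}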
603 * amdgpu_device_aper_access - access vram by vram aperture
609 * @write: true - write to vram, otherwise - read from vram
621 if (!adev->mman.aper_base_kaddr) in amdgpu_device_aper_access()
624 last = min(pos + size, adev->gmc.visible_vram_size); in amdgpu_device_aper_access()
626 addr = adev->mman.aper_base_kaddr + pos; in amdgpu_device_aper_access()
627 count = last - pos; in amdgpu_device_aper_access()
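The aperture path only covers CPU-visible VRAM, so each chunk is clamped against visible_vram_size and the caller loops on the remainder (amdgpu_device_vram_access below falls back to MM_INDEX/MM_DATA for what the aperture cannot reach). The clamp, as a sketch:

#include <linux/minmax.h>
#include <linux/string.h>
#include <linux/types.h>

/* Returns how many bytes could be served through the CPU-visible aperture. */
static size_t demo_aper_read(const u8 *aper_kaddr, u64 visible_size,
                             u64 pos, void *buf, size_t size)
{
        u64 last;

        if (pos >= visible_size)
                return 0;               /* entirely beyond the window */

        last = min(pos + size, visible_size);
        memcpy(buf, aper_kaddr + pos, last - pos);
        return last - pos;
}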
654 * amdgpu_device_vram_access - read/write a buffer in vram
660 * @write: true - write to vram, otherwise - read from vram
669 size -= count; in amdgpu_device_vram_access()
685 if (adev->no_hw_access) in amdgpu_device_skip_hw_access()
701 if (down_read_trylock(&adev->reset_domain->sem)) in amdgpu_device_skip_hw_access()
702 up_read(&adev->reset_domain->sem); in amdgpu_device_skip_hw_access()
704 lockdep_assert_held(&adev->reset_domain->sem); in amdgpu_device_skip_hw_access()
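The trylock dance here is purely a debugging aid: if down_read_trylock() succeeds, no reset holds the semaphore and it is dropped again immediately; if it fails, the caller had better be the reset path that already holds it, which lockdep_assert_held() verifies. The idiom on its own:

#include <linux/lockdep.h>
#include <linux/rwsem.h>

static void demo_assert_reset_safe(struct rw_semaphore *reset_sem)
{
        if (down_read_trylock(reset_sem))
                up_read(reset_sem);             /* no reset in flight */
        else
                lockdep_assert_held(reset_sem); /* must be the reset path itself */
}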
711 * amdgpu_device_rreg - read a memory mapped IO or indirect register
727 if ((reg * 4) < adev->rmmio_size) { in amdgpu_device_rreg()
730 down_read_trylock(&adev->reset_domain->sem)) { in amdgpu_device_rreg()
732 up_read(&adev->reset_domain->sem); in amdgpu_device_rreg()
734 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); in amdgpu_device_rreg()
737 ret = adev->pcie_rreg(adev, reg * 4); in amdgpu_device_rreg()
740 trace_amdgpu_device_rreg(adev->pdev->device, reg, ret); in amdgpu_device_rreg()
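The read path dispatches on the register offset: anything inside the mapped MMIO window goes through readl(), anything beyond it through the indirect PCIE callback, with the dword index scaled to a byte offset in both cases. Stripped of the reset-lock handling (parameter names are illustrative):

#include <linux/io.h>
#include <linux/types.h>

static u32 demo_rreg(void __iomem *rmmio, resource_size_t rmmio_size,
                     u32 (*pcie_rreg)(u32 byte_off), u32 reg)
{
        if ((reg * 4) < rmmio_size)             /* dword index -> byte offset */
                return readl(rmmio + (reg * 4));
        return pcie_rreg(reg * 4);              /* outside the window: go indirect */
}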
751 * amdgpu_mm_rreg8 - read a memory mapped IO register
763 if (offset < adev->rmmio_size) in amdgpu_mm_rreg8()
764 return (readb(adev->rmmio + offset)); in amdgpu_mm_rreg8()
770 * amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC
788 if ((reg * 4) < adev->rmmio_size) { in amdgpu_device_xcc_rreg()
791 adev->gfx.rlc.rlcg_reg_access_supported && in amdgpu_device_xcc_rreg()
798 down_read_trylock(&adev->reset_domain->sem)) { in amdgpu_device_xcc_rreg()
800 up_read(&adev->reset_domain->sem); in amdgpu_device_xcc_rreg()
802 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); in amdgpu_device_xcc_rreg()
805 ret = adev->pcie_rreg(adev, reg * 4); in amdgpu_device_xcc_rreg()
818 * amdgpu_mm_wreg8 - write a memory mapped IO register
831 if (offset < adev->rmmio_size) in amdgpu_mm_wreg8()
832 writeb(value, adev->rmmio + offset); in amdgpu_mm_wreg8()
838 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
854 if ((reg * 4) < adev->rmmio_size) { in amdgpu_device_wreg()
857 down_read_trylock(&adev->reset_domain->sem)) { in amdgpu_device_wreg()
859 up_read(&adev->reset_domain->sem); in amdgpu_device_wreg()
861 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); in amdgpu_device_wreg()
864 adev->pcie_wreg(adev, reg * 4, v); in amdgpu_device_wreg()
867 trace_amdgpu_device_wreg(adev->pdev->device, reg, v); in amdgpu_device_wreg()
871 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
888 adev->gfx.rlc.funcs && in amdgpu_mm_wreg_mmio_rlc()
889 adev->gfx.rlc.funcs->is_rlcg_access_range) { in amdgpu_mm_wreg_mmio_rlc()
890 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg)) in amdgpu_mm_wreg_mmio_rlc()
892 } else if ((reg * 4) >= adev->rmmio_size) { in amdgpu_mm_wreg_mmio_rlc()
893 adev->pcie_wreg(adev, reg * 4, v); in amdgpu_mm_wreg_mmio_rlc()
895 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); in amdgpu_mm_wreg_mmio_rlc()
900 * amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC
919 if ((reg * 4) < adev->rmmio_size) { in amdgpu_device_xcc_wreg()
922 adev->gfx.rlc.rlcg_reg_access_supported && in amdgpu_device_xcc_wreg()
929 down_read_trylock(&adev->reset_domain->sem)) { in amdgpu_device_xcc_wreg()
931 up_read(&adev->reset_domain->sem); in amdgpu_device_xcc_wreg()
933 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); in amdgpu_device_xcc_wreg()
936 adev->pcie_wreg(adev, reg * 4, v); in amdgpu_device_xcc_wreg()
941 * amdgpu_device_indirect_rreg - read an indirect register
956 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_rreg()
957 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_rreg()
959 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg()
960 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_rreg()
961 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_rreg()
966 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg()
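This is the standard PCIE index/data handshake: write the target offset into the index register, read the index register back so the posted write is known to have landed, then read the value from the data register, all under pcie_idx_lock. In outline:

#include <linux/io.h>
#include <linux/spinlock.h>

static u32 demo_indirect_rreg(void __iomem *rmmio, spinlock_t *lock,
                              u32 pcie_index, u32 pcie_data, u32 reg_addr)
{
        void __iomem *index = rmmio + pcie_index * 4;
        void __iomem *data = rmmio + pcie_data * 4;
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(lock, flags);
        writel(reg_addr, index);
        readl(index);           /* flush the posted write before reading data */
        r = readl(data);
        spin_unlock_irqrestore(lock, flags);
        return r;
}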
980 if (unlikely(!adev->nbio.funcs)) { in amdgpu_device_indirect_rreg_ext()
984 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_rreg_ext()
985 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_rreg_ext()
989 if (unlikely(!adev->nbio.funcs)) in amdgpu_device_indirect_rreg_ext()
992 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev); in amdgpu_device_indirect_rreg_ext()
997 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg_ext()
998 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_rreg_ext()
999 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_rreg_ext()
1001 pcie_index_hi_offset = (void __iomem *)adev->rmmio + in amdgpu_device_indirect_rreg_ext()
1018 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg_ext()
1024 * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
1039 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_rreg64()
1040 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_rreg64()
1042 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg64()
1043 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_rreg64()
1044 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_rreg64()
1054 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg64()
1069 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_rreg64_ext()
1070 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_rreg64_ext()
1071 if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset)) in amdgpu_device_indirect_rreg64_ext()
1072 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev); in amdgpu_device_indirect_rreg64_ext()
1074 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg64_ext()
1075 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_rreg64_ext()
1076 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_rreg64_ext()
1078 pcie_index_hi_offset = (void __iomem *)adev->rmmio + in amdgpu_device_indirect_rreg64_ext()
1104 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg64_ext()
1110 * amdgpu_device_indirect_wreg - write an indirect register address
1124 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_wreg()
1125 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_wreg()
1127 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg()
1128 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_wreg()
1129 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_wreg()
1135 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg()
1146 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_wreg_ext()
1147 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_wreg_ext()
1148 if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset)) in amdgpu_device_indirect_wreg_ext()
1149 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev); in amdgpu_device_indirect_wreg_ext()
1153 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg_ext()
1154 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_wreg_ext()
1155 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_wreg_ext()
1157 pcie_index_hi_offset = (void __iomem *)adev->rmmio + in amdgpu_device_indirect_wreg_ext()
1175 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg_ext()
1179 * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
1193 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_wreg64()
1194 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_wreg64()
1196 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg64()
1197 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_wreg64()
1198 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_wreg64()
1210 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg64()
1222 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_wreg64_ext()
1223 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_wreg64_ext()
1224 if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset)) in amdgpu_device_indirect_wreg64_ext()
1225 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev); in amdgpu_device_indirect_wreg64_ext()
1227 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg64_ext()
1228 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_wreg64_ext()
1229 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_wreg64_ext()
1231 pcie_index_hi_offset = (void __iomem *)adev->rmmio + in amdgpu_device_indirect_wreg64_ext()
1259 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg64_ext()
1263 * amdgpu_device_get_rev_id - query device rev_id
1271 return adev->nbio.funcs->get_rev_id(adev); in amdgpu_device_get_rev_id()
1275 * amdgpu_invalid_rreg - dummy reg read function
1286 dev_err(adev->dev, "Invalid callback to read register 0x%04X\n", reg); in amdgpu_invalid_rreg()
1293 dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg); in amdgpu_invalid_rreg_ext()
1299 * amdgpu_invalid_wreg - dummy reg write function
1310 dev_err(adev->dev, in amdgpu_invalid_wreg()
1318 dev_err(adev->dev, in amdgpu_invalid_wreg_ext()
1325 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
1336 dev_err(adev->dev, "Invalid callback to read 64 bit register 0x%04X\n", in amdgpu_invalid_rreg64()
1344 dev_err(adev->dev, "Invalid callback to read 64 bit register 0x%llX\n", reg); in amdgpu_invalid_rreg64_ext()
1350 * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
1361 dev_err(adev->dev, in amdgpu_invalid_wreg64()
1369 dev_err(adev->dev, in amdgpu_invalid_wreg64_ext()
1376 * amdgpu_block_invalid_rreg - dummy reg read function
1389 dev_err(adev->dev, in amdgpu_block_invalid_rreg()
1397 * amdgpu_block_invalid_wreg - dummy reg write function
1411 dev_err(adev->dev, in amdgpu_block_invalid_wreg()
1419 if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU)) in amdgpu_device_get_vbios_flags()
1422 if (hweight32(adev->aid_mask) && amdgpu_passthrough(adev)) in amdgpu_device_get_vbios_flags()
1429 * amdgpu_device_asic_init - Wrapper for atom asic_init
1450 if (optional && !adev->bios) in amdgpu_device_asic_init()
1456 if (optional && !adev->bios) in amdgpu_device_asic_init()
1459 return amdgpu_atom_asic_init(adev->mode_info.atom_context); in amdgpu_device_asic_init()
1466 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
1478 &adev->mem_scratch.robj, in amdgpu_device_mem_scratch_init()
1479 &adev->mem_scratch.gpu_addr, in amdgpu_device_mem_scratch_init()
1480 (void **)&adev->mem_scratch.ptr); in amdgpu_device_mem_scratch_init()
1484 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
1492 amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL); in amdgpu_device_mem_scratch_fini()
1496 * amdgpu_device_program_register_sequence - program an array of registers.
1525 if (adev->family >= AMDGPU_FAMILY_AI) in amdgpu_device_program_register_sequence()
1535 * amdgpu_device_pci_config_reset - reset the GPU
1544 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA); in amdgpu_device_pci_config_reset()
1548 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1556 return pci_reset_function(adev->pdev); in amdgpu_device_pci_reset()
1566 * amdgpu_device_wb_fini - Disable Writeback and free memory
1575 if (adev->wb.wb_obj) { in amdgpu_device_wb_fini()
1576 amdgpu_bo_free_kernel(&adev->wb.wb_obj, in amdgpu_device_wb_fini()
1577 &adev->wb.gpu_addr, in amdgpu_device_wb_fini()
1578 (void **)&adev->wb.wb); in amdgpu_device_wb_fini()
1579 adev->wb.wb_obj = NULL; in amdgpu_device_wb_fini()
1584 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1590 * Returns 0 on success or a negative error code on failure.
1596 if (adev->wb.wb_obj == NULL) { in amdgpu_device_wb_init()
1600 &adev->wb.wb_obj, &adev->wb.gpu_addr, in amdgpu_device_wb_init()
1601 (void **)&adev->wb.wb); in amdgpu_device_wb_init()
1603 dev_warn(adev->dev, "(%d) create WB bo failed\n", r); in amdgpu_device_wb_init()
1607 adev->wb.num_wb = AMDGPU_MAX_WB; in amdgpu_device_wb_init()
1608 memset(&adev->wb.used, 0, sizeof(adev->wb.used)); in amdgpu_device_wb_init()
1611 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8); in amdgpu_device_wb_init()
1618 * amdgpu_device_wb_get - Allocate a wb entry
1624 * Returns 0 on success or -EINVAL on failure.
1630 spin_lock_irqsave(&adev->wb.lock, flags); in amdgpu_device_wb_get()
1631 offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb); in amdgpu_device_wb_get()
1632 if (offset < adev->wb.num_wb) { in amdgpu_device_wb_get()
1633 __set_bit(offset, adev->wb.used); in amdgpu_device_wb_get()
1634 spin_unlock_irqrestore(&adev->wb.lock, flags); in amdgpu_device_wb_get()
1638 spin_unlock_irqrestore(&adev->wb.lock, flags); in amdgpu_device_wb_get()
1639 return -EINVAL; in amdgpu_device_wb_get()
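Writeback slots come from a plain bitmap allocator: find the first clear bit under wb.lock, mark it used, and hand back its index; find_first_zero_bit() returning num_wb or more means the pool is exhausted. The allocator reduced to its essentials:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

static int demo_wb_get(unsigned long *used, unsigned int num_wb,
                       spinlock_t *lock, u32 *wb)
{
        unsigned long offset, flags;

        spin_lock_irqsave(lock, flags);
        offset = find_first_zero_bit(used, num_wb);
        if (offset < num_wb) {
                __set_bit(offset, used);
                spin_unlock_irqrestore(lock, flags);
                *wb = offset;   /* slot index; the driver scales it as needed */
                return 0;
        }
        spin_unlock_irqrestore(lock, flags);
        return -EINVAL;
}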
1644 * amdgpu_device_wb_free - Free a wb entry
1656 spin_lock_irqsave(&adev->wb.lock, flags); in amdgpu_device_wb_free()
1657 if (wb < adev->wb.num_wb) in amdgpu_device_wb_free()
1658 __clear_bit(wb, adev->wb.used); in amdgpu_device_wb_free()
1659 spin_unlock_irqrestore(&adev->wb.lock, flags); in amdgpu_device_wb_free()
1663 * amdgpu_device_resize_fb_bar - try to resize FB BAR
1669 * driver loading by returning -ENODEV.
1673 int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size); in amdgpu_device_resize_fb_bar()
1692 adev->pdev->vendor == PCI_VENDOR_ID_ATI && in amdgpu_device_resize_fb_bar()
1693 adev->pdev->device == 0x731f && in amdgpu_device_resize_fb_bar()
1694 adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) in amdgpu_device_resize_fb_bar()
1698 if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR)) in amdgpu_device_resize_fb_bar()
1700 adev->dev, in amdgpu_device_resize_fb_bar()
1704 if (adev->gmc.real_vram_size && in amdgpu_device_resize_fb_bar()
1705 (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size)) in amdgpu_device_resize_fb_bar()
1709 root = adev->pdev->bus; in amdgpu_device_resize_fb_bar()
1710 while (root->parent) in amdgpu_device_resize_fb_bar()
1711 root = root->parent; in amdgpu_device_resize_fb_bar()
1714 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) && in amdgpu_device_resize_fb_bar()
1715 res->start > 0x100000000ull) in amdgpu_device_resize_fb_bar()
1724 rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1, in amdgpu_device_resize_fb_bar()
1728 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd); in amdgpu_device_resize_fb_bar()
1729 pci_write_config_word(adev->pdev, PCI_COMMAND, in amdgpu_device_resize_fb_bar()
1734 if (adev->asic_type >= CHIP_BONAIRE) in amdgpu_device_resize_fb_bar()
1735 pci_release_resource(adev->pdev, 2); in amdgpu_device_resize_fb_bar()
1737 pci_release_resource(adev->pdev, 0); in amdgpu_device_resize_fb_bar()
1739 r = pci_resize_resource(adev->pdev, 0, rbar_size); in amdgpu_device_resize_fb_bar()
1740 if (r == -ENOSPC) in amdgpu_device_resize_fb_bar()
1741 dev_info(adev->dev, in amdgpu_device_resize_fb_bar()
1743 else if (r && r != -ENOTSUPP) in amdgpu_device_resize_fb_bar()
1744 dev_err(adev->dev, "Problem resizing BAR0 (%d).", r); in amdgpu_device_resize_fb_bar()
1746 pci_assign_unassigned_bus_resources(adev->pdev->bus); in amdgpu_device_resize_fb_bar()
1752 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET)) in amdgpu_device_resize_fb_bar()
1753 return -ENODEV; in amdgpu_device_resize_fb_bar()
1755 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd); in amdgpu_device_resize_fb_bar()
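Resizable-BAR sizes are encoded as a power-of-two exponent relative to 1 MiB: encoding n means 2^n MiB. pci_rebar_bytes_to_size() converts real_vram_size to that scale, and fls(pci_rebar_get_possible_sizes()) - 1 clamps it to the largest size the BAR actually advertises. The encoding arithmetic, re-implemented standalone for illustration:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the kernel's pci_rebar_bytes_to_size(): round up to a power of
 * two, take log2, and rebase so encoding 0 == 1 MiB (2^20 bytes). */
static int rebar_bytes_to_size(uint64_t bytes)
{
        int order = 0;

        while ((1ULL << order) < bytes)         /* ceil(log2(bytes)) */
                order++;
        return order < 20 ? 0 : order - 20;
}

int main(void)
{
        printf("256 MiB -> %d\n", rebar_bytes_to_size(256ULL << 20)); /* 8 */
        printf("8 GiB   -> %d\n", rebar_bytes_to_size(8ULL << 30));   /* 13 */
        return 0;
}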
1764 * amdgpu_device_need_post - check if the hw need post or not
1782 if ((flags & AMDGPU_VBIOS_OPTIONAL) && !adev->bios) in amdgpu_device_need_post()
1786 /* for FIJI: In whole GPU pass-through virtualization case, after VM reboot in amdgpu_device_need_post()
1791 if (adev->asic_type == CHIP_FIJI) { in amdgpu_device_need_post()
1795 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev); in amdgpu_device_need_post()
1800 fw_ver = *((uint32_t *)adev->pm.fw->data + 69); in amdgpu_device_need_post()
1801 release_firmware(adev->pm.fw); in amdgpu_device_need_post()
1808 if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI) in amdgpu_device_need_post()
1811 if (adev->has_hw_reset) { in amdgpu_device_need_post()
1812 adev->has_hw_reset = false; in amdgpu_device_need_post()
1817 if (adev->asic_type >= CHIP_BONAIRE) in amdgpu_device_need_post()
1839 case -1: in amdgpu_device_seamless_boot_supported()
1846 dev_err(adev->dev, "Invalid value for amdgpu.seamless: %d\n", in amdgpu_device_seamless_boot_supported()
1851 if (!(adev->flags & AMD_IS_APU)) in amdgpu_device_seamless_boot_supported()
1854 if (adev->mman.keep_stolen_vga_memory) in amdgpu_device_seamless_boot_supported()
1865 …gn/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-
1866 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
1874 if (dev_is_removable(adev->dev)) in amdgpu_device_pcie_dynamic_switching_supported()
1877 if (c->x86_vendor == X86_VENDOR_INTEL) in amdgpu_device_pcie_dynamic_switching_supported()
1886 * It's unclear if this is a platform-specific or GPU-specific issue. in amdgpu_device_aspm_support_quirk()
1889 if (adev->family == AMDGPU_FAMILY_SI) in amdgpu_device_aspm_support_quirk()
1899 if (c->x86 == 6 && in amdgpu_device_aspm_support_quirk()
1900 adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5) { in amdgpu_device_aspm_support_quirk()
1901 switch (c->x86_model) { in amdgpu_device_aspm_support_quirk()
1920 * amdgpu_device_should_use_aspm - check if the device should program ASPM
1932 case -1: in amdgpu_device_should_use_aspm()
1941 if (adev->flags & AMD_IS_APU) in amdgpu_device_should_use_aspm()
1945 return pcie_aspm_enabled(adev->pdev); in amdgpu_device_should_use_aspm()
1950 * amdgpu_device_vga_set_decode - enable/disable vga decode
1972 * amdgpu_device_check_block_size - validate the vm block size
1987 if (amdgpu_vm_block_size == -1) in amdgpu_device_check_block_size()
1991 dev_warn(adev->dev, "VM page table size (%d) too small\n", in amdgpu_device_check_block_size()
1993 amdgpu_vm_block_size = -1; in amdgpu_device_check_block_size()
1998 * amdgpu_device_check_vm_size - validate the vm size
2008 if (amdgpu_vm_size == -1) in amdgpu_device_check_vm_size()
2012 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n", in amdgpu_device_check_vm_size()
2014 amdgpu_vm_size = -1; in amdgpu_device_check_vm_size()
2030 dev_warn(adev->dev, "Not 64-bit OS, feature not supported\n"); in amdgpu_device_check_smu_prv_buffer_size()
2045 dev_warn(adev->dev, "Smu memory pool size not supported\n"); in amdgpu_device_check_smu_prv_buffer_size()
2048 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28; in amdgpu_device_check_smu_prv_buffer_size()
2053 dev_warn(adev->dev, "Not enough system memory\n"); in amdgpu_device_check_smu_prv_buffer_size()
2055 adev->pm.smu_prv_buffer_size = 0; in amdgpu_device_check_smu_prv_buffer_size()
2060 if (!(adev->flags & AMD_IS_APU) || in amdgpu_device_init_apu_flags()
2061 adev->asic_type < CHIP_RAVEN) in amdgpu_device_init_apu_flags()
2064 switch (adev->asic_type) { in amdgpu_device_init_apu_flags()
2066 if (adev->pdev->device == 0x15dd) in amdgpu_device_init_apu_flags()
2067 adev->apu_flags |= AMD_APU_IS_RAVEN; in amdgpu_device_init_apu_flags()
2068 if (adev->pdev->device == 0x15d8) in amdgpu_device_init_apu_flags()
2069 adev->apu_flags |= AMD_APU_IS_PICASSO; in amdgpu_device_init_apu_flags()
2072 if ((adev->pdev->device == 0x1636) || in amdgpu_device_init_apu_flags()
2073 (adev->pdev->device == 0x164c)) in amdgpu_device_init_apu_flags()
2074 adev->apu_flags |= AMD_APU_IS_RENOIR; in amdgpu_device_init_apu_flags()
2076 adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE; in amdgpu_device_init_apu_flags()
2079 adev->apu_flags |= AMD_APU_IS_VANGOGH; in amdgpu_device_init_apu_flags()
2084 if ((adev->pdev->device == 0x13FE) || in amdgpu_device_init_apu_flags()
2085 (adev->pdev->device == 0x143F)) in amdgpu_device_init_apu_flags()
2086 adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2; in amdgpu_device_init_apu_flags()
2096 * amdgpu_device_check_arguments - validate module params
2108 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", in amdgpu_device_check_arguments()
2112 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n", in amdgpu_device_check_arguments()
2117 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) { in amdgpu_device_check_arguments()
2119 dev_warn(adev->dev, "gart size (%d) too small\n", in amdgpu_device_check_arguments()
2121 amdgpu_gart_size = -1; in amdgpu_device_check_arguments()
2124 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) { in amdgpu_device_check_arguments()
2126 dev_warn(adev->dev, "gtt size (%d) too small\n", in amdgpu_device_check_arguments()
2128 amdgpu_gtt_size = -1; in amdgpu_device_check_arguments()
2132 if (amdgpu_vm_fragment_size != -1 && in amdgpu_device_check_arguments()
2134 dev_warn(adev->dev, "valid range is between 4 and 9\n"); in amdgpu_device_check_arguments()
2135 amdgpu_vm_fragment_size = -1; in amdgpu_device_check_arguments()
2139 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n", in amdgpu_device_check_arguments()
2143 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n", in amdgpu_device_check_arguments()
2148 if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) { in amdgpu_device_check_arguments()
2149 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n"); in amdgpu_device_check_arguments()
2150 amdgpu_reset_method = -1; in amdgpu_device_check_arguments()
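Most of these checks share one shape: warn, then coerce the module parameter to a sane value instead of failing the probe, with -1 reserved to mean "driver default". For the power-of-two constraints the fixup looks roughly like this (a standalone sketch of the pattern, not the driver's exact helper):

#include <linux/log2.h>
#include <linux/printk.h>

static int demo_fixup_sched_jobs(int jobs)
{
        if (jobs < 4) {
                pr_warn("sched jobs (%d) must be at least 4\n", jobs);
                jobs = 4;
        } else if (!is_power_of_2(jobs)) {
                pr_warn("sched jobs (%d) must be a power of 2\n", jobs);
                jobs = roundup_pow_of_two(jobs);
        }
        return jobs;
}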
2159 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type); in amdgpu_device_check_arguments()
2163 case -1: in amdgpu_device_check_arguments()
2167 adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE; in amdgpu_device_check_arguments()
2171 adev->enforce_isolation[i] = in amdgpu_device_check_arguments()
2176 adev->enforce_isolation[i] = in amdgpu_device_check_arguments()
2181 adev->enforce_isolation[i] = in amdgpu_device_check_arguments()
2191 * amdgpu_switcheroo_set_state - set switcheroo state
2212 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; in amdgpu_switcheroo_set_state()
2218 dev_warn(&pdev->dev, "pci_enable_device failed (%d)\n", in amdgpu_switcheroo_set_state()
2222 dev->switch_power_state = DRM_SWITCH_POWER_ON; in amdgpu_switcheroo_set_state()
2224 dev_info(&pdev->dev, "switched off\n"); in amdgpu_switcheroo_set_state()
2225 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; in amdgpu_switcheroo_set_state()
2232 dev->switch_power_state = DRM_SWITCH_POWER_OFF; in amdgpu_switcheroo_set_state()
2237 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
2254 return atomic_read(&dev->open_count) == 0; in amdgpu_switcheroo_can_switch()
2264 * amdgpu_device_ip_set_clockgating_state - set the CG state
2281 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_set_clockgating_state()
2282 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_set_clockgating_state()
2284 if (adev->ip_blocks[i].version->type != block_type) in amdgpu_device_ip_set_clockgating_state()
2286 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state) in amdgpu_device_ip_set_clockgating_state()
2288 r = adev->ip_blocks[i].version->funcs->set_clockgating_state( in amdgpu_device_ip_set_clockgating_state()
2289 &adev->ip_blocks[i], state); in amdgpu_device_ip_set_clockgating_state()
2291 dev_err(adev->dev, in amdgpu_device_ip_set_clockgating_state()
2293 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_set_clockgating_state()
2299 * amdgpu_device_ip_set_powergating_state - set the PG state
2316 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_set_powergating_state()
2317 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_set_powergating_state()
2319 if (adev->ip_blocks[i].version->type != block_type) in amdgpu_device_ip_set_powergating_state()
2321 if (!adev->ip_blocks[i].version->funcs->set_powergating_state) in amdgpu_device_ip_set_powergating_state()
2323 r = adev->ip_blocks[i].version->funcs->set_powergating_state( in amdgpu_device_ip_set_powergating_state()
2324 &adev->ip_blocks[i], state); in amdgpu_device_ip_set_powergating_state()
2326 dev_err(adev->dev, in amdgpu_device_ip_set_powergating_state()
2328 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_set_powergating_state()
2334 * amdgpu_device_ip_get_clockgating_state - get the CG state
2349 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_get_clockgating_state()
2350 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_get_clockgating_state()
2352 if (adev->ip_blocks[i].version->funcs->get_clockgating_state) in amdgpu_device_ip_get_clockgating_state()
2353 adev->ip_blocks[i].version->funcs->get_clockgating_state( in amdgpu_device_ip_get_clockgating_state()
2354 &adev->ip_blocks[i], flags); in amdgpu_device_ip_get_clockgating_state()
2359 * amdgpu_device_ip_wait_for_idle - wait for idle
2372 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_wait_for_idle()
2373 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_wait_for_idle()
2375 if (adev->ip_blocks[i].version->type == block_type) { in amdgpu_device_ip_wait_for_idle()
2376 if (adev->ip_blocks[i].version->funcs->wait_for_idle) { in amdgpu_device_ip_wait_for_idle()
2377 r = adev->ip_blocks[i].version->funcs->wait_for_idle( in amdgpu_device_ip_wait_for_idle()
2378 &adev->ip_blocks[i]); in amdgpu_device_ip_wait_for_idle()
2390 * amdgpu_device_ip_is_valid - is the hardware IP enabled
2403 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_is_valid()
2404 if (adev->ip_blocks[i].version->type == block_type) in amdgpu_device_ip_is_valid()
2405 return adev->ip_blocks[i].status.valid; in amdgpu_device_ip_is_valid()
2412 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
2426 for (i = 0; i < adev->num_ip_blocks; i++) in amdgpu_device_ip_get_ip_block()
2427 if (adev->ip_blocks[i].version->type == type) in amdgpu_device_ip_get_ip_block()
2428 return &adev->ip_blocks[i]; in amdgpu_device_ip_get_ip_block()
2450 if (ip_block && ((ip_block->version->major > major) || in amdgpu_device_ip_block_version_cmp()
2451 ((ip_block->version->major == major) && in amdgpu_device_ip_block_version_cmp()
2452 (ip_block->version->minor >= minor)))) in amdgpu_device_ip_block_version_cmp()
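The comparison is lexicographic on (major, minor): a strictly higher major always satisfies the request, and an equal major needs at least the requested minor. Standalone form:

#include <linux/types.h>

static bool demo_ip_ver_at_least(u32 have_major, u32 have_minor,
                                 u32 want_major, u32 want_minor)
{
        return have_major > want_major ||
               (have_major == want_major && have_minor >= want_minor);
}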
2498 return -EINVAL; in amdgpu_device_ip_block_add()
2500 switch (ip_block_version->type) { in amdgpu_device_ip_block_add()
2502 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK) in amdgpu_device_ip_block_add()
2506 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK) in amdgpu_device_ip_block_add()
2513 dev_info(adev->dev, "detected ip block number %d <%s_v%d_%d_%d> (%s)\n", in amdgpu_device_ip_block_add()
2514 adev->num_ip_blocks, in amdgpu_device_ip_block_add()
2515 ip_block_name(adev, ip_block_version->type), in amdgpu_device_ip_block_add()
2516 ip_block_version->major, in amdgpu_device_ip_block_add()
2517 ip_block_version->minor, in amdgpu_device_ip_block_add()
2518 ip_block_version->rev, in amdgpu_device_ip_block_add()
2519 ip_block_version->funcs->name); in amdgpu_device_ip_block_add()
2521 adev->ip_blocks[adev->num_ip_blocks].adev = adev; in amdgpu_device_ip_block_add()
2523 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version; in amdgpu_device_ip_block_add()
2529 * amdgpu_device_enable_virtual_display - enable virtual display feature
2542 adev->enable_virtual_display = false; in amdgpu_device_enable_virtual_display()
2545 const char *pci_address_name = pci_name(adev->pdev); in amdgpu_device_enable_virtual_display()
2555 int res = -1; in amdgpu_device_enable_virtual_display()
2557 adev->enable_virtual_display = true; in amdgpu_device_enable_virtual_display()
2568 adev->mode_info.num_crtc = num_crtc; in amdgpu_device_enable_virtual_display()
2570 adev->mode_info.num_crtc = 1; in amdgpu_device_enable_virtual_display()
2577 adev->dev, in amdgpu_device_enable_virtual_display()
2580 adev->enable_virtual_display, adev->mode_info.num_crtc); in amdgpu_device_enable_virtual_display()
2588 if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) { in amdgpu_device_set_sriov_virtual_display()
2589 adev->mode_info.num_crtc = 1; in amdgpu_device_set_sriov_virtual_display()
2590 adev->enable_virtual_display = true; in amdgpu_device_set_sriov_virtual_display()
2591 dev_info(adev->dev, "virtual_display:%d, num_crtc:%d\n", in amdgpu_device_set_sriov_virtual_display()
2592 adev->enable_virtual_display, in amdgpu_device_set_sriov_virtual_display()
2593 adev->mode_info.num_crtc); in amdgpu_device_set_sriov_virtual_display()
2598 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
2605 * Returns 0 on success, -EINVAL on failure.
2613 adev->firmware.gpu_info_fw = NULL; in amdgpu_device_parse_gpu_info_fw()
2615 switch (adev->asic_type) { in amdgpu_device_parse_gpu_info_fw()
2625 if (adev->apu_flags & AMD_APU_IS_RAVEN2) in amdgpu_device_parse_gpu_info_fw()
2627 else if (adev->apu_flags & AMD_APU_IS_PICASSO) in amdgpu_device_parse_gpu_info_fw()
2636 if (adev->mman.discovery_bin) in amdgpu_device_parse_gpu_info_fw()
2645 err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, in amdgpu_device_parse_gpu_info_fw()
2649 dev_err(adev->dev, in amdgpu_device_parse_gpu_info_fw()
2655 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data; in amdgpu_device_parse_gpu_info_fw()
2656 amdgpu_ucode_print_gpu_info_hdr(&hdr->header); in amdgpu_device_parse_gpu_info_fw()
2658 switch (hdr->version_major) { in amdgpu_device_parse_gpu_info_fw()
2662 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data + in amdgpu_device_parse_gpu_info_fw()
2663 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); in amdgpu_device_parse_gpu_info_fw()
2668 if (adev->asic_type == CHIP_NAVI12) in amdgpu_device_parse_gpu_info_fw()
2671 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se); in amdgpu_device_parse_gpu_info_fw()
2672 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh); in amdgpu_device_parse_gpu_info_fw()
2673 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se); in amdgpu_device_parse_gpu_info_fw()
2674 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se); in amdgpu_device_parse_gpu_info_fw()
2675 adev->gfx.config.max_texture_channel_caches = in amdgpu_device_parse_gpu_info_fw()
2676 le32_to_cpu(gpu_info_fw->gc_num_tccs); in amdgpu_device_parse_gpu_info_fw()
2677 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs); in amdgpu_device_parse_gpu_info_fw()
2678 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds); in amdgpu_device_parse_gpu_info_fw()
2679 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth); in amdgpu_device_parse_gpu_info_fw()
2680 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth); in amdgpu_device_parse_gpu_info_fw()
2681 adev->gfx.config.double_offchip_lds_buf = in amdgpu_device_parse_gpu_info_fw()
2682 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer); in amdgpu_device_parse_gpu_info_fw()
2683 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size); in amdgpu_device_parse_gpu_info_fw()
2684 adev->gfx.cu_info.max_waves_per_simd = in amdgpu_device_parse_gpu_info_fw()
2685 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd); in amdgpu_device_parse_gpu_info_fw()
2686 adev->gfx.cu_info.max_scratch_slots_per_cu = in amdgpu_device_parse_gpu_info_fw()
2687 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu); in amdgpu_device_parse_gpu_info_fw()
2688 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size); in amdgpu_device_parse_gpu_info_fw()
2689 if (hdr->version_minor >= 1) { in amdgpu_device_parse_gpu_info_fw()
2691 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data + in amdgpu_device_parse_gpu_info_fw()
2692 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); in amdgpu_device_parse_gpu_info_fw()
2693 adev->gfx.config.num_sc_per_sh = in amdgpu_device_parse_gpu_info_fw()
2694 le32_to_cpu(gpu_info_fw->num_sc_per_sh); in amdgpu_device_parse_gpu_info_fw()
2695 adev->gfx.config.num_packer_per_sc = in amdgpu_device_parse_gpu_info_fw()
2696 le32_to_cpu(gpu_info_fw->num_packer_per_sc); in amdgpu_device_parse_gpu_info_fw()
2704 if (hdr->version_minor == 2) { in amdgpu_device_parse_gpu_info_fw()
2706 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data + in amdgpu_device_parse_gpu_info_fw()
2707 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); in amdgpu_device_parse_gpu_info_fw()
2708 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box; in amdgpu_device_parse_gpu_info_fw()
2713 dev_err(adev->dev, in amdgpu_device_parse_gpu_info_fw()
2714 "Unsupported gpu_info table %d\n", hdr->header.ucode_version); in amdgpu_device_parse_gpu_info_fw()
2715 err = -EINVAL; in amdgpu_device_parse_gpu_info_fw()
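All gpu_info firmware fields are stored little-endian regardless of host byte order, so the payload pointer is computed from the header's ucode_array_offset_bytes and every field goes through le32_to_cpu(). The access pattern in miniature (the two-field layout here is invented for the example):

#include <asm/byteorder.h>
#include <linux/printk.h>
#include <linux/types.h>

struct demo_fw_header {
        __le32 version;
        __le32 payload_offset;          /* like ucode_array_offset_bytes */
};

struct demo_fw_payload {
        __le32 num_se;
        __le32 num_cu_per_sh;
};

static void demo_parse(const u8 *fw_data)
{
        const struct demo_fw_header *hdr = (const void *)fw_data;
        const struct demo_fw_payload *p =
                (const void *)(fw_data + le32_to_cpu(hdr->payload_offset));

        /* host byte order from here on */
        pr_info("se=%u cu/sh=%u\n",
                le32_to_cpu(p->num_se), le32_to_cpu(p->num_cu_per_sh));
}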
2725 adev->uid_info = kzalloc(sizeof(struct amdgpu_uid), GFP_KERNEL); in amdgpu_uid_init()
2726 if (!adev->uid_info) { in amdgpu_uid_init()
2727 dev_warn(adev->dev, "Failed to allocate memory for UID\n"); in amdgpu_uid_init()
2730 adev->uid_info->adev = adev; in amdgpu_uid_init()
2736 kfree(adev->uid_info); in amdgpu_uid_fini()
2737 adev->uid_info = NULL; in amdgpu_uid_fini()
2741 * amdgpu_device_ip_early_init - run early init for hardware IPs
2766 switch (adev->asic_type) { in amdgpu_device_ip_early_init()
2773 adev->family = AMDGPU_FAMILY_SI; in amdgpu_device_ip_early_init()
2785 if (adev->flags & AMD_IS_APU) in amdgpu_device_ip_early_init()
2786 adev->family = AMDGPU_FAMILY_KV; in amdgpu_device_ip_early_init()
2788 adev->family = AMDGPU_FAMILY_CI; in amdgpu_device_ip_early_init()
2804 if (adev->flags & AMD_IS_APU) in amdgpu_device_ip_early_init()
2805 adev->family = AMDGPU_FAMILY_CZ; in amdgpu_device_ip_early_init()
2807 adev->family = AMDGPU_FAMILY_VI; in amdgpu_device_ip_early_init()
2823 dev_err(adev->dev, "Unsupported A0 hardware\n"); in amdgpu_device_ip_early_init()
2824 return -ENODEV; /* device unsupported - no device error */ in amdgpu_device_ip_early_init()
2830 ((adev->flags & AMD_IS_APU) == 0) && in amdgpu_device_ip_early_init()
2831 !dev_is_removable(&adev->pdev->dev)) in amdgpu_device_ip_early_init()
2832 adev->flags |= AMD_IS_PX; in amdgpu_device_ip_early_init()
2834 if (!(adev->flags & AMD_IS_APU)) { in amdgpu_device_ip_early_init()
2835 parent = pcie_find_root_port(adev->pdev); in amdgpu_device_ip_early_init()
2836 adev->has_pr3 = parent ? pci_pr3_present(parent) : false; in amdgpu_device_ip_early_init()
2839 adev->pm.pp_feature = amdgpu_pp_feature_mask; in amdgpu_device_ip_early_init()
2841 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; in amdgpu_device_ip_early_init()
2842 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID) in amdgpu_device_ip_early_init()
2843 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK; in amdgpu_device_ip_early_init()
2845 adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK; in amdgpu_device_ip_early_init()
2847 adev->virt.is_xgmi_node_migrate_enabled = false; in amdgpu_device_ip_early_init()
2849 adev->virt.is_xgmi_node_migrate_enabled = in amdgpu_device_ip_early_init()
2854 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_early_init()
2855 ip_block = &adev->ip_blocks[i]; in amdgpu_device_ip_early_init()
2858 dev_warn(adev->dev, "disabled ip block: %d <%s>\n", i, in amdgpu_device_ip_early_init()
2859 adev->ip_blocks[i].version->funcs->name); in amdgpu_device_ip_early_init()
2860 adev->ip_blocks[i].status.valid = false; in amdgpu_device_ip_early_init()
2861 } else if (ip_block->version->funcs->early_init) { in amdgpu_device_ip_early_init()
2862 r = ip_block->version->funcs->early_init(ip_block); in amdgpu_device_ip_early_init()
2863 if (r == -ENOENT) { in amdgpu_device_ip_early_init()
2864 adev->ip_blocks[i].status.valid = false; in amdgpu_device_ip_early_init()
2866 dev_err(adev->dev, in amdgpu_device_ip_early_init()
2868 adev->ip_blocks[i].version->funcs->name, in amdgpu_device_ip_early_init()
2872 adev->ip_blocks[i].status.valid = true; in amdgpu_device_ip_early_init()
2875 adev->ip_blocks[i].status.valid = true; in amdgpu_device_ip_early_init()
2878 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { in amdgpu_device_ip_early_init()
2890 return -EINVAL; in amdgpu_device_ip_early_init()
2892 if (optional && !adev->bios) in amdgpu_device_ip_early_init()
2894 adev->dev, in amdgpu_device_ip_early_init()
2897 if (adev->bios) { in amdgpu_device_ip_early_init()
2900 dev_err(adev->dev, in amdgpu_device_ip_early_init()
2918 return -ENODEV; in amdgpu_device_ip_early_init()
2920 if (adev->gmc.xgmi.supported) in amdgpu_device_ip_early_init()
2926 if (ip_block->status.valid) in amdgpu_device_ip_early_init()
2929 adev->cg_flags &= amdgpu_cg_mask; in amdgpu_device_ip_early_init()
2930 adev->pg_flags &= amdgpu_pg_mask; in amdgpu_device_ip_early_init()
2939 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_hw_init_phase1()
2940 if (!adev->ip_blocks[i].status.sw) in amdgpu_device_ip_hw_init_phase1()
2942 if (adev->ip_blocks[i].status.hw) in amdgpu_device_ip_hw_init_phase1()
2945 adev, adev->ip_blocks[i].version->type)) in amdgpu_device_ip_hw_init_phase1()
2947 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || in amdgpu_device_ip_hw_init_phase1()
2948 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) || in amdgpu_device_ip_hw_init_phase1()
2949 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { in amdgpu_device_ip_hw_init_phase1()
2950 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); in amdgpu_device_ip_hw_init_phase1()
2952 dev_err(adev->dev, in amdgpu_device_ip_hw_init_phase1()
2954 adev->ip_blocks[i].version->funcs->name, in amdgpu_device_ip_hw_init_phase1()
2958 adev->ip_blocks[i].status.hw = true; in amdgpu_device_ip_hw_init_phase1()
2969 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_hw_init_phase2()
2970 if (!adev->ip_blocks[i].status.sw) in amdgpu_device_ip_hw_init_phase2()
2972 if (adev->ip_blocks[i].status.hw) in amdgpu_device_ip_hw_init_phase2()
2975 adev, adev->ip_blocks[i].version->type)) in amdgpu_device_ip_hw_init_phase2()
2977 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); in amdgpu_device_ip_hw_init_phase2()
2979 dev_err(adev->dev, in amdgpu_device_ip_hw_init_phase2()
2981 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_hw_init_phase2()
2984 adev->ip_blocks[i].status.hw = true; in amdgpu_device_ip_hw_init_phase2()
2996 if (adev->asic_type >= CHIP_VEGA10) { in amdgpu_device_fw_loading()
2997 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_fw_loading()
2998 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP) in amdgpu_device_fw_loading()
3005 if (!adev->ip_blocks[i].status.sw) in amdgpu_device_fw_loading()
3009 if (adev->ip_blocks[i].status.hw) in amdgpu_device_fw_loading()
3012 if (amdgpu_in_reset(adev) || adev->in_suspend) { in amdgpu_device_fw_loading()
3013 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); in amdgpu_device_fw_loading()
3017 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); in amdgpu_device_fw_loading()
3019 dev_err(adev->dev, in amdgpu_device_fw_loading()
3021 adev->ip_blocks[i] in amdgpu_device_fw_loading()
3022 .version->funcs->name, in amdgpu_device_fw_loading()
3026 adev->ip_blocks[i].status.hw = true; in amdgpu_device_fw_loading()
3032 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA) in amdgpu_device_fw_loading()
3043 .timeout_wq = adev->reset_domain->wq, in amdgpu_device_init_schedulers()
3044 .dev = adev->dev, in amdgpu_device_init_schedulers()
3050 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_device_init_schedulers()
3053 if (!ring || ring->no_scheduler) in amdgpu_device_init_schedulers()
3056 switch (ring->funcs->type) { in amdgpu_device_init_schedulers()
3058 timeout = adev->gfx_timeout; in amdgpu_device_init_schedulers()
3061 timeout = adev->compute_timeout; in amdgpu_device_init_schedulers()
3064 timeout = adev->sdma_timeout; in amdgpu_device_init_schedulers()
3067 timeout = adev->video_timeout; in amdgpu_device_init_schedulers()
3072 args.credit_limit = ring->num_hw_submission; in amdgpu_device_init_schedulers()
3073 args.score = ring->sched_score; in amdgpu_device_init_schedulers()
3074 args.name = ring->name; in amdgpu_device_init_schedulers()
3076 r = drm_sched_init(&ring->sched, &args); in amdgpu_device_init_schedulers()
3078 dev_err(adev->dev, in amdgpu_device_init_schedulers()
3080 ring->name); in amdgpu_device_init_schedulers()
3085 dev_err(adev->dev, in amdgpu_device_init_schedulers()
3087 ring->name); in amdgpu_device_init_schedulers()
3092 dev_err(adev->dev, in amdgpu_device_init_schedulers()
3094 ring->name); in amdgpu_device_init_schedulers()
3099 if (adev->xcp_mgr) in amdgpu_device_init_schedulers()
3107 * amdgpu_device_ip_init - run init for hardware IPs
3126 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_init()
3127 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_init()
3129 if (adev->ip_blocks[i].version->funcs->sw_init) { in amdgpu_device_ip_init()
3130 r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]); in amdgpu_device_ip_init()
3132 dev_err(adev->dev, in amdgpu_device_ip_init()
3134 adev->ip_blocks[i].version->funcs->name, in amdgpu_device_ip_init()
3139 adev->ip_blocks[i].status.sw = true; in amdgpu_device_ip_init()
3142 adev, adev->ip_blocks[i].version->type)) in amdgpu_device_ip_init()
3145 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { in amdgpu_device_ip_init()
3147 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); in amdgpu_device_ip_init()
3149 dev_err(adev->dev, "hw_init %d failed %d\n", i, in amdgpu_device_ip_init()
3153 adev->ip_blocks[i].status.hw = true; in amdgpu_device_ip_init()
3154 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { in amdgpu_device_ip_init()
3162 dev_err(adev->dev, in amdgpu_device_ip_init()
3167 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); in amdgpu_device_ip_init()
3169 dev_err(adev->dev, "hw_init %d failed %d\n", i, in amdgpu_device_ip_init()
3175 dev_err(adev->dev, in amdgpu_device_ip_init()
3179 adev->ip_blocks[i].status.hw = true; in amdgpu_device_ip_init()
3182 if (adev->gfx.mcbp) { in amdgpu_device_ip_init()
3183 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj, in amdgpu_device_ip_init()
3188 dev_err(adev->dev, in amdgpu_device_ip_init()
3196 dev_err(adev->dev, "allocate seq64 failed %d\n", in amdgpu_device_ip_init()
3208 dev_err(adev->dev, "IB initialization failed (%d).\n", r); in amdgpu_device_ip_init()
3244 init_badpage = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI); in amdgpu_device_ip_init()
3252 if (adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_device_ip_init()
3258 r = -ENOENT; in amdgpu_device_ip_init()
3262 if (!hive->reset_domain || in amdgpu_device_ip_init()
3263 !amdgpu_reset_get_reset_domain(hive->reset_domain)) { in amdgpu_device_ip_init()
3264 r = -ENOENT; in amdgpu_device_ip_init()
3270 amdgpu_reset_put_reset_domain(adev->reset_domain); in amdgpu_device_ip_init()
3271 adev->reset_domain = hive->reset_domain; in amdgpu_device_ip_init()
3281 if (adev->mman.buffer_funcs_ring->sched.ready) in amdgpu_device_ip_init()
3285 if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) { in amdgpu_device_ip_init()
3301 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
3311 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM); in amdgpu_device_fill_reset_magic()
3315 * amdgpu_device_check_vram_lost - check if vram is valid
3326 if (memcmp(adev->gart.ptr, adev->reset_magic, in amdgpu_device_check_vram_lost()
3349 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
3369 for (j = 0; j < adev->num_ip_blocks; j++) { in amdgpu_device_set_cg_state()
3370 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; in amdgpu_device_set_cg_state()
3371 if (!adev->ip_blocks[i].status.late_initialized) in amdgpu_device_set_cg_state()
3374 if (adev->in_s0ix && in amdgpu_device_set_cg_state()
3375 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || in amdgpu_device_set_cg_state()
3376 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) in amdgpu_device_set_cg_state()
3379 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && in amdgpu_device_set_cg_state()
3380 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && in amdgpu_device_set_cg_state()
3381 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && in amdgpu_device_set_cg_state()
3382 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && in amdgpu_device_set_cg_state()
3383 adev->ip_blocks[i].version->funcs->set_clockgating_state) { in amdgpu_device_set_cg_state()
3385 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i], in amdgpu_device_set_cg_state()
3388 dev_err(adev->dev, in amdgpu_device_set_cg_state()
3390 adev->ip_blocks[i].version->funcs->name, in amdgpu_device_set_cg_state()
3408 for (j = 0; j < adev->num_ip_blocks; j++) { in amdgpu_device_set_pg_state()
3409 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; in amdgpu_device_set_pg_state()
3410 if (!adev->ip_blocks[i].status.late_initialized) in amdgpu_device_set_pg_state()
3413 if (adev->in_s0ix && in amdgpu_device_set_pg_state()
3414 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || in amdgpu_device_set_pg_state()
3415 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) in amdgpu_device_set_pg_state()
3418 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && in amdgpu_device_set_pg_state()
3419 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && in amdgpu_device_set_pg_state()
3420 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && in amdgpu_device_set_pg_state()
3421 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && in amdgpu_device_set_pg_state()
3422 adev->ip_blocks[i].version->funcs->set_powergating_state) { in amdgpu_device_set_pg_state()
3424 r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i], in amdgpu_device_set_pg_state()
3427 dev_err(adev->dev, in amdgpu_device_set_pg_state()
3429 adev->ip_blocks[i].version->funcs->name, in amdgpu_device_set_pg_state()
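Note the index trick shared by the CG and PG loops above: gating walks the IP blocks front to back, ungating walks them back to front, so hardware comes back up in the reverse of the order it went down. Reduced to the mapping itself:

#include <linux/types.h>

/* j walks 0..n-1; the block actually visited is i. */
static inline int demo_visit_order(bool gate, int j, int n)
{
        return gate ? j : n - j - 1;    /* gate: forward; ungate: reverse */
}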
3456 adev = gpu_ins->adev; in amdgpu_device_enable_mgpu_fan_boost()
3457 if (!(adev->flags & AMD_IS_APU || amdgpu_sriov_multi_vf_mode(adev)) && in amdgpu_device_enable_mgpu_fan_boost()
3458 !gpu_ins->mgpu_fan_enabled) { in amdgpu_device_enable_mgpu_fan_boost()
3463 gpu_ins->mgpu_fan_enabled = 1; in amdgpu_device_enable_mgpu_fan_boost()
3474 * amdgpu_device_ip_late_init - run late init for hardware IPs
3490 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_late_init()
3491 if (!adev->ip_blocks[i].status.hw) in amdgpu_device_ip_late_init()
3493 if (adev->ip_blocks[i].version->funcs->late_init) { in amdgpu_device_ip_late_init()
3494 r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]); in amdgpu_device_ip_late_init()
3496 dev_err(adev->dev, in amdgpu_device_ip_late_init()
3498 adev->ip_blocks[i].version->funcs->name, in amdgpu_device_ip_late_init()
3503 adev->ip_blocks[i].status.late_initialized = true; in amdgpu_device_ip_late_init()
3508 dev_err(adev->dev, "amdgpu_ras_late_init failed %d", r); in amdgpu_device_ip_late_init()
3522 dev_err(adev->dev, "enable mgpu fan boost failed (%d).\n", r); in amdgpu_device_ip_late_init()
3526 ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) || in amdgpu_device_ip_late_init()
3527 adev->asic_type == CHIP_ALDEBARAN)) in amdgpu_device_ip_late_init()
3530 if (adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_device_ip_late_init()
3534 * Reset device p-state to low as this was booted with high. in amdgpu_device_ip_late_init()
3546 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) { in amdgpu_device_ip_late_init()
3549 if (gpu_instance->adev->flags & AMD_IS_APU) in amdgpu_device_ip_late_init()
3552 r = amdgpu_xgmi_set_pstate(gpu_instance->adev, in amdgpu_device_ip_late_init()
3555 dev_err(adev->dev, in amdgpu_device_ip_late_init()
3571 struct amdgpu_device *adev = ip_block->adev; in amdgpu_ip_block_hw_fini()
3574 if (!ip_block->version->funcs->hw_fini) { in amdgpu_ip_block_hw_fini()
3575 dev_err(adev->dev, "hw_fini of IP block <%s> not defined\n", in amdgpu_ip_block_hw_fini()
3576 ip_block->version->funcs->name); in amdgpu_ip_block_hw_fini()
3578 r = ip_block->version->funcs->hw_fini(ip_block); in amdgpu_ip_block_hw_fini()
3581 dev_dbg(adev->dev, in amdgpu_ip_block_hw_fini()
3583 ip_block->version->funcs->name, r); in amdgpu_ip_block_hw_fini()
3587 ip_block->status.hw = false; in amdgpu_ip_block_hw_fini()
3591 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
3604 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_smu_fini_early()
3605 if (!adev->ip_blocks[i].status.hw) in amdgpu_device_smu_fini_early()
3607 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { in amdgpu_device_smu_fini_early()
3608 amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]); in amdgpu_device_smu_fini_early()
3618 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_fini_early()
3619 if (!adev->ip_blocks[i].version->funcs->early_fini) in amdgpu_device_ip_fini_early()
3622 r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]); in amdgpu_device_ip_fini_early()
3624 dev_dbg(adev->dev, in amdgpu_device_ip_fini_early()
3626 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_fini_early()
3639 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { in amdgpu_device_ip_fini_early()
3640 if (!adev->ip_blocks[i].status.hw) in amdgpu_device_ip_fini_early()
3643 amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]); in amdgpu_device_ip_fini_early()
3648 dev_err(adev->dev, in amdgpu_device_ip_fini_early()
3656 * amdgpu_device_ip_fini - run fini for hardware IPs
3672 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done) in amdgpu_device_ip_fini()
3675 if (adev->gmc.xgmi.num_physical_nodes > 1) in amdgpu_device_ip_fini()
3680 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { in amdgpu_device_ip_fini()
3681 if (!adev->ip_blocks[i].status.sw) in amdgpu_device_ip_fini()
3684 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { in amdgpu_device_ip_fini()
3686 amdgpu_free_static_csa(&adev->virt.csa_obj); in amdgpu_device_ip_fini()
3693 if (adev->ip_blocks[i].version->funcs->sw_fini) { in amdgpu_device_ip_fini()
3694 r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]); in amdgpu_device_ip_fini()
3697 dev_dbg(adev->dev, in amdgpu_device_ip_fini()
3699 adev->ip_blocks[i].version->funcs->name, in amdgpu_device_ip_fini()
3703 adev->ip_blocks[i].status.sw = false; in amdgpu_device_ip_fini()
3704 adev->ip_blocks[i].status.valid = false; in amdgpu_device_ip_fini()
3707 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { in amdgpu_device_ip_fini()
3708 if (!adev->ip_blocks[i].status.late_initialized) in amdgpu_device_ip_fini()
3710 if (adev->ip_blocks[i].version->funcs->late_fini) in amdgpu_device_ip_fini()
3711 adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]); in amdgpu_device_ip_fini()
3712 adev->ip_blocks[i].status.late_initialized = false; in amdgpu_device_ip_fini()
3722 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
3734 dev_err(adev->dev, "ib ring test failed (%d).\n", r); in amdgpu_device_delayed_init_work_handler()
3742 WARN_ON_ONCE(adev->gfx.gfx_off_state); in amdgpu_device_delay_enable_gfx_off()
3743 WARN_ON_ONCE(adev->gfx.gfx_off_req_count); in amdgpu_device_delay_enable_gfx_off()
3746 adev->gfx.gfx_off_state = true; in amdgpu_device_delay_enable_gfx_off()
3750 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
3773 dev_warn(adev->dev, "Failed to disallow df cstate\n"); in amdgpu_device_ip_suspend_phase1()
3775 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { in amdgpu_device_ip_suspend_phase1()
3776 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_suspend_phase1()
3780 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE) in amdgpu_device_ip_suspend_phase1()
3784 r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); in amdgpu_device_ip_suspend_phase1()
3793 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3807 if (adev->in_s0ix) in amdgpu_device_ip_suspend_phase2()
3810 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { in amdgpu_device_ip_suspend_phase2()
3811 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_suspend_phase2()
3814 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) in amdgpu_device_ip_suspend_phase2()
3818 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { in amdgpu_device_ip_suspend_phase2()
3819 adev->ip_blocks[i].status.hw = false; in amdgpu_device_ip_suspend_phase2()
3825 adev, adev->ip_blocks[i].version->type)) in amdgpu_device_ip_suspend_phase2()
3831 if (adev->in_s0ix && in amdgpu_device_ip_suspend_phase2()
3832 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX && in amdgpu_device_ip_suspend_phase2()
3834 cancel_delayed_work_sync(&adev->gfx.idle_work); in amdgpu_device_ip_suspend_phase2()
3840 if (adev->in_s0ix && in amdgpu_device_ip_suspend_phase2()
3841 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP || in amdgpu_device_ip_suspend_phase2()
3842 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || in amdgpu_device_ip_suspend_phase2()
3843 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES)) in amdgpu_device_ip_suspend_phase2()
3847 if (adev->in_s0ix && in amdgpu_device_ip_suspend_phase2()
3850 (adev->ip_blocks[i].version->type == in amdgpu_device_ip_suspend_phase2()
3854 /* Once swPSP provides the IMU, RLC FW binaries to TOS during cold-boot. in amdgpu_device_ip_suspend_phase2()
3855 * These are in TMR, hence are expected to be reused by PSP-TOS to reload in amdgpu_device_ip_suspend_phase2()
3857 * from here based on PMFW -> PSP message during re-init sequence. in amdgpu_device_ip_suspend_phase2()
3862 (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs && in amdgpu_device_ip_suspend_phase2()
3863 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) in amdgpu_device_ip_suspend_phase2()
3867 r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); in amdgpu_device_ip_suspend_phase2()
3868 adev->ip_blocks[i].status.hw = false; in amdgpu_device_ip_suspend_phase2()
3872 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { in amdgpu_device_ip_suspend_phase2()
3873 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state); in amdgpu_device_ip_suspend_phase2()
3875 dev_err(adev->dev, in amdgpu_device_ip_suspend_phase2()
3877 adev->mp1_state, r); in amdgpu_device_ip_suspend_phase2()
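Taken together, the two phases order suspend as display first, everything else second (in reverse IP order, with the S0ix carve-outs above). A sketch of the sequencing, on the assumption that amdgpu_device_ip_suspend(), documented below, chains the phases exactly this way:

	int r;

	r = amdgpu_device_ip_suspend_phase1(adev);	/* DCE (display) only */
	if (r)
		return r;
	r = amdgpu_device_ip_suspend_phase2(adev);	/* all remaining IPs, reverse order */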
3888 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3931 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_reinit_early_sriov()
3935 block = &adev->ip_blocks[i]; in amdgpu_device_ip_reinit_early_sriov()
3936 block->status.hw = false; in amdgpu_device_ip_reinit_early_sriov()
3940 if (block->version->type != ip_order[j] || in amdgpu_device_ip_reinit_early_sriov()
3941 !block->status.valid) in amdgpu_device_ip_reinit_early_sriov()
3944 r = block->version->funcs->hw_init(&adev->ip_blocks[i]); in amdgpu_device_ip_reinit_early_sriov()
3946 dev_err(adev->dev, "RE-INIT-early: %s failed\n", in amdgpu_device_ip_reinit_early_sriov()
3947 block->version->funcs->name); in amdgpu_device_ip_reinit_early_sriov()
3950 block->status.hw = true; in amdgpu_device_ip_reinit_early_sriov()
3980 if (block->status.valid && !block->status.hw) { in amdgpu_device_ip_reinit_late_sriov()
3981 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) { in amdgpu_device_ip_reinit_late_sriov()
3984 r = block->version->funcs->hw_init(block); in amdgpu_device_ip_reinit_late_sriov()
3988 dev_err(adev->dev, "RE-INIT-late: %s failed\n", in amdgpu_device_ip_reinit_late_sriov()
3989 block->version->funcs->name); in amdgpu_device_ip_reinit_late_sriov()
3992 block->status.hw = true; in amdgpu_device_ip_reinit_late_sriov()
4000 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
4015 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_resume_phase1()
4016 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) in amdgpu_device_ip_resume_phase1()
4018 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || in amdgpu_device_ip_resume_phase1()
4019 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || in amdgpu_device_ip_resume_phase1()
4020 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || in amdgpu_device_ip_resume_phase1()
4021 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) { in amdgpu_device_ip_resume_phase1()
4023 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); in amdgpu_device_ip_resume_phase1()
4033 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
4049 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_resume_phase2()
4050 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) in amdgpu_device_ip_resume_phase2()
4052 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || in amdgpu_device_ip_resume_phase2()
4053 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || in amdgpu_device_ip_resume_phase2()
4054 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || in amdgpu_device_ip_resume_phase2()
4055 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE || in amdgpu_device_ip_resume_phase2()
4056 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) in amdgpu_device_ip_resume_phase2()
4058 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); in amdgpu_device_ip_resume_phase2()
4067 * amdgpu_device_ip_resume_phase3 - run resume for hardware IPs
4083 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_resume_phase3()
4084 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) in amdgpu_device_ip_resume_phase3()
4086 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) { in amdgpu_device_ip_resume_phase3()
4087 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); in amdgpu_device_ip_resume_phase3()
4097 * amdgpu_device_ip_resume - run resume for hardware IPs
4122 if (adev->mman.buffer_funcs_ring->sched.ready) in amdgpu_device_ip_resume()
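Resume is the mirror image, split three ways so that VRAM access is up before anything that depends on it. A sketch of the ordering amdgpu_device_ip_resume() appears to encode above (error handling plus fence-driver and buffer-funcs bring-up elided):

	amdgpu_device_ip_resume_phase1(adev);	/* COMMON, GMC, IH (+ PSP on SR-IOV) */
	amdgpu_device_ip_resume_phase2(adev);	/* all remaining IPs */
	amdgpu_device_ip_resume_phase3(adev);	/* DCE last, once VRAM is usable */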
4136 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
4140 * Query the VBIOS data tables to determine if the board supports SR-IOV.
4145 if (adev->is_atom_fw) { in amdgpu_device_detect_sriov_bios()
4147 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; in amdgpu_device_detect_sriov_bios()
4150 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; in amdgpu_device_detect_sriov_bios()
4153 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)) in amdgpu_device_detect_sriov_bios()
4159 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
4186 * Fallback to the non-DC driver here by default so as not to in amdgpu_device_asic_has_dc_support()
4202 * Fallback to the non-DC driver here by default so as not to in amdgpu_device_asic_has_dc_support()
4212 &pdev->dev, in amdgpu_device_asic_has_dc_support()
4220 * amdgpu_device_has_dc_support - check if dc is supported
4228 if (adev->enable_virtual_display || in amdgpu_device_has_dc_support()
4229 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)) in amdgpu_device_has_dc_support()
4232 return amdgpu_device_asic_has_dc_support(adev->pdev, adev->asic_type); in amdgpu_device_has_dc_support()
4253 task_barrier_enter(&hive->tb); in amdgpu_device_xgmi_reset_func()
4254 adev->asic_reset_res = amdgpu_device_baco_enter(adev); in amdgpu_device_xgmi_reset_func()
4256 if (adev->asic_reset_res) in amdgpu_device_xgmi_reset_func()
4259 task_barrier_exit(&hive->tb); in amdgpu_device_xgmi_reset_func()
4260 adev->asic_reset_res = amdgpu_device_baco_exit(adev); in amdgpu_device_xgmi_reset_func()
4262 if (adev->asic_reset_res) in amdgpu_device_xgmi_reset_func()
4268 task_barrier_full(&hive->tb); in amdgpu_device_xgmi_reset_func()
4269 adev->asic_reset_res = amdgpu_asic_reset(adev); in amdgpu_device_xgmi_reset_func()
4273 if (adev->asic_reset_res) in amdgpu_device_xgmi_reset_func()
4274 dev_warn(adev->dev, in amdgpu_device_xgmi_reset_func()
4276 adev->asic_reset_res, adev_to_drm(adev)->unique); in amdgpu_device_xgmi_reset_func()
4291 adev->compute_timeout = adev->gfx_timeout = msecs_to_jiffies(10000); in amdgpu_device_get_job_timeout_settings()
4292 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; in amdgpu_device_get_job_timeout_settings()
4306 dev_warn(adev->dev, "lockup timeout disabled"); in amdgpu_device_get_job_timeout_settings()
4314 adev->gfx_timeout = timeout; in amdgpu_device_get_job_timeout_settings()
4317 adev->compute_timeout = timeout; in amdgpu_device_get_job_timeout_settings()
4320 adev->sdma_timeout = timeout; in amdgpu_device_get_job_timeout_settings()
4323 adev->video_timeout = timeout; in amdgpu_device_get_job_timeout_settings()
4331 * it should apply to all non-compute jobs. in amdgpu_device_get_job_timeout_settings()
4334 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; in amdgpu_device_get_job_timeout_settings()
4336 adev->compute_timeout = adev->gfx_timeout; in amdgpu_device_get_job_timeout_settings()
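A worked example of the parameter this parser consumes, assuming the positional order the switch above assigns (gfx, compute, sdma, video, in milliseconds): amdgpu.lockup_timeout=10000,60000,10000,10000. A single value covers every non-compute queue, as the comment above notes, and the elided guard appears to extend it to compute only for SR-IOV or passthrough.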
4344 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
4354 domain = iommu_get_domain_for_dev(adev->dev); in amdgpu_device_check_iommu_direct_map()
4355 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY) in amdgpu_device_check_iommu_direct_map()
4356 adev->ram_is_direct_mapped = true; in amdgpu_device_check_iommu_direct_map()
4361 * amdgpu_device_check_iommu_remap - Check if DMA remapping is enabled.
4371 domain = iommu_get_domain_for_dev(adev->dev); in amdgpu_device_check_iommu_remap()
4372 if (domain && (domain->type == IOMMU_DOMAIN_DMA || in amdgpu_device_check_iommu_remap()
4373 domain->type == IOMMU_DOMAIN_DMA_FQ)) in amdgpu_device_check_iommu_remap()
4383 adev->gfx.mcbp = true; in amdgpu_device_set_mcbp()
4385 adev->gfx.mcbp = false; in amdgpu_device_set_mcbp()
4388 adev->gfx.mcbp = true; in amdgpu_device_set_mcbp()
4390 if (adev->gfx.mcbp) in amdgpu_device_set_mcbp()
4391 dev_info(adev->dev, "MCBP is enabled\n"); in amdgpu_device_set_mcbp()
4395 * amdgpu_device_init - initialize the driver
4407 struct pci_dev *pdev = adev->pdev; in amdgpu_device_init()
4413 adev->shutdown = false; in amdgpu_device_init()
4414 adev->flags = flags; in amdgpu_device_init()
4417 adev->asic_type = amdgpu_force_asic_type; in amdgpu_device_init()
4419 adev->asic_type = flags & AMD_ASIC_MASK; in amdgpu_device_init()
4421 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; in amdgpu_device_init()
4423 adev->usec_timeout *= 10; in amdgpu_device_init()
4424 adev->gmc.gart_size = 512 * 1024 * 1024; in amdgpu_device_init()
4425 adev->accel_working = false; in amdgpu_device_init()
4426 adev->num_rings = 0; in amdgpu_device_init()
4427 RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub()); in amdgpu_device_init()
4428 adev->mman.buffer_funcs = NULL; in amdgpu_device_init()
4429 adev->mman.buffer_funcs_ring = NULL; in amdgpu_device_init()
4430 adev->vm_manager.vm_pte_funcs = NULL; in amdgpu_device_init()
4431 adev->vm_manager.vm_pte_num_scheds = 0; in amdgpu_device_init()
4432 adev->gmc.gmc_funcs = NULL; in amdgpu_device_init()
4433 adev->harvest_ip_mask = 0x0; in amdgpu_device_init()
4434 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); in amdgpu_device_init()
4435 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); in amdgpu_device_init()
4437 adev->smc_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
4438 adev->smc_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
4439 adev->pcie_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
4440 adev->pcie_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
4441 adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext; in amdgpu_device_init()
4442 adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext; in amdgpu_device_init()
4443 adev->pciep_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
4444 adev->pciep_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
4445 adev->pcie_rreg64 = &amdgpu_invalid_rreg64; in amdgpu_device_init()
4446 adev->pcie_wreg64 = &amdgpu_invalid_wreg64; in amdgpu_device_init()
4447 adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext; in amdgpu_device_init()
4448 adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext; in amdgpu_device_init()
4449 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
4450 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
4451 adev->didt_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
4452 adev->didt_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
4453 adev->gc_cac_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
4454 adev->gc_cac_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
4455 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; in amdgpu_device_init()
4456 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; in amdgpu_device_init()
4459 adev->dev, in amdgpu_device_init()
4461 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, in amdgpu_device_init()
4462 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); in amdgpu_device_init()
4467 mutex_init(&adev->firmware.mutex); in amdgpu_device_init()
4468 mutex_init(&adev->pm.mutex); in amdgpu_device_init()
4469 mutex_init(&adev->gfx.gpu_clock_mutex); in amdgpu_device_init()
4470 mutex_init(&adev->srbm_mutex); in amdgpu_device_init()
4471 mutex_init(&adev->gfx.pipe_reserve_mutex); in amdgpu_device_init()
4472 mutex_init(&adev->gfx.gfx_off_mutex); in amdgpu_device_init()
4473 mutex_init(&adev->gfx.partition_mutex); in amdgpu_device_init()
4474 mutex_init(&adev->grbm_idx_mutex); in amdgpu_device_init()
4475 mutex_init(&adev->mn_lock); in amdgpu_device_init()
4476 mutex_init(&adev->virt.vf_errors.lock); in amdgpu_device_init()
4477 hash_init(adev->mn_hash); in amdgpu_device_init()
4478 mutex_init(&adev->psp.mutex); in amdgpu_device_init()
4479 mutex_init(&adev->notifier_lock); in amdgpu_device_init()
4480 mutex_init(&adev->pm.stable_pstate_ctx_lock); in amdgpu_device_init()
4481 mutex_init(&adev->benchmark_mutex); in amdgpu_device_init()
4482 mutex_init(&adev->gfx.reset_sem_mutex); in amdgpu_device_init()
4484 mutex_init(&adev->enforce_isolation_mutex); in amdgpu_device_init()
4486 adev->isolation[i].spearhead = dma_fence_get_stub(); in amdgpu_device_init()
4487 amdgpu_sync_create(&adev->isolation[i].active); in amdgpu_device_init()
4488 amdgpu_sync_create(&adev->isolation[i].prev); in amdgpu_device_init()
4490 mutex_init(&adev->gfx.userq_sch_mutex); in amdgpu_device_init()
4491 mutex_init(&adev->gfx.workload_profile_mutex); in amdgpu_device_init()
4492 mutex_init(&adev->vcn.workload_profile_mutex); in amdgpu_device_init()
4493 mutex_init(&adev->userq_mutex); in amdgpu_device_init()
4501 spin_lock_init(&adev->mmio_idx_lock); in amdgpu_device_init()
4502 spin_lock_init(&adev->smc_idx_lock); in amdgpu_device_init()
4503 spin_lock_init(&adev->pcie_idx_lock); in amdgpu_device_init()
4504 spin_lock_init(&adev->uvd_ctx_idx_lock); in amdgpu_device_init()
4505 spin_lock_init(&adev->didt_idx_lock); in amdgpu_device_init()
4506 spin_lock_init(&adev->gc_cac_idx_lock); in amdgpu_device_init()
4507 spin_lock_init(&adev->se_cac_idx_lock); in amdgpu_device_init()
4508 spin_lock_init(&adev->audio_endpt_idx_lock); in amdgpu_device_init()
4509 spin_lock_init(&adev->mm_stats.lock); in amdgpu_device_init()
4510 spin_lock_init(&adev->virt.rlcg_reg_lock); in amdgpu_device_init()
4511 spin_lock_init(&adev->wb.lock); in amdgpu_device_init()
4513 xa_init_flags(&adev->userq_xa, XA_FLAGS_LOCK_IRQ); in amdgpu_device_init()
4515 INIT_LIST_HEAD(&adev->reset_list); in amdgpu_device_init()
4517 INIT_LIST_HEAD(&adev->ras_list); in amdgpu_device_init()
4519 INIT_LIST_HEAD(&adev->pm.od_kobj_list); in amdgpu_device_init()
4521 INIT_LIST_HEAD(&adev->userq_mgr_list); in amdgpu_device_init()
4523 INIT_DELAYED_WORK(&adev->delayed_init_work, in amdgpu_device_init()
4525 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work, in amdgpu_device_init()
4537 INIT_DELAYED_WORK(&adev->gfx.enforce_isolation[i].work, in amdgpu_device_init()
4539 adev->gfx.enforce_isolation[i].adev = adev; in amdgpu_device_init()
4540 adev->gfx.enforce_isolation[i].xcp_id = i; in amdgpu_device_init()
4543 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func); in amdgpu_device_init()
4545 adev->gfx.gfx_off_req_count = 1; in amdgpu_device_init()
4546 adev->gfx.gfx_off_residency = 0; in amdgpu_device_init()
4547 adev->gfx.gfx_off_entrycount = 0; in amdgpu_device_init()
4548 adev->pm.ac_power = power_supply_is_system_supplied() > 0; in amdgpu_device_init()
4550 atomic_set(&adev->throttling_logging_enabled, 1); in amdgpu_device_init()
4553 * to avoid log flooding. "-1" is subtracted since the thermal in amdgpu_device_init()
4558 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1); in amdgpu_device_init()
4560 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE); in amdgpu_device_init()
4564 if (adev->asic_type >= CHIP_BONAIRE) { in amdgpu_device_init()
4565 adev->rmmio_base = pci_resource_start(adev->pdev, 5); in amdgpu_device_init()
4566 adev->rmmio_size = pci_resource_len(adev->pdev, 5); in amdgpu_device_init()
4568 adev->rmmio_base = pci_resource_start(adev->pdev, 2); in amdgpu_device_init()
4569 adev->rmmio_size = pci_resource_len(adev->pdev, 2); in amdgpu_device_init()
4573 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN); in amdgpu_device_init()
4575 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); in amdgpu_device_init()
4576 if (!adev->rmmio) in amdgpu_device_init()
4577 return -ENOMEM; in amdgpu_device_init()
4579 dev_info(adev->dev, "register mmio base: 0x%08X\n", in amdgpu_device_init()
4580 (uint32_t)adev->rmmio_base); in amdgpu_device_init()
4581 dev_info(adev->dev, "register mmio size: %u\n", in amdgpu_device_init()
4582 (unsigned int)adev->rmmio_size); in amdgpu_device_init()
4589 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev"); in amdgpu_device_init()
4590 if (!adev->reset_domain) in amdgpu_device_init()
4591 return -ENOMEM; in amdgpu_device_init()
4600 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); in amdgpu_device_init()
4618 * No need to remove conflicting FBs for non-display class devices. in amdgpu_device_init()
4621 if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA || in amdgpu_device_init()
4622 (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) { in amdgpu_device_init()
4624 r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name); in amdgpu_device_init()
4637 adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT; in amdgpu_device_init()
4641 if (adev->gmc.xgmi.supported) { in amdgpu_device_init()
4642 r = adev->gfxhub.funcs->get_xgmi_info(adev); in amdgpu_device_init()
4649 if (adev->virt.fw_reserve.p_pf2vf) in amdgpu_device_init()
4650 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *) in amdgpu_device_init()
4651 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags == in amdgpu_device_init()
4656 } else if ((adev->flags & AMD_IS_APU) && in amdgpu_device_init()
4659 adev->have_atomics_support = true; in amdgpu_device_init()
4661 adev->have_atomics_support = in amdgpu_device_init()
4662 !pci_enable_atomic_ops_to_root(adev->pdev, in amdgpu_device_init()
4667 if (!adev->have_atomics_support) in amdgpu_device_init()
4668 dev_info(adev->dev, "PCIE atomic ops are not supported\n"); in amdgpu_device_init()
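A sketch of the PCIe atomics probe above; the assumption is that the capability mask amdgpu requests is 32- and 64-bit atomic completion, and a zero return means every bridge on the path to the root port advertises it:

	bool have_atomics;

	have_atomics = !pci_enable_atomic_ops_to_root(pdev,
				PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
				PCI_EXP_DEVCAP2_ATOMIC_COMP64);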
4682 if (adev->bios) in amdgpu_device_init()
4689 if (adev->gmc.xgmi.num_physical_nodes) { in amdgpu_device_init()
4690 dev_info(adev->dev, "Pending hive reset.\n"); in amdgpu_device_init()
4707 dev_err(adev->dev, "asic reset on init failed\n"); in amdgpu_device_init()
4714 if (!adev->bios) { in amdgpu_device_init()
4715 dev_err(adev->dev, "no vBIOS found\n"); in amdgpu_device_init()
4716 r = -EINVAL; in amdgpu_device_init()
4719 dev_info(adev->dev, "GPU posting now...\n"); in amdgpu_device_init()
4722 dev_err(adev->dev, "gpu post error!\n"); in amdgpu_device_init()
4727 if (adev->bios) { in amdgpu_device_init()
4728 if (adev->is_atom_fw) { in amdgpu_device_init()
4732 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n"); in amdgpu_device_init()
4740 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); in amdgpu_device_init()
4753 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n"); in amdgpu_device_init()
4763 dev_err(adev->dev, "amdgpu_device_ip_init failed\n"); in amdgpu_device_init()
4770 dev_info(adev->dev, in amdgpu_device_init()
4772 adev->gfx.config.max_shader_engines, in amdgpu_device_init()
4773 adev->gfx.config.max_sh_per_se, in amdgpu_device_init()
4774 adev->gfx.config.max_cu_per_sh, in amdgpu_device_init()
4775 adev->gfx.cu_info.number); in amdgpu_device_init()
4777 adev->accel_working = true; in amdgpu_device_init()
4787 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); in amdgpu_device_init()
4799 if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) { in amdgpu_device_init()
4802 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n"); in amdgpu_device_init()
4808 queue_delayed_work(system_wq, &adev->delayed_init_work, in amdgpu_device_init()
4814 flush_delayed_work(&adev->delayed_init_work); in amdgpu_device_init()
4824 drm_err(&adev->ddev, in amdgpu_device_init()
4829 dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r); in amdgpu_device_init()
4833 adev->ucode_sysfs_en = false; in amdgpu_device_init()
4834 dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r); in amdgpu_device_init()
4836 adev->ucode_sysfs_en = true; in amdgpu_device_init()
4840 dev_err(adev->dev, "Could not create amdgpu device attr\n"); in amdgpu_device_init()
4842 r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group); in amdgpu_device_init()
4844 dev_err(adev->dev, in amdgpu_device_init()
4854 dev_err(adev->dev, "amdgpu_pmu_init failed\n"); in amdgpu_device_init()
4857 if (amdgpu_device_cache_pci_state(adev->pdev)) in amdgpu_device_init()
4864 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) in amdgpu_device_init()
4865 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode); in amdgpu_device_init()
4869 if (px || (!dev_is_removable(&adev->pdev->dev) && in amdgpu_device_init()
4871 vga_switcheroo_register_client(adev->pdev, in amdgpu_device_init()
4875 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); in amdgpu_device_init()
4877 if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI) in amdgpu_device_init()
4882 adev->pm_nb.notifier_call = amdgpu_device_pm_notifier; in amdgpu_device_init()
4883 r = register_pm_notifier(&adev->pm_nb); in amdgpu_device_init()
4898 dev_err(adev->dev, "VF exclusive mode timeout\n"); in amdgpu_device_init()
4900 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; in amdgpu_device_init()
4901 adev->virt.ops = NULL; in amdgpu_device_init()
4902 r = -EAGAIN; in amdgpu_device_init()
4916 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1); in amdgpu_device_unmap_mmio()
4918 /* Unmap all mapped BARs - Doorbell, registers and VRAM */ in amdgpu_device_unmap_mmio()
4921 iounmap(adev->rmmio); in amdgpu_device_unmap_mmio()
4922 adev->rmmio = NULL; in amdgpu_device_unmap_mmio()
4923 if (adev->mman.aper_base_kaddr) in amdgpu_device_unmap_mmio()
4924 iounmap(adev->mman.aper_base_kaddr); in amdgpu_device_unmap_mmio()
4925 adev->mman.aper_base_kaddr = NULL; in amdgpu_device_unmap_mmio()
4928 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) { in amdgpu_device_unmap_mmio()
4929 arch_phys_wc_del(adev->gmc.vram_mtrr); in amdgpu_device_unmap_mmio()
4930 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size); in amdgpu_device_unmap_mmio()
4935 * amdgpu_device_fini_hw - tear down the driver
4944 dev_info(adev->dev, "amdgpu: finishing device.\n"); in amdgpu_device_fini_hw()
4945 flush_delayed_work(&adev->delayed_init_work); in amdgpu_device_fini_hw()
4947 if (adev->mman.initialized) in amdgpu_device_fini_hw()
4948 drain_workqueue(adev->mman.bdev.wq); in amdgpu_device_fini_hw()
4949 adev->shutdown = true; in amdgpu_device_fini_hw()
4951 unregister_pm_notifier(&adev->pm_nb); in amdgpu_device_fini_hw()
4963 if (adev->mode_info.mode_config_initialized) { in amdgpu_device_fini_hw()
4971 if (adev->pm.sysfs_initialized) in amdgpu_device_fini_hw()
4973 if (adev->ucode_sysfs_en) in amdgpu_device_fini_hw()
4990 if (adev->mman.initialized) in amdgpu_device_fini_hw()
4991 ttm_device_clear_dma_mappings(&adev->mman.bdev); in amdgpu_device_fini_hw()
5007 amdgpu_ucode_release(&adev->firmware.gpu_info_fw); in amdgpu_device_fini_sw()
5008 adev->accel_working = false; in amdgpu_device_fini_sw()
5009 dma_fence_put(rcu_dereference_protected(adev->gang_submit, true)); in amdgpu_device_fini_sw()
5011 dma_fence_put(adev->isolation[i].spearhead); in amdgpu_device_fini_sw()
5012 amdgpu_sync_free(&adev->isolation[i].active); in amdgpu_device_fini_sw()
5013 amdgpu_sync_free(&adev->isolation[i].prev); in amdgpu_device_fini_sw()
5021 if (adev->bios) { in amdgpu_device_fini_sw()
5027 kfree(adev->fru_info); in amdgpu_device_fini_sw()
5028 adev->fru_info = NULL; in amdgpu_device_fini_sw()
5030 kfree(adev->xcp_mgr); in amdgpu_device_fini_sw()
5031 adev->xcp_mgr = NULL; in amdgpu_device_fini_sw()
5035 if (px || (!dev_is_removable(&adev->pdev->dev) && in amdgpu_device_fini_sw()
5037 vga_switcheroo_unregister_client(adev->pdev); in amdgpu_device_fini_sw()
5040 vga_switcheroo_fini_domain_pm_ops(adev->dev); in amdgpu_device_fini_sw()
5042 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) in amdgpu_device_fini_sw()
5043 vga_client_unregister(adev->pdev); in amdgpu_device_fini_sw()
5047 iounmap(adev->rmmio); in amdgpu_device_fini_sw()
5048 adev->rmmio = NULL; in amdgpu_device_fini_sw()
5054 if (adev->mman.discovery_bin) in amdgpu_device_fini_sw()
5057 amdgpu_reset_put_reset_domain(adev->reset_domain); in amdgpu_device_fini_sw()
5058 adev->reset_domain = NULL; in amdgpu_device_fini_sw()
5060 kfree(adev->pci_state); in amdgpu_device_fini_sw()
5061 kfree(adev->pcie_reset_ctx.swds_pcistate); in amdgpu_device_fini_sw()
5062 kfree(adev->pcie_reset_ctx.swus_pcistate); in amdgpu_device_fini_sw()
5066 * amdgpu_device_evict_resources - evict device resources
5079 if (!adev->in_s4 && (adev->flags & AMD_IS_APU)) in amdgpu_device_evict_resources()
5088 dev_warn(adev->dev, "evicting device resources failed\n"); in amdgpu_device_evict_resources()
5092 if (adev->in_s4) { in amdgpu_device_evict_resources()
5093 ret = ttm_device_prepare_hibernation(&adev->mman.bdev); in amdgpu_device_evict_resources()
5095 dev_err(adev->dev, "prepare hibernation failed, %d\n", ret); in amdgpu_device_evict_resources()
5104 * amdgpu_device_pm_notifier - Notification block for Suspend/Hibernate events
5120 adev->in_s4 = true; in amdgpu_device_pm_notifier()
5123 adev->in_s4 = false; in amdgpu_device_pm_notifier()
5131 * amdgpu_device_prepare - prepare for device suspend
5144 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) in amdgpu_device_prepare()
5152 flush_delayed_work(&adev->gfx.gfx_off_delay_work); in amdgpu_device_prepare()
5154 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_prepare()
5155 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_prepare()
5157 if (!adev->ip_blocks[i].version->funcs->prepare_suspend) in amdgpu_device_prepare()
5159 r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]); in amdgpu_device_prepare()
5168 * amdgpu_device_complete - complete power state transition
5180 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_complete()
5181 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_complete()
5183 if (!adev->ip_blocks[i].version->funcs->complete) in amdgpu_device_complete()
5185 adev->ip_blocks[i].version->funcs->complete(&adev->ip_blocks[i]); in amdgpu_device_complete()
5190 * amdgpu_device_suspend - initiate device suspend
5193 * @notify_clients: notify in-kernel DRM clients
5204 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) in amdgpu_device_suspend()
5207 adev->in_suspend = true; in amdgpu_device_suspend()
5210 if (!adev->in_runpm) in amdgpu_device_suspend()
5219 dev_warn(adev->dev, "smart shift update failed\n"); in amdgpu_device_suspend()
5224 cancel_delayed_work_sync(&adev->delayed_init_work); in amdgpu_device_suspend()
5230 amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); in amdgpu_device_suspend()
5252 unsigned int prev_physical_node_id = adev->gmc.xgmi.physical_node_id; in amdgpu_virt_resume()
5261 r = adev->gfxhub.funcs->get_xgmi_info(adev); in amdgpu_virt_resume()
5265 dev_info(adev->dev, "xgmi node, old id %d, new id %d\n", in amdgpu_virt_resume()
5266 prev_physical_node_id, adev->gmc.xgmi.physical_node_id); in amdgpu_virt_resume()
5268 adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev); in amdgpu_virt_resume()
5269 adev->vm_manager.vram_base_offset += in amdgpu_virt_resume()
5270 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; in amdgpu_virt_resume()
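Worked example of the fix-up above (illustrative numbers): with get_mc_fb_offset() returning 0, a 32 GiB node_segment_size, and a VF that moved from physical node 0 to node 2 across the migration, vram_base_offset becomes 2 * 32 GiB, so GPUVM translations of VRAM follow the device into its new node's segment.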
5276 * amdgpu_device_resume - initiate device resume
5279 * @notify_clients: notify in-kernel DRM clients
5302 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) in amdgpu_device_resume()
5305 if (adev->in_s0ix) in amdgpu_device_resume()
5312 dev_err(adev->dev, "amdgpu asic init failed\n"); in amdgpu_device_resume()
5318 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r); in amdgpu_device_resume()
5322 r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); in amdgpu_device_resume()
5334 queue_delayed_work(system_wq, &adev->delayed_init_work, in amdgpu_device_resume()
5341 if (!r && !adev->in_runpm) in amdgpu_device_resume()
5349 flush_delayed_work(&adev->delayed_init_work); in amdgpu_device_resume()
5356 if (adev->mode_info.num_crtc) { in amdgpu_device_resume()
5367 dev->dev->power.disable_depth++; in amdgpu_device_resume()
5369 if (!adev->dc_enabled) in amdgpu_device_resume()
5374 dev->dev->power.disable_depth--; in amdgpu_device_resume()
5379 adev->in_suspend = false; in amdgpu_device_resume()
5382 dev_warn(adev->dev, "smart shift update failed\n"); in amdgpu_device_resume()
5388 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
5408 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_check_soft_reset()
5409 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_check_soft_reset()
5411 if (adev->ip_blocks[i].version->funcs->check_soft_reset) in amdgpu_device_ip_check_soft_reset()
5412 adev->ip_blocks[i].status.hang = in amdgpu_device_ip_check_soft_reset()
5413 adev->ip_blocks[i].version->funcs->check_soft_reset( in amdgpu_device_ip_check_soft_reset()
5414 &adev->ip_blocks[i]); in amdgpu_device_ip_check_soft_reset()
5415 if (adev->ip_blocks[i].status.hang) { in amdgpu_device_ip_check_soft_reset()
5416 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); in amdgpu_device_ip_check_soft_reset()
5424 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
5438 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_pre_soft_reset()
5439 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_pre_soft_reset()
5441 if (adev->ip_blocks[i].status.hang && in amdgpu_device_ip_pre_soft_reset()
5442 adev->ip_blocks[i].version->funcs->pre_soft_reset) { in amdgpu_device_ip_pre_soft_reset()
5443 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(&adev->ip_blocks[i]); in amdgpu_device_ip_pre_soft_reset()
5453 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
5468 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_need_full_reset()
5469 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_need_full_reset()
5471 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) || in amdgpu_device_ip_need_full_reset()
5472 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) || in amdgpu_device_ip_need_full_reset()
5473 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) || in amdgpu_device_ip_need_full_reset()
5474 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) || in amdgpu_device_ip_need_full_reset()
5475 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { in amdgpu_device_ip_need_full_reset()
5476 if (adev->ip_blocks[i].status.hang) { in amdgpu_device_ip_need_full_reset()
5477 dev_info(adev->dev, "Some blocks need full reset!\n"); in amdgpu_device_ip_need_full_reset()
5486 * amdgpu_device_ip_soft_reset - do a soft reset
5500 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_soft_reset()
5501 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_soft_reset()
5503 if (adev->ip_blocks[i].status.hang && in amdgpu_device_ip_soft_reset()
5504 adev->ip_blocks[i].version->funcs->soft_reset) { in amdgpu_device_ip_soft_reset()
5505 r = adev->ip_blocks[i].version->funcs->soft_reset(&adev->ip_blocks[i]); in amdgpu_device_ip_soft_reset()
5515 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
5529 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_post_soft_reset()
5530 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_post_soft_reset()
5532 if (adev->ip_blocks[i].status.hang && in amdgpu_device_ip_post_soft_reset()
5533 adev->ip_blocks[i].version->funcs->post_soft_reset) in amdgpu_device_ip_post_soft_reset()
5534 r = adev->ip_blocks[i].version->funcs->post_soft_reset(&adev->ip_blocks[i]); in amdgpu_device_ip_post_soft_reset()
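The four helpers above form a pipeline keyed on status.hang. A sketch of the order a reset path drives them in, assuming it mirrors the soft-reset branch of amdgpu_device_pre_asic_reset() further below:

	if (amdgpu_device_ip_check_soft_reset(adev)) {	/* mark hung IPs */
		amdgpu_device_ip_pre_soft_reset(adev);
		if (!amdgpu_device_ip_need_full_reset(adev)) {
			amdgpu_device_ip_soft_reset(adev);
			amdgpu_device_ip_post_soft_reset(adev);
		}
	}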
5543 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
5557 if (test_bit(AMDGPU_HOST_FLR, &reset_context->flags)) { in amdgpu_device_reset_sriov()
5561 clear_bit(AMDGPU_HOST_FLR, &reset_context->flags); in amdgpu_device_reset_sriov()
5593 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) in amdgpu_device_reset_sriov()
5604 if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) in amdgpu_device_reset_sriov()
5608 * bare-metal does. in amdgpu_device_reset_sriov()
5627 * amdgpu_device_has_job_running - check if there is any unfinished job
5641 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_device_has_job_running()
5653 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
5673 if (amdgpu_gpu_recovery == -1) { in amdgpu_device_should_recover_gpu()
5674 switch (adev->asic_type) { in amdgpu_device_should_recover_gpu()
5699 dev_info(adev->dev, "GPU recovery disabled.\n"); in amdgpu_device_should_recover_gpu()
5708 if (adev->bios) in amdgpu_device_mode1_reset()
5711 dev_info(adev->dev, "GPU mode1 reset\n"); in amdgpu_device_mode1_reset()
5714 * values are used in other cases like restore after mode-2 reset. in amdgpu_device_mode1_reset()
5716 amdgpu_device_cache_pci_state(adev->pdev); in amdgpu_device_mode1_reset()
5719 pci_clear_master(adev->pdev); in amdgpu_device_mode1_reset()
5722 dev_info(adev->dev, "GPU smu mode1 reset\n"); in amdgpu_device_mode1_reset()
5725 dev_info(adev->dev, "GPU psp mode1 reset\n"); in amdgpu_device_mode1_reset()
5732 amdgpu_device_load_pci_state(adev->pdev); in amdgpu_device_mode1_reset()
5738 for (i = 0; i < adev->usec_timeout; i++) { in amdgpu_device_mode1_reset()
5739 u32 memsize = adev->nbio.funcs->get_memsize(adev); in amdgpu_device_mode1_reset()
5746 if (i >= adev->usec_timeout) { in amdgpu_device_mode1_reset()
5747 ret = -ETIMEDOUT; in amdgpu_device_mode1_reset()
5751 if (adev->bios) in amdgpu_device_mode1_reset()
5757 dev_err(adev->dev, "GPU mode1 reset failed\n"); in amdgpu_device_mode1_reset()
5765 dev_info(adev->dev, "GPU link reset\n"); in amdgpu_device_link_reset()
5780 dev_err(adev->dev, "GPU link reset failed\n"); in amdgpu_device_link_reset()
5789 struct amdgpu_device *tmp_adev = reset_context->reset_req_dev; in amdgpu_device_pre_asic_reset()
5791 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); in amdgpu_device_pre_asic_reset()
5793 if (reset_context->reset_req_dev == adev) in amdgpu_device_pre_asic_reset()
5794 job = reset_context->job; in amdgpu_device_pre_asic_reset()
5803 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_device_pre_asic_reset()
5819 if (job && job->vm) in amdgpu_device_pre_asic_reset()
5820 drm_sched_increase_karma(&job->base); in amdgpu_device_pre_asic_reset()
5824 if (r == -EOPNOTSUPP) in amdgpu_device_pre_asic_reset()
5841 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n"); in amdgpu_device_pre_asic_reset()
5846 if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) { in amdgpu_device_pre_asic_reset()
5847 dev_info(tmp_adev->dev, "Dumping IP State\n"); in amdgpu_device_pre_asic_reset()
5849 for (i = 0; i < tmp_adev->num_ip_blocks; i++) in amdgpu_device_pre_asic_reset()
5850 if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state) in amdgpu_device_pre_asic_reset()
5851 tmp_adev->ip_blocks[i].version->funcs in amdgpu_device_pre_asic_reset()
5852 ->dump_ip_state((void *)&tmp_adev->ip_blocks[i]); in amdgpu_device_pre_asic_reset()
5853 dev_info(tmp_adev->dev, "Dumping IP State Completed\n"); in amdgpu_device_pre_asic_reset()
5859 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); in amdgpu_device_pre_asic_reset()
5862 &reset_context->flags); in amdgpu_device_pre_asic_reset()
5875 device_list_handle = reset_context->reset_device_list; in amdgpu_device_reinit_after_reset()
5878 return -EINVAL; in amdgpu_device_reinit_after_reset()
5880 full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); in amdgpu_device_reinit_after_reset()
5886 if (reset_context->method == AMD_RESET_METHOD_ON_INIT) in amdgpu_device_reinit_after_reset()
5900 dev_warn(tmp_adev->dev, "asic atom init failed!\n"); in amdgpu_device_reinit_after_reset()
5902 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n"); in amdgpu_device_reinit_after_reset()
5910 if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) in amdgpu_device_reinit_after_reset()
5911 amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job); in amdgpu_device_reinit_after_reset()
5915 tmp_adev->dev, in amdgpu_device_reinit_after_reset()
5925 tmp_adev->xcp_mgr); in amdgpu_device_reinit_after_reset()
5933 if (tmp_adev->mman.buffer_funcs_ring->sched.ready) in amdgpu_device_reinit_after_reset()
5949 if (!reset_context->hive && in amdgpu_device_reinit_after_reset()
5950 tmp_adev->gmc.xgmi.num_physical_nodes > 1) in amdgpu_device_reinit_after_reset()
5973 r = -EINVAL; in amdgpu_device_reinit_after_reset()
5978 if (reset_context->hive && in amdgpu_device_reinit_after_reset()
5979 tmp_adev->gmc.xgmi.num_physical_nodes > 1) in amdgpu_device_reinit_after_reset()
5981 reset_context->hive, tmp_adev); in amdgpu_device_reinit_after_reset()
5993 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r); in amdgpu_device_reinit_after_reset()
5994 r = -EAGAIN; in amdgpu_device_reinit_after_reset()
6000 tmp_adev->asic_reset_res = r; in amdgpu_device_reinit_after_reset()
6018 reset_context->reset_device_list = device_list_handle; in amdgpu_do_asic_reset()
6021 if (r == -EOPNOTSUPP) in amdgpu_do_asic_reset()
6028 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); in amdgpu_do_asic_reset()
6029 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags); in amdgpu_do_asic_reset()
6038 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_do_asic_reset()
6040 &tmp_adev->xgmi_reset_work)) in amdgpu_do_asic_reset()
6041 r = -EALREADY; in amdgpu_do_asic_reset()
6046 dev_err(tmp_adev->dev, in amdgpu_do_asic_reset()
6048 r, adev_to_drm(tmp_adev)->unique); in amdgpu_do_asic_reset()
6057 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_do_asic_reset()
6058 flush_work(&tmp_adev->xgmi_reset_work); in amdgpu_do_asic_reset()
6059 r = tmp_adev->asic_reset_res; in amdgpu_do_asic_reset()
6077 if (r == -EAGAIN) in amdgpu_do_asic_reset()
6078 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); in amdgpu_do_asic_reset()
6080 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); in amdgpu_do_asic_reset()
6092 adev->mp1_state = PP_MP1_STATE_SHUTDOWN; in amdgpu_device_set_mp1_state()
6095 adev->mp1_state = PP_MP1_STATE_RESET; in amdgpu_device_set_mp1_state()
6098 adev->mp1_state = PP_MP1_STATE_NONE; in amdgpu_device_set_mp1_state()
6106 adev->mp1_state = PP_MP1_STATE_NONE; in amdgpu_device_unset_mp1_state()
6113 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), in amdgpu_device_resume_display_audio()
6114 adev->pdev->bus->number, 1); in amdgpu_device_resume_display_audio()
6116 pm_runtime_enable(&(p->dev)); in amdgpu_device_resume_display_audio()
6117 pm_runtime_resume(&(p->dev)); in amdgpu_device_resume_display_audio()
6136 return -EINVAL; in amdgpu_device_suspend_display_audio()
6138 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), in amdgpu_device_suspend_display_audio()
6139 adev->pdev->bus->number, 1); in amdgpu_device_suspend_display_audio()
6141 return -ENODEV; in amdgpu_device_suspend_display_audio()
6143 expires = pm_runtime_autosuspend_expiration(&(p->dev)); in amdgpu_device_suspend_display_audio()
6153 while (!pm_runtime_status_suspended(&(p->dev))) { in amdgpu_device_suspend_display_audio()
6154 if (!pm_runtime_suspend(&(p->dev))) in amdgpu_device_suspend_display_audio()
6158 dev_warn(adev->dev, "failed to suspend display audio\n"); in amdgpu_device_suspend_display_audio()
6161 return -ETIMEDOUT; in amdgpu_device_suspend_display_audio()
6165 pm_runtime_disable(&(p->dev)); in amdgpu_device_suspend_display_audio()
6177 cancel_work(&adev->reset_work); in amdgpu_device_stop_pending_resets()
6180 if (adev->kfd.dev) in amdgpu_device_stop_pending_resets()
6181 cancel_work(&adev->kfd.reset_work); in amdgpu_device_stop_pending_resets()
6184 cancel_work(&adev->virt.flr_work); in amdgpu_device_stop_pending_resets()
6186 if (con && adev->ras_enabled) in amdgpu_device_stop_pending_resets()
6187 cancel_work(&con->recovery_work); in amdgpu_device_stop_pending_resets()
6214 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) { in amdgpu_device_recovery_prepare()
6215 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_device_recovery_prepare()
6216 list_add_tail(&tmp_adev->reset_list, device_list); in amdgpu_device_recovery_prepare()
6217 if (adev->shutdown) in amdgpu_device_recovery_prepare()
6218 tmp_adev->shutdown = true; in amdgpu_device_recovery_prepare()
6220 tmp_adev->pcie_reset_ctx.in_link_reset = true; in amdgpu_device_recovery_prepare()
6222 if (!list_is_first(&adev->reset_list, device_list)) in amdgpu_device_recovery_prepare()
6223 list_rotate_to_front(&adev->reset_list, device_list); in amdgpu_device_recovery_prepare()
6225 list_add_tail(&adev->reset_list, device_list); in amdgpu_device_recovery_prepare()
6238 amdgpu_device_lock_reset_domain(tmp_adev->reset_domain); in amdgpu_device_recovery_get_reset_lock()
6250 amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain); in amdgpu_device_recovery_put_reset_lock()
6278 tmp_adev->pcie_reset_ctx.audio_suspended = true; in amdgpu_device_halt_activities()
6282 cancel_delayed_work_sync(&tmp_adev->delayed_init_work); in amdgpu_device_halt_activities()
6300 struct amdgpu_ring *ring = tmp_adev->rings[i]; in amdgpu_device_halt_activities()
6305 drm_sched_stop(&ring->sched, job ? &job->base : NULL); in amdgpu_device_halt_activities()
6308 amdgpu_job_stop_all_jobs_on_sched(&ring->sched); in amdgpu_device_halt_activities()
6310 atomic_inc(&tmp_adev->gpu_reset_counter); in amdgpu_device_halt_activities()
6327 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ", in amdgpu_device_asic_reset()
6328 r, adev_to_drm(tmp_adev)->unique); in amdgpu_device_asic_reset()
6329 tmp_adev->asic_reset_res = r; in amdgpu_device_asic_reset()
6339 return -ENODEV; in amdgpu_device_asic_reset()
6342 dev_dbg(adev->dev, "Detected RAS error, waiting for FLR completion\n"); in amdgpu_device_asic_reset()
6344 set_bit(AMDGPU_HOST_FLR, &reset_context->flags); in amdgpu_device_asic_reset()
6348 if (AMDGPU_RETRY_SRIOV_RESET(r) && (retry_limit--) > 0) { in amdgpu_device_asic_reset()
6353 adev->asic_reset_res = r; in amdgpu_device_asic_reset()
6356 if (r && r == -EAGAIN) in amdgpu_device_asic_reset()
6384 struct amdgpu_ring *ring = tmp_adev->rings[i]; in amdgpu_device_sched_resume()
6389 drm_sched_start(&ring->sched, 0); in amdgpu_device_sched_resume()
6395 if (tmp_adev->asic_reset_res) { in amdgpu_device_sched_resume()
6400 if (reset_context->src != AMDGPU_RESET_SRC_RAS || in amdgpu_device_sched_resume()
6403 tmp_adev->dev, in amdgpu_device_sched_resume()
6406 &tmp_adev->gpu_reset_counter), in amdgpu_device_sched_resume()
6407 tmp_adev->asic_reset_res); in amdgpu_device_sched_resume()
6410 tmp_adev->asic_reset_res); in amdgpu_device_sched_resume()
6412 r = tmp_adev->asic_reset_res; in amdgpu_device_sched_resume()
6413 tmp_adev->asic_reset_res = 0; in amdgpu_device_sched_resume()
6415 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", in amdgpu_device_sched_resume()
6416 atomic_read(&tmp_adev->gpu_reset_counter)); in amdgpu_device_sched_resume()
6419 dev_warn(tmp_adev->dev, in amdgpu_device_sched_resume()
6441 if (!adev->kfd.init_complete) in amdgpu_device_gpu_resume()
6444 if (tmp_adev->pcie_reset_ctx.audio_suspended) in amdgpu_device_gpu_resume()
6456 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
6463 * Attempt to do soft-reset or full-reset and reinitialize Asic
6483 reset_context->src != AMDGPU_RESET_SRC_RAS) { in amdgpu_device_gpu_recover()
6484 dev_dbg(adev->dev, in amdgpu_device_gpu_recover()
6486 reset_context->src); in amdgpu_device_gpu_recover()
6500 amdgpu_ras_get_context(adev)->reboot) { in amdgpu_device_gpu_recover()
6501 dev_warn(adev->dev, "Emergency reboot."); in amdgpu_device_gpu_recover()
6507 dev_info(adev->dev, "GPU %s begin! Source: %d\n", in amdgpu_device_gpu_recover()
6509 reset_context->src); in amdgpu_device_gpu_recover()
6514 mutex_lock(&hive->hive_lock); in amdgpu_device_gpu_recover()
6516 reset_context->job = job; in amdgpu_device_gpu_recover()
6517 reset_context->hive = hive; in amdgpu_device_gpu_recover()
6539 * job->base holds a reference to parent fence in amdgpu_device_gpu_recover()
6541 if (job && dma_fence_is_signaled(&job->hw_fence.base)) { in amdgpu_device_gpu_recover()
6543 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset"); in amdgpu_device_gpu_recover()
6560 mutex_unlock(&hive->hive_lock); in amdgpu_device_gpu_recover()
6565 dev_info(adev->dev, "GPU reset end with ret = %d\n", r); in amdgpu_device_gpu_recover()
6567 atomic_set(&adev->reset_domain->reset_res, r); in amdgpu_device_gpu_recover()
6573 ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid); in amdgpu_device_gpu_recover()
6576 ti ? &ti->task : NULL); in amdgpu_device_gpu_recover()
6585 * amdgpu_device_partner_bandwidth - find the bandwidth of appropriate partner
6593 * This will exclude any virtual switches and links.
6599 struct pci_dev *parent = adev->pdev; in amdgpu_device_partner_bandwidth()
6610 if (parent->vendor == PCI_VENDOR_ID_ATI) in amdgpu_device_partner_bandwidth()
6618 pcie_bandwidth_available(adev->pdev, NULL, speed, width); in amdgpu_device_partner_bandwidth()
6623 * amdgpu_device_gpu_bandwidth - find the bandwidth of the GPU
6636 struct pci_dev *parent = adev->pdev; in amdgpu_device_gpu_bandwidth()
6642 if (parent && parent->vendor == PCI_VENDOR_ID_ATI) { in amdgpu_device_gpu_bandwidth()
6647 if (parent->vendor == PCI_VENDOR_ID_ATI) { in amdgpu_device_gpu_bandwidth()
6655 *speed = pcie_get_speed_cap(adev->pdev); in amdgpu_device_gpu_bandwidth()
6656 *width = pcie_get_width_cap(adev->pdev); in amdgpu_device_gpu_bandwidth()
6661 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
6675 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap; in amdgpu_device_get_pcie_info()
6678 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap; in amdgpu_device_get_pcie_info()
6681 if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) { in amdgpu_device_get_pcie_info()
6682 if (adev->pm.pcie_gen_mask == 0) in amdgpu_device_get_pcie_info()
6683 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK; in amdgpu_device_get_pcie_info()
6684 if (adev->pm.pcie_mlw_mask == 0) in amdgpu_device_get_pcie_info()
6685 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK; in amdgpu_device_get_pcie_info()
6689 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask) in amdgpu_device_get_pcie_info()
6696 if (adev->pm.pcie_gen_mask == 0) { in amdgpu_device_get_pcie_info()
6699 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6704 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6710 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6715 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6719 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6722 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1; in amdgpu_device_get_pcie_info()
6726 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6730 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6736 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6741 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6745 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6748 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; in amdgpu_device_get_pcie_info()
6752 if (adev->pm.pcie_mlw_mask == 0) { in amdgpu_device_get_pcie_info()
6755 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_ASIC_PCIE_MLW_MASK; in amdgpu_device_get_pcie_info()
6759 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 | in amdgpu_device_get_pcie_info()
6768 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 | in amdgpu_device_get_pcie_info()
6776 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 | in amdgpu_device_get_pcie_info()
6783 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 | in amdgpu_device_get_pcie_info()
6789 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 | in amdgpu_device_get_pcie_info()
6794 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 | in amdgpu_device_get_pcie_info()
6798 adev->pm.pcie_mlw_mask |= CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1; in amdgpu_device_get_pcie_info()
6806 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK; in amdgpu_device_get_pcie_info()
6810 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | in amdgpu_device_get_pcie_info()
6819 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | in amdgpu_device_get_pcie_info()
6827 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | in amdgpu_device_get_pcie_info()
6834 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | in amdgpu_device_get_pcie_info()
6840 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | in amdgpu_device_get_pcie_info()
6845 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | in amdgpu_device_get_pcie_info()
6849 adev->pm.pcie_mlw_mask |= CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; in amdgpu_device_get_pcie_info()
6859 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
6873 !adev->gmc.xgmi.connected_to_cpu && in amdgpu_device_is_peer_accessible()
6874 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0); in amdgpu_device_is_peer_accessible()
6876 dev_info(adev->dev, "PCIe P2P access from peer device %s is not supported by the chipset\n", in amdgpu_device_is_peer_accessible()
6877 pci_name(peer_adev->pdev)); in amdgpu_device_is_peer_accessible()
6879 bool is_large_bar = adev->gmc.visible_vram_size && in amdgpu_device_is_peer_accessible()
6880 adev->gmc.real_vram_size == adev->gmc.visible_vram_size; in amdgpu_device_is_peer_accessible()
6884 uint64_t address_mask = peer_adev->dev->dma_mask ? in amdgpu_device_is_peer_accessible()
6885 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1); in amdgpu_device_is_peer_accessible()
6887 adev->gmc.aper_base + adev->gmc.aper_size - 1; in amdgpu_device_is_peer_accessible()
6889 p2p_addressable = !(adev->gmc.aper_base & address_mask || in amdgpu_device_is_peer_accessible()
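The is_large_bar test above is the usual resizable-BAR criterion: peer DMA through the BAR needs all of VRAM to be CPU-visible. Worked example: a 16 GiB card exposing a 256 MiB aperture fails (visible 256 MiB != real 16 GiB); with the BAR resized to 16 GiB it passes, after which the aperture range is still checked against the peer's DMA mask before P2P is allowed.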
6903 return -ENOTSUPP; in amdgpu_device_baco_enter()
6905 if (ras && adev->ras_enabled && in amdgpu_device_baco_enter()
6906 adev->nbio.funcs->enable_doorbell_interrupt) in amdgpu_device_baco_enter()
6907 adev->nbio.funcs->enable_doorbell_interrupt(adev, false); in amdgpu_device_baco_enter()
6918 return -ENOTSUPP; in amdgpu_device_baco_exit()
6924 if (ras && adev->ras_enabled && in amdgpu_device_baco_exit()
6925 adev->nbio.funcs->enable_doorbell_interrupt) in amdgpu_device_baco_exit()
6926 adev->nbio.funcs->enable_doorbell_interrupt(adev, true); in amdgpu_device_baco_exit()
6928 if (amdgpu_passthrough(adev) && adev->nbio.funcs && in amdgpu_device_baco_exit()
6929 adev->nbio.funcs->clear_doorbell_interrupt) in amdgpu_device_baco_exit()
6930 adev->nbio.funcs->clear_doorbell_interrupt(adev); in amdgpu_device_baco_exit()
6936 * amdgpu_pci_error_detected - Called when a PCI error is detected.
6953 dev_info(adev->dev, "PCI error: detected callback!!\n"); in amdgpu_pci_error_detected()
6955 adev->pci_channel_state = state; in amdgpu_pci_error_detected()
6959 dev_info(adev->dev, "pci_channel_io_normal: state(%d)!!\n", state); in amdgpu_pci_error_detected()
6963 dev_info(adev->dev, "pci_channel_io_frozen: state(%d)!!\n", state); in amdgpu_pci_error_detected()
6969 dev_warn(adev->dev, in amdgpu_pci_error_detected()
6974 * Non-hive devices should be able to recover after in amdgpu_pci_error_detected()
6979 mutex_lock(&hive->hive_lock); in amdgpu_pci_error_detected()
6989 mutex_unlock(&hive->hive_lock); in amdgpu_pci_error_detected()
6993 dev_info(adev->dev, "pci_channel_io_perm_failure: state(%d)!!\n", state); in amdgpu_pci_error_detected()
7001 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
7009 dev_info(adev->dev, "PCI error: mmio enabled callback!!\n"); in amdgpu_pci_mmio_enabled()
7011 /* TODO - dump whatever for debugging purposes */ in amdgpu_pci_mmio_enabled()
7022 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
7042 dev_info(adev->dev, "PCI error: slot reset callback!!\n"); in amdgpu_pci_slot_reset()
7046 if (adev->pcie_reset_ctx.swus) in amdgpu_pci_slot_reset()
7047 link_dev = adev->pcie_reset_ctx.swus; in amdgpu_pci_slot_reset()
7049 link_dev = adev->pdev; in amdgpu_pci_slot_reset()
7055 timeout -= 10; in amdgpu_pci_slot_reset()
7060 r = -ETIME; in amdgpu_pci_slot_reset()
7069 for (i = 0; i < adev->usec_timeout; i++) { in amdgpu_pci_slot_reset()
7077 r = -ETIME; in amdgpu_pci_slot_reset()
7089 mutex_lock(&hive->hive_lock); in amdgpu_pci_slot_reset()
7091 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_pci_slot_reset()
7092 tmp_adev->pcie_reset_ctx.in_link_reset = true; in amdgpu_pci_slot_reset()
7093 list_add_tail(&tmp_adev->reset_list, &device_list); in amdgpu_pci_slot_reset()
7097 list_add_tail(&adev->reset_list, &device_list); in amdgpu_pci_slot_reset()
7103 if (amdgpu_device_cache_pci_state(adev->pdev)) in amdgpu_pci_slot_reset()
7104 pci_restore_state(adev->pdev); in amdgpu_pci_slot_reset()
7105 dev_info(adev->dev, "PCIe error recovery succeeded\n"); in amdgpu_pci_slot_reset()
7107 dev_err(adev->dev, "PCIe error recovery failed, err:%d\n", r); in amdgpu_pci_slot_reset()
7116 mutex_unlock(&hive->hive_lock); in amdgpu_pci_slot_reset()
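Before state can be restored, the slot-reset path has to wait for the link partner to become reachable again; the fragments show a millisecond-granular countdown (timeout -= 10) and a register poll bounded by adev->usec_timeout. A generic sketch of the config-space half of that wait; the 10 ms interval and the all-ones vendor-ID test are assumptions for illustration:

static int wait_for_device_sketch(struct pci_dev *pdev, int timeout_ms)
{
	u32 id;

	while (timeout_ms > 0) {
		/* an all-ones read means config space is not reachable yet */
		pci_read_config_dword(pdev, PCI_VENDOR_ID, &id);
		if (id != ~0U)
			return 0;
		msleep(10);
		timeout_ms -= 10;
	}
	return -ETIME;
}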
7124 * amdgpu_pci_resume() - resume normal ops after PCI reset
7138 dev_info(adev->dev, "PCI error: resume callback\n"); in amdgpu_pci_resume()
7141 if (adev->pci_channel_state != pci_channel_io_frozen) in amdgpu_pci_resume()
7148 mutex_lock(&hive->hive_lock); in amdgpu_pci_resume()
7149 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_pci_resume()
7150 tmp_adev->pcie_reset_ctx.in_link_reset = false; in amdgpu_pci_resume()
7151 list_add_tail(&tmp_adev->reset_list, &device_list); in amdgpu_pci_resume()
7154 list_add_tail(&adev->reset_list, &device_list); in amdgpu_pci_resume()
7161 mutex_unlock(&hive->hive_lock); in amdgpu_pci_resume()
7171 swds = pci_upstream_bridge(adev->pdev); in amdgpu_device_cache_switch_state()
7172 if (!swds || swds->vendor != PCI_VENDOR_ID_ATI || in amdgpu_device_cache_switch_state()
7177 (swus->vendor != PCI_VENDOR_ID_ATI && in amdgpu_device_cache_switch_state()
7178 swus->vendor != PCI_VENDOR_ID_AMD) || in amdgpu_device_cache_switch_state()
7183 if (adev->pcie_reset_ctx.swus) in amdgpu_device_cache_switch_state()
7185 /* Upstream bridge is ATI, assume it's SWUS/DS architecture */ in amdgpu_device_cache_switch_state()
7189 adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(swds); in amdgpu_device_cache_switch_state()
7194 adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(swus); in amdgpu_device_cache_switch_state()
7196 adev->pcie_reset_ctx.swus = swus; in amdgpu_device_cache_switch_state()
7204 if (!adev->pcie_reset_ctx.swds_pcistate || in amdgpu_device_load_switch_state()
7205 !adev->pcie_reset_ctx.swus_pcistate) in amdgpu_device_load_switch_state()
7208 pdev = adev->pcie_reset_ctx.swus; in amdgpu_device_load_switch_state()
7209 r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swus_pcistate); in amdgpu_device_load_switch_state()
7213 dev_warn(adev->dev, "Failed to load SWUS state, err:%d\n", r); in amdgpu_device_load_switch_state()
7217 pdev = pci_upstream_bridge(adev->pdev); in amdgpu_device_load_switch_state()
7218 r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swds_pcistate); in amdgpu_device_load_switch_state()
7222 dev_warn(adev->dev, "Failed to load SWDS state, err:%d\n", r); in amdgpu_device_load_switch_state()
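The two helpers above cache and restore the bridges between the root port and the GPU. A sketch of the pairing using the PCI core helpers visible in the fragments; on restore, the upstream port (SWUS) is reloaded before the downstream port (SWDS) found via pci_upstream_bridge():

static void cache_bridge_state_sketch(struct amdgpu_device *adev,
				      struct pci_dev *swus,
				      struct pci_dev *swds)
{
	/* snapshot config space, then duplicate it into driver-owned blobs */
	pci_save_state(swds);
	adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(swds);
	pci_save_state(swus);
	adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(swus);
	adev->pcie_reset_ctx.swus = swus;
}

static void load_bridge_state_sketch(struct amdgpu_device *adev)
{
	struct pci_dev *pdev = adev->pcie_reset_ctx.swus;

	/* pci_load_saved_state() returns 0 on success */
	if (!pci_load_saved_state(pdev, adev->pcie_reset_ctx.swus_pcistate))
		pci_restore_state(pdev);

	pdev = pci_upstream_bridge(adev->pdev);
	if (!pci_load_saved_state(pdev, adev->pcie_reset_ctx.swds_pcistate))
		pci_restore_state(pdev);
}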
7236 kfree(adev->pci_state); in amdgpu_device_cache_pci_state()
7238 adev->pci_state = pci_store_saved_state(pdev); in amdgpu_device_cache_pci_state()
7240 if (!adev->pci_state) { in amdgpu_device_cache_pci_state()
7241 dev_err(adev->dev, "Failed to store PCI saved state\n"); in amdgpu_device_cache_pci_state()
7245 dev_warn(adev->dev, "Failed to save PCI state, err:%d\n", r); in amdgpu_device_cache_pci_state()
7260 if (!adev->pci_state) in amdgpu_device_load_pci_state()
7263 r = pci_load_saved_state(pdev, adev->pci_state); in amdgpu_device_load_pci_state()
7268 dev_warn(adev->dev, "Failed to load PCI state, err:%d\n", r); in amdgpu_device_load_pci_state()
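The per-GPU variant follows the same lifecycle: pci_save_state() snapshots config space into the pci_dev, pci_store_saved_state() duplicates that snapshot into a driver-owned allocation (released with kfree()), and pci_load_saved_state() stages it for a later pci_restore_state(). A condensed sketch of the caching side:

static bool cache_pci_state_sketch(struct amdgpu_device *adev,
				   struct pci_dev *pdev)
{
	int r = pci_save_state(pdev);

	if (r) {
		dev_warn(adev->dev, "Failed to save PCI state, err:%d\n", r);
		return false;
	}

	kfree(adev->pci_state);		/* drop any previous snapshot */
	adev->pci_state = pci_store_saved_state(pdev);
	return adev->pci_state != NULL;
}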
7279 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) in amdgpu_device_flush_hdp()
7282 if (adev->gmc.xgmi.connected_to_cpu) in amdgpu_device_flush_hdp()
7285 if (ring && ring->funcs->emit_hdp_flush) in amdgpu_device_flush_hdp()
7295 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) in amdgpu_device_invalidate_hdp()
7298 if (adev->gmc.xgmi.connected_to_cpu) in amdgpu_device_invalidate_hdp()
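Both HDP helpers share the same early-outs: the flush/invalidate is skipped on APUs that are not in passthrough and on XGMI devices connected to the CPU, where HDP maintenance is unnecessary. The shared guard, as a sketch:

static bool hdp_needs_maintenance_sketch(struct amdgpu_device *adev)
{
	/* APU framebuffer lives in system memory unless passed through */
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
		return false;
	/* CPU-coherent XGMI links need no HDP flush */
	if (adev->gmc.xgmi.connected_to_cpu)
		return false;
	return true;
}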
7306 return atomic_read(&adev->reset_domain->in_gpu_reset); in amdgpu_in_reset()
7310 * amdgpu_device_halt() - bring the hardware to a halted, inaccessible state
7324 * 4. set adev->no_hw_access to avoid potential crashes after step 5
7331 struct pci_dev *pdev = adev->pdev; in amdgpu_device_halt()
7341 adev->no_hw_access = true; in amdgpu_device_halt()
7355 address = adev->nbio.funcs->get_pcie_port_index_offset(adev); in amdgpu_device_pcie_port_rreg()
7356 data = adev->nbio.funcs->get_pcie_port_data_offset(adev); in amdgpu_device_pcie_port_rreg()
7358 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_pcie_port_rreg()
7362 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_pcie_port_rreg()
7371 address = adev->nbio.funcs->get_pcie_port_index_offset(adev); in amdgpu_device_pcie_port_wreg()
7372 data = adev->nbio.funcs->get_pcie_port_data_offset(adev); in amdgpu_device_pcie_port_wreg()
7374 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_pcie_port_wreg()
7379 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_pcie_port_wreg()
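The two accessors above are the classic index/data pattern: write the port index into one register, then access the data register, with a spinlock held across both so a concurrent accessor cannot clobber the index. A condensed sketch of the read side; the read-back of the index register, an assumption here, posts the index write before the data access:

static u32 pcie_port_rreg_sketch(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 address, data, r;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);		/* select the port register */
	(void)RREG32(address);		/* post the index write */
	r = RREG32(data);		/* read the selected register */
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}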
7383 * amdgpu_device_get_gang - return a reference to the current gang
7393 fence = dma_fence_get_rcu_safe(&adev->gang_submit); in amdgpu_device_get_gang()
7399 * amdgpu_device_switch_gang - switch to a new gang
7424 } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit, in amdgpu_device_switch_gang()
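The cmpxchg() above is the core of the gang switch: the loop re-reads the current gang fence and retries until it can swap in the new one atomically. A hedged sketch of that loop; the real function additionally refuses to replace a gang whose submission has not signaled yet:

static struct dma_fence *
switch_gang_sketch(struct amdgpu_device *adev, struct dma_fence *gang)
{
	struct dma_fence *old = NULL;

	dma_fence_get(gang);
	do {
		dma_fence_put(old);
		old = amdgpu_device_get_gang(adev);
		if (old == gang)
			break;
		/* the real code bails out here if old is still unsignaled */
	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
			 old, gang) != old);

	if (old == gang) {
		/* already the current gang; drop the extra reference */
		dma_fence_put(gang);
		return NULL;
	}
	/* caller owns the reference to the replaced gang fence */
	return old;
}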
7437 * amdgpu_device_enforce_isolation - enforce HW isolation
7450 struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id]; in amdgpu_device_enforce_isolation()
7451 struct drm_sched_fence *f = job->base.s_fence; in amdgpu_device_enforce_isolation()
7460 if (ring->funcs->type != AMDGPU_RING_TYPE_GFX && in amdgpu_device_enforce_isolation()
7461 ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE) in amdgpu_device_enforce_isolation()
7469 owner = job->enforce_isolation ? f->owner : (void *)~0l; in amdgpu_device_enforce_isolation()
7471 mutex_lock(&adev->enforce_isolation_mutex); in amdgpu_device_enforce_isolation()
7478 if (&f->scheduled != isolation->spearhead && in amdgpu_device_enforce_isolation()
7479 !dma_fence_is_signaled(isolation->spearhead)) { in amdgpu_device_enforce_isolation()
7480 dep = isolation->spearhead; in amdgpu_device_enforce_isolation()
7484 if (isolation->owner != owner) { in amdgpu_device_enforce_isolation()
7491 if (!job->gang_submit) { in amdgpu_device_enforce_isolation()
7498 dma_fence_put(isolation->spearhead); in amdgpu_device_enforce_isolation()
7499 isolation->spearhead = dma_fence_get(&f->scheduled); in amdgpu_device_enforce_isolation()
7500 amdgpu_sync_move(&isolation->active, &isolation->prev); in amdgpu_device_enforce_isolation()
7501 trace_amdgpu_isolation(isolation->owner, owner); in amdgpu_device_enforce_isolation()
7502 isolation->owner = owner; in amdgpu_device_enforce_isolation()
7511 dep = amdgpu_sync_peek_fence(&isolation->prev, ring); in amdgpu_device_enforce_isolation()
7512 r = amdgpu_sync_fence(&isolation->active, &f->finished, GFP_NOWAIT); in amdgpu_device_enforce_isolation()
7514 dev_warn(adev->dev, "OOM tracking isolation\n"); in amdgpu_device_enforce_isolation()
7519 mutex_unlock(&adev->enforce_isolation_mutex); in amdgpu_device_enforce_isolation()
7525 switch (adev->asic_type) { in amdgpu_device_has_display_hardware()
7558 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)) in amdgpu_device_has_display_hardware()
7571 uint32_t loop = adev->usec_timeout; in amdgpu_device_wait_on_rreg()
7575 loop = adev->usec_timeout; in amdgpu_device_wait_on_rreg()
7580 loop--; in amdgpu_device_wait_on_rreg()
7583 adev->dev, in amdgpu_device_wait_on_rreg()
7587 ret = -ETIMEDOUT; in amdgpu_device_wait_on_rreg()
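The wait helper is a bounded busy-poll: re-read the register every microsecond until the expected value appears or adev->usec_timeout iterations elapse. A generic sketch of that shape; the mask and expected-value parameters are an assumption, since the fragments only show the timeout bookkeeping:

static int wait_on_reg_sketch(struct amdgpu_device *adev, u32 reg,
			      u32 expected, u32 mask)
{
	u32 loop = adev->usec_timeout;

	while ((RREG32(reg) & mask) != expected) {
		udelay(1);
		if (!loop--)
			return -ETIMEDOUT;
	}
	return 0;
}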
7598 if (!ring || !ring->adev) in amdgpu_get_soft_full_reset_mask()
7601 if (amdgpu_device_should_recover_gpu(ring->adev)) in amdgpu_get_soft_full_reset_mask()
7604 if (unlikely(!ring->adev->debug_disable_soft_recovery) && in amdgpu_get_soft_full_reset_mask()
7605 !amdgpu_sriov_vf(ring->adev) && ring->funcs->soft_recovery) in amdgpu_get_soft_full_reset_mask()
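Putting the fragments together, the helper returns a bitmask of viable reset types: a full reset whenever recovery is allowed, plus soft recovery when it is not debug-disabled, the device is not an SR-IOV VF, and the ring implements the hook. A sketch assuming the driver's AMDGPU_RESET_TYPE_* flags:

static u32 reset_mask_sketch(struct amdgpu_ring *ring)
{
	u32 mask = 0;

	if (!ring || !ring->adev)
		return mask;

	if (amdgpu_device_should_recover_gpu(ring->adev))
		mask |= AMDGPU_RESET_TYPE_FULL;

	/* soft recovery only when enabled, bare-metal, and implemented */
	if (unlikely(!ring->adev->debug_disable_soft_recovery) &&
	    !amdgpu_sriov_vf(ring->adev) && ring->funcs->soft_recovery)
		mask |= AMDGPU_RESET_TYPE_SOFT_RESET;

	return mask;
}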
7646 dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n", in amdgpu_device_set_uid()
7652 dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n", in amdgpu_device_set_uid()
7657 if (uid_info->uid[type][inst] != 0) { in amdgpu_device_set_uid()
7659 uid_info->adev->dev, in amdgpu_device_set_uid()
7661 uid_info->uid[type][inst], type, inst); in amdgpu_device_set_uid()
7664 uid_info->uid[type][inst] = uid; in amdgpu_device_set_uid()
7674 dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n", in amdgpu_device_get_uid()
7680 dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n", in amdgpu_device_get_uid()
7685 return uid_info->uid[type][inst]; in amdgpu_device_get_uid()
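Both UID accessors validate (type, instance) against fixed table bounds before touching uid_info->uid[type][inst], and the setter warns once before overwriting a non-zero slot. A sketch of the shared bounds check, assuming AMDGPU_UID_TYPE_MAX and AMDGPU_UID_INST_MAX as the table limits:

static bool uid_index_ok_sketch(struct amdgpu_uid *uid_info,
				enum amdgpu_uid_type type, uint8_t inst)
{
	/* dev_err_once() keeps a buggy caller from flooding the log */
	if (type >= AMDGPU_UID_TYPE_MAX) {
		dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
			     type);
		return false;
	}
	if (inst >= AMDGPU_UID_INST_MAX) {
		dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
			     inst);
		return false;
	}
	return true;
}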