Lines Matching defs:adev
180 static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev,
183 return (adev->init_lvl->hwini_ip_block_mask & (1U << block)) != 0;
186 void amdgpu_set_init_level(struct amdgpu_device *adev,
191 adev->init_lvl = &amdgpu_init_minimal_xgmi;
194 adev->init_lvl = &amdgpu_init_recovery;
199 adev->init_lvl = &amdgpu_init_default;
204 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev);
221 struct amdgpu_device *adev = drm_to_adev(ddev);
222 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
230 static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
234 if (!amdgpu_sriov_vf(adev))
235 ret = sysfs_create_file(&adev->dev->kobj,
241 static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev)
243 if (!amdgpu_sriov_vf(adev))
244 sysfs_remove_file(&adev->dev->kobj,
254 struct amdgpu_device *adev = drm_to_adev(ddev);
260 adev, AMDGPU_REG_STATE_TYPE_XGMI, buf, count);
264 adev, AMDGPU_REG_STATE_TYPE_WAFL, buf, count);
268 adev, AMDGPU_REG_STATE_TYPE_PCIE, buf, count);
272 adev, AMDGPU_REG_STATE_TYPE_USR, buf, count);
276 adev, AMDGPU_REG_STATE_TYPE_USR_1, buf, count);
288 int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
292 if (!amdgpu_asic_get_reg_state_supported(adev))
295 ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
300 void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
302 if (!amdgpu_asic_get_reg_state_supported(adev))
304 sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
314 dev_err(ip_block->adev->dev,
332 dev_err(ip_block->adev->dev,
364 struct amdgpu_device *adev = drm_to_adev(ddev);
368 if (adev->smuio.funcs && adev->smuio.funcs->get_pkg_type)
369 pkg_type = adev->smuio.funcs->get_pkg_type(adev);
398 struct amdgpu_device *adev = drm_to_adev(ddev);
400 if (adev->flags & AMD_IS_APU)
411 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
424 struct amdgpu_device *adev = drm_to_adev(dev);
426 if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
441 struct amdgpu_device *adev = drm_to_adev(dev);
446 if (adev->has_pr3 ||
447 ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
464 struct amdgpu_device *adev = drm_to_adev(dev);
466 return amdgpu_asic_supports_baco(adev);
469 void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
474 dev = adev_to_drm(adev);
476 adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
482 adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
483 dev_info(adev->dev, "Forcing BAMACO for runtime pm\n");
485 adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
486 dev_info(adev->dev, "Requested mode BAMACO not available, falling back to BACO\n");
491 adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
492 dev_info(adev->dev, "Forcing BACO for runtime pm\n");
498 adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
499 dev_info(adev->dev, "Using ATPX for runtime pm\n");
501 adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
502 dev_info(adev->dev, "Using BOCO for runtime pm\n");
507 switch (adev->asic_type) {
514 if (!adev->gmc.noretry)
515 adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
519 adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
523 if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) {
525 adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
526 dev_info(adev->dev, "Using BAMACO for runtime pm\n");
528 dev_info(adev->dev, "Using BACO for runtime pm\n");
534 dev_info(adev->dev, "runtime pm is manually disabled\n");
541 if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE)
542 dev_info(adev->dev, "Runtime PM not available\n");
566 * @adev: amdgpu_device pointer
572 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
581 if (!drm_dev_enter(adev_to_drm(adev), &idx))
586 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
601 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
608 * @adev: amdgpu_device pointer
616 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
624 if (!adev->mman.aper_base_kaddr)
627 last = min(pos + size, adev->gmc.visible_vram_size);
629 addr = adev->mman.aper_base_kaddr + pos;
638 amdgpu_device_flush_hdp(adev, NULL);
640 amdgpu_device_invalidate_hdp(adev, NULL);
659 * @adev: amdgpu_device pointer
665 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
671 count = amdgpu_device_aper_access(adev, pos, buf, size, write);
677 amdgpu_device_mm_access(adev, pos, buf, size, write);
686 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
688 if (adev->no_hw_access)
704 if (down_read_trylock(&adev->reset_domain->sem))
705 up_read(&adev->reset_domain->sem);
707 lockdep_assert_held(&adev->reset_domain->sem);
716 * @adev: amdgpu_device pointer
722 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
727 if (amdgpu_device_skip_hw_access(adev))
730 if ((reg * 4) < adev->rmmio_size) {
732 amdgpu_sriov_runtime(adev) &&
733 down_read_trylock(&adev->reset_domain->sem)) {
734 ret = amdgpu_kiq_rreg(adev, reg, 0);
735 up_read(&adev->reset_domain->sem);
737 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
740 ret = adev->pcie_rreg(adev, reg * 4);
743 trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
756 * @adev: amdgpu_device pointer
761 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
763 if (amdgpu_device_skip_hw_access(adev))
766 if (offset < adev->rmmio_size)
767 return (readb(adev->rmmio + offset));
775 * @adev: amdgpu_device pointer
782 uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
788 if (amdgpu_device_skip_hw_access(adev))
791 if ((reg * 4) < adev->rmmio_size) {
792 if (amdgpu_sriov_vf(adev) &&
793 !amdgpu_sriov_runtime(adev) &&
794 adev->gfx.rlc.rlcg_reg_access_supported &&
795 amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
798 ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, GET_INST(GC, xcc_id));
800 amdgpu_sriov_runtime(adev) &&
801 down_read_trylock(&adev->reset_domain->sem)) {
802 ret = amdgpu_kiq_rreg(adev, reg, xcc_id);
803 up_read(&adev->reset_domain->sem);
805 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
808 ret = adev->pcie_rreg(adev, reg * 4);
823 * @adev: amdgpu_device pointer
829 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
831 if (amdgpu_device_skip_hw_access(adev))
834 if (offset < adev->rmmio_size)
835 writeb(value, adev->rmmio + offset);
843 * @adev: amdgpu_device pointer
850 void amdgpu_device_wreg(struct amdgpu_device *adev,
854 if (amdgpu_device_skip_hw_access(adev))
857 if ((reg * 4) < adev->rmmio_size) {
859 amdgpu_sriov_runtime(adev) &&
860 down_read_trylock(&adev->reset_domain->sem)) {
861 amdgpu_kiq_wreg(adev, reg, v, 0);
862 up_read(&adev->reset_domain->sem);
864 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
867 adev->pcie_wreg(adev, reg * 4, v);
870 trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
876 * @adev: amdgpu_device pointer
883 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
887 if (amdgpu_device_skip_hw_access(adev))
890 if (amdgpu_sriov_fullaccess(adev) &&
891 adev->gfx.rlc.funcs &&
892 adev->gfx.rlc.funcs->is_rlcg_access_range) {
893 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
894 return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
895 } else if ((reg * 4) >= adev->rmmio_size) {
896 adev->pcie_wreg(adev, reg * 4, v);
898 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
905 * @adev: amdgpu_device pointer
913 void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
919 if (amdgpu_device_skip_hw_access(adev))
922 if ((reg * 4) < adev->rmmio_size) {
923 if (amdgpu_sriov_vf(adev) &&
924 !amdgpu_sriov_runtime(adev) &&
925 adev->gfx.rlc.rlcg_reg_access_supported &&
926 amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
929 amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, GET_INST(GC, xcc_id));
931 amdgpu_sriov_runtime(adev) &&
932 down_read_trylock(&adev->reset_domain->sem)) {
933 amdgpu_kiq_wreg(adev, reg, v, xcc_id);
934 up_read(&adev->reset_domain->sem);
936 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
939 adev->pcie_wreg(adev, reg * 4, v);
946 * @adev: amdgpu_device pointer
951 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
959 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
960 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
962 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
963 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
964 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
969 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
974 u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
983 if (unlikely(!adev->nbio.funcs)) {
987 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
988 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
992 if (unlikely(!adev->nbio.funcs))
995 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
1000 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1001 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
1002 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
1004 pcie_index_hi_offset = (void __iomem *)adev->rmmio +
1021 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1029 * @adev: amdgpu_device pointer
1034 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
1042 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
1043 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
1045 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1046 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
1047 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
1057 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1062 u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
1072 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
1073 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
1074 if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
1075 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
1077 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1078 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
1079 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
1081 pcie_index_hi_offset = (void __iomem *)adev->rmmio +
1107 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1115 * @adev: amdgpu_device pointer
1120 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
1127 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
1128 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
1130 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1131 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
1132 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
1138 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1141 void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
1149 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
1150 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
1151 if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
1152 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
1156 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1157 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
1158 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
1160 pcie_index_hi_offset = (void __iomem *)adev->rmmio +
1178 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1184 * @adev: amdgpu_device pointer
1189 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
1196 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
1197 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
1199 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1200 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
1201 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
1213 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1216 void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
1225 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
1226 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
1227 if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
1228 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
1230 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1231 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
1232 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
1234 pcie_index_hi_offset = (void __iomem *)adev->rmmio +
1262 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1268 * @adev: amdgpu_device pointer
1272 u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
1274 return adev->nbio.funcs->get_rev_id(adev);
1280 * @adev: amdgpu_device pointer
1287 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
1294 static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
1304 * @adev: amdgpu_device pointer
1311 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
1318 static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
1328 * @adev: amdgpu_device pointer
1335 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
1342 static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
1352 * @adev: amdgpu_device pointer
1359 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
1366 static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
1376 * @adev: amdgpu_device pointer
1384 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
1396 * @adev: amdgpu_device pointer
1404 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
1413 static uint32_t amdgpu_device_get_vbios_flags(struct amdgpu_device *adev)
1415 if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
1418 if (hweight32(adev->aid_mask) && amdgpu_passthrough(adev))
1427 * @adev: amdgpu_device pointer
1431 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
1437 amdgpu_asic_pre_asic_init(adev);
1438 flags = amdgpu_device_get_vbios_flags(adev);
1441 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
1442 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
1443 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
1444 amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
1445 amdgpu_psp_wait_for_bootloader(adev);
1446 if (optional && !adev->bios)
1449 ret = amdgpu_atomfirmware_asic_init(adev, true);
1452 if (optional && !adev->bios)
1455 return amdgpu_atom_asic_init(adev->mode_info.atom_context);
1464 * @adev: amdgpu_device pointer
1469 static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
1471 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
1474 &adev->mem_scratch.robj,
1475 &adev->mem_scratch.gpu_addr,
1476 (void **)&adev->mem_scratch.ptr);
1482 * @adev: amdgpu_device pointer
1486 static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
1488 amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
1494 * @adev: amdgpu_device pointer
1501 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
1521 if (adev->family >= AMDGPU_FAMILY_AI)
1533 * @adev: amdgpu_device pointer
1538 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1540 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1546 * @adev: amdgpu_device pointer
1550 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1552 return pci_reset_function(adev->pdev);
1564 * @adev: amdgpu_device pointer
1569 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1571 if (adev->wb.wb_obj) {
1572 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1573 &adev->wb.gpu_addr,
1574 (void **)&adev->wb.wb);
1575 adev->wb.wb_obj = NULL;
1582 * @adev: amdgpu_device pointer
1588 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1592 if (adev->wb.wb_obj == NULL) {
1594 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1596 &adev->wb.wb_obj, &adev->wb.gpu_addr,
1597 (void **)&adev->wb.wb);
1599 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1603 adev->wb.num_wb = AMDGPU_MAX_WB;
1604 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1607 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1616 * @adev: amdgpu_device pointer
1622 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1626 spin_lock_irqsave(&adev->wb.lock, flags);
1627 offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1628 if (offset < adev->wb.num_wb) {
1629 __set_bit(offset, adev->wb.used);
1630 spin_unlock_irqrestore(&adev->wb.lock, flags);
1634 spin_unlock_irqrestore(&adev->wb.lock, flags);
1642 * @adev: amdgpu_device pointer
1647 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1652 spin_lock_irqsave(&adev->wb.lock, flags);
1653 if (wb < adev->wb.num_wb)
1654 __clear_bit(wb, adev->wb.used);
1655 spin_unlock_irqrestore(&adev->wb.lock, flags);
1661 * @adev: amdgpu_device pointer
1667 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1669 int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1680 if (amdgpu_sriov_vf(adev))
1685 adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
1686 adev->pdev->device == 0x731f &&
1687 adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
1691 if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
1695 if (adev->gmc.real_vram_size &&
1696 (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1700 root = adev->pdev->bus;
1715 rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1719 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1720 pci_write_config_word(adev->pdev, PCI_COMMAND,
1724 amdgpu_doorbell_fini(adev);
1725 if (adev->asic_type >= CHIP_BONAIRE)
1726 pci_release_resource(adev->pdev, 2);
1728 pci_release_resource(adev->pdev, 0);
1730 r = pci_resize_resource(adev->pdev, 0, rbar_size);
1736 pci_assign_unassigned_bus_resources(adev->pdev->bus);
1741 r = amdgpu_doorbell_init(adev);
1742 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1745 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1756 * @adev: amdgpu_device pointer
1762 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1766 if (amdgpu_sriov_vf(adev))
1769 flags = amdgpu_device_get_vbios_flags(adev);
1772 if ((flags & AMDGPU_VBIOS_OPTIONAL) && !adev->bios)
1775 if (amdgpu_passthrough(adev)) {
1781 if (adev->asic_type == CHIP_FIJI) {
1785 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1790 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1791 release_firmware(adev->pm.fw);
1798 if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
1801 if (adev->has_hw_reset) {
1802 adev->has_hw_reset = false;
1807 if (adev->asic_type >= CHIP_BONAIRE)
1808 return amdgpu_atombios_scratch_need_asic_init(adev);
1811 reg = amdgpu_asic_get_config_memsize(adev);
1826 bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
1841 if (!(adev->flags & AMD_IS_APU))
1844 if (adev->mman.keep_stolen_vga_memory)
1847 return amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0);
1858 static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device *adev)
1864 if (dev_is_removable(adev->dev))
1876 * @adev: amdgpu_device pointer
1883 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1895 if (adev->flags & AMD_IS_APU)
1897 if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK))
1899 return pcie_aspm_enabled(adev->pdev);
1915 struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1917 amdgpu_asic_set_vga_state(adev, state);
1928 * @adev: amdgpu_device pointer
1935 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1945 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1954 * @adev: amdgpu_device pointer
1959 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1966 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1972 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
2002 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
2009 adev->pm.smu_prv_buffer_size = 0;
2012 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
2014 if (!(adev->flags & AMD_IS_APU) ||
2015 adev->asic_type < CHIP_RAVEN)
2018 switch (adev->asic_type) {
2020 if (adev->pdev->device == 0x15dd)
2021 adev->apu_flags |= AMD_APU_IS_RAVEN;
2022 if (adev->pdev->device == 0x15d8)
2023 adev->apu_flags |= AMD_APU_IS_PICASSO;
2026 if ((adev->pdev->device == 0x1636) ||
2027 (adev->pdev->device == 0x164c))
2028 adev->apu_flags |= AMD_APU_IS_RENOIR;
2030 adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
2033 adev->apu_flags |= AMD_APU_IS_VANGOGH;
2038 if ((adev->pdev->device == 0x13FE) ||
2039 (adev->pdev->device == 0x143F))
2040 adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
2052 * @adev: amdgpu_device pointer
2057 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
2062 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
2066 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
2073 dev_warn(adev->dev, "gart size (%d) too small\n",
2080 dev_warn(adev->dev, "gtt size (%d) too small\n",
2088 dev_warn(adev->dev, "valid range is between 4 and 9\n");
2093 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
2097 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
2103 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
2107 amdgpu_device_check_smu_prv_buffer_size(adev);
2109 amdgpu_device_check_vm_size(adev);
2111 amdgpu_device_check_block_size(adev);
2113 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
2116 adev->enforce_isolation[i] = !!enforce_isolation;
2207 struct amdgpu_device *adev = dev;
2210 for (i = 0; i < adev->num_ip_blocks; i++) {
2211 if (!adev->ip_blocks[i].status.valid)
2213 if (adev->ip_blocks[i].version->type != block_type)
2215 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
2217 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
2218 &adev->ip_blocks[i], state);
2221 adev->ip_blocks[i].version->funcs->name, r);
2241 struct amdgpu_device *adev = dev;
2244 for (i = 0; i < adev->num_ip_blocks; i++) {
2245 if (!adev->ip_blocks[i].status.valid)
2247 if (adev->ip_blocks[i].version->type != block_type)
2249 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
2251 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
2252 &adev->ip_blocks[i], state);
2255 adev->ip_blocks[i].version->funcs->name, r);
2263 * @adev: amdgpu_device pointer
2271 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
2276 for (i = 0; i < adev->num_ip_blocks; i++) {
2277 if (!adev->ip_blocks[i].status.valid)
2279 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
2280 adev->ip_blocks[i].version->funcs->get_clockgating_state(
2281 &adev->ip_blocks[i], flags);
2288 * @adev: amdgpu_device pointer
2294 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
2299 for (i = 0; i < adev->num_ip_blocks; i++) {
2300 if (!adev->ip_blocks[i].status.valid)
2302 if (adev->ip_blocks[i].version->type == block_type) {
2303 if (adev->ip_blocks[i].version->funcs->wait_for_idle) {
2304 r = adev->ip_blocks[i].version->funcs->wait_for_idle(
2305 &adev->ip_blocks[i]);
2319 * @adev: amdgpu_device pointer
2325 bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
2330 for (i = 0; i < adev->num_ip_blocks; i++) {
2331 if (adev->ip_blocks[i].version->type == block_type)
2332 return adev->ip_blocks[i].status.valid;
2341 * @adev: amdgpu_device pointer
2348 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
2353 for (i = 0; i < adev->num_ip_blocks; i++)
2354 if (adev->ip_blocks[i].version->type == type)
2355 return &adev->ip_blocks[i];
2363 * @adev: amdgpu_device pointer
2371 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
2375 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
2388 * @adev: amdgpu_device pointer
2394 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
2402 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
2406 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
2413 dev_info(adev->dev, "detected ip block number %d <%s>\n",
2414 adev->num_ip_blocks, ip_block_version->funcs->name);
2416 adev->ip_blocks[adev->num_ip_blocks].adev = adev;
2418 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
2426 * @adev: amdgpu_device pointer
2435 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
2437 adev->enable_virtual_display = false;
2440 const char *pci_address_name = pci_name(adev->pdev);
2452 adev->enable_virtual_display = true;
2463 adev->mode_info.num_crtc = num_crtc;
2465 adev->mode_info.num_crtc = 1;
2473 adev->enable_virtual_display, adev->mode_info.num_crtc);
2479 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
2481 if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
2482 adev->mode_info.num_crtc = 1;
2483 adev->enable_virtual_display = true;
2485 adev->enable_virtual_display, adev->mode_info.num_crtc);
2492 * @adev: amdgpu_device pointer
2499 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
2505 adev->firmware.gpu_info_fw = NULL;
2507 if (adev->mman.discovery_bin)
2510 switch (adev->asic_type) {
2520 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2522 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
2535 err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw,
2539 dev_err(adev->dev,
2545 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
2552 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2558 if (adev->asic_type == CHIP_NAVI12)
2561 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2562 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2563 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2564 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2565 adev->gfx.config.max_texture_channel_caches =
2567 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2568 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2569 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2570 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2571 adev->gfx.config.double_offchip_lds_buf =
2573 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2574 adev->gfx.cu_info.max_waves_per_simd =
2576 adev->gfx.cu_info.max_scratch_slots_per_cu =
2578 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2581 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2583 adev->gfx.config.num_sc_per_sh =
2585 adev->gfx.config.num_packer_per_sc =
2596 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2598 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2603 dev_err(adev->dev,
2615 * @adev: amdgpu_device pointer
2622 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2630 amdgpu_device_enable_virtual_display(adev);
2632 if (amdgpu_sriov_vf(adev)) {
2633 r = amdgpu_virt_request_full_gpu(adev, true);
2638 switch (adev->asic_type) {
2645 adev->family = AMDGPU_FAMILY_SI;
2646 r = si_set_ip_blocks(adev);
2657 if (adev->flags & AMD_IS_APU)
2658 adev->family = AMDGPU_FAMILY_KV;
2660 adev->family = AMDGPU_FAMILY_CI;
2662 r = cik_set_ip_blocks(adev);
2676 if (adev->flags & AMD_IS_APU)
2677 adev->family = AMDGPU_FAMILY_CZ;
2679 adev->family = AMDGPU_FAMILY_VI;
2681 r = vi_set_ip_blocks(adev);
2686 r = amdgpu_discovery_set_ip_blocks(adev);
2695 ((adev->flags & AMD_IS_APU) == 0) &&
2696 !dev_is_removable(&adev->pdev->dev))
2697 adev->flags |= AMD_IS_PX;
2699 if (!(adev->flags & AMD_IS_APU)) {
2700 parent = pcie_find_root_port(adev->pdev);
2701 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2705 adev->pm.pp_feature = amdgpu_pp_feature_mask;
2706 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2707 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2708 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2709 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2710 if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
2711 adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
2714 for (i = 0; i < adev->num_ip_blocks; i++) {
2715 ip_block = &adev->ip_blocks[i];
2719 i, adev->ip_blocks[i].version->funcs->name);
2720 adev->ip_blocks[i].status.valid = false;
2724 adev->ip_blocks[i].status.valid = false;
2727 adev->ip_blocks[i].version->funcs->name, r);
2730 adev->ip_blocks[i].status.valid = true;
2733 adev->ip_blocks[i].status.valid = true;
2736 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2737 r = amdgpu_device_parse_gpu_info_fw(adev);
2741 bios_flags = amdgpu_device_get_vbios_flags(adev);
2747 if (!amdgpu_get_bios(adev) && !optional)
2750 if (optional && !adev->bios)
2752 adev->dev,
2755 if (adev->bios) {
2756 r = amdgpu_atombios_init(adev);
2758 dev_err(adev->dev,
2761 adev,
2770 if (amdgpu_sriov_vf(adev))
2771 amdgpu_virt_init_data_exchange(adev);
2778 if (adev->gmc.xgmi.supported)
2779 amdgpu_xgmi_early_init(adev);
2781 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
2783 amdgpu_amdkfd_device_probe(adev);
2785 adev->cg_flags &= amdgpu_cg_mask;
2786 adev->pg_flags &= amdgpu_pg_mask;
2791 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2795 for (i = 0; i < adev->num_ip_blocks; i++) {
2796 if (!adev->ip_blocks[i].status.sw)
2798 if (adev->ip_blocks[i].status.hw)
2801 adev, adev->ip_blocks[i].version->type))
2803 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2804 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2805 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2806 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2809 adev->ip_blocks[i].version->funcs->name, r);
2812 adev->ip_blocks[i].status.hw = true;
2819 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2823 for (i = 0; i < adev->num_ip_blocks; i++) {
2824 if (!adev->ip_blocks[i].status.sw)
2826 if (adev->ip_blocks[i].status.hw)
2829 adev, adev->ip_blocks[i].version->type))
2831 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2834 adev->ip_blocks[i].version->funcs->name, r);
2837 adev->ip_blocks[i].status.hw = true;
2843 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2849 if (adev->asic_type >= CHIP_VEGA10) {
2850 for (i = 0; i < adev->num_ip_blocks; i++) {
2851 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2854 if (!amdgpu_ip_member_of_hwini(adev,
2858 if (!adev->ip_blocks[i].status.sw)
2862 if (adev->ip_blocks[i].status.hw == true)
2865 if (amdgpu_in_reset(adev) || adev->in_suspend) {
2866 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
2870 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2873 adev->ip_blocks[i].version->funcs->name, r);
2876 adev->ip_blocks[i].status.hw = true;
2882 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2883 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2888 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2893 .timeout_wq = adev->reset_domain->wq,
2894 .dev = adev->dev,
2900 struct amdgpu_ring *ring = adev->rings[i];
2908 timeout = adev->gfx_timeout;
2911 timeout = adev->compute_timeout;
2914 timeout = adev->sdma_timeout;
2917 timeout = adev->video_timeout;
2932 r = amdgpu_uvd_entity_init(adev, ring);
2938 r = amdgpu_vce_entity_init(adev, ring);
2946 amdgpu_xcp_update_partition_sched_list(adev);
2955 * @adev: amdgpu_device pointer
2963 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2968 r = amdgpu_ras_init(adev);
2972 for (i = 0; i < adev->num_ip_blocks; i++) {
2973 if (!adev->ip_blocks[i].status.valid)
2975 if (adev->ip_blocks[i].version->funcs->sw_init) {
2976 r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]);
2979 adev->ip_blocks[i].version->funcs->name, r);
2983 adev->ip_blocks[i].status.sw = true;
2986 adev, adev->ip_blocks[i].version->type))
2989 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2991 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2996 adev->ip_blocks[i].status.hw = true;
2997 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
3000 if (amdgpu_sriov_vf(adev))
3001 amdgpu_virt_exchange_data(adev);
3003 r = amdgpu_device_mem_scratch_init(adev);
3008 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
3013 r = amdgpu_device_wb_init(adev);
3018 adev->ip_blocks[i].status.hw = true;
3021 if (adev->gfx.mcbp) {
3022 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
3032 r = amdgpu_seq64_init(adev);
3040 if (amdgpu_sriov_vf(adev))
3041 amdgpu_virt_init_data_exchange(adev);
3043 r = amdgpu_ib_pool_init(adev);
3045 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
3046 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
3050 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
3054 r = amdgpu_device_ip_hw_init_phase1(adev);
3058 r = amdgpu_device_fw_loading(adev);
3062 r = amdgpu_device_ip_hw_init_phase2(adev);
3081 init_badpage = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
3082 r = amdgpu_ras_recovery_init(adev, init_badpage);
3089 if (adev->gmc.xgmi.num_physical_nodes > 1) {
3090 if (amdgpu_xgmi_add_device(adev) == 0) {
3091 if (!amdgpu_sriov_vf(adev)) {
3092 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3107 amdgpu_reset_put_reset_domain(adev->reset_domain);
3108 adev->reset_domain = hive->reset_domain;
3114 r = amdgpu_device_init_schedulers(adev);
3118 if (adev->mman.buffer_funcs_ring->sched.ready)
3119 amdgpu_ttm_set_buffer_funcs_status(adev, true);
3122 if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
3123 kgd2kfd_init_zone_device(adev);
3124 amdgpu_amdkfd_device_init(adev);
3127 amdgpu_fru_get_product_info(adev);
3129 if (!amdgpu_sriov_vf(adev) || amdgpu_sriov_ras_cper_en(adev))
3130 r = amdgpu_cper_init(adev);
3140 * @adev: amdgpu_device pointer
3146 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
3148 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
3154 * @adev: amdgpu_device pointer
3161 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
3163 if (memcmp(adev->gart.ptr, adev->reset_magic,
3167 if (!amdgpu_in_reset(adev))
3174 switch (amdgpu_asic_reset_method(adev)) {
3186 * @adev: amdgpu_device pointer
3196 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
3204 for (j = 0; j < adev->num_ip_blocks; j++) {
3205 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
3206 if (!adev->ip_blocks[i].status.late_initialized)
3209 if (adev->in_s0ix &&
3210 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3211 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
3214 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
3215 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
3216 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
3217 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
3218 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
3220 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i],
3224 adev->ip_blocks[i].version->funcs->name, r);
3233 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
3241 for (j = 0; j < adev->num_ip_blocks; j++) {
3242 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
3243 if (!adev->ip_blocks[i].status.late_initialized)
3246 if (adev->in_s0ix &&
3247 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3248 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
3251 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
3252 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
3253 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
3254 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
3255 adev->ip_blocks[i].version->funcs->set_powergating_state) {
3257 r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i],
3261 adev->ip_blocks[i].version->funcs->name, r);
3272 struct amdgpu_device *adev;
3287 adev = gpu_ins->adev;
3288 if (!(adev->flags & AMD_IS_APU) &&
3290 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
3307 * @adev: amdgpu_device pointer
3316 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
3321 for (i = 0; i < adev->num_ip_blocks; i++) {
3322 if (!adev->ip_blocks[i].status.hw)
3324 if (adev->ip_blocks[i].version->funcs->late_init) {
3325 r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]);
3328 adev->ip_blocks[i].version->funcs->name, r);
3332 adev->ip_blocks[i].status.late_initialized = true;
3335 r = amdgpu_ras_late_init(adev);
3341 if (!amdgpu_reset_in_recovery(adev))
3342 amdgpu_ras_set_error_query_ready(adev, true);
3344 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
3345 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
3347 amdgpu_device_fill_reset_magic(adev);
3354 if (amdgpu_passthrough(adev) &&
3355 ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
3356 adev->asic_type == CHIP_ALDEBARAN))
3357 amdgpu_dpm_handle_passthrough_sbr(adev, true);
3359 if (adev->gmc.xgmi.num_physical_nodes > 1) {
3375 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
3378 if (gpu_instance->adev->flags & AMD_IS_APU)
3381 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
3418 * @adev: amdgpu_device pointer
3422 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
3426 if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
3429 for (i = 0; i < adev->num_ip_blocks; i++) {
3430 if (!adev->ip_blocks[i].status.hw)
3432 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3433 amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
3439 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
3443 for (i = 0; i < adev->num_ip_blocks; i++) {
3444 if (!adev->ip_blocks[i].version->funcs->early_fini)
3447 r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]);
3450 adev->ip_blocks[i].version->funcs->name, r);
3454 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3455 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3457 amdgpu_amdkfd_suspend(adev, false);
3460 amdgpu_device_smu_fini_early(adev);
3462 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3463 if (!adev->ip_blocks[i].status.hw)
3466 amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
3469 if (amdgpu_sriov_vf(adev)) {
3470 if (amdgpu_virt_release_full_gpu(adev, false))
3480 * @adev: amdgpu_device pointer
3488 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
3492 amdgpu_cper_fini(adev);
3494 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
3495 amdgpu_virt_release_ras_err_handler_data(adev);
3497 if (adev->gmc.xgmi.num_physical_nodes > 1)
3498 amdgpu_xgmi_remove_device(adev);
3500 amdgpu_amdkfd_device_fini_sw(adev);
3502 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3503 if (!adev->ip_blocks[i].status.sw)
3506 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
3507 amdgpu_ucode_free_bo(adev);
3508 amdgpu_free_static_csa(&adev->virt.csa_obj);
3509 amdgpu_device_wb_fini(adev);
3510 amdgpu_device_mem_scratch_fini(adev);
3511 amdgpu_ib_pool_fini(adev);
3512 amdgpu_seq64_fini(adev);
3514 if (adev->ip_blocks[i].version->funcs->sw_fini) {
3515 r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]);
3519 adev->ip_blocks[i].version->funcs->name, r);
3522 adev->ip_blocks[i].status.sw = false;
3523 adev->ip_blocks[i].status.valid = false;
3526 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3527 if (!adev->ip_blocks[i].status.late_initialized)
3529 if (adev->ip_blocks[i].version->funcs->late_fini)
3530 adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]);
3531 adev->ip_blocks[i].status.late_initialized = false;
3534 amdgpu_ras_fini(adev);
3546 struct amdgpu_device *adev =
3550 r = amdgpu_ib_ring_tests(adev);
3557 struct amdgpu_device *adev =
3560 WARN_ON_ONCE(adev->gfx.gfx_off_state);
3561 WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
3563 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true, 0))
3564 adev->gfx.gfx_off_state = true;
3570 * @adev: amdgpu_device pointer
3578 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
3582 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3583 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3590 if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
3591 dev_warn(adev->dev, "Failed to disallow df cstate");
3593 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3594 if (!adev->ip_blocks[i].status.valid)
3598 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
3602 r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
3613 * @adev: amdgpu_device pointer
3621 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
3625 if (adev->in_s0ix)
3626 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
3628 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3629 if (!adev->ip_blocks[i].status.valid)
3632 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3636 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3637 adev->ip_blocks[i].status.hw = false;
3643 adev, adev->ip_blocks[i].version->type))
3651 if (adev->in_s0ix &&
3652 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3653 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3654 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3658 if (adev->in_s0ix &&
3659 (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
3661 (adev->ip_blocks[i].version->type ==
3672 if (amdgpu_in_reset(adev) &&
3673 (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
3674 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3678 r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
3679 adev->ip_blocks[i].status.hw = false;
3682 if (!amdgpu_sriov_vf(adev)) {
3683 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3684 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3687 adev->mp1_state, r);
3700 * @adev: amdgpu_device pointer
3708 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3712 if (amdgpu_sriov_vf(adev)) {
3713 amdgpu_virt_fini_data_exchange(adev);
3714 amdgpu_virt_request_full_gpu(adev, false);
3717 amdgpu_ttm_set_buffer_funcs_status(adev, false);
3719 r = amdgpu_device_ip_suspend_phase1(adev);
3722 r = amdgpu_device_ip_suspend_phase2(adev);
3724 if (amdgpu_sriov_vf(adev))
3725 amdgpu_virt_release_full_gpu(adev, false);
3730 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3741 for (i = 0; i < adev->num_ip_blocks; i++) {
3745 block = &adev->ip_blocks[i];
3754 r = block->version->funcs->hw_init(&adev->ip_blocks[i]);
3756 dev_err(adev->dev, "RE-INIT-early: %s failed\n",
3767 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3785 block = amdgpu_device_ip_get_ip_block(adev, ip_order[i]);
3798 dev_err(adev->dev, "RE-INIT-late: %s failed\n",
3812 * @adev: amdgpu_device pointer
3821 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3825 for (i = 0; i < adev->num_ip_blocks; i++) {
3826 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3828 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3829 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3830 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3831 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3833 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
3845 * @adev: amdgpu_device pointer
3855 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3859 for (i = 0; i < adev->num_ip_blocks; i++) {
3860 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3862 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3863 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3864 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3865 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE ||
3866 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3868 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
3879 * @adev: amdgpu_device pointer
3889 static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev)
3893 for (i = 0; i < adev->num_ip_blocks; i++) {
3894 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3896 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
3897 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
3909 * @adev: amdgpu_device pointer
3918 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3922 r = amdgpu_device_ip_resume_phase1(adev);
3926 r = amdgpu_device_fw_loading(adev);
3930 r = amdgpu_device_ip_resume_phase2(adev);
3932 if (adev->mman.buffer_funcs_ring->sched.ready)
3933 amdgpu_ttm_set_buffer_funcs_status(adev, true);
3938 amdgpu_fence_driver_hw_init(adev);
3940 r = amdgpu_device_ip_resume_phase3(adev);
3948 * @adev: amdgpu_device pointer
3952 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3954 if (amdgpu_sriov_vf(adev)) {
3955 if (adev->is_atom_fw) {
3956 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3957 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3959 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3960 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3963 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3964 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
4028 * @adev: amdgpu_device pointer
4032 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
4034 if (adev->enable_virtual_display ||
4035 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
4038 return amdgpu_device_asic_has_dc_support(adev->asic_type);
4043 struct amdgpu_device *adev =
4045 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
4057 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
4060 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
4062 if (adev->asic_reset_res)
4066 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
4068 if (adev->asic_reset_res)
4071 amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
4075 adev->asic_reset_res = amdgpu_asic_reset(adev);
4079 if (adev->asic_reset_res)
4081 adev->asic_reset_res, adev_to_drm(adev)->unique);
4085 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
4099 adev->gfx_timeout = msecs_to_jiffies(10000);
4100 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
4101 if (amdgpu_sriov_vf(adev))
4102 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
4105 adev->compute_timeout = msecs_to_jiffies(60000);
4119 dev_warn(adev->dev, "lockup timeout disabled");
4127 adev->gfx_timeout = timeout;
4130 adev->compute_timeout = timeout;
4133 adev->sdma_timeout = timeout;
4136 adev->video_timeout = timeout;
4147 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
4148 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
4149 adev->compute_timeout = adev->gfx_timeout;
4159 * @adev: amdgpu_device pointer
4163 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
4167 domain = iommu_get_domain_for_dev(adev->dev);
4169 adev->ram_is_direct_mapped = true;
4176 * @adev: amdgpu_device pointer
4180 static bool amdgpu_device_check_iommu_remap(struct amdgpu_device *adev)
4184 domain = iommu_get_domain_for_dev(adev->dev);
4193 static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
4196 adev->gfx.mcbp = true;
4198 adev->gfx.mcbp = false;
4200 if (amdgpu_sriov_vf(adev))
4201 adev->gfx.mcbp = true;
4203 if (adev->gfx.mcbp)
4210 * @adev: amdgpu_device pointer
4217 int amdgpu_device_init(struct amdgpu_device *adev,
4220 struct drm_device *ddev = adev_to_drm(adev);
4221 struct pci_dev *pdev = adev->pdev;
4227 adev->shutdown = false;
4228 adev->flags = flags;
4231 adev->asic_type = amdgpu_force_asic_type;
4233 adev->asic_type = flags & AMD_ASIC_MASK;
4235 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
4237 adev->usec_timeout *= 10;
4238 adev->gmc.gart_size = 512 * 1024 * 1024;
4239 adev->accel_working = false;
4240 adev->num_rings = 0;
4241 RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
4242 adev->mman.buffer_funcs = NULL;
4243 adev->mman.buffer_funcs_ring = NULL;
4244 adev->vm_manager.vm_pte_funcs = NULL;
4245 adev->vm_manager.vm_pte_num_scheds = 0;
4246 adev->gmc.gmc_funcs = NULL;
4247 adev->harvest_ip_mask = 0x0;
4248 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
4249 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
4251 adev->smc_rreg = &amdgpu_invalid_rreg;
4252 adev->smc_wreg = &amdgpu_invalid_wreg;
4253 adev->pcie_rreg = &amdgpu_invalid_rreg;
4254 adev->pcie_wreg = &amdgpu_invalid_wreg;
4255 adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext;
4256 adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext;
4257 adev->pciep_rreg = &amdgpu_invalid_rreg;
4258 adev->pciep_wreg = &amdgpu_invalid_wreg;
4259 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
4260 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
4261 adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext;
4262 adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext;
4263 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
4264 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
4265 adev->didt_rreg = &amdgpu_invalid_rreg;
4266 adev->didt_wreg = &amdgpu_invalid_wreg;
4267 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
4268 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
4269 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
4270 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
4273 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
4279 mutex_init(&adev->firmware.mutex);
4280 mutex_init(&adev->pm.mutex);
4281 mutex_init(&adev->gfx.gpu_clock_mutex);
4282 mutex_init(&adev->srbm_mutex);
4283 mutex_init(&adev->gfx.pipe_reserve_mutex);
4284 mutex_init(&adev->gfx.gfx_off_mutex);
4285 mutex_init(&adev->gfx.partition_mutex);
4286 mutex_init(&adev->grbm_idx_mutex);
4287 mutex_init(&adev->mn_lock);
4288 mutex_init(&adev->virt.vf_errors.lock);
4289 hash_init(adev->mn_hash);
4290 mutex_init(&adev->psp.mutex);
4291 mutex_init(&adev->notifier_lock);
4292 mutex_init(&adev->pm.stable_pstate_ctx_lock);
4293 mutex_init(&adev->benchmark_mutex);
4294 mutex_init(&adev->gfx.reset_sem_mutex);
4296 mutex_init(&adev->enforce_isolation_mutex);
4298 adev->isolation[i].spearhead = dma_fence_get_stub();
4299 amdgpu_sync_create(&adev->isolation[i].active);
4300 amdgpu_sync_create(&adev->isolation[i].prev);
4302 mutex_init(&adev->gfx.kfd_sch_mutex);
4303 mutex_init(&adev->gfx.workload_profile_mutex);
4304 mutex_init(&adev->vcn.workload_profile_mutex);
4306 amdgpu_device_init_apu_flags(adev);
4308 r = amdgpu_device_check_arguments(adev);
4312 spin_lock_init(&adev->mmio_idx_lock);
4313 spin_lock_init(&adev->smc_idx_lock);
4314 spin_lock_init(&adev->pcie_idx_lock);
4315 spin_lock_init(&adev->uvd_ctx_idx_lock);
4316 spin_lock_init(&adev->didt_idx_lock);
4317 spin_lock_init(&adev->gc_cac_idx_lock);
4318 spin_lock_init(&adev->se_cac_idx_lock);
4319 spin_lock_init(&adev->audio_endpt_idx_lock);
4320 spin_lock_init(&adev->mm_stats.lock);
4321 spin_lock_init(&adev->virt.rlcg_reg_lock);
4322 spin_lock_init(&adev->wb.lock);
4324 INIT_LIST_HEAD(&adev->reset_list);
4326 INIT_LIST_HEAD(&adev->ras_list);
4328 INIT_LIST_HEAD(&adev->pm.od_kobj_list);
4330 INIT_DELAYED_WORK(&adev->delayed_init_work,
4332 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
4344 INIT_DELAYED_WORK(&adev->gfx.enforce_isolation[i].work,
4346 adev->gfx.enforce_isolation[i].adev = adev;
4347 adev->gfx.enforce_isolation[i].xcp_id = i;
4350 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
4352 adev->gfx.gfx_off_req_count = 1;
4353 adev->gfx.gfx_off_residency = 0;
4354 adev->gfx.gfx_off_entrycount = 0;
4355 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
4357 atomic_set(&adev->throttling_logging_enabled, 1);
4365 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
4367 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
4371 if (adev->asic_type >= CHIP_BONAIRE) {
4372 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
4373 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
4375 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
4376 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
4380 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
4382 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
4383 if (!adev->rmmio)
4386 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
4387 DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
4394 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
4395 if (!adev->reset_domain)
4399 amdgpu_virt_init(adev);
4401 amdgpu_device_get_pcie_info(adev);
4403 r = amdgpu_device_get_job_timeout_settings(adev);
4405 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
4409 amdgpu_device_set_mcbp(adev);
4416 amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_DEFAULT);
4418 r = amdgpu_device_ip_early_init(adev);
4429 r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name);
4435 amdgpu_gmc_tmz_set(adev);
4437 if (amdgpu_sriov_vf(adev) &&
4438 amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
4442 adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;
4444 amdgpu_gmc_noretry_set(adev);
4446 if (adev->gmc.xgmi.supported) {
4447 r = adev->gfxhub.funcs->get_xgmi_info(adev);
4453 if (amdgpu_sriov_vf(adev)) {
4454 if (adev->virt.fw_reserve.p_pf2vf)
4455 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
4456 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
4461 } else if ((adev->flags & AMD_IS_APU) &&
4462 (amdgpu_ip_version(adev, GC_HWIP, 0) >
4464 adev->have_atomics_support = true;
4466 adev->have_atomics_support =
4467 !pci_enable_atomic_ops_to_root(adev->pdev,
4472 if (!adev->have_atomics_support)
4473 dev_info(adev->dev, "PCIE atomic ops are not supported\n");
4476 amdgpu_doorbell_init(adev);
4480 emu_soc_asic_init(adev);
4484 amdgpu_reset_init(adev);
4487 if (adev->bios)
4488 amdgpu_device_detect_sriov_bios(adev);
4493 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
4494 if (adev->gmc.xgmi.num_physical_nodes) {
4495 dev_info(adev->dev, "Pending hive reset.\n");
4496 amdgpu_set_init_level(adev,
4498 } else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
4499 !amdgpu_device_has_display_hardware(adev)) {
4500 r = psp_gpu_reset(adev);
4507 r = amdgpu_asic_reset(adev);
4512 dev_err(adev->dev, "asic reset on init failed\n");
4518 if (amdgpu_device_need_post(adev)) {
4519 if (!adev->bios) {
4520 dev_err(adev->dev, "no vBIOS found\n");
4525 r = amdgpu_device_asic_init(adev);
4527 dev_err(adev->dev, "gpu post error!\n");
4532 if (adev->bios) {
4533 if (adev->is_atom_fw) {
4535 r = amdgpu_atomfirmware_get_clock_info(adev);
4537 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
4538 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4543 r = amdgpu_atombios_get_clock_info(adev);
4545 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
4546 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4550 amdgpu_i2c_init(adev);
4556 r = amdgpu_fence_driver_sw_init(adev);
4558 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
4559 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
4564 drm_mode_config_init(adev_to_drm(adev));
4566 r = amdgpu_device_ip_init(adev);
4568 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
4569 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
4573 amdgpu_fence_driver_hw_init(adev);
4575 dev_info(adev->dev,
4577 adev->gfx.config.max_shader_engines,
4578 adev->gfx.config.max_sh_per_se,
4579 adev->gfx.config.max_cu_per_sh,
4580 adev->gfx.cu_info.number);
4582 adev->accel_working = true;
4584 amdgpu_vm_check_compute_bug(adev);
4592 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
4599 amdgpu_register_gpu_instance(adev);
4604 if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
4605 r = amdgpu_device_ip_late_init(adev);
4607 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
4608 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
4612 amdgpu_ras_resume(adev);
4613 queue_delayed_work(system_wq, &adev->delayed_init_work,
4617 if (amdgpu_sriov_vf(adev)) {
4618 amdgpu_virt_release_full_gpu(adev, true);
4619 flush_delayed_work(&adev->delayed_init_work);
4627 r = amdgpu_atombios_sysfs_init(adev);
4629 drm_err(&adev->ddev,
4632 r = amdgpu_pm_sysfs_init(adev);
4636 r = amdgpu_ucode_sysfs_init(adev);
4638 adev->ucode_sysfs_en = false;
4641 adev->ucode_sysfs_en = true;
4643 r = amdgpu_device_attr_sysfs_init(adev);
4645 dev_err(adev->dev, "Could not create amdgpu device attr\n");
4647 r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
4649 dev_err(adev->dev,
4652 amdgpu_fru_sysfs_init(adev);
4653 amdgpu_reg_state_sysfs_init(adev);
4654 amdgpu_xcp_cfg_sysfs_init(adev);
4657 r = amdgpu_pmu_init(adev);
4659 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
4662 if (amdgpu_device_cache_pci_state(adev->pdev))
4669 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4670 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
4674 if (px || (!dev_is_removable(&adev->pdev->dev) &&
4676 vga_switcheroo_register_client(adev->pdev,
4680 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
4682 if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
4683 amdgpu_xgmi_reset_on_init(adev);
4685 amdgpu_device_check_iommu_direct_map(adev);
4687 adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
4688 r = register_pm_notifier(&adev->pm_nb);
4695 if (amdgpu_sriov_vf(adev))
4696 amdgpu_virt_release_full_gpu(adev, true);
4699 if (amdgpu_sriov_vf(adev) &&
4700 !amdgpu_sriov_runtime(adev) &&
4701 amdgpu_virt_mmio_blocked(adev) &&
4702 !amdgpu_virt_wait_reset(adev)) {
4703 dev_err(adev->dev, "VF exclusive mode timeout\n");
4705 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
4706 adev->virt.ops = NULL;
4709 amdgpu_release_ras_context(adev);
4712 amdgpu_vf_error_trans_all(adev);
4717 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
4721 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
4724 amdgpu_doorbell_fini(adev);
4726 iounmap(adev->rmmio);
4727 adev->rmmio = NULL;
4728 if (adev->mman.aper_base_kaddr)
4729 iounmap(adev->mman.aper_base_kaddr);
4730 adev->mman.aper_base_kaddr = NULL;
4733 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
4734 arch_phys_wc_del(adev->gmc.vram_mtrr);
4735 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
4742 * @adev: amdgpu_device pointer
4747 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
4749 dev_info(adev->dev, "amdgpu: finishing device.\n");
4750 flush_delayed_work(&adev->delayed_init_work);
4752 if (adev->mman.initialized)
4753 drain_workqueue(adev->mman.bdev.wq);
4754 adev->shutdown = true;
4756 unregister_pm_notifier(&adev->pm_nb);
4761 if (amdgpu_sriov_vf(adev)) {
4762 amdgpu_virt_request_full_gpu(adev, false);
4763 amdgpu_virt_fini_data_exchange(adev);
4767 amdgpu_irq_disable_all(adev);
4768 if (adev->mode_info.mode_config_initialized) {
4769 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4770 drm_helper_force_disable_all(adev_to_drm(adev));
4772 drm_atomic_helper_shutdown(adev_to_drm(adev));
4774 amdgpu_fence_driver_hw_fini(adev);
4776 if (adev->pm.sysfs_initialized)
4777 amdgpu_pm_sysfs_fini(adev);
4778 if (adev->ucode_sysfs_en)
4779 amdgpu_ucode_sysfs_fini(adev);
4780 amdgpu_device_attr_sysfs_fini(adev);
4781 amdgpu_fru_sysfs_fini(adev);
4783 amdgpu_reg_state_sysfs_fini(adev);
4784 amdgpu_xcp_cfg_sysfs_fini(adev);
4787 amdgpu_ras_pre_fini(adev);
4789 amdgpu_ttm_set_buffer_funcs_status(adev, false);
4791 amdgpu_device_ip_fini_early(adev);
4793 amdgpu_irq_fini_hw(adev);
4795 if (adev->mman.initialized)
4796 ttm_device_clear_dma_mappings(&adev->mman.bdev);
4798 amdgpu_gart_dummy_page_fini(adev);
4800 if (drm_dev_is_unplugged(adev_to_drm(adev)))
4801 amdgpu_device_unmap_mmio(adev);
4805 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4810 amdgpu_device_ip_fini(adev);
4811 amdgpu_fence_driver_sw_fini(adev);
4812 amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
4813 adev->accel_working = false;
4814 dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4816 dma_fence_put(adev->isolation[i].spearhead);
4817 amdgpu_sync_free(&adev->isolation[i].active);
4818 amdgpu_sync_free(&adev->isolation[i].prev);
4821 amdgpu_reset_fini(adev);
4824 amdgpu_i2c_fini(adev);
4826 if (adev->bios) {
4828 amdgpu_atombios_fini(adev);
4829 amdgpu_bios_release(adev);
4832 kfree(adev->fru_info);
4833 adev->fru_info = NULL;
4835 kfree(adev->xcp_mgr);
4836 adev->xcp_mgr = NULL;
4838 px = amdgpu_device_supports_px(adev_to_drm(adev));
4840 if (px || (!dev_is_removable(&adev->pdev->dev) &&
4842 vga_switcheroo_unregister_client(adev->pdev);
4845 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4847 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4848 vga_client_unregister(adev->pdev);
4850 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4852 iounmap(adev->rmmio);
4853 adev->rmmio = NULL;
4854 amdgpu_doorbell_fini(adev);
4859 amdgpu_pmu_fini(adev);
4860 if (adev->mman.discovery_bin)
4861 amdgpu_discovery_fini(adev);
4863 amdgpu_reset_put_reset_domain(adev->reset_domain);
4864 adev->reset_domain = NULL;
4866 kfree(adev->pci_state);
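
Teardown above is split in two: amdgpu_device_fini_hw (4747 onward) quiesces interrupts, fences and MMIO while the hardware may still be reachable, and amdgpu_device_fini_sw (4805 onward) then releases IP-block state, firmware and bookkeeping. A compressed, purely illustrative user-space sketch of that two-phase shutdown; the fake_device type and both helpers are made up.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct fake_device {
        bool irqs_enabled;
        void *fw_blob;          /* stands in for firmware/bookkeeping state */
};

/* Phase 1: stop anything that can still touch hardware or take interrupts. */
static void device_fini_hw(struct fake_device *dev)
{
        dev->irqs_enabled = false;
        printf("hw fini: interrupts off, queues drained\n");
}

/* Phase 2: release memory and software state, safe even after hot-unplug. */
static void device_fini_sw(struct fake_device *dev)
{
        free(dev->fw_blob);
        dev->fw_blob = NULL;
        printf("sw fini: software state released\n");
}

int main(void)
{
        struct fake_device dev = { .irqs_enabled = true,
                                   .fw_blob = malloc(16) };

        device_fini_hw(&dev);   /* must come first: hardware is still live */
        device_fini_sw(&dev);
        return 0;
}
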
4872 * @adev: amdgpu device object
4879 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4884 if (!adev->in_s4 && (adev->flags & AMD_IS_APU))
4887 ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4909 struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb);
4914 adev->in_s4 = true;
4917 r = amdgpu_device_evict_resources(adev);
4924 drm_warn(adev_to_drm(adev), "Failed to evict resources, freeze active processes if problems occur: %d\n", r);
4942 struct amdgpu_device *adev = drm_to_adev(dev);
4945 amdgpu_choose_low_power_state(adev);
4951 r = amdgpu_device_evict_resources(adev);
4955 flush_delayed_work(&adev->gfx.gfx_off_delay_work);
4957 for (i = 0; i < adev->num_ip_blocks; i++) {
4958 if (!adev->ip_blocks[i].status.valid)
4960 if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
4962 r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]);
4970 adev->in_s0ix = adev->in_s3 = adev->in_s4 = false;
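
The prepare path at 4957-4962 walks every IP block and calls prepare_suspend only where the block is valid and actually implements the hook. The same optional-callback pattern, reduced to standalone C with invented block names:

#include <stdio.h>

struct ip_block {
        const char *name;
        int valid;
        int (*prepare_suspend)(struct ip_block *blk);   /* optional hook */
};

static int gfx_prepare(struct ip_block *blk)
{
        printf("%s: prepare for suspend\n", blk->name);
        return 0;
}

static int prepare_all(struct ip_block *blocks, int count)
{
        for (int i = 0; i < count; i++) {
                if (!blocks[i].valid)
                        continue;
                if (!blocks[i].prepare_suspend)
                        continue;       /* hook is optional per block */
                int r = blocks[i].prepare_suspend(&blocks[i]);
                if (r)
                        return r;
        }
        return 0;
}

int main(void)
{
        struct ip_block blocks[] = {
                { "gfx",  1, gfx_prepare },
                { "vcn",  1, NULL },            /* no hook: silently skipped */
                { "jpeg", 0, gfx_prepare },     /* invalid: skipped */
        };

        return prepare_all(blocks, 3);
}
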
4987 struct amdgpu_device *adev = drm_to_adev(dev);
4993 adev->in_suspend = true;
4995 if (amdgpu_sriov_vf(adev)) {
4996 amdgpu_virt_fini_data_exchange(adev);
4997 r = amdgpu_virt_request_full_gpu(adev, false);
5006 drm_client_dev_suspend(adev_to_drm(adev), false);
5008 cancel_delayed_work_sync(&adev->delayed_init_work);
5010 amdgpu_ras_suspend(adev);
5012 amdgpu_device_ip_suspend_phase1(adev);
5014 if (!adev->in_s0ix)
5015 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
5017 r = amdgpu_device_evict_resources(adev);
5021 amdgpu_ttm_set_buffer_funcs_status(adev, false);
5023 amdgpu_fence_driver_hw_fini(adev);
5025 amdgpu_device_ip_suspend_phase2(adev);
5027 if (amdgpu_sriov_vf(adev))
5028 amdgpu_virt_release_full_gpu(adev, false);
5030 r = amdgpu_dpm_notify_rlc_state(adev, false);
5049 struct amdgpu_device *adev = drm_to_adev(dev);
5052 if (amdgpu_sriov_vf(adev)) {
5053 r = amdgpu_virt_request_full_gpu(adev, true);
5061 if (adev->in_s0ix)
5062 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
5065 if (amdgpu_device_need_post(adev)) {
5066 r = amdgpu_device_asic_init(adev);
5068 dev_err(adev->dev, "amdgpu asic init failed\n");
5071 r = amdgpu_device_ip_resume(adev);
5074 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
5078 if (!adev->in_s0ix) {
5079 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
5084 r = amdgpu_device_ip_late_init(adev);
5088 queue_delayed_work(system_wq, &adev->delayed_init_work,
5091 if (amdgpu_sriov_vf(adev)) {
5092 amdgpu_virt_init_data_exchange(adev);
5093 amdgpu_virt_release_full_gpu(adev, true);
5100 flush_delayed_work(&adev->delayed_init_work);
5103 drm_client_dev_resume(adev_to_drm(adev), false);
5105 amdgpu_ras_resume(adev);
5107 if (adev->mode_info.num_crtc) {
5120 if (!adev->dc_enabled)
5128 adev->in_suspend = false;
5130 if (adev->enable_mes)
5131 amdgpu_mes_self_test(adev);
5142 * @adev: amdgpu_device pointer
5149 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
5154 if (amdgpu_sriov_vf(adev))
5157 if (amdgpu_asic_need_full_reset(adev))
5160 for (i = 0; i < adev->num_ip_blocks; i++) {
5161 if (!adev->ip_blocks[i].status.valid)
5163 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
5164 adev->ip_blocks[i].status.hang =
5165 adev->ip_blocks[i].version->funcs->check_soft_reset(
5166 &adev->ip_blocks[i]);
5167 if (adev->ip_blocks[i].status.hang) {
5168 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
5178 * @adev: amdgpu_device pointer
5186 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
5190 for (i = 0; i < adev->num_ip_blocks; i++) {
5191 if (!adev->ip_blocks[i].status.valid)
5193 if (adev->ip_blocks[i].status.hang &&
5194 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
5195 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(&adev->ip_blocks[i]);
5207 * @adev: amdgpu_device pointer
5213 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
5217 if (amdgpu_asic_need_full_reset(adev))
5220 for (i = 0; i < adev->num_ip_blocks; i++) {
5221 if (!adev->ip_blocks[i].status.valid)
5223 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
5224 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
5225 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
5226 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
5227 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
5228 if (adev->ip_blocks[i].status.hang) {
5229 dev_info(adev->dev, "Some blocks need a full reset!\n");
5240 * @adev: amdgpu_device pointer
5248 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
5252 for (i = 0; i < adev->num_ip_blocks; i++) {
5253 if (!adev->ip_blocks[i].status.valid)
5255 if (adev->ip_blocks[i].status.hang &&
5256 adev->ip_blocks[i].version->funcs->soft_reset) {
5257 r = adev->ip_blocks[i].version->funcs->soft_reset(&adev->ip_blocks[i]);
5269 * @adev: amdgpu_device pointer
5277 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
5281 for (i = 0; i < adev->num_ip_blocks; i++) {
5282 if (!adev->ip_blocks[i].status.valid)
5284 if (adev->ip_blocks[i].status.hang &&
5285 adev->ip_blocks[i].version->funcs->post_soft_reset)
5286 r = adev->ip_blocks[i].version->funcs->post_soft_reset(&adev->ip_blocks[i]);
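
Soft reset above is a sequence of passes over the IP blocks: check_soft_reset marks hung blocks (5149-5168), need_full_reset decides whether a soft reset is worth trying at all (5213-5229), and the pre/soft/post_soft_reset hooks (5186-5286) then run only on the blocks flagged as hung. A condensed user-space model of that phased loop; the block table and hook bodies are placeholders, and the pre/post phases are folded into a comment:

#include <stdio.h>
#include <stdbool.h>

struct block {
        const char *name;
        bool hang;
        bool (*check)(struct block *b);
        int (*soft_reset)(struct block *b);
};

static bool gfx_check(struct block *b) { (void)b; return true; } /* pretend hung */
static int gfx_reset(struct block *b)
{
        printf("soft resetting %s\n", b->name);
        b->hang = false;
        return 0;
}

int main(void)
{
        struct block blocks[] = {
                { "gfx",  false, gfx_check, gfx_reset },
                { "sdma", false, NULL,      NULL },
        };
        int n = 2;
        bool any_hang = false;

        /* Pass 1: detect hangs per block. */
        for (int i = 0; i < n; i++) {
                if (blocks[i].check)
                        blocks[i].hang = blocks[i].check(&blocks[i]);
                if (blocks[i].hang) {
                        printf("block %s is hung\n", blocks[i].name);
                        any_hang = true;
                }
        }

        /* Pass 2: reset only what is hung (pre/post hooks omitted here). */
        if (any_hang)
                for (int i = 0; i < n; i++)
                        if (blocks[i].hang && blocks[i].soft_reset)
                                blocks[i].soft_reset(&blocks[i]);
        return 0;
}
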
5297 * @adev: amdgpu_device pointer
5303 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
5310 if (!amdgpu_ras_get_fed_status(adev))
5311 amdgpu_virt_ready_to_reset(adev);
5312 amdgpu_virt_wait_reset(adev);
5314 r = amdgpu_virt_request_full_gpu(adev, true);
5316 r = amdgpu_virt_reset_gpu(adev);
5321 amdgpu_ras_clear_err_state(adev);
5322 amdgpu_irq_gpu_reset_resume_helper(adev);
5325 amdgpu_virt_post_reset(adev);
5328 r = amdgpu_device_ip_reinit_early_sriov(adev);
5332 amdgpu_virt_init_data_exchange(adev);
5334 r = amdgpu_device_fw_loading(adev);
5339 r = amdgpu_device_ip_reinit_late_sriov(adev);
5343 hive = amdgpu_get_xgmi_hive(adev);
5345 if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
5346 r = amdgpu_xgmi_update_topology(hive, adev);
5352 r = amdgpu_ib_ring_tests(adev);
5356 if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST)
5357 amdgpu_inc_vram_lost(adev);
5362 amdgpu_amdkfd_post_reset(adev);
5363 amdgpu_virt_release_full_gpu(adev, true);
5366 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
5367 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
5368 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
5369 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
5370 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
5371 amdgpu_ras_resume(adev);
5373 amdgpu_virt_ras_telemetry_post_reset(adev);
5381 * @adev: amdgpu_device pointer
5388 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
5393 struct amdgpu_ring *ring = adev->rings[i];
5407 * @adev: amdgpu_device pointer
5412 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
5419 if (!amdgpu_ras_is_poison_mode_supported(adev))
5422 if (amdgpu_sriov_vf(adev))
5426 switch (adev->asic_type) {
5451 dev_info(adev->dev, "GPU recovery disabled.\n");
5455 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
5460 if (adev->bios)
5461 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
5463 dev_info(adev->dev, "GPU mode1 reset\n");
5468 amdgpu_device_cache_pci_state(adev->pdev);
5471 pci_clear_master(adev->pdev);
5473 if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
5474 dev_info(adev->dev, "GPU smu mode1 reset\n");
5475 ret = amdgpu_dpm_mode1_reset(adev);
5477 dev_info(adev->dev, "GPU psp mode1 reset\n");
5478 ret = psp_gpu_reset(adev);
5484 amdgpu_device_load_pci_state(adev->pdev);
5485 ret = amdgpu_psp_wait_for_bootloader(adev);
5490 for (i = 0; i < adev->usec_timeout; i++) {
5491 u32 memsize = adev->nbio.funcs->get_memsize(adev);
5498 if (i >= adev->usec_timeout) {
5503 if (adev->bios)
5504 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
5509 dev_err(adev->dev, "GPU mode1 reset failed\n");
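
amdgpu_device_mode1_reset (5455-5509) caches PCI config state, triggers the reset through either the SMU or the PSP, restores config space and then polls the nbio memsize register until it reads back something sane or a microsecond budget runs out. A user-space sketch of that bounded polling loop; read_memsize() is a made-up stand-in for the real register read:

#include <stdio.h>
#include <unistd.h>

/* Stand-in for the register read; pretends the ASIC comes back after ~30 us. */
static unsigned int read_memsize(int elapsed_us)
{
        return elapsed_us >= 30 ? 0x4000 : 0;
}

int main(void)
{
        const int usec_timeout = 100000;
        int i;

        for (i = 0; i < usec_timeout; i++) {
                unsigned int memsize = read_memsize(i);

                if (memsize != 0 && memsize != 0xffffffff)
                        break;          /* ASIC is alive again */
                usleep(1);
        }

        if (i >= usec_timeout) {
                fprintf(stderr, "mode1 reset: timed out waiting for memsize\n");
                return 1;
        }
        printf("ASIC back after ~%d us\n", i);
        return 0;
}
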
5513 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
5522 if (reset_context->reset_req_dev == adev)
5525 if (amdgpu_sriov_vf(adev))
5526 amdgpu_virt_pre_reset(adev);
5528 amdgpu_fence_driver_isr_toggle(adev, true);
5532 struct amdgpu_ring *ring = adev->rings[i];
5546 amdgpu_fence_driver_isr_toggle(adev, false);
5551 r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
5559 if (!amdgpu_sriov_vf(adev)) {
5562 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
5565 amdgpu_device_ip_check_soft_reset(adev)) {
5566 amdgpu_device_ip_pre_soft_reset(adev);
5567 r = amdgpu_device_ip_soft_reset(adev);
5568 amdgpu_device_ip_post_soft_reset(adev);
5569 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
5570 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
5586 r = amdgpu_device_ip_suspend(adev);
5812 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5815 switch (amdgpu_asic_reset_method(adev)) {
5817 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5820 adev->mp1_state = PP_MP1_STATE_RESET;
5823 adev->mp1_state = PP_MP1_STATE_NONE;
5828 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5830 amdgpu_vf_error_trans_all(adev);
5831 adev->mp1_state = PP_MP1_STATE_NONE;
5834 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5838 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5839 adev->pdev->bus->number, 1);
5848 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5858 reset_method = amdgpu_asic_reset_method(adev);
5863 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5864 adev->pdev->bus->number, 1);
5883 dev_warn(adev->dev, "failed to suspend display audio\n");
5896 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5898 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5901 if (!amdgpu_sriov_vf(adev))
5902 cancel_work(&adev->reset_work);
5905 if (adev->kfd.dev)
5906 cancel_work(&adev->kfd.reset_work);
5908 if (amdgpu_sriov_vf(adev))
5909 cancel_work(&adev->virt.flr_work);
5911 if (con && adev->ras_enabled)
5936 * @adev: amdgpu_device pointer
5945 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5962 if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) &&
5963 !amdgpu_sriov_vf(adev) &&
5965 dev_dbg(adev->dev,
5973 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5979 if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
5980 amdgpu_ras_get_context(adev)->reboot) {
5987 dev_info(adev->dev, "GPU %s begin!\n",
5990 if (!amdgpu_sriov_vf(adev))
5991 hive = amdgpu_get_xgmi_hive(adev);
6000 * to put adev in the 1st position.
6003 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
6006 if (adev->shutdown)
6009 if (!list_is_first(&adev->reset_list, &device_list))
6010 list_rotate_to_front(&adev->reset_list, &device_list);
6013 list_add_tail(&adev->reset_list, &device_list);
6017 if (!amdgpu_sriov_vf(adev)) {
6090 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
6107 if (amdgpu_sriov_vf(adev)) {
6108 if (amdgpu_ras_get_fed_status(adev) || amdgpu_virt_rcvd_ras_interrupt(adev)) {
6109 dev_dbg(adev->dev, "Detected RAS error, wait for FLR completion\n");
6110 amdgpu_ras_set_fed(adev, true);
6114 r = amdgpu_device_reset_sriov(adev, reset_context);
6116 amdgpu_virt_release_full_gpu(adev, true);
6120 adev->asic_reset_res = r;
6185 if (!adev->kfd.init_complete)
6186 amdgpu_amdkfd_device_init(adev);
6207 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
6209 atomic_set(&adev->reset_domain->reset_res, r);
6212 drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE);
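
In the recovery path (6003-6013), every device of an XGMI hive is collected into one reset list and the device that asked for the reset is rotated to the front so it is handled first. Since list_rotate_to_front() is kernel-only, here is a small standalone model of the same rotation on a singly linked list; the node and function names are illustrative:

#include <stdio.h>

struct node { const char *name; struct node *next; };

/* Rotate the list so that *target becomes the new head (cyclic order kept). */
static struct node *rotate_to_front(struct node *head, struct node *target)
{
        struct node *tail = head, *prev = NULL;

        if (!head || head == target)
                return head;
        for (struct node *p = head; p; p = p->next) {
                if (p->next == target)
                        prev = p;
                if (!p->next)
                        tail = p;
        }
        if (!prev)
                return head;            /* target not in list */

        tail->next = head;              /* close the cycle ...          */
        prev->next = NULL;              /* ... and cut it before target */
        return target;
}

int main(void)
{
        struct node c = { "gpu2", NULL }, b = { "gpu1", &c }, a = { "gpu0", &b };
        struct node *head = rotate_to_front(&a, &b);    /* gpu1 asked for reset */

        for (struct node *p = head; p; p = p->next)
                printf("%s ", p->name);
        printf("\n");                   /* gpu1 gpu2 gpu0 */
        return 0;
}
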
6220 * @adev: amdgpu_device pointer
6228 static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
6232 struct pci_dev *parent = adev->pdev;
6240 if (amdgpu_device_pcie_dynamic_switching_supported(adev)) {
6251 pcie_bandwidth_available(adev->pdev, NULL, speed, width);
6258 * @adev: amdgpu_device pointer
6265 static void amdgpu_device_gpu_bandwidth(struct amdgpu_device *adev,
6269 struct pci_dev *parent = adev->pdev;
6288 *speed = pcie_get_speed_cap(adev->pdev);
6289 *width = pcie_get_width_cap(adev->pdev);
6296 * @adev: amdgpu_device pointer
6302 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
6308 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
6311 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
6314 if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
6315 if (adev->pm.pcie_gen_mask == 0)
6316 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
6317 if (adev->pm.pcie_mlw_mask == 0)
6318 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
6322 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
6325 amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
6327 amdgpu_device_gpu_bandwidth(adev, &speed_cap, &link_width);
6329 if (adev->pm.pcie_gen_mask == 0) {
6332 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6337 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6343 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6348 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6352 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6355 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
6359 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6363 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6369 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6374 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6378 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6381 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
6385 if (adev->pm.pcie_mlw_mask == 0) {
6388 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_ASIC_PCIE_MLW_MASK;
6392 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 |
6401 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
6409 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
6416 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
6422 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6427 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6431 adev->pm.pcie_mlw_mask |= CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1;
6439 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
6443 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
6452 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
6460 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
6467 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6473 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6478 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6482 adev->pm.pcie_mlw_mask |= CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
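
amdgpu_device_get_pcie_info (6302-6482) turns the measured link speed and width caps into cumulative bitmasks, where supporting Gen4 implies the Gen1-Gen3 bits as well; the long switch statements achieve this with deliberate fall-through. The idea compresses to a loop; the bit layout below (bit N = Gen N+1) is only an illustration, not the driver's CAIL_* encoding:

#include <stdio.h>

/* Illustrative encoding: bit 0 = Gen1 ... bit 4 = Gen5. */
static unsigned int gen_mask_from_cap(int max_gen)
{
        unsigned int mask = 0;

        for (int gen = 1; gen <= max_gen && gen <= 5; gen++)
                mask |= 1u << (gen - 1);
        return mask ? mask : 1u;        /* always claim at least Gen1 */
}

int main(void)
{
        printf("Gen4-capable link -> mask 0x%x\n", gen_mask_from_cap(4)); /* 0xf */
        printf("Unknown cap       -> mask 0x%x\n", gen_mask_from_cap(0)); /* 0x1 */
        return 0;
}
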
6494 * @adev: amdgpu_device pointer
6495 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
6497 * Return true if @peer_adev can access (DMA) @adev through the PCIe
6498 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
6501 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
6506 !adev->gmc.xgmi.connected_to_cpu &&
6507 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
6509 dev_info(adev->dev, "PCIe P2P access from peer device %s is not supported by the chipset\n",
6512 bool is_large_bar = adev->gmc.visible_vram_size &&
6513 adev->gmc.real_vram_size == adev->gmc.visible_vram_size;
6520 adev->gmc.aper_base + adev->gmc.aper_size - 1;
6522 p2p_addressable = !(adev->gmc.aper_base & address_mask ||
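
Peer accessibility (6501-6522) needs a usable P2PDMA path, a fully visible ("large") BAR, and a BAR range that fits under the peer's DMA address mask; the last check verifies that neither the aperture base nor its final byte has bits set above the mask. A tiny standalone version of just that address test, with example numbers:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* True if [base, base + size - 1] is addressable under a DMA mask of
 * `bits` bits; mirrors the aper_base/aper_size check in the listing. */
static bool range_fits_dma_mask(uint64_t base, uint64_t size, unsigned bits)
{
        uint64_t address_mask = ~((1ULL << bits) - 1);  /* bits above the mask */
        uint64_t last = base + size - 1;

        return !(base & address_mask) && !(last & address_mask);
}

int main(void)
{
        /* 256 MiB BAR at 60 GiB: fine for 40-bit DMA, too high for 32-bit. */
        uint64_t base = 60ULL << 30, size = 256ULL << 20;

        printf("40-bit peer: %s\n", range_fits_dma_mask(base, size, 40) ? "ok" : "no");
        printf("32-bit peer: %s\n", range_fits_dma_mask(base, size, 32) ? "ok" : "no");
        return 0;
}
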
6533 struct amdgpu_device *adev = drm_to_adev(dev);
6534 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
6539 if (ras && adev->ras_enabled &&
6540 adev->nbio.funcs->enable_doorbell_interrupt)
6541 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
6543 return amdgpu_dpm_baco_enter(adev);
6548 struct amdgpu_device *adev = drm_to_adev(dev);
6549 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
6555 ret = amdgpu_dpm_baco_exit(adev);
6559 if (ras && adev->ras_enabled &&
6560 adev->nbio.funcs->enable_doorbell_interrupt)
6561 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
6563 if (amdgpu_passthrough(adev) && adev->nbio.funcs &&
6564 adev->nbio.funcs->clear_doorbell_interrupt)
6565 adev->nbio.funcs->clear_doorbell_interrupt(adev);
6582 struct amdgpu_device *adev = drm_to_adev(dev);
6587 if (adev->gmc.xgmi.num_physical_nodes > 1) {
6592 adev->pci_channel_state = state;
6600 * Locking adev->reset_domain->sem will prevent any external access
6603 amdgpu_device_lock_reset_domain(adev->reset_domain);
6604 amdgpu_device_set_mp1_state(adev);
6611 struct amdgpu_ring *ring = adev->rings[i];
6618 atomic_inc(&adev->gpu_reset_counter);
6658 struct amdgpu_device *adev = drm_to_adev(dev);
6665 if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
6666 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) &&
6667 amdgpu_ras_in_recovery(adev))
6675 list_add_tail(&adev->reset_list, &device_list);
6684 for (i = 0; i < adev->usec_timeout; i++) {
6685 memsize = amdgpu_asic_get_config_memsize(adev);
6697 reset_context.reset_req_dev = adev;
6701 adev->no_hw_access = true;
6702 r = amdgpu_device_pre_asic_reset(adev, &reset_context);
6703 adev->no_hw_access = false;
6711 if (amdgpu_device_cache_pci_state(adev->pdev))
6712 pci_restore_state(adev->pdev);
6717 amdgpu_device_unset_mp1_state(adev);
6718 amdgpu_device_unlock_reset_domain(adev->reset_domain);
6734 struct amdgpu_device *adev = drm_to_adev(dev);
6741 if (adev->pci_channel_state != pci_channel_io_frozen)
6745 struct amdgpu_ring *ring = adev->rings[i];
6753 amdgpu_device_unset_mp1_state(adev);
6754 amdgpu_device_unlock_reset_domain(adev->reset_domain);
6760 struct amdgpu_device *adev = drm_to_adev(dev);
6763 if (amdgpu_sriov_vf(adev))
6768 kfree(adev->pci_state);
6770 adev->pci_state = pci_store_saved_state(pdev);
6772 if (!adev->pci_state) {
6787 struct amdgpu_device *adev = drm_to_adev(dev);
6790 if (!adev->pci_state)
6793 r = pci_load_saved_state(pdev, adev->pci_state);
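
The PCI state helpers at 6760-6793 cache a config-space snapshot with pci_store_saved_state() and reload it around resets with pci_load_saved_state(), freeing any earlier snapshot first. A generic save/restore-blob sketch of the same idea; the snapshot layout and names here are made up:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dev_state { unsigned char config[64]; };        /* fake config space */
struct dev { struct dev_state live; struct dev_state *saved; };

/* Replace any previously cached snapshot with the current live state. */
static int cache_state(struct dev *d)
{
        free(d->saved);
        d->saved = malloc(sizeof(*d->saved));
        if (!d->saved)
                return -1;
        memcpy(d->saved, &d->live, sizeof(d->live));
        return 0;
}

/* Restore the cached snapshot, if one exists. */
static int load_state(struct dev *d)
{
        if (!d->saved)
                return -1;              /* nothing cached yet */
        memcpy(&d->live, d->saved, sizeof(d->live));
        return 0;
}

int main(void)
{
        struct dev d = { 0 };

        d.live.config[4] = 0x07;        /* e.g. command register bits */
        cache_state(&d);
        d.live.config[4] = 0;           /* "reset" clobbers it */
        load_state(&d);
        printf("restored command byte: 0x%02x\n", d.live.config[4]);
        free(d.saved);
        return 0;
}
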
6805 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
6809 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6812 if (adev->gmc.xgmi.connected_to_cpu)
6818 amdgpu_asic_flush_hdp(adev, ring);
6821 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
6825 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6828 if (adev->gmc.xgmi.connected_to_cpu)
6831 amdgpu_asic_invalidate_hdp(adev, ring);
6834 int amdgpu_in_reset(struct amdgpu_device *adev)
6836 return atomic_read(&adev->reset_domain->in_gpu_reset);
6842 * @adev: amdgpu_device pointer
6854 * 4. set adev->no_hw_access to avoid potential crashes after step 5
6859 void amdgpu_device_halt(struct amdgpu_device *adev)
6861 struct pci_dev *pdev = adev->pdev;
6862 struct drm_device *ddev = adev_to_drm(adev);
6864 amdgpu_xcp_dev_unplug(adev);
6867 amdgpu_irq_disable_all(adev);
6869 amdgpu_fence_driver_hw_fini(adev);
6871 adev->no_hw_access = true;
6873 amdgpu_device_unmap_mmio(adev);
6879 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
6885 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6886 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6888 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6892 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6896 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
6901 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6902 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6904 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6909 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
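
The PCIe port accessors (6879-6909) go through an index/data register pair and hold pcie_idx_lock across both accesses so the selected index cannot change under a concurrent caller. A user-space model with a mutex and an in-memory register file; only the index/data pairing is taken from the listing:

#include <stdio.h>
#include <pthread.h>

static unsigned int regs[256];                 /* pretend register file    */
static unsigned int index_reg;                 /* selected register number */
static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;

/* The index write and the data access must happen atomically as a pair. */
static unsigned int port_rreg(unsigned int reg)
{
        unsigned int val;

        pthread_mutex_lock(&idx_lock);
        index_reg = reg;                       /* write index register */
        val = regs[index_reg];                 /* read data register   */
        pthread_mutex_unlock(&idx_lock);
        return val;
}

static void port_wreg(unsigned int reg, unsigned int val)
{
        pthread_mutex_lock(&idx_lock);
        index_reg = reg;
        regs[index_reg] = val;
        pthread_mutex_unlock(&idx_lock);
}

int main(void)
{
        port_wreg(0x10, 0xdeadbeef);
        printf("reg 0x10 = 0x%x\n", port_rreg(0x10));
        return 0;
}
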
6914 * @adev: amdgpu_device pointer
6918 struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev)
6923 fence = dma_fence_get_rcu_safe(&adev->gang_submit);
6930 * @adev: amdgpu_device pointer
6937 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6945 old = amdgpu_device_get_gang(adev);
6954 } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
6958 * Drop it once for the exchanged reference in adev and once for the
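
amdgpu_device_switch_gang (6937-6958) installs a new gang-submit fence with a cmpxchg retry loop, so concurrent submitters race on the bare pointer without a lock and the loser simply retries, then drops the references it no longer needs. A standalone C11 sketch of the same compare-and-exchange retry; dma_fence refcounting is reduced to comments:

#include <stdio.h>
#include <stdatomic.h>

struct fence { int id; };

static _Atomic(struct fence *) gang_submit;

/* Install new_fence as the current gang, returning the fence it replaced. */
static struct fence *switch_gang(struct fence *new_fence)
{
        struct fence *old;

        do {
                old = atomic_load(&gang_submit);
                /* In the driver, an unsignaled (still running) old gang
                 * causes an early return here instead of swapping. */
        } while (!atomic_compare_exchange_weak(&gang_submit, &old, new_fence));

        /* Caller now owns `old` and drops the references it held. */
        return old;
}

int main(void)
{
        struct fence f1 = { 1 }, f2 = { 2 };
        struct fence *prev;

        switch_gang(&f1);
        prev = switch_gang(&f2);
        printf("replaced fence %d with %d\n", prev ? prev->id : 0, f2.id);
        return 0;
}
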
6968 * @adev: the amdgpu device pointer
6976 struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
6980 struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
7001 mutex_lock(&adev->enforce_isolation_mutex);
7022 dep = amdgpu_device_get_gang(adev);
7049 mutex_unlock(&adev->enforce_isolation_mutex);
7053 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
7055 switch (adev->asic_type) {
7087 if (!amdgpu_ip_version(adev, DCE_HWIP, 0) ||
7088 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
7094 uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
7101 uint32_t loop = adev->usec_timeout;
7105 loop = adev->usec_timeout;
7126 if (!ring || !ring->adev)
7129 if (amdgpu_device_should_recover_gpu(ring->adev))
7132 if (unlikely(!ring->adev->debug_disable_soft_recovery) &&
7133 !amdgpu_sriov_vf(ring->adev) && ring->funcs->soft_recovery)