Lines matching refs: adev
43 bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev) in amdgpu_gmc_is_pdb0_enabled() argument
45 return adev->gmc.xgmi.connected_to_cpu || amdgpu_virt_xgmi_migrate_enabled(adev); in amdgpu_gmc_is_pdb0_enabled()
56 int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev) in amdgpu_gmc_pdb0_alloc() argument
60 u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes; in amdgpu_gmc_pdb0_alloc()
61 uint32_t pde0_page_shift = adev->gmc.vmid0_page_table_block_size + 21; in amdgpu_gmc_pdb0_alloc()
74 r = amdgpu_bo_create(adev, &bp, &adev->gmc.pdb0_bo); in amdgpu_gmc_pdb0_alloc()
78 r = amdgpu_bo_reserve(adev->gmc.pdb0_bo, false); in amdgpu_gmc_pdb0_alloc()
82 r = amdgpu_bo_pin(adev->gmc.pdb0_bo, AMDGPU_GEM_DOMAIN_VRAM); in amdgpu_gmc_pdb0_alloc()
85 r = amdgpu_bo_kmap(adev->gmc.pdb0_bo, &adev->gmc.ptr_pdb0); in amdgpu_gmc_pdb0_alloc()
89 amdgpu_bo_unreserve(adev->gmc.pdb0_bo); in amdgpu_gmc_pdb0_alloc()
93 amdgpu_bo_unpin(adev->gmc.pdb0_bo); in amdgpu_gmc_pdb0_alloc()
95 amdgpu_bo_unreserve(adev->gmc.pdb0_bo); in amdgpu_gmc_pdb0_alloc()
97 amdgpu_bo_unref(&adev->gmc.pdb0_bo); in amdgpu_gmc_pdb0_alloc()
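The pdb0_alloc fragments above (lines 74-97) trace the standard amdgpu buffer-object bring-up ladder: create, reserve, pin, kmap, then unreserve, with a mirrored unwind on each failure. A minimal sketch reassembled from those lines, assuming a caller-populated struct amdgpu_bo_param bp (the error labels are illustrative, not the kernel's):

static int pdb0_alloc_sketch(struct amdgpu_device *adev,
			     struct amdgpu_bo_param *bp)
{
	int r;

	r = amdgpu_bo_create(adev, bp, &adev->gmc.pdb0_bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(adev->gmc.pdb0_bo, false);
	if (r)
		goto err_unref;

	r = amdgpu_bo_pin(adev->gmc.pdb0_bo, AMDGPU_GEM_DOMAIN_VRAM);
	if (r)
		goto err_unreserve;

	r = amdgpu_bo_kmap(adev->gmc.pdb0_bo, &adev->gmc.ptr_pdb0);
	if (r)
		goto err_unpin;

	amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
	return 0;

err_unpin:
	amdgpu_bo_unpin(adev->gmc.pdb0_bo);
err_unreserve:
	amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
err_unref:
	amdgpu_bo_unref(&adev->gmc.pdb0_bo);
	return r;
}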
114 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); in amdgpu_gmc_get_pde_for_bo() local
128 amdgpu_gmc_get_vm_pde(adev, level, addr, flags); in amdgpu_gmc_get_pde_for_bo()
136 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); in amdgpu_gmc_pd_addr() local
140 if (adev->asic_type >= CHIP_VEGA10) { in amdgpu_gmc_pd_addr()
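amdgpu_gmc_pd_addr() (lines 136-140) branches on the ASIC generation: Vega10 and newer fold PDE flags into the root page-directory address, while older parts return the raw BO offset. A hedged reconstruction of that branch:

/* Sketch: root PD address; Vega10+ encodes PDE flags into the address. */
if (adev->asic_type >= CHIP_VEGA10) {
	uint64_t flags = AMDGPU_PTE_VALID;

	amdgpu_gmc_get_pde_for_bo(bo, -1, &pd_addr, &flags);
	pd_addr |= flags;
} else {
	pd_addr = amdgpu_bo_gpu_offset(bo);
}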
162 int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr, in amdgpu_gmc_set_pte_pde() argument
189 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); in amdgpu_gmc_agp_addr() local
197 if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size) in amdgpu_gmc_agp_addr()
200 return adev->gmc.agp_start + bo->ttm->dma_address[0]; in amdgpu_gmc_agp_addr()
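The agp_addr fragments (lines 189-200) capture the AGP fast path: a buffer maps through the AGP aperture only if its first page's DMA address fits below gmc.agp_size, and the GPU address is then a plain offset from gmc.agp_start. A sketch of that check (the full function has further guards, e.g. on caching flags and page count):

/* Sketch: AGP aperture translation for the first page of a TTM buffer. */
if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
	return AMDGPU_BO_INVALID_OFFSET;	/* doesn't fit; use GART */

return adev->gmc.agp_start + bo->ttm->dma_address[0];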
213 void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc, in amdgpu_gmc_vram_location() argument
234 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", in amdgpu_gmc_vram_location()
255 void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) in amdgpu_gmc_sysvm_location() argument
264 if (amdgpu_virt_xgmi_migrate_enabled(adev)) { in amdgpu_gmc_sysvm_location()
275 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", in amdgpu_gmc_sysvm_location()
278 dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n", in amdgpu_gmc_sysvm_location()
293 void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc, in amdgpu_gmc_gart_location() argument
298 u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1); in amdgpu_gmc_gart_location()
307 dev_warn(adev->dev, "limiting GART\n"); in amdgpu_gmc_gart_location()
330 dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n", in amdgpu_gmc_gart_location()
345 void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) in amdgpu_gmc_agp_location() argument
370 dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n", in amdgpu_gmc_agp_location()
385 void amdgpu_gmc_set_agp_default(struct amdgpu_device *adev, in amdgpu_gmc_set_agp_default() argument
417 bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, in amdgpu_gmc_filter_faults() argument
421 struct amdgpu_gmc *gmc = &adev->gmc; in amdgpu_gmc_filter_faults()
487 void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr, in amdgpu_gmc_filter_faults_remove() argument
490 struct amdgpu_gmc *gmc = &adev->gmc; in amdgpu_gmc_filter_faults_remove()
499 if (adev->irq.retry_cam_enabled) in amdgpu_gmc_filter_faults_remove()
502 ih = &adev->irq.ih1; in amdgpu_gmc_filter_faults_remove()
504 last_wptr = amdgpu_ih_get_wptr(adev, ih); in amdgpu_gmc_filter_faults_remove()
508 last_ts = amdgpu_ih_decode_iv_ts(adev, ih, last_wptr, -1); in amdgpu_gmc_filter_faults_remove()
527 int amdgpu_gmc_ras_sw_init(struct amdgpu_device *adev) in amdgpu_gmc_ras_sw_init() argument
532 r = amdgpu_umc_ras_sw_init(adev); in amdgpu_gmc_ras_sw_init()
537 r = amdgpu_mmhub_ras_sw_init(adev); in amdgpu_gmc_ras_sw_init()
542 r = amdgpu_hdp_ras_sw_init(adev); in amdgpu_gmc_ras_sw_init()
547 r = amdgpu_mca_mp0_ras_sw_init(adev); in amdgpu_gmc_ras_sw_init()
551 r = amdgpu_mca_mp1_ras_sw_init(adev); in amdgpu_gmc_ras_sw_init()
555 r = amdgpu_mca_mpio_ras_sw_init(adev); in amdgpu_gmc_ras_sw_init()
560 r = amdgpu_xgmi_ras_sw_init(adev); in amdgpu_gmc_ras_sw_init()
567 int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev) in amdgpu_gmc_ras_late_init() argument
572 void amdgpu_gmc_ras_fini(struct amdgpu_device *adev) in amdgpu_gmc_ras_fini() argument
586 int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev) in amdgpu_gmc_allocate_vm_inv_eng() argument
595 for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) { in amdgpu_gmc_allocate_vm_inv_eng()
598 if (adev->enable_mes) in amdgpu_gmc_allocate_vm_inv_eng()
601 if (adev->enable_uni_mes) in amdgpu_gmc_allocate_vm_inv_eng()
604 if (adev->enable_umsch_mm) in amdgpu_gmc_allocate_vm_inv_eng()
608 for (i = 0; i < adev->num_rings; ++i) { in amdgpu_gmc_allocate_vm_inv_eng()
609 ring = adev->rings[i]; in amdgpu_gmc_allocate_vm_inv_eng()
612 if (ring == &adev->mes.ring[0] || in amdgpu_gmc_allocate_vm_inv_eng()
613 ring == &adev->mes.ring[1] || in amdgpu_gmc_allocate_vm_inv_eng()
614 ring == &adev->umsch_mm.ring || in amdgpu_gmc_allocate_vm_inv_eng()
615 ring == &adev->cper.ring_buf) in amdgpu_gmc_allocate_vm_inv_eng()
619 if (amdgpu_sdma_is_shared_inv_eng(adev, ring)) in amdgpu_gmc_allocate_vm_inv_eng()
624 dev_err(adev->dev, "no VM inv eng for ring %s\n", in amdgpu_gmc_allocate_vm_inv_eng()
632 dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n", in amdgpu_gmc_allocate_vm_inv_eng()
642 shared_ring = amdgpu_sdma_get_shared_ring(adev, ring); in amdgpu_gmc_allocate_vm_inv_eng()
645 dev_info(adev->dev, "ring %s shares VM invalidation engine %u with ring %s on hub %u\n", in amdgpu_gmc_allocate_vm_inv_eng()
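amdgpu_gmc_allocate_vm_inv_eng() (lines 595-645) carves per-hub masks of free invalidation engines: MES/UMSCH/CPER rings are skipped, SDMA rings may share an engine, and every other ring pops the lowest free bit of its hub's mask. A minimal sketch of that pop step, with vm_inv_engs[] standing in for the function's per-hub free-engine bitmap:

/* Sketch: assign the lowest free invalidation engine of the ring's hub. */
unsigned int vmhub = ring->vm_hub;
unsigned int inv_eng = ffs(vm_inv_engs[vmhub]);

if (!inv_eng)
	return -EINVAL;			/* no engine left on this hub */

ring->vm_inv_eng = inv_eng - 1;		/* ffs() is 1-based */
vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);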
654 void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, in amdgpu_gmc_flush_gpu_tlb() argument
657 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; in amdgpu_gmc_flush_gpu_tlb()
658 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; in amdgpu_gmc_flush_gpu_tlb()
664 !adev->mman.buffer_funcs_enabled || !adev->ib_pool_ready || in amdgpu_gmc_flush_gpu_tlb()
670 if (!down_read_trylock(&adev->reset_domain->sem)) in amdgpu_gmc_flush_gpu_tlb()
673 if (adev->gmc.flush_tlb_needs_extra_type_2) in amdgpu_gmc_flush_gpu_tlb()
674 adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid, in amdgpu_gmc_flush_gpu_tlb()
677 if (adev->gmc.flush_tlb_needs_extra_type_0 && flush_type == 2) in amdgpu_gmc_flush_gpu_tlb()
678 adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid, in amdgpu_gmc_flush_gpu_tlb()
681 adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid, vmhub, in amdgpu_gmc_flush_gpu_tlb()
683 up_read(&adev->reset_domain->sem); in amdgpu_gmc_flush_gpu_tlb()
692 mutex_lock(&adev->mman.gtt_window_lock); in amdgpu_gmc_flush_gpu_tlb()
693 r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr, in amdgpu_gmc_flush_gpu_tlb()
700 job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo); in amdgpu_gmc_flush_gpu_tlb()
705 mutex_unlock(&adev->mman.gtt_window_lock); in amdgpu_gmc_flush_gpu_tlb()
713 mutex_unlock(&adev->mman.gtt_window_lock); in amdgpu_gmc_flush_gpu_tlb()
714 dev_err(adev->dev, "Error flushing GPU TLB using the SDMA (%d)!\n", r); in amdgpu_gmc_flush_gpu_tlb()
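The flush_gpu_tlb fragments (lines 657-714) show two paths: a direct flush taken under the reset-domain read lock when register access is possible, and an SDMA fallback that submits a job under gtt_window_lock otherwise. Lines 673-681 also encode two hardware quirks; condensed, the direct path orders its flushes like this:

/* Sketch: extra-flush quirks around the requested flush (direct path). */
if (adev->gmc.flush_tlb_needs_extra_type_2)
	adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid, vmhub, 2);

if (adev->gmc.flush_tlb_needs_extra_type_0 && flush_type == 2)
	adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid, vmhub, 0);

adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid, vmhub, flush_type);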
717 int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid, in amdgpu_gmc_flush_gpu_tlb_pasid() argument
721 struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring; in amdgpu_gmc_flush_gpu_tlb_pasid()
722 struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst]; in amdgpu_gmc_flush_gpu_tlb_pasid()
731 if (!down_read_trylock(&adev->reset_domain->sem)) in amdgpu_gmc_flush_gpu_tlb_pasid()
734 if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) { in amdgpu_gmc_flush_gpu_tlb_pasid()
735 if (adev->gmc.flush_tlb_needs_extra_type_2) in amdgpu_gmc_flush_gpu_tlb_pasid()
736 adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid, in amdgpu_gmc_flush_gpu_tlb_pasid()
740 if (adev->gmc.flush_tlb_needs_extra_type_0 && flush_type == 2) in amdgpu_gmc_flush_gpu_tlb_pasid()
741 adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid, in amdgpu_gmc_flush_gpu_tlb_pasid()
745 adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid, in amdgpu_gmc_flush_gpu_tlb_pasid()
753 if (adev->gmc.flush_tlb_needs_extra_type_2) in amdgpu_gmc_flush_gpu_tlb_pasid()
756 if (adev->gmc.flush_tlb_needs_extra_type_0) in amdgpu_gmc_flush_gpu_tlb_pasid()
759 spin_lock(&adev->gfx.kiq[inst].ring_lock); in amdgpu_gmc_flush_gpu_tlb_pasid()
762 spin_unlock(&adev->gfx.kiq[inst].ring_lock); in amdgpu_gmc_flush_gpu_tlb_pasid()
765 if (adev->gmc.flush_tlb_needs_extra_type_2) in amdgpu_gmc_flush_gpu_tlb_pasid()
768 if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0) in amdgpu_gmc_flush_gpu_tlb_pasid()
775 spin_unlock(&adev->gfx.kiq[inst].ring_lock); in amdgpu_gmc_flush_gpu_tlb_pasid()
780 spin_unlock(&adev->gfx.kiq[inst].ring_lock); in amdgpu_gmc_flush_gpu_tlb_pasid()
786 !amdgpu_reset_pending(adev->reset_domain)) { in amdgpu_gmc_flush_gpu_tlb_pasid()
792 dev_err(adev->dev, "timeout waiting for kiq fence\n"); in amdgpu_gmc_flush_gpu_tlb_pasid()
799 up_read(&adev->reset_domain->sem); in amdgpu_gmc_flush_gpu_tlb_pasid()
803 void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev, in amdgpu_gmc_fw_reg_write_reg_wait() argument
808 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_inst]; in amdgpu_gmc_fw_reg_write_reg_wait()
814 if (adev->mes.ring[0].sched.ready) { in amdgpu_gmc_fw_reg_write_reg_wait()
815 amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1, in amdgpu_gmc_fw_reg_write_reg_wait()
839 !amdgpu_reset_pending(adev->reset_domain)) { in amdgpu_gmc_fw_reg_write_reg_wait()
854 dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1); in amdgpu_gmc_fw_reg_write_reg_wait()
864 void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) in amdgpu_gmc_tmz_set() argument
866 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { in amdgpu_gmc_tmz_set()
877 adev->gmc.tmz_enabled = false; in amdgpu_gmc_tmz_set()
878 dev_info(adev->dev, in amdgpu_gmc_tmz_set()
881 adev->gmc.tmz_enabled = true; in amdgpu_gmc_tmz_set()
882 dev_info(adev->dev, in amdgpu_gmc_tmz_set()
907 adev->gmc.tmz_enabled = false; in amdgpu_gmc_tmz_set()
908 dev_info(adev->dev, in amdgpu_gmc_tmz_set()
911 adev->gmc.tmz_enabled = true; in amdgpu_gmc_tmz_set()
912 dev_info(adev->dev, in amdgpu_gmc_tmz_set()
917 adev->gmc.tmz_enabled = false; in amdgpu_gmc_tmz_set()
918 dev_info(adev->dev, in amdgpu_gmc_tmz_set()
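amdgpu_gmc_tmz_set() (lines 866-918) applies a per-GC-IP policy for the Trusted Memory Zone: depending on the IP version, TMZ is forced off, enabled by default, or left to the amdgpu.tmz module parameter. A hedged sketch of the parameter handling inside one switch bucket (the message strings are illustrative):

/* Sketch: amdgpu.tmz handling; 0 = off, 1 = on, -1 = auto/default. */
if (amdgpu_tmz == 0) {
	adev->gmc.tmz_enabled = false;
	dev_info(adev->dev, "Trusted Memory Zone (TMZ) feature disabled (cmd line)\n");
} else {
	adev->gmc.tmz_enabled = true;
	dev_info(adev->dev, "Trusted Memory Zone (TMZ) feature enabled\n");
}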
931 void amdgpu_gmc_noretry_set(struct amdgpu_device *adev) in amdgpu_gmc_noretry_set() argument
933 struct amdgpu_gmc *gmc = &adev->gmc; in amdgpu_gmc_noretry_set()
934 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); in amdgpu_gmc_noretry_set()
944 if (!amdgpu_sriov_xnack_support(adev)) in amdgpu_gmc_noretry_set()
950 void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type, in amdgpu_gmc_set_vm_fault_masks() argument
956 hub = &adev->vmhub[hub_type]; in amdgpu_gmc_set_vm_fault_masks()
975 void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev) in amdgpu_gmc_get_vbios_allocations() argument
983 adev->mman.stolen_reserved_offset = 0; in amdgpu_gmc_get_vbios_allocations()
984 adev->mman.stolen_reserved_size = 0; in amdgpu_gmc_get_vbios_allocations()
994 switch (adev->asic_type) { in amdgpu_gmc_get_vbios_allocations()
996 adev->mman.keep_stolen_vga_memory = true; in amdgpu_gmc_get_vbios_allocations()
1001 if (amdgpu_sriov_vf(adev) && hypervisor_is_type(X86_HYPER_MS_HYPERV)) { in amdgpu_gmc_get_vbios_allocations()
1002 adev->mman.stolen_reserved_offset = 0x500000; in amdgpu_gmc_get_vbios_allocations()
1003 adev->mman.stolen_reserved_size = 0x200000; in amdgpu_gmc_get_vbios_allocations()
1009 adev->mman.keep_stolen_vga_memory = true; in amdgpu_gmc_get_vbios_allocations()
1012 adev->mman.keep_stolen_vga_memory = false; in amdgpu_gmc_get_vbios_allocations()
1016 if (amdgpu_sriov_vf(adev) || in amdgpu_gmc_get_vbios_allocations()
1017 !amdgpu_device_has_display_hardware(adev)) { in amdgpu_gmc_get_vbios_allocations()
1020 size = amdgpu_gmc_get_vbios_fb_size(adev); in amdgpu_gmc_get_vbios_allocations()
1022 if (adev->mman.keep_stolen_vga_memory) in amdgpu_gmc_get_vbios_allocations()
1027 if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) in amdgpu_gmc_get_vbios_allocations()
1031 adev->mman.stolen_vga_size = AMDGPU_VBIOS_VGA_ALLOCATION; in amdgpu_gmc_get_vbios_allocations()
1032 adev->mman.stolen_extended_size = size - adev->mman.stolen_vga_size; in amdgpu_gmc_get_vbios_allocations()
1034 adev->mman.stolen_vga_size = size; in amdgpu_gmc_get_vbios_allocations()
1035 adev->mman.stolen_extended_size = 0; in amdgpu_gmc_get_vbios_allocations()
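Lines 1020-1035 split the VBIOS framebuffer reservation in two: up to AMDGPU_VBIOS_VGA_ALLOCATION is kept as the stolen VGA block and anything beyond it becomes the extended block. Reassembled from the fragments above:

/* Sketch: split the VBIOS FB size into stolen VGA + extended parts. */
size = amdgpu_gmc_get_vbios_fb_size(adev);

if (size > AMDGPU_VBIOS_VGA_ALLOCATION) {
	adev->mman.stolen_vga_size = AMDGPU_VBIOS_VGA_ALLOCATION;
	adev->mman.stolen_extended_size = size - adev->mman.stolen_vga_size;
} else {
	adev->mman.stolen_vga_size = size;
	adev->mman.stolen_extended_size = 0;
}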
1055 void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev) in amdgpu_gmc_init_pdb0() argument
1058 uint64_t flags = adev->gart.gart_pte_flags; /* TODO: it is UC; explore NC/RW? */ in amdgpu_gmc_init_pdb0()
1061 u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes; in amdgpu_gmc_init_pdb0()
1062 u64 pde0_page_size = (1ULL<<adev->gmc.vmid0_page_table_block_size)<<21; in amdgpu_gmc_init_pdb0()
1064 u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo); in amdgpu_gmc_init_pdb0()
1067 if (!drm_dev_enter(adev_to_drm(adev), &idx)) in amdgpu_gmc_init_pdb0()
1073 flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1)); in amdgpu_gmc_init_pdb0()
1074 flags |= AMDGPU_PDE_PTE_FLAG(adev); in amdgpu_gmc_init_pdb0()
1076 vram_addr = adev->vm_manager.vram_base_offset; in amdgpu_gmc_init_pdb0()
1077 if (!amdgpu_virt_xgmi_migrate_enabled(adev)) in amdgpu_gmc_init_pdb0()
1078 vram_addr -= adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; in amdgpu_gmc_init_pdb0()
1085 amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, vram_addr, flags); in amdgpu_gmc_init_pdb0()
1092 flags |= AMDGPU_PTE_SNOOPED | AMDGPU_PDE_BFS_FLAG(adev, 0); in amdgpu_gmc_init_pdb0()
1094 amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags); in amdgpu_gmc_init_pdb0()
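In amdgpu_gmc_init_pdb0() (lines 1058-1094) each PDE0 entry spans (1ULL << vmid0_page_table_block_size) << 21 bytes; a block size of 9, for example, gives 2^9 * 2 MiB = 1 GiB per entry. The first vram_size / pde0_page_size entries map VRAM as huge pages and the following entry points at the GART page-table base. A sketch of the fill loop (gart_flags is an illustrative name for the SNOOPED/BFS flag set built at line 1092):

/* Sketch: huge-page VRAM PDEs first, then one PDE for the GART PTB. */
u64 vram_end = vram_addr + vram_size;
int i;

for (i = 0; vram_addr < vram_end; vram_addr += pde0_page_size, i++)
	amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, vram_addr, flags);

amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, gart_flags);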
1105 uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr) in amdgpu_gmc_vram_mc2pa() argument
1107 return mc_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset; in amdgpu_gmc_vram_mc2pa()
1117 uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo) in amdgpu_gmc_vram_pa() argument
1119 return amdgpu_gmc_vram_mc2pa(adev, amdgpu_bo_gpu_offset(bo)); in amdgpu_gmc_vram_pa()
1122 int amdgpu_gmc_vram_checking(struct amdgpu_device *adev) in amdgpu_gmc_vram_checking() argument
1131 ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, in amdgpu_gmc_vram_checking()
1189 struct amdgpu_device *adev = drm_to_adev(ddev); in available_memory_partition_show() local
1193 for_each_inst(mode, adev->gmc.supported_nps_modes) { in available_memory_partition_show()
1207 struct amdgpu_device *adev = drm_to_adev(ddev); in current_memory_partition_store() local
1213 for_each_inst(i, adev->gmc.supported_nps_modes) { in current_memory_partition_store()
1223 if (mode == adev->gmc.gmc_funcs->query_mem_partition_mode(adev)) { in current_memory_partition_store()
1225 adev->dev, in current_memory_partition_store()
1233 hive = amdgpu_get_xgmi_hive(adev); in current_memory_partition_store()
1238 adev->gmc.requested_nps_mode = mode; in current_memory_partition_store()
1242 adev->dev, in current_memory_partition_store()
1252 struct amdgpu_device *adev = drm_to_adev(ddev); in current_memory_partition_show() local
1256 if (amdgpu_in_reset(adev)) in current_memory_partition_show()
1259 mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); in current_memory_partition_show()
1270 int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev) in amdgpu_gmc_sysfs_init() argument
1275 if (!adev->gmc.gmc_funcs->query_mem_partition_mode) in amdgpu_gmc_sysfs_init()
1278 nps_switch_support = (hweight32(adev->gmc.supported_nps_modes & in amdgpu_gmc_sysfs_init()
1284 r = device_create_file(adev->dev, in amdgpu_gmc_sysfs_init()
1290 return device_create_file(adev->dev, in amdgpu_gmc_sysfs_init()
1294 void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev) in amdgpu_gmc_sysfs_fini() argument
1296 if (!adev->gmc.gmc_funcs->query_mem_partition_mode) in amdgpu_gmc_sysfs_fini()
1299 device_remove_file(adev->dev, &dev_attr_current_memory_partition); in amdgpu_gmc_sysfs_fini()
1300 device_remove_file(adev->dev, &dev_attr_available_memory_partition); in amdgpu_gmc_sysfs_fini()
1303 int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev, in amdgpu_gmc_get_nps_memranges() argument
1315 refresh = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) && in amdgpu_gmc_get_nps_memranges()
1316 (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS); in amdgpu_gmc_get_nps_memranges()
1317 ret = amdgpu_discovery_get_nps_info(adev, &nps_type, &ranges, in amdgpu_gmc_get_nps_memranges()
1328 adev->dev, in amdgpu_gmc_get_nps_memranges()
1338 adev->dev, in amdgpu_gmc_get_nps_memranges()
1353 adev->dev, in amdgpu_gmc_get_nps_memranges()
1366 adev->vm_manager.vram_base_offset) >> in amdgpu_gmc_get_nps_memranges()
1370 adev->vm_manager.vram_base_offset) >> in amdgpu_gmc_get_nps_memranges()
1384 int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev, in amdgpu_gmc_request_memory_partition() argument
1388 if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) in amdgpu_gmc_request_memory_partition()
1391 if (!adev->psp.funcs) { in amdgpu_gmc_request_memory_partition()
1392 dev_err(adev->dev, in amdgpu_gmc_request_memory_partition()
1397 return psp_memory_partition(&adev->psp, nps_mode); in amdgpu_gmc_request_memory_partition()
1400 static inline bool amdgpu_gmc_need_nps_switch_req(struct amdgpu_device *adev, in amdgpu_gmc_need_nps_switch_req() argument
1404 return (((BIT(req_nps_mode) & adev->gmc.supported_nps_modes) == in amdgpu_gmc_need_nps_switch_req()
1409 void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev) in amdgpu_gmc_prepare_nps_mode_change() argument
1414 if (amdgpu_sriov_vf(adev) || !adev->gmc.supported_nps_modes || in amdgpu_gmc_prepare_nps_mode_change()
1415 !adev->gmc.gmc_funcs->request_mem_partition_mode) in amdgpu_gmc_prepare_nps_mode_change()
1418 cur_nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); in amdgpu_gmc_prepare_nps_mode_change()
1419 hive = amdgpu_get_xgmi_hive(adev); in amdgpu_gmc_prepare_nps_mode_change()
1422 if (!amdgpu_gmc_need_nps_switch_req(adev, req_nps_mode, in amdgpu_gmc_prepare_nps_mode_change()
1427 r = amdgpu_xgmi_request_nps_change(adev, hive, req_nps_mode); in amdgpu_gmc_prepare_nps_mode_change()
1432 req_nps_mode = adev->gmc.requested_nps_mode; in amdgpu_gmc_prepare_nps_mode_change()
1433 if (!amdgpu_gmc_need_nps_switch_req(adev, req_nps_mode, cur_nps_mode)) in amdgpu_gmc_prepare_nps_mode_change()
1437 r = adev->gmc.gmc_funcs->request_mem_partition_mode(adev, req_nps_mode); in amdgpu_gmc_prepare_nps_mode_change()
1440 dev_err(adev->dev, "NPS mode change request failed\n"); in amdgpu_gmc_prepare_nps_mode_change()
1443 adev->dev, in amdgpu_gmc_prepare_nps_mode_change()
1447 bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev) in amdgpu_gmc_need_reset_on_init() argument
1449 if (adev->gmc.gmc_funcs->need_reset_on_init) in amdgpu_gmc_need_reset_on_init()
1450 return adev->gmc.gmc_funcs->need_reset_on_init(adev); in amdgpu_gmc_need_reset_on_init()
1456 amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev) in amdgpu_gmc_get_vf_memory_partition() argument
1458 switch (adev->gmc.num_mem_partitions) { in amdgpu_gmc_get_vf_memory_partition()
1475 amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes) in amdgpu_gmc_get_memory_partition() argument
1479 if (adev->nbio.funcs && in amdgpu_gmc_get_memory_partition()
1480 adev->nbio.funcs->get_memory_partition_mode) in amdgpu_gmc_get_memory_partition()
1481 mode = adev->nbio.funcs->get_memory_partition_mode(adev, in amdgpu_gmc_get_memory_partition()
1484 dev_warn(adev->dev, "memory partition mode query is not supported\n"); in amdgpu_gmc_get_memory_partition()
1490 amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev) in amdgpu_gmc_query_memory_partition() argument
1492 if (amdgpu_sriov_vf(adev)) in amdgpu_gmc_query_memory_partition()
1493 return amdgpu_gmc_get_vf_memory_partition(adev); in amdgpu_gmc_query_memory_partition()
1495 return amdgpu_gmc_get_memory_partition(adev, NULL); in amdgpu_gmc_query_memory_partition()
1498 static bool amdgpu_gmc_validate_partition_info(struct amdgpu_device *adev) in amdgpu_gmc_validate_partition_info() argument
1504 mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes); in amdgpu_gmc_validate_partition_info()
1514 valid = (adev->gmc.num_mem_partitions == 1); in amdgpu_gmc_validate_partition_info()
1517 valid = (adev->gmc.num_mem_partitions == 2); in amdgpu_gmc_validate_partition_info()
1520 valid = (adev->gmc.num_mem_partitions == 3 || in amdgpu_gmc_validate_partition_info()
1521 adev->gmc.num_mem_partitions == 4); in amdgpu_gmc_validate_partition_info()
1524 valid = (adev->gmc.num_mem_partitions == 8); in amdgpu_gmc_validate_partition_info()
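amdgpu_gmc_validate_partition_info() (lines 1504-1524) checks the software partition count against the hardware NPS mode: NPS1 expects 1 partition, NPS2 expects 2, NPS4 accepts 3 or 4, NPS8 expects 8. The switch, reassembled from the fragments (case labels inferred from the amdgpu_memory_partition enum):

/* Sketch: expected memory-partition count per NPS mode. */
switch (mode) {
case AMDGPU_NPS1_PARTITION_MODE:
	valid = (adev->gmc.num_mem_partitions == 1);
	break;
case AMDGPU_NPS2_PARTITION_MODE:
	valid = (adev->gmc.num_mem_partitions == 2);
	break;
case AMDGPU_NPS4_PARTITION_MODE:
	valid = (adev->gmc.num_mem_partitions == 3 ||
		 adev->gmc.num_mem_partitions == 4);
	break;
case AMDGPU_NPS8_PARTITION_MODE:
	valid = (adev->gmc.num_mem_partitions == 8);
	break;
default:
	valid = false;
	break;
}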
1546 amdgpu_gmc_init_acpi_mem_ranges(struct amdgpu_device *adev, in amdgpu_gmc_init_acpi_mem_ranges() argument
1555 num_xcc = NUM_XCC(adev->gfx.xcc_mask); in amdgpu_gmc_init_acpi_mem_ranges()
1559 ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info); in amdgpu_gmc_init_acpi_mem_ranges()
1580 adev->gmc.num_mem_partitions = num_ranges; in amdgpu_gmc_init_acpi_mem_ranges()
1583 void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev, in amdgpu_gmc_init_sw_mem_ranges() argument
1590 mode = amdgpu_gmc_query_memory_partition(adev); in amdgpu_gmc_init_sw_mem_ranges()
1594 adev->gmc.num_mem_partitions = 0; in amdgpu_gmc_init_sw_mem_ranges()
1597 adev->gmc.num_mem_partitions = 1; in amdgpu_gmc_init_sw_mem_ranges()
1600 adev->gmc.num_mem_partitions = 2; in amdgpu_gmc_init_sw_mem_ranges()
1603 if (adev->flags & AMD_IS_APU) in amdgpu_gmc_init_sw_mem_ranges()
1604 adev->gmc.num_mem_partitions = 3; in amdgpu_gmc_init_sw_mem_ranges()
1606 adev->gmc.num_mem_partitions = 4; in amdgpu_gmc_init_sw_mem_ranges()
1609 adev->gmc.num_mem_partitions = 8; in amdgpu_gmc_init_sw_mem_ranges()
1612 adev->gmc.num_mem_partitions = 1; in amdgpu_gmc_init_sw_mem_ranges()
1617 r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges, in amdgpu_gmc_init_sw_mem_ranges()
1618 &adev->gmc.num_mem_partitions); in amdgpu_gmc_init_sw_mem_ranges()
1621 for (i = 1; i < adev->gmc.num_mem_partitions; ++i) { in amdgpu_gmc_init_sw_mem_ranges()
1628 if (!adev->gmc.num_mem_partitions) { in amdgpu_gmc_init_sw_mem_ranges()
1629 dev_warn(adev->dev, in amdgpu_gmc_init_sw_mem_ranges()
1631 adev->gmc.num_mem_partitions = 1; in amdgpu_gmc_init_sw_mem_ranges()
1634 size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT; in amdgpu_gmc_init_sw_mem_ranges()
1635 size /= adev->gmc.num_mem_partitions; in amdgpu_gmc_init_sw_mem_ranges()
1637 for (i = 0; i < adev->gmc.num_mem_partitions; ++i) { in amdgpu_gmc_init_sw_mem_ranges()
1645 l = adev->gmc.num_mem_partitions - 1; in amdgpu_gmc_init_sw_mem_ranges()
1650 (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1; in amdgpu_gmc_init_sw_mem_ranges()
1652 adev->gmc.real_vram_size - in amdgpu_gmc_init_sw_mem_ranges()
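When no NPS range table is used, lines 1634-1652 fall back to an even split: real VRAM (padded by 16 MiB) is divided into num_mem_partitions equal page ranges, and the last range is clamped to the true end of VRAM. A sketch under the assumption that ranges[] follows the amdgpu_mem_partition_info layout:

/* Sketch: even VRAM split in GPU pages; clamp the final range. */
u64 size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT;
u32 n = adev->gmc.num_mem_partitions, i, l;

size /= n;
for (i = 0; i < n; ++i) {
	ranges[i].range.fpfn = i * size;
	ranges[i].range.lpfn = (i + 1) * size - 1;
	ranges[i].size = size << AMDGPU_GPU_PAGE_SHIFT;
}

l = n - 1;
ranges[l].range.lpfn =
	(adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
ranges[l].size = adev->gmc.real_vram_size -
		 ((u64)ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT);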
1656 int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev) in amdgpu_gmc_init_mem_ranges() argument
1660 adev->gmc.mem_partitions = kcalloc(AMDGPU_MAX_MEM_RANGES, in amdgpu_gmc_init_mem_ranges()
1663 if (!adev->gmc.mem_partitions) in amdgpu_gmc_init_mem_ranges()
1666 if (adev->gmc.is_app_apu) in amdgpu_gmc_init_mem_ranges()
1667 amdgpu_gmc_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions); in amdgpu_gmc_init_mem_ranges()
1669 amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions); in amdgpu_gmc_init_mem_ranges()
1671 if (amdgpu_sriov_vf(adev)) in amdgpu_gmc_init_mem_ranges()
1674 valid = amdgpu_gmc_validate_partition_info(adev); in amdgpu_gmc_init_mem_ranges()
1677 dev_warn(adev->dev, in amdgpu_gmc_init_mem_ranges()