Lines Matching full:gmc

510 if (!adev->gmc.noretry && !amdgpu_passthrough(adev)) in amdgpu_device_detect_runtime_pm_mode()
624 last = min(pos + size, adev->gmc.visible_vram_size); in amdgpu_device_aper_access()
1673 int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size); in amdgpu_device_resize_fb_bar()
1704 if (adev->gmc.real_vram_size && in amdgpu_device_resize_fb_bar()
1705 (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size)) in amdgpu_device_resize_fb_bar()
2460 [AMD_IP_BLOCK_TYPE_GMC] = "gmc",
2920 if (adev->gmc.xgmi.supported) in amdgpu_device_ip_early_init()
3146 /* need to do common hw init early so everything is set up for gmc */ in amdgpu_device_ip_init()
3155 /* need to do gmc hw init early so we can allocate gpu mem */ in amdgpu_device_ip_init()
3181 /* right after GMC hw init, we create CSA */ in amdgpu_device_ip_init()
3252 if (adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_device_ip_init()
3526 ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) || in amdgpu_device_ip_late_init()
3530 if (adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_device_ip_late_init()
3546 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) { in amdgpu_device_ip_late_init()
3675 if (adev->gmc.xgmi.num_physical_nodes > 1) in amdgpu_device_ip_fini()
4006 * COMMON, GMC, and IH. resume puts the hardware into a functional state
4039 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
4424 adev->gmc.gart_size = 512 * 1024 * 1024; in amdgpu_device_init()
4432 adev->gmc.gmc_funcs = NULL; in amdgpu_device_init()
4641 if (adev->gmc.xgmi.supported) { in amdgpu_device_init()
4689 if (adev->gmc.xgmi.num_physical_nodes) { in amdgpu_device_init()
4928 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) { in amdgpu_device_unmap_mmio()
4929 arch_phys_wc_del(adev->gmc.vram_mtrr); in amdgpu_device_unmap_mmio()
4930 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size); in amdgpu_device_unmap_mmio()
5252 unsigned int prev_physical_node_id = adev->gmc.xgmi.physical_node_id; in amdgpu_virt_resume()
5266 prev_physical_node_id, adev->gmc.xgmi.physical_node_id); in amdgpu_virt_resume()
5270 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; in amdgpu_virt_resume()
5593 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) in amdgpu_device_reset_sriov()
5950 tmp_adev->gmc.xgmi.num_physical_nodes > 1) in amdgpu_device_reinit_after_reset()
5979 tmp_adev->gmc.xgmi.num_physical_nodes > 1) in amdgpu_device_reinit_after_reset()
6038 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_do_asic_reset()
6057 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_do_asic_reset()
6214 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) { in amdgpu_device_recovery_prepare()
6215 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_device_recovery_prepare()
6873 !adev->gmc.xgmi.connected_to_cpu && in amdgpu_device_is_peer_accessible()
6879 bool is_large_bar = adev->gmc.visible_vram_size && in amdgpu_device_is_peer_accessible()
6880 adev->gmc.real_vram_size == adev->gmc.visible_vram_size; in amdgpu_device_is_peer_accessible()
6887 adev->gmc.aper_base + adev->gmc.aper_size - 1; in amdgpu_device_is_peer_accessible()
6889 p2p_addressable = !(adev->gmc.aper_base & address_mask || in amdgpu_device_is_peer_accessible()
7091 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_pci_slot_reset()
7149 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_pci_resume()
7282 if (adev->gmc.xgmi.connected_to_cpu) in amdgpu_device_flush_hdp()
7298 if (adev->gmc.xgmi.connected_to_cpu) in amdgpu_device_invalidate_hdp()
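
Several of the matches above (in amdgpu_device_recovery_prepare(), amdgpu_pci_slot_reset() and amdgpu_pci_resume()) repeat the same XGMI-hive iteration pattern: every device in a hive is linked into hive->device_list through its gmc.xgmi.head member. A minimal sketch of that pattern follows; the wrapper function example_walk_hive() is only illustrative and not part of the driver.

	#include <linux/list.h>
	#include "amdgpu.h"
	#include "amdgpu_xgmi.h"

	/* Illustrative only: walk every device in adev's XGMI hive, mirroring
	 * the list_for_each_entry() usage seen in the matches above. */
	static void example_walk_hive(struct amdgpu_device *adev,
				      struct amdgpu_hive_info *hive)
	{
		struct amdgpu_device *tmp_adev;

		/* Single-node (non-XGMI) devices have nothing to iterate. */
		if (!hive || adev->gmc.xgmi.num_physical_nodes <= 1)
			return;

		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			/* per-device work would go here, e.g. reset preparation */
		}
	}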