Lines matching full:gmc (all hits are in the amdgpu XGMI support code, drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c; each entry shows the source line number and its containing function)
442 return sysfs_emit(buf, "%llu\n", adev->gmc.xgmi.node_id); in amdgpu_xgmi_show_device_id()
453 return sysfs_emit(buf, "%u\n", adev->gmc.xgmi.physical_node_id); in amdgpu_xgmi_show_physical_id()
502 if (top->nodes[i].node_id == adev->gmc.xgmi.node_id) { in amdgpu_xgmi_show_connected_port_num()
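The three hits above (442, 453, 502) are sysfs show callbacks that expose the fabric identifiers to userspace. A minimal sketch of the pattern, assuming the dev_get_drvdata()/drm_to_adev() plumbing amdgpu uses for its other device attributes:

    static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
                                              struct device_attribute *attr,
                                              char *buf)
    {
            struct drm_device *ddev = dev_get_drvdata(dev);
            struct amdgpu_device *adev = drm_to_adev(ddev);

            /* node_id is a 64-bit fabric identifier, hence %llu */
            return sysfs_emit(buf, "%llu\n", adev->gmc.xgmi.node_id);
    }
    static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);

Note the two ID widths: node_id is the PSP-assigned fabric-wide identifier, while physical_node_id (printed with %u at 453) is the narrower per-hive slot index.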
670 if (!adev->gmc.xgmi.hive_id) in amdgpu_get_xgmi_hive()
681 if (hive->hive_id == adev->gmc.xgmi.hive_id) in amdgpu_get_xgmi_hive()
733 hive->hive_id = adev->gmc.xgmi.hive_id; in amdgpu_get_xgmi_hive()
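The hits at 670, 681 and 733 show the lookup-or-create pattern keyed on gmc.xgmi.hive_id. A condensed sketch; the file-scope xgmi_hive_list/xgmi_mutex names follow the upstream file, and the kobject setup of the real function is omitted:

    struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
    {
            struct amdgpu_hive_info *hive = NULL, *tmp;

            if (!adev->gmc.xgmi.hive_id)
                    return NULL;            /* device is not on an XGMI fabric */

            mutex_lock(&xgmi_mutex);
            list_for_each_entry(tmp, &xgmi_hive_list, node) {
                    if (tmp->hive_id == adev->gmc.xgmi.hive_id) {
                            kref_get(&tmp->kref);   /* existing hive: take a ref */
                            hive = tmp;
                            goto unlock;
                    }
            }

            hive = kzalloc(sizeof(*hive), GFP_KERNEL);
            if (!hive)
                    goto unlock;

            hive->hive_id = adev->gmc.xgmi.hive_id;
            INIT_LIST_HEAD(&hive->device_list);
            kref_init(&hive->kref);         /* caller owns the initial ref */
            list_add_tail(&hive->node, &xgmi_hive_list);
    unlock:
            mutex_unlock(&xgmi_mutex);
            return hive;
    }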
805 request_adev->gmc.xgmi.node_id, in amdgpu_xgmi_set_pstate()
806 request_adev->gmc.xgmi.hive_id, ret); in amdgpu_xgmi_set_pstate()
837 adev->gmc.xgmi.node_id, in amdgpu_xgmi_update_topology()
838 adev->gmc.xgmi.hive_id, ret); in amdgpu_xgmi_update_topology()
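Both failure paths above (805-806 for the pstate request, 837-838 for the topology push) stamp the error message with the device's node and hive IDs so a multi-GPU log can be attributed to one board. The topology-update shape, assuming psp_xgmi_set_topology_info()'s upstream signature (psp context, device count, topology table):

    int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive,
                                    struct amdgpu_device *adev)
    {
            int ret;

            ret = psp_xgmi_set_topology_info(&adev->psp,
                                             atomic_read(&hive->number_devices),
                                             &adev->psp.xgmi_context.top_info);
            if (ret)
                    dev_err(adev->dev,
                            "XGMI: Set topology failure on device %llx, hive %llx, ret %d",
                            adev->gmc.xgmi.node_id,
                            adev->gmc.xgmi.hive_id, ret);

            return ret;
    }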
857 if (!adev->gmc.xgmi.supported) in amdgpu_xgmi_get_hops_count()
861 if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id) in amdgpu_xgmi_get_hops_count()
865 peer_adev->gmc.xgmi.physical_node_id); in amdgpu_xgmi_get_hops_count()
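The hops query at 857-865 walks the PSP-reported topology table looking for the peer's node_id; the error message identifies the peer by its physical_node_id, the human-friendly slot number. A sketch (the guard's return value and the reserved-bit mask on num_hops are assumptions based on the upstream code):

    int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
                                   struct amdgpu_device *peer_adev)
    {
            struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
            uint8_t num_hops_mask = 0x7;    /* low bits carry the hop count */
            int i;

            if (!adev->gmc.xgmi.supported)
                    return -EINVAL;

            for (i = 0; i < top->num_nodes; i++)
                    if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
                            return top->nodes[i].num_hops & num_hops_mask;

            dev_err(adev->dev, "Failed to get xgmi hops count for peer %d.\n",
                    peer_adev->gmc.xgmi.physical_node_id);
            return -EINVAL;
    }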
876 int num_lanes = adev->gmc.xgmi.max_width; in amdgpu_xgmi_get_bandwidth()
877 int speed = adev->gmc.xgmi.max_speed; in amdgpu_xgmi_get_bandwidth()
886 if (!adev->gmc.xgmi.supported) in amdgpu_xgmi_get_bandwidth()
897 if (top->nodes[i].node_id != peer_adev->gmc.xgmi.node_id) in amdgpu_xgmi_get_bandwidth()
907 peer_adev->gmc.xgmi.physical_node_id); in amdgpu_xgmi_get_bandwidth()
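The bandwidth query (876-907) combines the cached link capabilities (max_width lanes at max_speed) with the per-peer link count from the topology table. The helper below is hypothetical and the arithmetic a simplification; the upstream function additionally distinguishes bandwidth modes and units, and its peer lookup mirrors the hops query above:

    /* Hypothetical helper: peak XGMI bandwidth to a peer in GB/s,
     * assuming max_speed is in GT/s and max_width is the lane count.
     */
    static int xgmi_peer_peak_bw_gbps(struct amdgpu_device *adev,
                                      struct amdgpu_device *peer_adev)
    {
            struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
            int num_lanes = adev->gmc.xgmi.max_width;
            int speed = adev->gmc.xgmi.max_speed;
            int i, num_links = 0;

            if (!adev->gmc.xgmi.supported)
                    return 0;

            for (i = 0; i < top->num_nodes; i++) {
                    if (top->nodes[i].node_id != peer_adev->gmc.xgmi.node_id)
                            continue;
                    num_links = top->nodes[i].num_links;
                    break;
            }

            /* GT/s x lanes / 8 bits-per-byte = GB/s per link */
            return num_links * num_lanes * speed / 8;
    }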
929 if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id) in amdgpu_xgmi_get_is_sharing_enabled()
947 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_initialize_hive_get_data_partition()
968 if (peer_info->nodes[i].node_id == adev->gmc.xgmi.node_id) { in amdgpu_xgmi_fill_topology_info()
970 if (top_info->nodes[j].node_id == peer_adev->gmc.xgmi.node_id) { in amdgpu_xgmi_fill_topology_info()
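amdgpu_xgmi_fill_topology_info (968-970) reconciles two views of the same link: it finds this device's row in the peer's table and the peer's row in the local table, then copies the link data across. A condensed sketch reusing the psp_xgmi_topology_info layout from the queries above; treating num_links as the copied field is an assumption:

    static void xgmi_fill_peer_link_info(struct amdgpu_device *adev,
                                         struct amdgpu_device *peer_adev,
                                         struct psp_xgmi_topology_info *peer_info)
    {
            struct psp_xgmi_topology_info *top_info =
                    &adev->psp.xgmi_context.top_info;
            int i, j;

            for (i = 0; i < peer_info->num_nodes; i++) {
                    if (peer_info->nodes[i].node_id != adev->gmc.xgmi.node_id)
                            continue;       /* not our row in the peer's table */
                    for (j = 0; j < top_info->num_nodes; j++) {
                            if (top_info->nodes[j].node_id ==
                                peer_adev->gmc.xgmi.node_id) {
                                    top_info->nodes[j].num_links =
                                            peer_info->nodes[i].num_links;
                                    break;
                            }
                    }
                    break;
            }
    }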
992 if (!adev->gmc.xgmi.supported) in amdgpu_xgmi_add_device()
1003 ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id); in amdgpu_xgmi_add_device()
1010 ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id); in amdgpu_xgmi_add_device()
1017 adev->gmc.xgmi.hive_id = 16; in amdgpu_xgmi_add_device()
1018 adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16; in amdgpu_xgmi_add_device()
1026 adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id); in amdgpu_xgmi_add_device()
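The block at 992-1029 acquires the fabric IDs during device add. When the PSP firmware path is available the IDs come from the XGMI TA; otherwise (1017-1018) the driver synthesizes deterministic stand-ins from physical_node_id so a hive can still be formed. Sketch; use_psp_path is a stand-in for the real branch condition:

    if (use_psp_path) {     /* assumption: PSP XGMI support is usable */
            ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
            if (ret)
                    goto exit;      /* exit: common unwind (not shown) */
            ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
            if (ret)
                    goto exit;
    } else {
            /* no firmware-provided IDs: fabricate stable ones */
            adev->gmc.xgmi.hive_id = 16;
            adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16;
    }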
1033 list_add_tail(&adev->gmc.xgmi.head, &hive->device_list); in amdgpu_xgmi_add_device()
1042 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_add_device()
1047 adev->gmc.xgmi.node_id; in amdgpu_xgmi_add_device()
1063 adev->gmc.xgmi.node_id, in amdgpu_xgmi_add_device()
1064 adev->gmc.xgmi.hive_id, ret); in amdgpu_xgmi_add_device()
1070 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_add_device()
1075 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_add_device()
1081 tmp_adev->gmc.xgmi.node_id, in amdgpu_xgmi_add_device()
1082 tmp_adev->gmc.xgmi.hive_id, ret); in amdgpu_xgmi_add_device()
1098 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_add_device()
1104 tmp_adev->gmc.xgmi.node_id, in amdgpu_xgmi_add_device()
1105 tmp_adev->gmc.xgmi.hive_id, ret); in amdgpu_xgmi_add_device()
1127 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id); in amdgpu_xgmi_add_device()
1131 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id, in amdgpu_xgmi_add_device()
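The remainder of amdgpu_xgmi_add_device (1033-1105) links the newcomer into the hive and fans the updated topology out to every member: the first loop appends the new node_id to each member's table, the second pushes each table to the PSP, and the third reads the firmware's view back (the error prints at 1081-1082 and 1104-1105 again carry node and hive IDs). A hedged outline; count is assumed to be the post-add device total:

    list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);

    /* every existing member learns about the newcomer */
    list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
            top_info = &tmp_adev->psp.xgmi_context.top_info;
            top_info->nodes[count - 1].node_id = adev->gmc.xgmi.node_id;
            top_info->num_nodes = count;
    }

    /* push each member's table to its PSP... */
    list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
            ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
            if (ret)
                    goto exit_unlock;       /* unwinds hive locking (not shown) */
    }

    /* ...then read back the firmware's authoritative view */
    list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
            ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
                                             &tmp_adev->psp.xgmi_context.top_info,
                                             false);
            if (ret)
                    goto exit_unlock;
    }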
1142 if (!adev->gmc.xgmi.supported) in amdgpu_xgmi_remove_device()
1153 list_del(&adev->gmc.xgmi.head); in amdgpu_xgmi_remove_device()
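Removal (1142-1153) is the mirror image of add: drop the device off the hive list and release the hive reference. Sketch; the hive_lock/number_devices names follow their use elsewhere in the file, and the upstream version also drops the add-time reference, which is elided here:

    int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
    {
            struct amdgpu_hive_info *hive;

            if (!adev->gmc.xgmi.supported)
                    return -EINVAL;

            hive = amdgpu_get_xgmi_hive(adev);
            if (!hive)
                    return -EINVAL;

            mutex_lock(&hive->hive_lock);
            list_del(&adev->gmc.xgmi.head);
            atomic_dec(&hive->number_devices);
            mutex_unlock(&hive->hive_lock);

            amdgpu_put_xgmi_hive(hive);     /* drop the lookup's reference */
            return 0;
    }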
1228 if (!adev->gmc.xgmi.supported || in amdgpu_xgmi_ras_late_init()
1229 adev->gmc.xgmi.num_physical_nodes == 0) in amdgpu_xgmi_ras_late_init()
1261 struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi; in amdgpu_xgmi_get_relative_phy_addr()
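The single hit at 1261 is the hive-to-local address translation used by the RAS paths: each node's VRAM occupies one node_segment_size-sized slice of the hive-wide physical space, so subtracting this node's slice base yields the node-local address. Sketch, assuming the node_segment_size field of struct amdgpu_xgmi:

    static u64 amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
                                                 u64 addr)
    {
            struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;

            /* hive address minus this node's segment base */
            return addr - xgmi->node_segment_size * xgmi->physical_node_id;
    }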
1625 if (!adev->gmc.xgmi.ras) in amdgpu_xgmi_ras_sw_init()
1628 ras = adev->gmc.xgmi.ras; in amdgpu_xgmi_ras_sw_init()
1638 adev->gmc.xgmi.ras_if = &ras->ras_block.ras_comm; in amdgpu_xgmi_ras_sw_init()
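amdgpu_xgmi_ras_sw_init (1625-1638) follows the standard amdgpu RAS block registration pattern: bail out if the ASIC didn't wire up an XGMI RAS object, register the block, then publish its comm structure through gmc.xgmi.ras_if. A sketch; the block name and enum value are the ones the upstream RAS framework uses for the XGMI/WAFL block:

    int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev)
    {
            struct amdgpu_xgmi_ras *ras;
            int err;

            if (!adev->gmc.xgmi.ras)
                    return 0;       /* no XGMI RAS block on this ASIC */

            ras = adev->gmc.xgmi.ras;
            err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
            if (err) {
                    dev_err(adev->dev, "Failed to register xgmi_wafl ras block!\n");
                    return err;
            }

            strcpy(ras->ras_block.ras_comm.name, "xgmi_wafl");
            ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
            adev->gmc.xgmi.ras_if = &ras->ras_block.ras_comm;

            return 0;
    }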
1655 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) in amdgpu_xgmi_reset_on_init_work()
1673 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_reset_on_init_work()
1701 if (num_devs == adev->gmc.xgmi.num_physical_nodes) { in amdgpu_xgmi_reset_on_init()
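The reset-on-init path (1655-1701) defers the hive-wide reset to a work item that walks the member list, and only the last device to join arms it: the count of list entries is compared against num_physical_nodes, the total the fabric says to expect. Sketch of the gate; the work-item field name is an assumption:

    list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
            num_devs++;

    /* only when the whole hive has probed is a coordinated reset safe */
    if (num_devs == adev->gmc.xgmi.num_physical_nodes)
            schedule_work(&hive->reset_on_init_work);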
1735 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_request_nps_change()
1736 r = adev->gmc.gmc_funcs->request_mem_partition_mode( in amdgpu_xgmi_request_nps_change()
1744 adev->gmc.gmc_funcs->query_mem_partition_mode(tmp_adev); in amdgpu_xgmi_request_nps_change()
1746 tmp_adev, &hive->device_list, gmc.xgmi.head) in amdgpu_xgmi_request_nps_change()
1747 adev->gmc.gmc_funcs->request_mem_partition_mode( in amdgpu_xgmi_request_nps_change()
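amdgpu_xgmi_request_nps_change (1735-1747) switches the NPS (memory partition) mode hive-wide. It deliberately calls adev's gmc_funcs on each tmp_adev, which is safe because hive members are identical ASICs sharing one function table. If any member refuses, the reverse walk at 1746 rolls the already-switched members back. A sketch; variable names and the rollback mode choice are assumptions:

    list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
            r = adev->gmc.gmc_funcs->request_mem_partition_mode(tmp_adev,
                                                                req_nps_mode);
            if (r)
                    goto rollback;
    }
    return 0;

    rollback:
    /* restore the mode the devices are still running in */
    prev_nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(tmp_adev);
    list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list,
                                         gmc.xgmi.head)
            adev->gmc.gmc_funcs->request_mem_partition_mode(tmp_adev,
                                                            prev_nps_mode);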
1761 adev->gmc.xgmi.hive_id && in amdgpu_xgmi_same_hive()
1762 adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id); in amdgpu_xgmi_same_hive()
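The predicate at 1761-1762 is small enough to reconstruct nearly whole: two devices share a hive iff they are distinct, the first actually has a hive_id, and the IDs match:

    bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
                               struct amdgpu_device *bo_adev)
    {
            return (adev != bo_adev &&
                    adev->gmc.xgmi.hive_id &&
                    adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
    }

The hive_id check matters: two non-XGMI devices both carry hive_id 0 and must not be treated as peers.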
1767 if (!adev->gmc.xgmi.supported) in amdgpu_xgmi_early_init()
1775 adev->gmc.xgmi.max_speed = 25; in amdgpu_xgmi_early_init()
1776 adev->gmc.xgmi.max_width = 16; in amdgpu_xgmi_early_init()
1782 adev->gmc.xgmi.max_speed = 32; in amdgpu_xgmi_early_init()
1783 adev->gmc.xgmi.max_width = 16; in amdgpu_xgmi_early_init()
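Early init (1767-1783) seeds the cached link capabilities with per-ASIC defaults before the PSP reports anything. The IP-version switch below is a hedged reconstruction; which versions map to which speeds is an assumption beyond the two (speed, width) pairs visible in the hits:

    switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
    case IP_VERSION(9, 4, 0):               /* assumed version list */
            adev->gmc.xgmi.max_speed = 25;  /* GT/s */
            adev->gmc.xgmi.max_width = 16;  /* lanes */
            break;
    case IP_VERSION(9, 4, 2):
            adev->gmc.xgmi.max_speed = 32;
            adev->gmc.xgmi.max_width = 16;
            break;
    default:
            break;
    }

The final hits at 1793-1794 (amgpu_xgmi_set_max_speed_width, identifier spelled as in the source) are the setter that overwrites these defaults once real link capabilities are known.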
1793 adev->gmc.xgmi.max_speed = max_speed; in amgpu_xgmi_set_max_speed_width()
1794 adev->gmc.xgmi.max_width = max_width; in amgpu_xgmi_set_max_speed_width()