/linux/drivers/gpu/drm/xe/

xe_sriov_pf_migration.c
    130: u8 gt_id;  // in pf_migration_data_ready(), local
    132: for_each_gt(gt, xe, gt_id) {  // in pf_migration_data_ready()
    148: u8 gt_id;  // in pf_migration_consume(), local
    150: for_each_gt(gt, xe, gt_id) {  // in pf_migration_consume()
    205: if (data->hdr.tile_id != 0 || data->hdr.gt_id != 0)  // in pf_handle_descriptor()
    221: u8 gt_id;  // in pf_handle_trailer(), local
    223: if (data->hdr.tile_id != 0 || data->hdr.gt_id != 0)  // in pf_handle_trailer()
    230: for_each_gt(gt, xe, gt_id)  // in pf_handle_trailer()
    260: gt = xe_device_get_gt(xe, data->hdr.gt_id);  // in xe_sriov_pf_migration_restore_produce()
    263: vfid, data->hdr.type, data->hdr.tile_id, data->hdr.gt_id);  // in xe_sriov_pf_migration_restore_produce()
    [all …]

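The pf_handle_descriptor() and pf_handle_trailer() hits encode a routing rule for SR-IOV migration packets: device-level packets must carry tile_id == 0 and gt_id == 0, while GT-scoped data is resolved through xe_device_get_gt(). Below is a minimal user-space sketch of that header check; struct pkt_hdr and pkt_is_device_scoped() are hypothetical stand-ins, not the driver's types.

/*
 * Minimal model of the header check seen above, assuming a packet header
 * that carries tile_id/gt_id (the real layout in the xe driver may differ).
 * Device-level packets such as the descriptor and trailer must not be
 * tagged with a tile or GT.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pkt_hdr {		/* hypothetical stand-in for the xe header */
	uint8_t type;
	uint8_t tile_id;
	uint8_t gt_id;
};

static bool pkt_is_device_scoped(const struct pkt_hdr *hdr)
{
	/* mirrors: if (data->hdr.tile_id != 0 || data->hdr.gt_id != 0) */
	return hdr->tile_id == 0 && hdr->gt_id == 0;
}

int main(void)
{
	struct pkt_hdr descriptor = { .type = 1, .tile_id = 0, .gt_id = 0 };
	struct pkt_hdr gt_pkt     = { .type = 2, .tile_id = 0, .gt_id = 1 };

	printf("descriptor device-scoped: %d\n", pkt_is_device_scoped(&descriptor));
	printf("gt packet device-scoped:  %d\n", pkt_is_device_scoped(&gt_pkt));
	return 0;
}
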
xe_query.c
    55: u8 gt_id;  // in calc_hw_engine_info_size(), local
    58: for_each_gt(gt, xe, gt_id)  // in calc_hw_engine_info_size()
    145: if (eci->gt_id >= xe->info.max_gt_per_tile)  // in query_engine_cycles()
    148: gt = xe_device_get_gt(xe, eci->gt_id);  // in query_engine_cycles()
    193: u8 gt_id;  // in query_engines(), local
    207: for_each_gt(gt, xe, gt_id)  // in query_engines()
    216: engines->engines[i].instance.gt_id = gt->info.id;  // in query_engines()
    391: gt_list->gt_list[iter].gt_id = gt->info.id;  // in query_gt_list()
    521: topo.gt_id = id;  // in query_gt_topology()
    595: u8 gt_id;  // in query_uc_fw_version(), local
    [all …]

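query_engine_cycles() shows the usual pattern for consuming a user-supplied gt_id: range-check it (here against xe->info.max_gt_per_tile), then resolve it with xe_device_get_gt(), which yields NULL for a nonexistent GT. A compilable toy model of that validate-then-look-up step follows; toy_device and toy_device_get_gt() are illustrative names, not driver API.

/*
 * Sketch of the lookup-with-validation pattern from query_engine_cycles():
 * reject an out-of-range gt_id, then resolve it to a GT pointer.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_GT 2		/* hypothetical per-device GT count */

struct toy_gt { uint8_t id; };

struct toy_device {
	struct toy_gt gt[MAX_GT];
	uint8_t gt_count;
};

static struct toy_gt *toy_device_get_gt(struct toy_device *xe, uint8_t gt_id)
{
	if (gt_id >= xe->gt_count)	/* mirrors the eci->gt_id range check */
		return NULL;
	return &xe->gt[gt_id];
}

int main(void)
{
	struct toy_device xe = { .gt = { { 0 }, { 1 } }, .gt_count = MAX_GT };

	printf("gt_id 1 -> %p\n", (void *)toy_device_get_gt(&xe, 1));
	printf("gt_id 7 -> %p\n", (void *)toy_device_get_gt(&xe, 7)); /* NULL */
	return 0;
}
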
xe_drm_client.c
    276: unsigned long gt_id;  // in any_engine(), local
    278: for_each_gt(gt, xe, gt_id) {  // in any_engine()
    317: unsigned long class, i, gt_id, capacity[XE_ENGINE_CLASS_MAX] = { };  // in show_run_ticks(), local
    364: for_each_gt(gt, xe, gt_id)  // in show_run_ticks()

xe_sriov_packet.c
    121: struct xe_gt *gt = xe_device_get_gt(data->xe, data->hdr.gt_id);  // in pkt_init()
    166: int xe_sriov_packet_init(struct xe_sriov_packet *data, u8 tile_id, u8 gt_id,  // in xe_sriov_packet_init(), argument
    172: data->hdr.gt_id = gt_id;  // in xe_sriov_packet_init()

xe_pmu.c
    108: eci.gt_id = config_to_gt_id(config);  // in event_to_hwe()
    160: static bool event_supported(struct xe_pmu *pmu, unsigned int gt_id,  // in event_supported(), argument
    164: struct xe_gt *gt = xe_device_get_gt(xe, gt_id);  // in event_supported()

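event_to_hwe() shows that the xe PMU carries the target GT inside the 64-bit perf event config word and recovers it with config_to_gt_id() before looking the GT up. The sketch below models the pack/extract step; the field position (top byte of the config) is an assumption for illustration, not the driver's actual bit layout.

/*
 * Model of packing a GT id into a perf event config word, in the spirit of
 * config_to_gt_id() in xe_pmu.c. TOY_GT_MASK is a hypothetical field.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_GT_SHIFT 56
#define TOY_GT_MASK  (0xffULL << TOY_GT_SHIFT)

static uint64_t toy_gt_to_config(uint64_t config, unsigned int gt_id)
{
	return (config & ~TOY_GT_MASK) |
	       (((uint64_t)gt_id << TOY_GT_SHIFT) & TOY_GT_MASK);
}

static unsigned int toy_config_to_gt_id(uint64_t config)
{
	return (config & TOY_GT_MASK) >> TOY_GT_SHIFT;
}

int main(void)
{
	uint64_t config = toy_gt_to_config(0x1234, 1);

	printf("config=%#llx gt_id=%u\n",
	       (unsigned long long)config, toy_config_to_gt_id(config));
	return 0;
}
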
xe_exec_queue.c
    1031: u16 gt_id;  // in calc_validate_logical_mask(), local
    1053: if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||  // in calc_validate_logical_mask()
    1058: gt_id = eci[n].gt_id;  // in calc_validate_logical_mask()
    1118: if (XE_IOCTL_DBG(xe, !xe_device_get_gt(xe, eci[0].gt_id)))  // in xe_exec_queue_create_ioctl()

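calc_validate_logical_mask() latches the gt_id of the first engine instance and rejects any later instance that names a different GT, so a single logical mask never spans GTs. A self-contained model of that check; toy_eci stands in for struct drm_xe_engine_class_instance, and the plain bool return replaces the XE_IOCTL_DBG() error reporting.

/*
 * All engine instances submitted together must live on the same GT.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_eci {
	uint16_t engine_class;
	uint16_t engine_instance;
	uint16_t gt_id;
};

static bool all_on_same_gt(const struct toy_eci *eci, unsigned int count)
{
	uint16_t gt_id = 0;

	for (unsigned int n = 0; n < count; n++) {
		/* mirrors: if (n && eci[n].gt_id != gt_id) -> reject */
		if (n && eci[n].gt_id != gt_id)
			return false;
		gt_id = eci[n].gt_id;
	}
	return true;
}

int main(void)
{
	struct toy_eci ok[]  = { { 0, 0, 0 }, { 0, 1, 0 } };
	struct toy_eci bad[] = { { 0, 0, 0 }, { 0, 1, 1 } };

	printf("ok:  %d\n", all_on_same_gt(ok, 2));	/* 1 */
	printf("bad: %d\n", all_on_same_gt(bad, 2));	/* 0 */
	return 0;
}
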
xe_hw_engine.c
    1095: struct xe_gt *gt = xe_device_get_gt(xe, eci.gt_id);  // in xe_hw_engine_lookup()
    1107: return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),  // in xe_hw_engine_lookup()

xe_oa.c
    1162: int gt_id, i;  // in xe_oa_lookup_oa_unit(), local
    1164: for_each_gt(gt, oa->xe, gt_id) {  // in xe_oa_lookup_oa_unit()
    2730: int gt_id;  // in xe_oa_print_oa_units(), local
    2732: for_each_gt(gt, oa->xe, gt_id)  // in xe_oa_print_oa_units()

/linux/drivers/gpu/drm/i915/

i915_pmu.c
    203: static u64 read_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample)  // in read_sample(), argument
    205: return pmu->sample[gt_id][sample].cur;  // in read_sample()
    209: store_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample, u64 val)  // in store_sample(), argument
    211: pmu->sample[gt_id][sample].cur = val;  // in store_sample()
    215: add_sample_mult(struct i915_pmu *pmu, unsigned int gt_id, int sample, u32 val, u32 mul)  // in add_sample_mult(), argument
    217: pmu->sample[gt_id][sample].cur += mul_u32_u32(val, mul);  // in add_sample_mult()
    223: const unsigned int gt_id = gt->info.id;  // in get_rc6(), local
    238: store_sample(pmu, gt_id, __I915_SAMPLE_RC6, val);  // in get_rc6()
    247: val = ktime_since_raw(pmu->sleep_last[gt_id]);  // in get_rc6()
    248: val += read_sample(pmu, gt_id, __I915_SAMPLE_RC6);  // in get_rc6()
    [all …]

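The i915 PMU keeps its counters per GT: pmu->sample is a two-dimensional array indexed by gt_id and sample type, and add_sample_mult() uses mul_u32_u32() so the 32-bit product is widened before it can overflow. A user-space model of those three helpers, with illustrative array bounds:

/*
 * Per-GT sample bookkeeping in the style of i915_pmu.c. The sizes below
 * are assumptions for the sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_MAX_GT	2
#define TOY_NUM_SAMPLES	4	/* e.g. RC6, frequency, ... */

struct toy_pmu {
	uint64_t sample[TOY_MAX_GT][TOY_NUM_SAMPLES];
};

static uint64_t read_sample(struct toy_pmu *pmu, unsigned int gt_id, int sample)
{
	return pmu->sample[gt_id][sample];
}

static void store_sample(struct toy_pmu *pmu, unsigned int gt_id, int sample,
			 uint64_t val)
{
	pmu->sample[gt_id][sample] = val;
}

static void add_sample_mult(struct toy_pmu *pmu, unsigned int gt_id, int sample,
			    uint32_t val, uint32_t mul)
{
	/* widen before multiplying, as mul_u32_u32() does in the kernel */
	pmu->sample[gt_id][sample] += (uint64_t)val * mul;
}

int main(void)
{
	struct toy_pmu pmu = { 0 };

	store_sample(&pmu, 1, 0, 100);
	add_sample_mult(&pmu, 1, 0, 4000000000u, 2); /* would overflow a u32 */
	printf("gt1 sample0 = %llu\n",
	       (unsigned long long)read_sample(&pmu, 1, 0));
	return 0;
}
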
/linux/include/uapi/drm/

xe_drm.h
    255: __u16 gt_id;  // member
    461: __u16 gt_id;  // member
    555: __u16 gt_id;  // member
    1726: __u16 gt_id;  // member

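One of the four matched members belongs to struct drm_xe_engine_class_instance, where gt_id pins an engine coordinate to a particular GT. Reproduced from memory for context (comments abridged; the authoritative definition lives in include/uapi/drm/xe_drm.h):

#include <linux/types.h>

struct drm_xe_engine_class_instance {
	/** @engine_class: engine class (render, copy, video decode, ...) */
	__u16 engine_class;
	/** @engine_instance: engine instance within the class */
	__u16 engine_instance;
	/** @gt_id: id of the GT this engine belongs to */
	__u16 gt_id;
	/** @pad: MBZ */
	__u16 pad;
};

User space fills the first three fields when targeting an engine, for example at exec queue creation, and the kernel validates gt_id as seen in xe_exec_queue.c and xe_hw_engine.c above.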