Lines matching references to `xe` (the struct xe_device pointer) in drivers/gpu/drm/xe/xe_query.c, the Xe DRM driver's implementation of DRM_IOCTL_XE_DEVICE_QUERY
50 static size_t calc_hw_engine_info_size(struct xe_device *xe) in calc_hw_engine_info_size() argument
58 for_each_gt(gt, xe, gt_id) in calc_hw_engine_info_size()
115 query_engine_cycles(struct xe_device *xe, in query_engine_cycles() argument
126 if (IS_SRIOV_VF(xe)) in query_engine_cycles()
132 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_engine_cycles()
145 if (eci->gt_id >= xe->info.max_gt_per_tile) in query_engine_cycles()
148 gt = xe_device_get_gt(xe, eci->gt_id); in query_engine_cycles()
168 if (GRAPHICS_VER(xe) >= 20) in query_engine_cycles()
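
query_engine_cycles() is one of the fixed-size queries: userspace sends a fully sized struct naming the target engine and a CPU clock id, and the kernel writes back correlated GPU and CPU timestamps. A minimal userspace sketch, assuming the drm_xe_query_engine_cycles layout from include/uapi/drm/xe_drm.h and an Xe render node at /dev/dri/renderD128 (both assumptions about the reader's setup):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>	/* kernel uapi header; the install path may differ (libdrm ships it as xe_drm.h) */

int main(void)
{
	/* Example path only: pick the render node of the Xe device. */
	int fd = open("/dev/dri/renderD128", O_RDWR);
	struct drm_xe_query_engine_cycles cycles = {
		.eci = {
			.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
			.engine_instance = 0,
			.gt_id = 0,
		},
		.clockid = CLOCK_MONOTONIC,	/* CPU clock to correlate against */
	};
	struct drm_xe_device_query query = {
		.query = DRM_XE_DEVICE_QUERY_ENGINE_CYCLES,
		.size = sizeof(cycles),		/* fixed-size query: must match exactly */
		.data = (uintptr_t)&cycles,
	};

	if (fd < 0 || ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
		return 1;

	printf("engine cycles %llu (%u bits), cpu timestamp %llu, cpu delta %llu\n",
	       (unsigned long long)cycles.engine_cycles, cycles.width,
	       (unsigned long long)cycles.cpu_timestamp,
	       (unsigned long long)cycles.cpu_delta);
	close(fd);
	return 0;
}
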
183 static int query_engines(struct xe_device *xe, in query_engines() argument
186 size_t size = calc_hw_engine_info_size(xe); in query_engines()
199 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_engines()
207 for_each_gt(gt, xe, gt_id) in query_engines()
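
query_engines() follows the two-call convention used throughout this file: a first ioctl with size == 0 returns the size computed by calc_hw_engine_info_size(), and a second call with exactly that size fills the buffer (anything else trips the query->size != size check). A userspace sketch of that pattern, assuming the drm_xe_query_engines layout from the uapi header and an already open device fd:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

static int list_engines(int fd)
{
	struct drm_xe_device_query query = {
		.query = DRM_XE_DEVICE_QUERY_ENGINES,
	};
	struct drm_xe_query_engines *engines;
	unsigned int i;

	/* First call: size == 0, the kernel writes back the required size. */
	if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
		return -1;

	engines = calloc(1, query.size);
	if (!engines)
		return -1;

	/* Second call: size must match exactly or the ioctl fails with EINVAL. */
	query.data = (uintptr_t)engines;
	if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query)) {
		free(engines);
		return -1;
	}

	for (i = 0; i < engines->num_engines; i++) {
		const struct drm_xe_engine_class_instance *eci =
			&engines->engines[i].instance;

		printf("engine %u: class %u, instance %u, GT %u\n",
		       i, eci->engine_class, eci->engine_instance, eci->gt_id);
	}

	free(engines);
	return 0;
}
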
232 static size_t calc_mem_regions_size(struct xe_device *xe) in calc_mem_regions_size() argument
238 if (ttm_manager_type(&xe->ttm, i)) in calc_mem_regions_size()
244 static int query_mem_regions(struct xe_device *xe, in query_mem_regions() argument
247 size_t size = calc_mem_regions_size(xe); in query_mem_regions()
257 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_mem_regions()
262 if (XE_IOCTL_DBG(xe, !mem_regions)) in query_mem_regions()
265 man = ttm_manager_type(&xe->ttm, XE_PL_TT); in query_mem_regions()
279 man = ttm_manager_type(&xe->ttm, i); in query_mem_regions()
286 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? in query_mem_regions()
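
query_mem_regions() reports the TT (system) manager followed by each VRAM manager, with min_page_size raised to 64K when XE_VRAM_FLAGS_NEED64K is set. A reader sketch, assuming the drm_xe_mem_region field names from the uapi header; the buffer comes from the same two-call pattern as the engines sketch above:

#include <stdio.h>
#include <drm/xe_drm.h>

/* 'regions' assumed filled by DRM_XE_DEVICE_QUERY_MEM_REGIONS. */
static void print_mem_regions(const struct drm_xe_query_mem_regions *regions)
{
	unsigned int i;

	for (i = 0; i < regions->num_mem_regions; i++) {
		const struct drm_xe_mem_region *r = &regions->mem_regions[i];

		printf("region %u: %s instance %u, min page %u, %llu / %llu bytes used\n",
		       i,
		       r->mem_class == DRM_XE_MEM_REGION_CLASS_VRAM ? "VRAM" : "SYSMEM",
		       r->instance, r->min_page_size,
		       (unsigned long long)r->used,
		       (unsigned long long)r->total_size);
	}
}
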
312 static int query_config(struct xe_device *xe, struct drm_xe_device_query *query) in query_config() argument
324 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_config()
334 xe->info.devid | (xe->info.revid << 16); in query_config()
335 if (xe->mem.vram) in query_config()
338 if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_XE_GPUSVM)) in query_config()
341 if (GRAPHICS_VER(xe) >= 20) in query_config()
347 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K; in query_config()
348 config->info[DRM_XE_QUERY_CONFIG_VA_BITS] = xe->info.va_bits; in query_config()
350 xe_exec_queue_device_get_max_priority(xe); in query_config()
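
query_config() fills a key/value array: the xe->info.devid | (xe->info.revid << 16) line above packs the device and revision ids into one entry, and the remaining keys cover VRAM presence, minimum GPU VA alignment, VA bits and the maximum exec queue priority. A reader sketch, assuming the DRM_XE_QUERY_CONFIG_* key names from the uapi header:

#include <stdio.h>
#include <drm/xe_drm.h>

/* 'config' assumed filled by DRM_XE_DEVICE_QUERY_CONFIG via the same
 * two-call pattern; num_params tells how many info[] keys are valid. */
static void print_config(const struct drm_xe_query_config *config)
{
	__u64 rev_and_devid = config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID];

	printf("device id 0x%04llx, revision %llu\n",
	       (unsigned long long)(rev_and_devid & 0xffff),
	       (unsigned long long)(rev_and_devid >> 16));
	printf("has VRAM: %s\n",
	       config->info[DRM_XE_QUERY_CONFIG_FLAGS] &
	       DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM ? "yes" : "no");
	printf("min GPU VA alignment: %llu\n",
	       (unsigned long long)config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT]);
	printf("VA bits: %llu\n",
	       (unsigned long long)config->info[DRM_XE_QUERY_CONFIG_VA_BITS]);
	printf("max exec queue priority: %llu\n",
	       (unsigned long long)config->info[DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY]);
}
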
361 static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query) in query_gt_list() argument
365 xe->info.gt_count * sizeof(struct drm_xe_gt); in query_gt_list()
375 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_gt_list()
383 gt_list->num_gt = xe->info.gt_count; in query_gt_list()
385 for_each_gt(gt, xe, id) { in query_gt_list()
407 if (!IS_DGFX(xe)) in query_gt_list()
412 gt_list->gt_list[iter].far_mem_regions = xe->info.mem_region_mask ^ in query_gt_list()
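
query_gt_list() describes every GT: its type (main or media), tile, reference clock and two bitmasks of memory-region instances, where far_mem_regions is computed above as the complement of the near mask within xe->info.mem_region_mask. A reader sketch, assuming the drm_xe_gt field names from the uapi header:

#include <stdio.h>
#include <drm/xe_drm.h>

/* 'gt_list' assumed filled by DRM_XE_DEVICE_QUERY_GT_LIST via the same
 * two-call pattern; the mem-region masks refer to the 'instance' values
 * reported by the mem-regions query. */
static void print_gt_list(const struct drm_xe_query_gt_list *gt_list)
{
	unsigned int i;

	for (i = 0; i < gt_list->num_gt; i++) {
		const struct drm_xe_gt *gt = &gt_list->gt_list[i];

		printf("GT %u (tile %u): %s, refclk %u Hz, near mem 0x%llx, far mem 0x%llx\n",
		       gt->gt_id, gt->tile_id,
		       gt->type == DRM_XE_QUERY_GT_TYPE_MEDIA ? "media" : "main",
		       gt->reference_clock,
		       (unsigned long long)gt->near_mem_regions,
		       (unsigned long long)gt->far_mem_regions);
	}
}
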
434 static int query_hwconfig(struct xe_device *xe, in query_hwconfig() argument
437 struct xe_gt *gt = xe_root_mmio_gt(xe); in query_hwconfig()
445 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_hwconfig()
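
query_hwconfig() copies out the raw hwconfig blob of the root GT (xe_root_mmio_gt()). Decoding it is outside this file; the sketch below walks it as a stream of u32 triples (key, value length in dwords, value dwords), which is my reading of the GuC hwconfig convention rather than anything shown in this listing, and the key meanings are device specific:

#include <stdio.h>
#include <drm/xe_drm.h>

/* 'blob'/'len_bytes' assumed to come from DRM_XE_DEVICE_QUERY_HWCONFIG via
 * the same two-call pattern. */
static void dump_hwconfig(const __u32 *blob, __u32 len_bytes)
{
	__u32 n = len_bytes / sizeof(__u32);
	__u32 i = 0;

	while (i + 2 <= n) {
		__u32 key = blob[i];
		__u32 len = blob[i + 1];	/* number of value dwords */

		if ((__u64)i + 2 + len > n)
			break;			/* truncated or malformed entry */
		printf("hwconfig key %u: %u dword(s), first value 0x%x\n",
		       key, len, len ? blob[i + 2] : 0);
		i += 2 + len;
	}
}
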
464 static size_t calc_topo_query_size(struct xe_device *xe) in calc_topo_query_size() argument
470 for_each_gt(gt, xe, id) { in calc_topo_query_size()
502 static int query_gt_topology(struct xe_device *xe, in query_gt_topology() argument
506 size_t size = calc_topo_query_size(xe); in query_gt_topology()
514 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_gt_topology()
518 for_each_gt(gt, xe, id) { in query_gt_topology()
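
query_gt_topology() emits one variable-length drm_xe_query_topology_mask record per GT and mask type, packed back to back; calc_topo_query_size() above sizes the whole stream. A walker sketch, assuming the uapi layout (fixed header plus num_bytes of mask data):

#include <stdio.h>
#include <drm/xe_drm.h>

/* 'buf'/'size' assumed filled by DRM_XE_DEVICE_QUERY_GT_TOPOLOGY via the
 * same two-call pattern. */
static void print_topology(const void *buf, __u32 size)
{
	const __u8 *p = buf, *end = p + size;

	while (p + sizeof(struct drm_xe_query_topology_mask) <= end) {
		const struct drm_xe_query_topology_mask *topo = (const void *)p;
		__u32 i;

		printf("GT %u, mask type %u:", topo->gt_id, topo->type);
		for (i = 0; i < topo->num_bytes; i++)
			printf(" %02x", topo->mask[i]);
		printf("\n");

		p += sizeof(*topo) + topo->num_bytes;
	}
}
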
562 query_uc_fw_version(struct xe_device *xe, struct drm_xe_device_query *query) in query_uc_fw_version() argument
572 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_uc_fw_version()
579 if (XE_IOCTL_DBG(xe, resp.pad || resp.pad2 || resp.reserved)) in query_uc_fw_version()
584 struct xe_guc *guc = &xe->tiles[0].primary_gt->uc.guc; in query_uc_fw_version()
593 if (MEDIA_VER(xe) >= 13) { in query_uc_fw_version()
597 for_each_tile(tile, xe, gt_id) { in query_uc_fw_version()
604 media_gt = xe->tiles[0].primary_gt; in query_uc_fw_version()
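
query_uc_fw_version() is the other fixed-size query: userspace pre-fills uc_type and leaves pad/pad2/reserved zeroed (the XE_IOCTL_DBG check above rejects anything else), and the kernel reports the firmware version, taking GuC from tile 0's primary GT and, on MEDIA_VER(xe) >= 13 parts, HuC from the media GT. A sketch for the GuC submission interface version, assuming the uapi field names:

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

static int print_guc_version(int fd)
{
	struct drm_xe_query_uc_fw_version version = {
		.uc_type = XE_QUERY_UC_TYPE_GUC_SUBMISSION,
		/* pad, pad2 and reserved stay zero, as the kernel requires */
	};
	struct drm_xe_device_query query = {
		.query = DRM_XE_DEVICE_QUERY_UC_FW_VERSION,
		.size = sizeof(version),	/* fixed-size query */
		.data = (uintptr_t)&version,
	};

	if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
		return -1;

	printf("GuC submission interface %u.%u.%u (branch %u)\n",
	       version.major_ver, version.minor_ver, version.patch_ver,
	       version.branch_ver);
	return 0;
}
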
634 static size_t calc_oa_unit_query_size(struct xe_device *xe) in calc_oa_unit_query_size() argument
640 for_each_gt(gt, xe, id) { in calc_oa_unit_query_size()
651 static int query_oa_units(struct xe_device *xe, in query_oa_units() argument
655 size_t size = calc_oa_unit_query_size(xe); in query_oa_units()
668 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_oa_units()
677 for_each_gt(gt, xe, gt_id) { in query_oa_units()
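
query_oa_units() packs one variable-length drm_xe_oa_unit per unit (each ends in an engine array), so the result has to be walked by hand. A walker sketch following the pointer-arithmetic pattern documented in the uapi header; the field names are assumptions to check against that header:

#include <stdio.h>
#include <drm/xe_drm.h>

/* 'qoa' assumed filled by DRM_XE_DEVICE_QUERY_OA_UNITS via the same
 * two-call pattern; each unit is variable length, so advance by hand. */
static void print_oa_units(const struct drm_xe_query_oa_units *qoa)
{
	const __u8 *poau = (const __u8 *)&qoa->oa_units[0];
	__u32 i;

	for (i = 0; i < qoa->num_oa_units; i++) {
		const struct drm_xe_oa_unit *oau = (const void *)poau;

		printf("OA unit %u: type %u, %llu engine(s)\n",
		       oau->oa_unit_id, oau->oa_unit_type,
		       (unsigned long long)oau->num_engines);

		poau += sizeof(*oau) + oau->num_engines * sizeof(oau->eci[0]);
	}
}
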
714 static int query_pxp_status(struct xe_device *xe, struct drm_xe_device_query *query) in query_pxp_status() argument
724 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_pxp_status()
728 ret = xe_pxp_get_readiness_status(xe->pxp); in query_pxp_status()
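
query_pxp_status() forwards xe_pxp_get_readiness_status(), so the ioctl fails with ENODEV on devices without PXP and otherwise reports readiness plus the supported session types. A fixed-size sketch; the drm_xe_query_pxp_status field names are my reading of the uapi header:

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

static void print_pxp_status(int fd)
{
	struct drm_xe_query_pxp_status pxp = {};
	struct drm_xe_device_query query = {
		.query = DRM_XE_DEVICE_QUERY_PXP_STATUS,
		.size = sizeof(pxp),
		.data = (uintptr_t)&pxp,
	};

	if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query)) {
		perror("PXP status query (ENODEV means no PXP on this device)");
		return;
	}

	printf("PXP status %u, supported session types 0x%x\n",
	       pxp.status, pxp.supported_session_types);
}
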
741 static int query_eu_stall(struct xe_device *xe, in query_eu_stall() argument
751 if (!xe_eu_stall_supported_on_platform(xe)) in query_eu_stall()
760 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_eu_stall()
770 info->record_size = xe_eu_stall_data_record_size(xe); in query_eu_stall()
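
query_eu_stall() is gated on xe_eu_stall_supported_on_platform() and reports, among other things, the per-record size from xe_eu_stall_data_record_size() and the supported sampling rates. A reader sketch; the field names (record_size, num_sampling_rates, sampling_rates) are assumptions to verify against the uapi header:

#include <stdio.h>
#include <drm/xe_drm.h>

/* 'info' assumed filled by DRM_XE_DEVICE_QUERY_EU_STALL via the same
 * two-call pattern (the sampling-rate array makes the size variable). */
static void print_eu_stall_info(const struct drm_xe_query_eu_stall *info)
{
	__u64 i;

	printf("EU stall record size: %llu bytes\n",
	       (unsigned long long)info->record_size);
	for (i = 0; i < info->num_sampling_rates; i++)
		printf("sampling rate[%llu]: %llu\n",
		       (unsigned long long)i,
		       (unsigned long long)info->sampling_rates[i]);
}
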
780 static int (* const xe_query_funcs[])(struct xe_device *xe,
797 struct xe_device *xe = to_xe_device(dev); in xe_query_ioctl() local
801 if (XE_IOCTL_DBG(xe, query->extensions) || in xe_query_ioctl()
802 XE_IOCTL_DBG(xe, query->reserved[0] || query->reserved[1])) in xe_query_ioctl()
805 if (XE_IOCTL_DBG(xe, query->query >= ARRAY_SIZE(xe_query_funcs))) in xe_query_ioctl()
809 if (XE_IOCTL_DBG(xe, !xe_query_funcs[idx])) in xe_query_ioctl()
812 return xe_query_funcs[idx](xe, query); in xe_query_ioctl()
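
xe_query_funcs[] is a table indexed by query->query; xe_query_ioctl() rejects non-zero extensions and reserved fields, bounds-checks the index and dispatches. Since every entry speaks the same size-negotiation protocol, userspace can wrap it once; xe_query_alloc below is a hypothetical helper name, not something provided by libdrm:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

/* Hypothetical helper: run any DRM_XE_DEVICE_QUERY_* through the common
 * size negotiation and return a malloc'ed, filled buffer, or NULL on error. */
static void *xe_query_alloc(int fd, __u32 query_id, __u32 *size_out)
{
	struct drm_xe_device_query query = { .query = query_id };
	void *data;

	if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query) || !query.size)
		return NULL;

	data = calloc(1, query.size);
	if (!data)
		return NULL;

	query.data = (uintptr_t)data;
	if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query)) {
		free(data);
		return NULL;
	}

	if (size_out)
		*size_out = query.size;
	return data;
}

The payload readers sketched above can then be fed directly, e.g. print_mem_regions(xe_query_alloc(fd, DRM_XE_DEVICE_QUERY_MEM_REGIONS, NULL)), after checking for NULL.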