Lines Matching +full:0 +full:xe

In calc_hw_engine_info_size():
    50  static size_t calc_hw_engine_info_size(struct xe_device *xe)
    56  int i = 0;
    58  for_each_gt(gt, xe, gt_id)

In hwe_read_timestamp():
    97  u32 upper, lower, old_upper, loop = 0;
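
A quick sketch of the pattern those locals implement: a 64-bit timestamp is read as two 32-bit halves, and the upper half is re-read until it is stable so a carry between the two reads cannot be missed. read_upper()/read_lower() are hypothetical placeholders here; the real function reads the engine's timestamp registers with xe_mmio_read32().

    /* Split-counter read: retry (bounded by loop) until the upper half
     * did not change across the lower-half read. */
    u32 upper, lower, old_upper, loop = 0;
    u64 timestamp;

    upper = read_upper();
    do {
            old_upper = upper;
            lower = read_lower();
            upper = read_upper();
    } while (upper != old_upper && loop++ < 2);

    timestamp = ((u64)upper << 32) | lower;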

In query_engine_cycles():
   115  query_engine_cycles(struct xe_device *xe,
   127  if (IS_SRIOV_VF(xe))
   130  if (query->size == 0) {
   132  return 0;
   133  } else if (XE_IOCTL_DBG(xe, query->size != size)) {
   146  if (eci->gt_id >= xe->info.max_gt_per_tile)
   149  gt = xe_device_get_gt(xe, eci->gt_id);
   172  if (GRAPHICS_VER(xe) >= 20)
   184  return 0;
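
The size == 0 / size != size pair above is the uAPI's two-call convention; for a fixed-size query like this one, userspace can pass the exact struct directly. A minimal sketch, assuming the drm_xe_query_engine_cycles layout and query id from include/uapi/drm/xe_drm.h and an already-open drm_fd:

    #include <stdint.h>
    #include <time.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    /* Ask for a correlated (GPU engine cycles, CPU timestamp) pair for
     * one engine. */
    static int read_engine_cycles(int drm_fd,
                                  struct drm_xe_engine_class_instance eci,
                                  struct drm_xe_query_engine_cycles *out)
    {
            struct drm_xe_query_engine_cycles cycles = {
                    .eci = eci,                 /* engine chosen by the caller */
                    .clockid = CLOCK_MONOTONIC, /* CPU clock to sample against */
            };
            struct drm_xe_device_query query = {
                    .query = DRM_XE_DEVICE_QUERY_ENGINE_CYCLES,
                    .size = sizeof(cycles),
                    .data = (uintptr_t)&cycles,
            };

            if (ioctl(drm_fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
                    return -1;
            /* cycles.engine_cycles and cycles.cpu_timestamp now form a
             * correlated pair; cycles.width is the GPU counter width. */
            *out = cycles;
            return 0;
    }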

In query_engines():
   187  static int query_engines(struct xe_device *xe,
   190  size_t size = calc_hw_engine_info_size(xe);
   198  int i = 0;
   200  if (query->size == 0) {
   202  return 0;
   203  } else if (XE_IOCTL_DBG(xe, query->size != size)) {
   211  for_each_gt(gt, xe, gt_id)
   233  return 0;
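
query_engines() returns a variable-size buffer, so userspace uses the two-call convention the size == 0 branch implements: probe the size, allocate, then call again. A sketch, assuming the uapi names from include/uapi/drm/xe_drm.h; xe_query_alloc() is a hypothetical helper name that the later examples reuse (along with these includes plus <stdio.h>):

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    /* Two-call pattern: a size == 0 call probes the required buffer
     * size, the second call fills the buffer. */
    static void *xe_query_alloc(int fd, uint32_t query_id, uint32_t *size_out)
    {
            struct drm_xe_device_query q = { .query = query_id };
            void *buf;

            if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q))
                    return NULL;
            buf = malloc(q.size);
            if (!buf)
                    return NULL;
            q.data = (uintptr_t)buf;
            if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q)) {
                    free(buf);
                    return NULL;
            }
            if (size_out)
                    *size_out = q.size;
            return buf;
    }

With xe_query_alloc(fd, DRM_XE_DEVICE_QUERY_ENGINES, NULL), the result is a struct drm_xe_query_engines holding num_engines entries, each naming an (engine_class, engine_instance, gt_id) triple.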

In calc_mem_regions_size():
   236  static size_t calc_mem_regions_size(struct xe_device *xe)
   242  if (ttm_manager_type(&xe->ttm, i))

In query_mem_regions():
   248  static int query_mem_regions(struct xe_device *xe,
   251  size_t size = calc_mem_regions_size(xe);
   258  if (query->size == 0) {
   260  return 0;
   261  } else if (XE_IOCTL_DBG(xe, query->size != size)) {
   266  if (XE_IOCTL_DBG(xe, !mem_regions))
   269  man = ttm_manager_type(&xe->ttm, XE_PL_TT);
   270  mem_regions->mem_regions[0].mem_class = DRM_XE_MEM_REGION_CLASS_SYSMEM;
   276  mem_regions->mem_regions[0].instance = 0;
   277  mem_regions->mem_regions[0].min_page_size = PAGE_SIZE;
   278  mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT;
   279  mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
   283  man = ttm_manager_type(&xe->ttm, i);
   290  xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ?
   308  ret = 0;
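
The lines above fill instance 0 from TTM's XE_PL_TT manager (system memory) and then append one entry per VRAM manager, with XE_VRAM_FLAGS_NEED64K deciding the minimum page size. Walking the result on the userspace side is just (field names as in the kernel code above, helper from the query_engines() sketch):

    static void show_mem_regions(int fd)
    {
            struct drm_xe_query_mem_regions *regions =
                    xe_query_alloc(fd, DRM_XE_DEVICE_QUERY_MEM_REGIONS, NULL);

            for (uint32_t i = 0; regions && i < regions->num_mem_regions; i++) {
                    struct drm_xe_mem_region *r = &regions->mem_regions[i];

                    /* used/total_size are the TTM manager usage and size
                     * filled in above. */
                    printf("region %u: %s, min page %llu, used %llu of %llu\n",
                           r->instance,
                           r->mem_class == DRM_XE_MEM_REGION_CLASS_SYSMEM ?
                                   "sysmem" : "vram",
                           (unsigned long long)r->min_page_size,
                           (unsigned long long)r->used,
                           (unsigned long long)r->total_size);
            }
            free(regions);
    }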

In query_config():
   316  static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
   325  if (query->size == 0) {
   327  return 0;
   328  } else if (XE_IOCTL_DBG(xe, query->size != size)) {
   338  xe->info.devid | (xe->info.revid << 16);
   339  if (xe->mem.vram)
   342  if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_XE_GPUSVM))
   348  xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
   349  config->info[DRM_XE_QUERY_CONFIG_VA_BITS] = xe->info.va_bits;
   351  xe_exec_queue_device_get_max_priority(xe);
   359  return 0;
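
query_config() returns a fixed key/value array; lines 338-351 show what goes into each slot. Decoding it on the userspace side, with the DRM_XE_QUERY_CONFIG_* indices from xe_drm.h and the helper from the query_engines() sketch:

    static void show_config(int fd)
    {
            struct drm_xe_query_config *config =
                    xe_query_alloc(fd, DRM_XE_DEVICE_QUERY_CONFIG, NULL);

            if (!config)
                    return;
            /* Line 338 packs devid in the low bits, revid in the high
             * bits; min alignment is SZ_64K on NEED64K VRAM platforms
             * (line 348), SZ_4K otherwise. */
            printf("device %04llx rev %llu, min alignment %llu, va bits %llu\n",
                   (unsigned long long)(config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff),
                   (unsigned long long)(config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] >> 16),
                   (unsigned long long)config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT],
                   (unsigned long long)config->info[DRM_XE_QUERY_CONFIG_VA_BITS]);
            free(config);
    }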

In query_gt_list():
   362  static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query)
   366  xe->info.gt_count * sizeof(struct drm_xe_gt);
   370  int iter = 0;
   373  if (query->size == 0) {
   375  return 0;
   376  } else if (XE_IOCTL_DBG(xe, query->size != size)) {
   384  gt_list->num_gt = xe->info.gt_count;
   386  for_each_gt(gt, xe, id) {
   401  * Bit 0 -> System Memory
   408  if (!IS_DGFX(xe))
   409  gt_list->gt_list[iter].near_mem_regions = 0x1;
   413  gt_list->gt_list[iter].far_mem_regions = xe->info.mem_region_mask ^
   432  return 0;
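
near_mem_regions and far_mem_regions are bitmasks over the instance numbers reported by the mem-regions query; the comment at line 401 ("Bit 0 -> System Memory") gives the encoding, and line 409 shows that on integrated parts "near" is only system memory. A sketch of reading them back, reusing the earlier helper:

    static void show_gts(int fd)
    {
            struct drm_xe_query_gt_list *gts =
                    xe_query_alloc(fd, DRM_XE_DEVICE_QUERY_GT_LIST, NULL);

            for (uint32_t i = 0; gts && i < gts->num_gt; i++) {
                    /* Bit 0 selects system memory, further bits VRAM
                     * instances, matching the mem-regions query. */
                    printf("gt %u: near 0x%llx far 0x%llx\n", i,
                           (unsigned long long)gts->gt_list[i].near_mem_regions,
                           (unsigned long long)gts->gt_list[i].far_mem_regions);
            }
            free(gts);
    }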

In query_hwconfig():
   435  static int query_hwconfig(struct xe_device *xe,
   438  struct xe_gt *gt = xe_root_mmio_gt(xe);
   443  if (query->size == 0) {
   445  return 0;
   446  } else if (XE_IOCTL_DBG(xe, query->size != size)) {
   462  return 0;
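
The hwconfig query returns an opaque GuC-produced blob of key/value pairs, so userspace needs nothing beyond the generic two-call fetch; as a fragment, with the helper from the query_engines() sketch:

    uint32_t hwconfig_size;
    void *hwconfig = xe_query_alloc(fd, DRM_XE_DEVICE_QUERY_HWCONFIG,
                                    &hwconfig_size);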

In calc_topo_query_size():
   465  static size_t calc_topo_query_size(struct xe_device *xe)
   468  size_t query_size = 0;
   471  for_each_gt(gt, xe, id) {

In copy_mask():
   500  return 0;

In query_gt_topology():
   503  static int query_gt_topology(struct xe_device *xe,
   507  size_t size = calc_topo_query_size(xe);
   512  if (query->size == 0) {
   514  return 0;
   515  } else if (XE_IOCTL_DBG(xe, query->size != size)) {
   519  for_each_gt(gt, xe, id) {
   559  return 0;
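
Topology data is a packed sequence of variable-size mask records, one per (GT, mask type), each emitted by copy_mask() above. Walking the buffer back, assuming the drm_xe_query_topology_mask layout from xe_drm.h:

    static void show_topology(int fd)
    {
            uint32_t size;
            uint8_t *buf = xe_query_alloc(fd, DRM_XE_DEVICE_QUERY_GT_TOPOLOGY,
                                          &size);
            uint8_t *p = buf;

            while (buf && p < buf + size) {
                    struct drm_xe_query_topology_mask *topo = (void *)p;

                    /* type is one of DRM_XE_TOPO_DSS_GEOMETRY,
                     * DRM_XE_TOPO_DSS_COMPUTE, DRM_XE_TOPO_EU_PER_DSS, ...;
                     * mask[] holds num_bytes bytes of bitmask. */
                    printf("gt %u type %u: %u mask bytes\n",
                           topo->gt_id, topo->type, topo->num_bytes);
                    p += sizeof(*topo) + topo->num_bytes;
            }
            free(buf);
    }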

In query_uc_fw_version():
   563  query_uc_fw_version(struct xe_device *xe, struct drm_xe_device_query *query)
   570  if (query->size == 0) {
   572  return 0;
   573  } else if (XE_IOCTL_DBG(xe, query->size != size)) {
   580  if (XE_IOCTL_DBG(xe, resp.pad || resp.pad2 || resp.reserved))
   585  struct xe_guc *guc = &xe->tiles[0].primary_gt->uc.guc;
   594  if (MEDIA_VER(xe) >= 13) {
   598  for_each_tile(tile, xe, gt_id) {
   605  media_gt = xe->tiles[0].primary_gt;
   621  resp.branch_ver = 0;
   632  return 0;
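
This query is caller-parameterized: userspace selects the microcontroller in uc_type, and the kernel rejects nonzero pad/pad2/reserved (line 580). A sketch, assuming the uc-type constant names from xe_drm.h:

    static void show_guc_version(int fd)
    {
            struct drm_xe_query_uc_fw_version v = {
                    .uc_type = XE_QUERY_UC_TYPE_GUC_SUBMISSION,
            };
            struct drm_xe_device_query query = {
                    .query = DRM_XE_DEVICE_QUERY_UC_FW_VERSION,
                    .size = sizeof(v),
                    .data = (uintptr_t)&v,
            };

            /* pad/pad2/reserved stay zero or the kernel returns an error
             * (the check at line 580 above). */
            if (!ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
                    printf("GuC submission interface %u.%u.%u\n",
                           v.major_ver, v.minor_ver, v.patch_ver);
    }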

In calc_oa_unit_query_size():
   635  static size_t calc_oa_unit_query_size(struct xe_device *xe)
   641  for_each_gt(gt, xe, id) {
   642  for (i = 0; i < gt->oa.num_oa_units; i++) {

In query_oa_units():
   652  static int query_oa_units(struct xe_device *xe,
   656  size_t size = calc_oa_unit_query_size(xe);
   666  if (query->size == 0) {
   668  return 0;
   669  } else if (XE_IOCTL_DBG(xe, query->size != size)) {
   677  pdu = (u8 *)&qoa->oa_units[0];
   678  for_each_gt(gt, xe, gt_id) {
   679  for (i = 0; i < gt->oa.num_oa_units; i++) {
   690  j = 0;
   702  pdu += sizeof(*du) + j * sizeof(du->eci[0]);
   710  return ret ? -EFAULT : 0;
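
Each OA unit record carries a trailing engine array, so the records are variable-size; line 702 shows the stride the kernel uses when packing, and userspace must advance by the same stride when unpacking. A sketch, assuming the drm_xe_oa_unit layout from xe_drm.h:

    static void show_oa_units(int fd)
    {
            struct drm_xe_query_oa_units *qoa =
                    xe_query_alloc(fd, DRM_XE_DEVICE_QUERY_OA_UNITS, NULL);
            uint8_t *pdu = qoa ? (uint8_t *)&qoa->oa_units[0] : NULL;

            for (uint32_t i = 0; qoa && i < qoa->num_oa_units; i++) {
                    struct drm_xe_oa_unit *du = (void *)pdu;

                    printf("oa unit %u: %llu engines\n", du->oa_unit_id,
                           (unsigned long long)du->num_engines);
                    /* Same stride as the kernel's line 702 above. */
                    pdu += sizeof(*du) + du->num_engines * sizeof(du->eci[0]);
            }
            free(qoa);
    }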

In query_pxp_status():
   713  static int query_pxp_status(struct xe_device *xe, struct drm_xe_device_query *query)
   717  struct drm_xe_query_pxp_status resp = { 0 };
   720  if (query->size == 0) {
   722  return 0;
   723  } else if (XE_IOCTL_DBG(xe, query->size != size)) {
   727  ret = xe_pxp_get_readiness_status(xe->pxp);
   728  if (ret < 0)
   737  return 0;
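
query_pxp_status() maps xe_pxp_get_readiness_status() onto a small fixed struct, with a negative readiness value becoming the ioctl's error return (line 728). A minimal sketch:

    static int pxp_ready(int fd)
    {
            struct drm_xe_query_pxp_status resp = { 0 };
            struct drm_xe_device_query query = {
                    .query = DRM_XE_DEVICE_QUERY_PXP_STATUS,
                    .size = sizeof(resp),
                    .data = (uintptr_t)&resp,
            };

            if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
                    return -1;    /* PXP unsupported or in error */
            return resp.status;   /* readiness as reported by the kernel */
    }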

In query_eu_stall():
   740  static int query_eu_stall(struct xe_device *xe,
   750  if (!xe_eu_stall_supported_on_platform(xe))
   756  if (query->size == 0) {
   758  return 0;
   759  } else if (XE_IOCTL_DBG(xe, query->size != size)) {
   769  info->record_size = xe_eu_stall_data_record_size(xe);
   776  return ret ? -EFAULT : 0;
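
EU stall info is variable-size (the struct ends in a sampling-rate array), so the two-call helper applies here too; record_size at line 769 is the size of one hardware stall record in the sampling stream. Field names per xe_drm.h; sketch:

    static void show_eu_stall(int fd)
    {
            struct drm_xe_query_eu_stall *info =
                    xe_query_alloc(fd, DRM_XE_DEVICE_QUERY_EU_STALL, NULL);

            if (info)
                    printf("stall record size: %llu bytes\n",
                           (unsigned long long)info->record_size);
            free(info);
    }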

At file scope:
   779  static int (* const xe_query_funcs[])(struct xe_device *xe,

In xe_query_ioctl():
   796  struct xe_device *xe = to_xe_device(dev);
   800  if (XE_IOCTL_DBG(xe, query->extensions) ||
   801  XE_IOCTL_DBG(xe, query->reserved[0] || query->reserved[1]))
   804  if (XE_IOCTL_DBG(xe, query->query >= ARRAY_SIZE(xe_query_funcs)))
   808  if (XE_IOCTL_DBG(xe, !xe_query_funcs[idx]))
   811  return xe_query_funcs[idx](xe, query);
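
xe_query_ioctl() rejects nonzero extensions/reserved fields, bounds-checks query->query against the table, and dispatches through xe_query_funcs[]. Because every handler implements the size probe, userspace can discover which query ids a running kernel supports by probing; a sketch:

    static void probe_queries(int fd)
    {
            /* A size probe succeeds (and fills q.size) only for ids the
             * dispatch table accepts; others fail the checks above. */
            for (uint32_t id = 0; id < 32; id++) {
                    struct drm_xe_device_query q = { .query = id };

                    if (!ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q))
                            printf("query %u: supported, %u bytes\n",
                                   id, q.size);
            }
    }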