Lines Matching +full:0 +full:xe

(Identifier search results, one fragment per matching line with the enclosing function noted. The functions below belong to the Xe DRM driver's query ioctl implementation, drivers/gpu/drm/xe/xe_query.c in the Linux kernel.)

45 static size_t calc_hw_engine_info_size(struct xe_device *xe)  in calc_hw_engine_info_size()  argument
51 int i = 0; in calc_hw_engine_info_size()
53 for_each_gt(gt, xe, gt_id) in calc_hw_engine_info_size()
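The calc_*_size() helpers above size a flexible-array reply by counting items first. A minimal sketch of that shape, assuming the drm_xe_query_engines layout from include/uapi/drm/xe_drm.h; the num_engines parameter stands in for the count the driver accumulates in its for_each_gt() loop:

#include <stddef.h>
#include <drm/xe_drm.h>

/* Reply size: fixed header plus one struct drm_xe_engine per engine.
 * num_engines stands in for the count accumulated by the GT loop above. */
static size_t engines_query_size(unsigned int num_engines)
{
	return sizeof(struct drm_xe_query_engines) +
	       num_engines * sizeof(struct drm_xe_engine);
}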
92 u32 upper, lower, old_upper, loop = 0; in hwe_read_timestamp()
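The upper/lower/old_upper/loop locals are the classic read of a 64-bit counter exposed as two 32-bit registers: sample the high half, then the low half, then the high half again, and retry (bounded) if the high half rolled over in between. A self-contained sketch with a hypothetical read32 callback in place of the driver's MMIO accessor:

#include <stdint.h>

/* Hypothetical register-read callback; the real code reads the engine's
 * timestamp register pair through the GT MMIO helpers. */
typedef uint32_t (*read32_fn)(void *ctx, unsigned int reg);

/* Retry if the upper half changed between reads, so the two halves are
 * never stitched together across a rollover. */
static uint64_t read_split_counter64(read32_fn read32, void *ctx,
				     unsigned int reg_lo, unsigned int reg_hi)
{
	uint32_t upper, lower, old_upper;
	int loop = 0;

	upper = read32(ctx, reg_hi);
	do {
		old_upper = upper;
		lower = read32(ctx, reg_lo);
		upper = read32(ctx, reg_hi);
	} while (upper != old_upper && loop++ < 2);

	return ((uint64_t)upper << 32) | lower;
}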
110 query_engine_cycles(struct xe_device *xe, in query_engine_cycles() argument
122 if (query->size == 0) { in query_engine_cycles()
124 return 0; in query_engine_cycles()
125 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_engine_cycles()
141 gt = xe_device_get_gt(xe, eci->gt_id); in query_engine_cycles()
164 if (GRAPHICS_VER(xe) >= 20) in query_engine_cycles()
176 return 0; in query_engine_cycles()
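Every query_*() above follows the same two-call contract: a call with query->size == 0 writes back the required size and returns 0, and a second call must pass exactly that size (anything else fails the XE_IOCTL_DBG(xe, query->size != size) check). A userspace sketch of that contract, assuming only the uAPI in include/uapi/drm/xe_drm.h:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

/* Size probe first (size == 0), then a second call with the exact size
 * and a buffer. Returns a malloc()ed reply or NULL on error. */
static void *xe_query_alloc(int fd, uint32_t query_id, uint32_t *size_out)
{
	struct drm_xe_device_query q = { .query = query_id };
	void *buf;

	if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q))
		return NULL;

	buf = calloc(1, q.size);
	if (!buf)
		return NULL;

	q.data = (uintptr_t)buf;
	if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q)) {
		free(buf);
		return NULL;
	}

	if (size_out)
		*size_out = q.size;
	return buf;
}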
179 static int query_engines(struct xe_device *xe, in query_engines() argument
182 size_t size = calc_hw_engine_info_size(xe); in query_engines()
190 int i = 0; in query_engines()
192 if (query->size == 0) { in query_engines()
194 return 0; in query_engines()
195 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_engines()
203 for_each_gt(gt, xe, gt_id) in query_engines()
225 return 0; in query_engines()
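With a helper like the one sketched above, the DRM_XE_DEVICE_QUERY_ENGINES reply decodes as a num_engines header followed by one record per hw engine across all GTs. A sketch, again assuming the xe_drm.h struct layout:

#include <stdio.h>
#include <drm/xe_drm.h>

/* One drm_xe_engine per engine; the class/instance/gt triple is what
 * other ioctls take to name an engine. */
static void print_engines(const struct drm_xe_query_engines *engines)
{
	for (unsigned int i = 0; i < engines->num_engines; i++) {
		const struct drm_xe_engine_class_instance *eci =
			&engines->engines[i].instance;

		printf("engine %u: class %u instance %u gt %u\n", i,
		       eci->engine_class, eci->engine_instance, eci->gt_id);
	}
}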
228 static size_t calc_mem_regions_size(struct xe_device *xe) in calc_mem_regions_size() argument
234 if (ttm_manager_type(&xe->ttm, i)) in calc_mem_regions_size()
240 static int query_mem_regions(struct xe_device *xe, in query_mem_regions() argument
243 size_t size = calc_mem_regions_size(xe); in query_mem_regions()
250 if (query->size == 0) { in query_mem_regions()
252 return 0; in query_mem_regions()
253 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_mem_regions()
258 if (XE_IOCTL_DBG(xe, !mem_regions)) in query_mem_regions()
261 man = ttm_manager_type(&xe->ttm, XE_PL_TT); in query_mem_regions()
262 mem_regions->mem_regions[0].mem_class = DRM_XE_MEM_REGION_CLASS_SYSMEM; in query_mem_regions()
268 mem_regions->mem_regions[0].instance = 0; in query_mem_regions()
269 mem_regions->mem_regions[0].min_page_size = PAGE_SIZE; in query_mem_regions()
270 mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT; in query_mem_regions()
272 mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man); in query_mem_regions()
276 man = ttm_manager_type(&xe->ttm, i); in query_mem_regions()
283 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? in query_mem_regions()
303 ret = 0; in query_mem_regions()
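The fragments show region [0] is always system memory (DRM_XE_MEM_REGION_CLASS_SYSMEM, instance 0, PAGE_SIZE min page), while VRAM regions report a 64K min_page_size when XE_VRAM_FLAGS_NEED64K is set. A decoding sketch under the same xe_drm.h assumption:

#include <stdio.h>
#include <drm/xe_drm.h>

/* Region [0] is sysmem; min_page_size is the smallest GPU page the
 * region supports (64K on NEED64K VRAM, per the ternary above). */
static void print_mem_regions(const struct drm_xe_query_mem_regions *regions)
{
	for (unsigned int i = 0; i < regions->num_mem_regions; i++) {
		const struct drm_xe_mem_region *r = &regions->mem_regions[i];

		printf("region %u: class %u instance %u min_page %u total %llu used %llu\n",
		       i, r->mem_class, r->instance, r->min_page_size,
		       (unsigned long long)r->total_size,
		       (unsigned long long)r->used);
	}
}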
311 static int query_config(struct xe_device *xe, struct drm_xe_device_query *query) in query_config() argument
320 if (query->size == 0) { in query_config()
322 return 0; in query_config()
323 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_config()
333 xe->info.devid | (xe->info.revid << 16); in query_config()
334 if (xe_device_get_root_tile(xe)->mem.vram.usable_size) in query_config()
338 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K; in query_config()
339 config->info[DRM_XE_QUERY_CONFIG_VA_BITS] = xe->info.va_bits; in query_config()
341 xe_exec_queue_device_get_max_priority(xe); in query_config()
349 return 0; in query_config()
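The config query returns a u64 key/value array; the fragment above packs the PCI device id into the low 16 bits of DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID and the revision above it (devid | revid << 16). A decoding sketch:

#include <stdio.h>
#include <drm/xe_drm.h>

/* Unpack rev-and-device-id the same way the driver packs it. */
static void print_config(const struct drm_xe_query_config *config)
{
	unsigned long long id =
		config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID];

	printf("devid 0x%04llx revid 0x%02llx va_bits %llu\n",
	       id & 0xffff, id >> 16,
	       (unsigned long long)config->info[DRM_XE_QUERY_CONFIG_VA_BITS]);
}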
352 static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query) in query_gt_list() argument
356 xe->info.gt_count * sizeof(struct drm_xe_gt); in query_gt_list()
362 if (query->size == 0) { in query_gt_list()
364 return 0; in query_gt_list()
365 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_gt_list()
373 gt_list->num_gt = xe->info.gt_count; in query_gt_list()
375 for_each_gt(gt, xe, id) { in query_gt_list()
390 * Bit 0 -> System Memory in query_gt_list()
397 if (!IS_DGFX(xe)) in query_gt_list()
398 gt_list->gt_list[id].near_mem_regions = 0x1; in query_gt_list()
402 gt_list->gt_list[id].far_mem_regions = xe->info.mem_region_mask ^ in query_gt_list()
419 return 0; in query_gt_list()
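near_mem_regions/far_mem_regions are bitmasks over memory-region instances: bit 0 is system memory (so integrated parts pin near to 0x1), and the driver derives far as mem_region_mask ^ near, per the fragments. A sketch:

#include <stdio.h>
#include <drm/xe_drm.h>

/* Each set bit names a region instance from the mem-regions query;
 * near and far partition the device's region mask per GT. */
static void print_gt_list(const struct drm_xe_query_gt_list *gts)
{
	for (unsigned int i = 0; i < gts->num_gt; i++) {
		const struct drm_xe_gt *gt = &gts->gt_list[i];

		printf("gt %u: near 0x%llx far 0x%llx\n", gt->gt_id,
		       (unsigned long long)gt->near_mem_regions,
		       (unsigned long long)gt->far_mem_regions);
	}
}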
422 static int query_hwconfig(struct xe_device *xe, in query_hwconfig() argument
425 struct xe_gt *gt = xe_root_mmio_gt(xe); in query_hwconfig()
430 if (query->size == 0) { in query_hwconfig()
432 return 0; in query_hwconfig()
433 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_hwconfig()
449 return 0; in query_hwconfig()
452 static size_t calc_topo_query_size(struct xe_device *xe) in calc_topo_query_size() argument
455 size_t query_size = 0; in calc_topo_query_size()
458 for_each_gt(gt, xe, id) { in calc_topo_query_size()
487 return 0; in copy_mask()
490 static int query_gt_topology(struct xe_device *xe, in query_gt_topology() argument
494 size_t size = calc_topo_query_size(xe); in query_gt_topology()
499 if (query->size == 0) { in query_gt_topology()
501 return 0; in query_gt_topology()
502 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_gt_topology()
506 for_each_gt(gt, xe, id) { in query_gt_topology()
546 return 0; in query_gt_topology()
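copy_mask() above emits the topology reply as back-to-back variable-length records: a (gt_id, type, num_bytes) header followed by num_bytes of mask, one record per mask type per GT. A sketch that walks the stream, assuming the drm_xe_query_topology_mask layout:

#include <stdio.h>
#include <drm/xe_drm.h>

/* total_size is the byte count the size probe returned; each record's
 * stride is its header plus num_bytes of mask payload. */
static void print_topology(const void *buf, size_t total_size)
{
	const unsigned char *p = buf;
	const unsigned char *end = p + total_size;

	while (p + sizeof(struct drm_xe_query_topology_mask) <= end) {
		const struct drm_xe_query_topology_mask *topo =
			(const void *)p;

		printf("gt %u type %u: %u mask bytes\n",
		       topo->gt_id, topo->type, topo->num_bytes);
		p += sizeof(*topo) + topo->num_bytes;
	}
}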
550 query_uc_fw_version(struct xe_device *xe, struct drm_xe_device_query *query) in query_uc_fw_version() argument
557 if (query->size == 0) { in query_uc_fw_version()
559 return 0; in query_uc_fw_version()
560 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_uc_fw_version()
567 if (XE_IOCTL_DBG(xe, resp.pad || resp.pad2 || resp.reserved)) in query_uc_fw_version()
572 struct xe_guc *guc = &xe->tiles[0].primary_gt->uc.guc; in query_uc_fw_version()
581 if (MEDIA_VER(xe) >= 13) { in query_uc_fw_version()
585 for_each_tile(tile, xe, gt_id) { in query_uc_fw_version()
592 media_gt = xe->tiles[0].primary_gt; in query_uc_fw_version()
608 resp.branch_ver = 0; in query_uc_fw_version()
619 return 0; in query_uc_fw_version()
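This query is in/out: userspace picks a uc_type and must leave pad/pad2/reserved zero (the XE_IOCTL_DBG check above rejects anything else), and the driver fills in branch/major/minor/patch. A hedged sketch; the uc-type constant name is from my reading of xe_drm.h and should be checked against the header:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

/* Zero the whole struct first so the driver's reserved-field check
 * passes, then ask for the GuC submission version. */
static int print_guc_version(int fd)
{
	struct drm_xe_query_uc_fw_version ver;
	struct drm_xe_device_query q = {
		.query = DRM_XE_DEVICE_QUERY_UC_FW_VERSION,
		.size = sizeof(ver),
		.data = (uintptr_t)&ver,
	};

	memset(&ver, 0, sizeof(ver));
	ver.uc_type = XE_QUERY_UC_TYPE_GUC_SUBMISSION;

	if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q))
		return -1;

	printf("GuC %u.%u.%u (branch %u)\n", ver.major_ver, ver.minor_ver,
	       ver.patch_ver, ver.branch_ver);
	return 0;
}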
622 static size_t calc_oa_unit_query_size(struct xe_device *xe) in calc_oa_unit_query_size() argument
628 for_each_gt(gt, xe, id) { in calc_oa_unit_query_size()
629 for (i = 0; i < gt->oa.num_oa_units; i++) { in calc_oa_unit_query_size()
639 static int query_oa_units(struct xe_device *xe, in query_oa_units() argument
643 size_t size = calc_oa_unit_query_size(xe); in query_oa_units()
653 if (query->size == 0) { in query_oa_units()
655 return 0; in query_oa_units()
656 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_oa_units()
664 pdu = (u8 *)&qoa->oa_units[0]; in query_oa_units()
665 for_each_gt(gt, xe, gt_id) { in query_oa_units()
666 for (i = 0; i < gt->oa.num_oa_units; i++) { in query_oa_units()
675 j = 0; in query_oa_units()
687 pdu += sizeof(*du) + j * sizeof(du->eci[0]); in query_oa_units()
695 return ret ? -EFAULT : 0; in query_oa_units()
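OA unit records are variable length as well: each carries num_engines trailing engine-class-instance entries, which is why the cursor advances by sizeof(*du) + j * sizeof(du->eci[0]). A walking sketch, assuming the drm_xe_oa_unit layout in xe_drm.h:

#include <stdio.h>
#include <drm/xe_drm.h>

/* Mirror the driver's stride: fixed header plus one eci entry per
 * engine attached to the OA unit. */
static void print_oa_units(const struct drm_xe_query_oa_units *qoa)
{
	const unsigned char *pdu = (const unsigned char *)&qoa->oa_units[0];

	for (unsigned int i = 0; i < qoa->num_oa_units; i++) {
		const struct drm_xe_oa_unit *du = (const void *)pdu;

		printf("oa unit %u: %llu engines\n", du->oa_unit_id,
		       (unsigned long long)du->num_engines);
		pdu += sizeof(*du) + du->num_engines * sizeof(du->eci[0]);
	}
}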
698 static int (* const xe_query_funcs[])(struct xe_device *xe,
713 struct xe_device *xe = to_xe_device(dev); in xe_query_ioctl() local
717 if (XE_IOCTL_DBG(xe, query->extensions) || in xe_query_ioctl()
718 XE_IOCTL_DBG(xe, query->reserved[0] || query->reserved[1])) in xe_query_ioctl()
721 if (XE_IOCTL_DBG(xe, query->query >= ARRAY_SIZE(xe_query_funcs))) in xe_query_ioctl()
725 if (XE_IOCTL_DBG(xe, !xe_query_funcs[idx])) in xe_query_ioctl()
728 return xe_query_funcs[idx](xe, query); in xe_query_ioctl()
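The ioctl itself is a dispatch table: reject nonzero extensions/reserved fields, bounds-check query->query against ARRAY_SIZE(xe_query_funcs) (the idx local suggests the usual array_index_nospec() clamp in between), then call through the table with a NULL check for gaps. A generic, self-contained sketch of the same shape:

#include <errno.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct req { unsigned int op; };

static int op_a(struct req *r) { (void)r; return 0; }
static int op_b(struct req *r) { (void)r; return 0; }

/* Const handler table indexed by the request opcode; gaps stay NULL. */
static int (* const handlers[])(struct req *) = { op_a, op_b };

static int dispatch(struct req *r)
{
	if (r->op >= ARRAY_SIZE(handlers))
		return -EINVAL;
	if (!handlers[r->op])	/* reject table gaps cleanly */
		return -EINVAL;
	return handlers[r->op](r);
}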