xref: /linux/drivers/gpu/drm/xe/xe_query.c (revision fc2591175507709191c2010a7eb466837496750d)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_query.h"

#include <linux/nospec.h>
#include <linux/sched/clock.h>

#include <drm/ttm/ttm_placement.h>
#include <generated/xe_wa_oob.h>
#include <uapi/drm/xe_drm.h>

#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_eu_stall.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_topology.h"
#include "xe_guc_hwconfig.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_oa.h"
#include "xe_pxp.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vram_types.h"
#include "xe_wa.h"

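/* Translation tables between the driver's engine class enum and the uAPI values */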
static const u16 xe_to_user_engine_class[] = {
	[XE_ENGINE_CLASS_RENDER] = DRM_XE_ENGINE_CLASS_RENDER,
	[XE_ENGINE_CLASS_COPY] = DRM_XE_ENGINE_CLASS_COPY,
	[XE_ENGINE_CLASS_VIDEO_DECODE] = DRM_XE_ENGINE_CLASS_VIDEO_DECODE,
	[XE_ENGINE_CLASS_VIDEO_ENHANCE] = DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[XE_ENGINE_CLASS_COMPUTE] = DRM_XE_ENGINE_CLASS_COMPUTE,
};

static const enum xe_engine_class user_to_xe_engine_class[] = {
	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

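/*
 * Size of the engines query payload: the fixed header plus one
 * drm_xe_engine entry for each non-reserved hardware engine on any GT.
 */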
static size_t calc_hw_engine_info_size(struct xe_device *xe)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_gt *gt;
	u8 gt_id;
	int i = 0;

	for_each_gt(gt, xe, gt_id)
		for_each_hw_engine(hwe, gt, id) {
			if (xe_hw_engine_is_reserved(hwe))
				continue;
			i++;
		}

	return sizeof(struct drm_xe_query_engines) +
		i * sizeof(struct drm_xe_engine);
}

typedef u64 (*__ktime_func_t)(void);
static __ktime_func_t __clock_id_to_func(clockid_t clk_id)
{
	/*
	 * Use the same logic as the perf subsystem to allow the user to
	 * select the reference clock id to be used for timestamps.
	 */
	switch (clk_id) {
	case CLOCK_MONOTONIC:
		return &ktime_get_ns;
	case CLOCK_MONOTONIC_RAW:
		return &ktime_get_raw_ns;
	case CLOCK_REALTIME:
		return &ktime_get_real_ns;
	case CLOCK_BOOTTIME:
		return &ktime_get_boottime_ns;
	case CLOCK_TAI:
		return &ktime_get_clocktai_ns;
	default:
		return NULL;
	}
}

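/*
 * Sample the engine's 64-bit timestamp (RING_TIMESTAMP_UDW:RING_TIMESTAMP)
 * together with the selected CPU clock.  The upper dword is re-read until it
 * is stable so a wrap between the two 32-bit reads is not missed, and
 * *cpu_delta reports how long the CPU-side sampling itself took.
 */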
static void
hwe_read_timestamp(struct xe_hw_engine *hwe, u64 *engine_ts, u64 *cpu_ts,
		   u64 *cpu_delta, __ktime_func_t cpu_clock)
{
	struct xe_mmio *mmio = &hwe->gt->mmio;
	u32 upper, lower, old_upper, loop = 0;
	struct xe_reg upper_reg = RING_TIMESTAMP_UDW(hwe->mmio_base),
		      lower_reg = RING_TIMESTAMP(hwe->mmio_base);

	upper = xe_mmio_read32(mmio, upper_reg);
	do {
		*cpu_delta = local_clock();
		*cpu_ts = cpu_clock();
		lower = xe_mmio_read32(mmio, lower_reg);
		*cpu_delta = local_clock() - *cpu_delta;
		old_upper = upper;
		upper = xe_mmio_read32(mmio, upper_reg);
	} while (upper != old_upper && loop++ < 2);

	*engine_ts = (u64)upper << 32 | lower;
}

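/*
 * DRM_XE_DEVICE_QUERY_ENGINE_CYCLES: report a correlated engine/CPU timestamp
 * pair for the engine selected by the caller, using the CPU clock id chosen
 * by userspace, plus the width of the engine timestamp.
 */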
static int
query_engine_cycles(struct xe_device *xe,
		    struct drm_xe_device_query *query)
{
	struct drm_xe_query_engine_cycles __user *query_ptr;
	struct drm_xe_engine_class_instance *eci;
	struct drm_xe_query_engine_cycles resp;
	size_t size = sizeof(resp);
	__ktime_func_t cpu_clock;
	struct xe_hw_engine *hwe;
	struct xe_gt *gt;

	if (IS_SRIOV_VF(xe))
		return -EOPNOTSUPP;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	query_ptr = u64_to_user_ptr(query->data);
	if (copy_from_user(&resp, query_ptr, size))
		return -EFAULT;

	cpu_clock = __clock_id_to_func(resp.clockid);
	if (!cpu_clock)
		return -EINVAL;

	eci = &resp.eci;
	gt = xe_device_get_gt(xe, eci->gt_id);
	if (!gt)
		return -EINVAL;

	if (eci->engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
		return -EINVAL;

	hwe = xe_gt_hw_engine(gt, user_to_xe_engine_class[eci->engine_class],
			      eci->engine_instance, true);
	if (!hwe)
		return -EINVAL;

	xe_with_force_wake(fw_ref, gt_to_fw(gt), XE_FORCEWAKE_ALL) {
		if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL))
			return -EIO;

		hwe_read_timestamp(hwe, &resp.engine_cycles, &resp.cpu_timestamp,
				   &resp.cpu_delta, cpu_clock);
	}

	if (GRAPHICS_VER(xe) >= 20)
		resp.width = 64;
	else
		resp.width = 36;

	/* Only write to the output fields of user query */
	if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp) ||
	    put_user(resp.cpu_delta, &query_ptr->cpu_delta) ||
	    put_user(resp.engine_cycles, &query_ptr->engine_cycles) ||
	    put_user(resp.width, &query_ptr->width))
		return -EFAULT;

	return 0;
}

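/*
 * DRM_XE_DEVICE_QUERY_ENGINES: list every non-reserved hardware engine on
 * every GT with its uAPI class, logical instance and GT id.
 */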
static int query_engines(struct xe_device *xe,
			 struct drm_xe_device_query *query)
{
	size_t size = calc_hw_engine_info_size(xe);
	struct drm_xe_query_engines __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_engines *engines;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_gt *gt;
	u8 gt_id;
	int i = 0;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	engines = kzalloc(size, GFP_KERNEL);
	if (!engines)
		return -ENOMEM;

	for_each_gt(gt, xe, gt_id)
		for_each_hw_engine(hwe, gt, id) {
			if (xe_hw_engine_is_reserved(hwe))
				continue;

			engines->engines[i].instance.engine_class =
				xe_to_user_engine_class[hwe->class];
			engines->engines[i].instance.engine_instance =
				hwe->logical_instance;
			engines->engines[i].instance.gt_id = gt->info.id;

			i++;
		}

	engines->num_engines = i;

	if (copy_to_user(query_ptr, engines, size)) {
		kfree(engines);
		return -EFAULT;
	}
	kfree(engines);

	return 0;
}

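/* One region for system memory plus one for each VRAM placement that exists */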
static size_t calc_mem_regions_size(struct xe_device *xe)
{
	u32 num_managers = 1;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i)
		if (ttm_manager_type(&xe->ttm, i))
			num_managers++;

	return offsetof(struct drm_xe_query_mem_regions, mem_regions[num_managers]);
}

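/*
 * DRM_XE_DEVICE_QUERY_MEM_REGIONS: describe each memory region (system memory
 * and any VRAM), including its instance, minimum page size and current usage.
 */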
static int query_mem_regions(struct xe_device *xe,
			    struct drm_xe_device_query *query)
{
	size_t size = calc_mem_regions_size(xe);
	struct drm_xe_query_mem_regions *mem_regions;
	struct drm_xe_query_mem_regions __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct ttm_resource_manager *man;
	int ret, i;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	mem_regions = kzalloc(size, GFP_KERNEL);
	if (XE_IOCTL_DBG(xe, !mem_regions))
		return -ENOMEM;

	man = ttm_manager_type(&xe->ttm, XE_PL_TT);
	mem_regions->mem_regions[0].mem_class = DRM_XE_MEM_REGION_CLASS_SYSMEM;
	/*
	 * The instance needs to be a unique number that represents the index
	 * in the placement mask used at xe_gem_create_ioctl() for the
	 * xe_bo_create() placement.
	 */
	mem_regions->mem_regions[0].instance = 0;
	mem_regions->mem_regions[0].min_page_size = PAGE_SIZE;
	mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT;
	mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
	mem_regions->num_mem_regions = 1;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			mem_regions->mem_regions[mem_regions->num_mem_regions].mem_class =
				DRM_XE_MEM_REGION_CLASS_VRAM;
			mem_regions->mem_regions[mem_regions->num_mem_regions].instance =
				mem_regions->num_mem_regions;
			mem_regions->mem_regions[mem_regions->num_mem_regions].min_page_size =
				xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ?
				SZ_64K : PAGE_SIZE;
			mem_regions->mem_regions[mem_regions->num_mem_regions].total_size =
				man->size;

			xe_ttm_vram_get_used(man,
					     &mem_regions->mem_regions
					     [mem_regions->num_mem_regions].used,
					     &mem_regions->mem_regions
					     [mem_regions->num_mem_regions].cpu_visible_used);

			mem_regions->mem_regions[mem_regions->num_mem_regions].cpu_visible_size =
				xe_ttm_vram_get_cpu_visible_size(man);
			mem_regions->num_mem_regions++;
		}
	}

	if (!copy_to_user(query_ptr, mem_regions, size))
		ret = 0;
	else
		ret = -ENOSPC;

	kfree(mem_regions);
	return ret;
}

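/*
 * DRM_XE_DEVICE_QUERY_CONFIG: device/revision id, capability flags, minimum
 * alignment, VA width and maximum exec queue priority.
 */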
static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
{
	const u32 num_params = DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY + 1;
	size_t size =
		sizeof(struct drm_xe_query_config) + num_params * sizeof(u64);
	struct drm_xe_query_config __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_config *config;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	config = kzalloc(size, GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	config->num_params = num_params;
	config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
		xe->info.devid | (xe->info.revid << 16);
	if (xe->mem.vram)
		config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
			DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
	if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_XE_GPUSVM))
		config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
			DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR;
	if (GRAPHICS_VER(xe) >= 20)
		config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
			DRM_XE_QUERY_CONFIG_FLAG_HAS_NO_COMPRESSION_HINT;
	config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
			DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY;
	config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
		DRM_XE_QUERY_CONFIG_FLAG_HAS_DISABLE_STATE_CACHE_PERF_FIX;
	config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT] =
		xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
	config->info[DRM_XE_QUERY_CONFIG_VA_BITS] = xe->info.va_bits;
	config->info[DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY] =
		xe_exec_queue_device_get_max_priority(xe);

	if (copy_to_user(query_ptr, config, size)) {
		kfree(config);
		return -EFAULT;
	}
	kfree(config);

	return 0;
}

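/*
 * DRM_XE_DEVICE_QUERY_GT_LIST: per-GT information such as type, tile and GT
 * ids, reference clock, near/far memory region masks and GMD_ID IP version.
 */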
static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query)
{
	struct xe_gt *gt;
	size_t size = sizeof(struct drm_xe_query_gt_list) +
		xe->info.gt_count * sizeof(struct drm_xe_gt);
	struct drm_xe_query_gt_list __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_gt_list *gt_list;
	int iter = 0;
	u8 id;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	gt_list = kzalloc(size, GFP_KERNEL);
	if (!gt_list)
		return -ENOMEM;

	gt_list->num_gt = xe->info.gt_count;

	for_each_gt(gt, xe, id) {
		if (xe_gt_is_media_type(gt))
			gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
		else
			gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MAIN;
		gt_list->gt_list[iter].tile_id = gt_to_tile(gt)->id;
		gt_list->gt_list[iter].gt_id = gt->info.id;
		gt_list->gt_list[iter].reference_clock = gt->info.reference_clock;
		/*
		 * The mem_regions indexes in the mask below need to
		 * directly identify the struct
		 * drm_xe_query_mem_regions' instance constructed at
		 * query_mem_regions()
		 *
		 * For our current platforms:
		 * Bit 0 -> System Memory
		 * Bit 1 -> VRAM0 on Tile0
		 * Bit 2 -> VRAM1 on Tile1
		 * However the uAPI is generic and it's userspace's
		 * responsibility to check the mem_class, without any
		 * assumption.
		 */
		if (!IS_DGFX(xe))
			gt_list->gt_list[iter].near_mem_regions = 0x1;
		else
			gt_list->gt_list[iter].near_mem_regions =
				BIT(gt_to_tile(gt)->mem.vram->id) << 1;
		gt_list->gt_list[iter].far_mem_regions = xe->info.mem_region_mask ^
			gt_list->gt_list[iter].near_mem_regions;

		gt_list->gt_list[iter].ip_ver_major =
			REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid);
		gt_list->gt_list[iter].ip_ver_minor =
			REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid);
		gt_list->gt_list[iter].ip_ver_rev =
			REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid);

		iter++;
	}

	if (copy_to_user(query_ptr, gt_list, size)) {
		kfree(gt_list);
		return -EFAULT;
	}
	kfree(gt_list);

	return 0;
}

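/*
 * DRM_XE_DEVICE_QUERY_HWCONFIG: copy out the raw hwconfig table obtained via
 * the GuC on the root GT.
 */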
static int query_hwconfig(struct xe_device *xe,
			  struct drm_xe_device_query *query)
{
	struct xe_gt *gt = xe_root_mmio_gt(xe);
	size_t size = gt ? xe_guc_hwconfig_size(&gt->uc.guc) : 0;
	void __user *query_ptr = u64_to_user_ptr(query->data);
	void *hwconfig;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	hwconfig = kzalloc(size, GFP_KERNEL);
	if (!hwconfig)
		return -ENOMEM;

	xe_guc_hwconfig_copy(&gt->uc.guc, hwconfig);

	if (copy_to_user(query_ptr, hwconfig, size)) {
		kfree(hwconfig);
		return -EFAULT;
	}
	kfree(hwconfig);

	return 0;
}

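/*
 * Size of the topology query: geometry DSS, compute DSS and EU-per-DSS masks
 * for every GT, plus an L3 bank mask for GTs that can report one.
 */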
static size_t calc_topo_query_size(struct xe_device *xe)
{
	struct xe_gt *gt;
	size_t query_size = 0;
	int id;

	for_each_gt(gt, xe, id) {
		query_size += 3 * sizeof(struct drm_xe_query_topology_mask) +
			sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) +
			sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) +
			sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss);

		/* L3bank mask may not be available for some GTs */
		if (xe_gt_topology_report_l3(gt))
			query_size += sizeof(struct drm_xe_query_topology_mask) +
				sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask);
	}

	return query_size;
}

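/*
 * Copy one drm_xe_query_topology_mask header followed by its raw mask bytes
 * to userspace, advancing *ptr past both.
 */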
static int copy_mask(void __user **ptr,
		     struct drm_xe_query_topology_mask *topo,
		     void *mask, size_t mask_size)
{
	topo->num_bytes = mask_size;

	if (copy_to_user(*ptr, topo, sizeof(*topo)))
		return -EFAULT;
	*ptr += sizeof(*topo);

	if (copy_to_user(*ptr, mask, mask_size))
		return -EFAULT;
	*ptr += mask_size;

	return 0;
}

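/*
 * DRM_XE_DEVICE_QUERY_GT_TOPOLOGY: stream out the per-GT fuse topology masks
 * (geometry DSS, compute DSS, optional L3 bank, EU per DSS).
 */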
static int query_gt_topology(struct xe_device *xe,
			     struct drm_xe_device_query *query)
{
	void __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = calc_topo_query_size(xe);
	struct drm_xe_query_topology_mask topo;
	struct xe_gt *gt;
	int id;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	for_each_gt(gt, xe, id) {
		int err;

		topo.gt_id = id;

		topo.type = DRM_XE_TOPO_DSS_GEOMETRY;
		err = copy_mask(&query_ptr, &topo, gt->fuse_topo.g_dss_mask,
				sizeof(gt->fuse_topo.g_dss_mask));
		if (err)
			return err;

		topo.type = DRM_XE_TOPO_DSS_COMPUTE;
		err = copy_mask(&query_ptr, &topo, gt->fuse_topo.c_dss_mask,
				sizeof(gt->fuse_topo.c_dss_mask));
		if (err)
			return err;

		/*
		 * If the kernel doesn't have a way to obtain a correct L3bank
		 * mask, then it's better to omit L3 from the query rather than
		 * reporting bogus or zeroed information to userspace.
		 */
		if (xe_gt_topology_report_l3(gt)) {
			topo.type = DRM_XE_TOPO_L3_BANK;
			err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask,
					sizeof(gt->fuse_topo.l3_bank_mask));
			if (err)
				return err;
		}

		topo.type = gt->fuse_topo.eu_type == XE_GT_EU_TYPE_SIMD16 ?
			DRM_XE_TOPO_SIMD16_EU_PER_DSS :
			DRM_XE_TOPO_EU_PER_DSS;
		err = copy_mask(&query_ptr, &topo,
				gt->fuse_topo.eu_mask_per_dss,
				sizeof(gt->fuse_topo.eu_mask_per_dss));
		if (err)
			return err;
	}

	return 0;
}

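/*
 * DRM_XE_DEVICE_QUERY_UC_FW_VERSION: report the firmware version of the
 * microcontroller requested by userspace (GuC submission interface or HuC).
 */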
static int
query_uc_fw_version(struct xe_device *xe, struct drm_xe_device_query *query)
{
	struct drm_xe_query_uc_fw_version __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = sizeof(struct drm_xe_query_uc_fw_version);
	struct drm_xe_query_uc_fw_version resp;
	struct xe_uc_fw_version *version = NULL;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	if (copy_from_user(&resp, query_ptr, size))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, resp.pad || resp.pad2 || resp.reserved))
		return -EINVAL;

	switch (resp.uc_type) {
	case XE_QUERY_UC_TYPE_GUC_SUBMISSION: {
		struct xe_guc *guc = &xe->tiles[0].primary_gt->uc.guc;

		version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
		break;
	}
	case XE_QUERY_UC_TYPE_HUC: {
		struct xe_gt *media_gt = NULL;
		struct xe_huc *huc;

		if (MEDIA_VER(xe) >= 13) {
			struct xe_tile *tile;
			u8 gt_id;

			for_each_tile(tile, xe, gt_id) {
				if (tile->media_gt) {
					media_gt = tile->media_gt;
					break;
				}
			}
		} else {
			media_gt = xe->tiles[0].primary_gt;
		}

		if (!media_gt)
			break;

		huc = &media_gt->uc.huc;
		if (huc->fw.status == XE_UC_FIRMWARE_RUNNING)
			version = &huc->fw.versions.found[XE_UC_FW_VER_RELEASE];
		break;
	}
	default:
		return -EINVAL;
	}

	if (version) {
		resp.branch_ver = 0;
		resp.major_ver = version->major;
		resp.minor_ver = version->minor;
		resp.patch_ver = version->patch;
	} else {
		return -ENODEV;
	}

	if (copy_to_user(query_ptr, &resp, size))
		return -EFAULT;

	return 0;
}

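/* Size of the OA units query: one drm_xe_oa_unit plus its engine list per unit */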
static size_t calc_oa_unit_query_size(struct xe_device *xe)
{
	size_t size = sizeof(struct drm_xe_query_oa_units);
	struct xe_gt *gt;
	int i, id;

	for_each_gt(gt, xe, id) {
		for (i = 0; i < gt->oa.num_oa_units; i++) {
			size += sizeof(struct drm_xe_oa_unit);
			size += gt->oa.oa_unit[i].num_engines *
				sizeof(struct drm_xe_engine_class_instance);
		}
	}

	return size;
}

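/*
 * DRM_XE_DEVICE_QUERY_OA_UNITS: describe every OA unit on every GT, its
 * capabilities and the engines attached to it.
 */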
static int query_oa_units(struct xe_device *xe,
			  struct drm_xe_device_query *query)
{
	void __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = calc_oa_unit_query_size(xe);
	struct drm_xe_query_oa_units *qoa;
	enum xe_hw_engine_id hwe_id;
	struct drm_xe_oa_unit *du;
	struct xe_hw_engine *hwe;
	struct xe_oa_unit *u;
	int gt_id, i, j, ret;
	struct xe_gt *gt;
	u8 *pdu;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	qoa = kzalloc(size, GFP_KERNEL);
	if (!qoa)
		return -ENOMEM;

	pdu = (u8 *)&qoa->oa_units[0];
	for_each_gt(gt, xe, gt_id) {
		for (i = 0; i < gt->oa.num_oa_units; i++) {
			u = &gt->oa.oa_unit[i];
			du = (struct drm_xe_oa_unit *)pdu;

			du->oa_unit_id = u->oa_unit_id;
			du->oa_unit_type = u->type;
			du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt);
			du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS |
					   DRM_XE_OA_CAPS_OA_BUFFER_SIZE |
					   DRM_XE_OA_CAPS_WAIT_NUM_REPORTS |
					   DRM_XE_OA_CAPS_OAM |
					   DRM_XE_OA_CAPS_OA_UNIT_GT_ID;
			du->gt_id = u->gt->info.id;
			j = 0;
			for_each_hw_engine(hwe, gt, hwe_id) {
				if (!xe_hw_engine_is_reserved(hwe) &&
				    xe_oa_unit_id(hwe) == u->oa_unit_id) {
					du->eci[j].engine_class =
						xe_to_user_engine_class[hwe->class];
					du->eci[j].engine_instance = hwe->logical_instance;
					du->eci[j].gt_id = gt->info.id;
					j++;
				}
			}
			du->num_engines = j;
			pdu += sizeof(*du) + j * sizeof(du->eci[0]);
			qoa->num_oa_units++;
		}
	}

	ret = copy_to_user(query_ptr, qoa, size);
	kfree(qoa);

	return ret ? -EFAULT : 0;
}

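/*
 * DRM_XE_DEVICE_QUERY_PXP_STATUS: report PXP readiness and the supported
 * protected session types.
 */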
static int query_pxp_status(struct xe_device *xe, struct drm_xe_device_query *query)
{
	struct drm_xe_query_pxp_status __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = sizeof(struct drm_xe_query_pxp_status);
	struct drm_xe_query_pxp_status resp = { 0 };
	int ret;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	ret = xe_pxp_get_readiness_status(xe->pxp);
	if (ret < 0)
		return ret;

	resp.status = ret;
	resp.supported_session_types = BIT(DRM_XE_PXP_TYPE_HWDRM);

	if (copy_to_user(query_ptr, &resp, size))
		return -EFAULT;

	return 0;
}

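/*
 * DRM_XE_DEVICE_QUERY_EU_STALL: EU stall sampling capabilities, record size,
 * per-XeCore buffer size and the list of supported sampling rates.
 */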
static int query_eu_stall(struct xe_device *xe,
			  struct drm_xe_device_query *query)
{
	void __user *query_ptr = u64_to_user_ptr(query->data);
	struct drm_xe_query_eu_stall *info;
	size_t size, array_size;
	const u64 *rates;
	u32 num_rates;
	int ret;

	if (!xe_eu_stall_supported_on_platform(xe))
		return -ENODEV;

	array_size = xe_eu_stall_get_sampling_rates(&num_rates, &rates);
	size = sizeof(struct drm_xe_query_eu_stall) + array_size;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	info = kzalloc(size, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->num_sampling_rates = num_rates;
	info->capabilities = DRM_XE_EU_STALL_CAPS_BASE;
	info->record_size = xe_eu_stall_data_record_size(xe);
	info->per_xecore_buf_size = xe_eu_stall_get_per_xecore_buf_size();
	memcpy(info->sampling_rates, rates, array_size);

	ret = copy_to_user(query_ptr, info, size);
	kfree(info);

	return ret ? -EFAULT : 0;
}

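/*
 * Dispatch table for the query ioctl; the index of each handler must match
 * the corresponding DRM_XE_DEVICE_QUERY_* value in the uAPI.
 */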
static int (* const xe_query_funcs[])(struct xe_device *xe,
				      struct drm_xe_device_query *query) = {
	query_engines,
	query_mem_regions,
	query_config,
	query_gt_list,
	query_hwconfig,
	query_gt_topology,
	query_engine_cycles,
	query_uc_fw_version,
	query_oa_units,
	query_pxp_status,
	query_eu_stall,
};

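/**
 * xe_query_ioctl() - Handle DRM_IOCTL_XE_DEVICE_QUERY
 * @dev: DRM device
 * @data: pointer to struct drm_xe_device_query
 * @file: DRM file private
 *
 * Every query uses the same two-call pattern: a first call with size == 0
 * only writes back the required buffer size, and a second call with that
 * exact size and a valid data pointer fills the buffer.  An illustrative
 * userspace sketch (names per uapi/drm/xe_drm.h, error handling omitted):
 *
 *	struct drm_xe_device_query q = { .query = DRM_XE_DEVICE_QUERY_ENGINES };
 *
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q);
 *	q.data = (uintptr_t)malloc(q.size);
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q);
 *
 * Return: 0 on success, negative error code on failure.
 */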
int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct drm_xe_device_query *query = data;
	u32 idx;

	if (XE_IOCTL_DBG(xe, query->extensions) ||
	    XE_IOCTL_DBG(xe, query->reserved[0] || query->reserved[1]))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, query->query >= ARRAY_SIZE(xe_query_funcs)))
		return -EINVAL;

	idx = array_index_nospec(query->query, ARRAY_SIZE(xe_query_funcs));
	if (XE_IOCTL_DBG(xe, !xe_query_funcs[idx]))
		return -EINVAL;

	return xe_query_funcs[idx](xe, query);
}