xref: /linux/drivers/gpu/drm/xe/xe_query.c (revision 68a052239fc4b351e961f698b824f7654a346091)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_query.h"

#include <linux/nospec.h>
#include <linux/sched/clock.h>

#include <drm/ttm/ttm_placement.h>
#include <generated/xe_wa_oob.h>
#include <uapi/drm/xe_drm.h>

#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_eu_stall.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_topology.h"
#include "xe_guc_hwconfig.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_oa.h"
#include "xe_pxp.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vram_types.h"
#include "xe_wa.h"

static const u16 xe_to_user_engine_class[] = {
	[XE_ENGINE_CLASS_RENDER] = DRM_XE_ENGINE_CLASS_RENDER,
	[XE_ENGINE_CLASS_COPY] = DRM_XE_ENGINE_CLASS_COPY,
	[XE_ENGINE_CLASS_VIDEO_DECODE] = DRM_XE_ENGINE_CLASS_VIDEO_DECODE,
	[XE_ENGINE_CLASS_VIDEO_ENHANCE] = DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[XE_ENGINE_CLASS_COMPUTE] = DRM_XE_ENGINE_CLASS_COMPUTE,
};

static const enum xe_engine_class user_to_xe_engine_class[] = {
	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

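/*
 * Size of the engines query payload: one struct drm_xe_engine per
 * non-reserved hardware engine across all GTs, preceded by the
 * struct drm_xe_query_engines header.
 */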
static size_t calc_hw_engine_info_size(struct xe_device *xe)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_gt *gt;
	u8 gt_id;
	int i = 0;

	for_each_gt(gt, xe, gt_id)
		for_each_hw_engine(hwe, gt, id) {
			if (xe_hw_engine_is_reserved(hwe))
				continue;
			i++;
		}

	return sizeof(struct drm_xe_query_engines) +
		i * sizeof(struct drm_xe_engine);
}

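/* Map a userspace clockid to the corresponding kernel time source. */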
typedef u64 (*__ktime_func_t)(void);
static __ktime_func_t __clock_id_to_func(clockid_t clk_id)
{
	/*
	 * Use the same logic as the perf subsystem to allow the user to
	 * select the reference clock id to be used for timestamps.
	 */
	switch (clk_id) {
	case CLOCK_MONOTONIC:
		return &ktime_get_ns;
	case CLOCK_MONOTONIC_RAW:
		return &ktime_get_raw_ns;
	case CLOCK_REALTIME:
		return &ktime_get_real_ns;
	case CLOCK_BOOTTIME:
		return &ktime_get_boottime_ns;
	case CLOCK_TAI:
		return &ktime_get_clocktai_ns;
	default:
		return NULL;
	}
}

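/*
 * Read an engine's 64-bit ring timestamp together with a CPU timestamp
 * from the selected clock. The upper dword is re-read until it is stable
 * (at most three iterations) to guard against a rollover of the lower
 * dword between the two MMIO reads. *cpu_delta returns the local_clock()
 * time spent around the CPU and lower-dword sampling, which userspace can
 * use to bound the correlation error.
 */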
static void
hwe_read_timestamp(struct xe_hw_engine *hwe, u64 *engine_ts, u64 *cpu_ts,
		   u64 *cpu_delta, __ktime_func_t cpu_clock)
{
	struct xe_mmio *mmio = &hwe->gt->mmio;
	u32 upper, lower, old_upper, loop = 0;
	struct xe_reg upper_reg = RING_TIMESTAMP_UDW(hwe->mmio_base),
		      lower_reg = RING_TIMESTAMP(hwe->mmio_base);

	upper = xe_mmio_read32(mmio, upper_reg);
	do {
		*cpu_delta = local_clock();
		*cpu_ts = cpu_clock();
		lower = xe_mmio_read32(mmio, lower_reg);
		*cpu_delta = local_clock() - *cpu_delta;
		old_upper = upper;
		upper = xe_mmio_read32(mmio, upper_reg);
	} while (upper != old_upper && loop++ < 2);

	*engine_ts = (u64)upper << 32 | lower;
}

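/*
 * DRM_XE_DEVICE_QUERY_ENGINE_CYCLES: sample an engine timestamp and a CPU
 * timestamp as close together as possible, so userspace can correlate the
 * two time domains. The caller selects the engine and the CPU clockid in
 * the input struct; only the output fields are written back.
 */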
static int
query_engine_cycles(struct xe_device *xe,
		    struct drm_xe_device_query *query)
{
	struct drm_xe_query_engine_cycles __user *query_ptr;
	struct drm_xe_engine_class_instance *eci;
	struct drm_xe_query_engine_cycles resp;
	size_t size = sizeof(resp);
	__ktime_func_t cpu_clock;
	struct xe_hw_engine *hwe;
	struct xe_gt *gt;
	unsigned int fw_ref;

	if (IS_SRIOV_VF(xe))
		return -EOPNOTSUPP;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	query_ptr = u64_to_user_ptr(query->data);
	if (copy_from_user(&resp, query_ptr, size))
		return -EFAULT;

	cpu_clock = __clock_id_to_func(resp.clockid);
	if (!cpu_clock)
		return -EINVAL;

	eci = &resp.eci;
	if (eci->gt_id >= xe->info.max_gt_per_tile)
		return -EINVAL;

	gt = xe_device_get_gt(xe, eci->gt_id);
	if (!gt)
		return -EINVAL;

	if (eci->engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
		return -EINVAL;

	hwe = xe_gt_hw_engine(gt, user_to_xe_engine_class[eci->engine_class],
			      eci->engine_instance, true);
	if (!hwe)
		return -EINVAL;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -EIO;
	}

	hwe_read_timestamp(hwe, &resp.engine_cycles, &resp.cpu_timestamp,
			   &resp.cpu_delta, cpu_clock);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	if (GRAPHICS_VER(xe) >= 20)
		resp.width = 64;
	else
		resp.width = 36;

	/* Only write to the output fields of the user query */
	if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp) ||
	    put_user(resp.cpu_delta, &query_ptr->cpu_delta) ||
	    put_user(resp.engine_cycles, &query_ptr->engine_cycles) ||
	    put_user(resp.width, &query_ptr->width))
		return -EFAULT;

	return 0;
}

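/*
 * DRM_XE_DEVICE_QUERY_ENGINES: report class, logical instance and GT id
 * for every non-reserved hardware engine in the device.
 */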
static int query_engines(struct xe_device *xe,
			 struct drm_xe_device_query *query)
{
	size_t size = calc_hw_engine_info_size(xe);
	struct drm_xe_query_engines __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_engines *engines;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_gt *gt;
	u8 gt_id;
	int i = 0;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	engines = kzalloc(size, GFP_KERNEL);
	if (!engines)
		return -ENOMEM;

	for_each_gt(gt, xe, gt_id)
		for_each_hw_engine(hwe, gt, id) {
			if (xe_hw_engine_is_reserved(hwe))
				continue;

			engines->engines[i].instance.engine_class =
				xe_to_user_engine_class[hwe->class];
			engines->engines[i].instance.engine_instance =
				hwe->logical_instance;
			engines->engines[i].instance.gt_id = gt->info.id;

			i++;
		}

	engines->num_engines = i;

	if (copy_to_user(query_ptr, engines, size)) {
		kfree(engines);
		return -EFAULT;
	}
	kfree(engines);

	return 0;
}

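/*
 * Size of the mem_regions query payload: one entry for system memory plus
 * one per initialized VRAM manager.
 */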
static size_t calc_mem_regions_size(struct xe_device *xe)
{
	u32 num_managers = 1;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i)
		if (ttm_manager_type(&xe->ttm, i))
			num_managers++;

	return offsetof(struct drm_xe_query_mem_regions, mem_regions[num_managers]);
}

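/*
 * DRM_XE_DEVICE_QUERY_MEM_REGIONS: describe the memory regions a buffer
 * object can be placed in. Region instance 0 is always system memory;
 * VRAM regions follow. Usage counters are only reported to callers with
 * perfmon capability.
 */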
static int query_mem_regions(struct xe_device *xe,
			     struct drm_xe_device_query *query)
{
	size_t size = calc_mem_regions_size(xe);
	struct drm_xe_query_mem_regions *mem_regions;
	struct drm_xe_query_mem_regions __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct ttm_resource_manager *man;
	int ret, i;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	mem_regions = kzalloc(size, GFP_KERNEL);
	if (XE_IOCTL_DBG(xe, !mem_regions))
		return -ENOMEM;

	man = ttm_manager_type(&xe->ttm, XE_PL_TT);
	mem_regions->mem_regions[0].mem_class = DRM_XE_MEM_REGION_CLASS_SYSMEM;
	/*
	 * The instance needs to be a unique number that represents the index
	 * in the placement mask used at xe_gem_create_ioctl() for the
	 * xe_bo_create() placement.
	 */
	mem_regions->mem_regions[0].instance = 0;
	mem_regions->mem_regions[0].min_page_size = PAGE_SIZE;
	mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT;
	if (perfmon_capable())
		mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
	mem_regions->num_mem_regions = 1;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			mem_regions->mem_regions[mem_regions->num_mem_regions].mem_class =
				DRM_XE_MEM_REGION_CLASS_VRAM;
			mem_regions->mem_regions[mem_regions->num_mem_regions].instance =
				mem_regions->num_mem_regions;
			mem_regions->mem_regions[mem_regions->num_mem_regions].min_page_size =
				xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ?
				SZ_64K : PAGE_SIZE;
			mem_regions->mem_regions[mem_regions->num_mem_regions].total_size =
				man->size;

			if (perfmon_capable()) {
				xe_ttm_vram_get_used(man,
					&mem_regions->mem_regions
					[mem_regions->num_mem_regions].used,
					&mem_regions->mem_regions
					[mem_regions->num_mem_regions].cpu_visible_used);
			}

			mem_regions->mem_regions[mem_regions->num_mem_regions].cpu_visible_size =
				xe_ttm_vram_get_cpu_visible_size(man);
			mem_regions->num_mem_regions++;
		}
	}

	if (!copy_to_user(query_ptr, mem_regions, size))
		ret = 0;
	else
		ret = -ENOSPC;

	kfree(mem_regions);
	return ret;
}

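/*
 * DRM_XE_DEVICE_QUERY_CONFIG: report device id/revision, feature flags,
 * minimum buffer alignment, virtual address range and the maximum exec
 * queue priority as a fixed array of u64 parameters.
 */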
static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
{
	const u32 num_params = DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY + 1;
	size_t size =
		sizeof(struct drm_xe_query_config) + num_params * sizeof(u64);
	struct drm_xe_query_config __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_config *config;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	config = kzalloc(size, GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	config->num_params = num_params;
	config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
		xe->info.devid | (xe->info.revid << 16);
	if (xe->mem.vram)
		config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
			DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
	if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_XE_GPUSVM))
		config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
			DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR;
	config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
			DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY;
	config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT] =
		xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
	config->info[DRM_XE_QUERY_CONFIG_VA_BITS] = xe->info.va_bits;
	config->info[DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY] =
		xe_exec_queue_device_get_max_priority(xe);

	if (copy_to_user(query_ptr, config, size)) {
		kfree(config);
		return -EFAULT;
	}
	kfree(config);

	return 0;
}

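/*
 * DRM_XE_DEVICE_QUERY_GT_LIST: enumerate the GTs with their type, tile,
 * reference clock, near/far memory region masks and GMD_ID IP version.
 */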
static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query)
{
	struct xe_gt *gt;
	size_t size = sizeof(struct drm_xe_query_gt_list) +
		xe->info.gt_count * sizeof(struct drm_xe_gt);
	struct drm_xe_query_gt_list __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_gt_list *gt_list;
	int iter = 0;
	u8 id;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	gt_list = kzalloc(size, GFP_KERNEL);
	if (!gt_list)
		return -ENOMEM;

	gt_list->num_gt = xe->info.gt_count;

	for_each_gt(gt, xe, id) {
		if (xe_gt_is_media_type(gt))
			gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
		else
			gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MAIN;
		gt_list->gt_list[iter].tile_id = gt_to_tile(gt)->id;
		gt_list->gt_list[iter].gt_id = gt->info.id;
		gt_list->gt_list[iter].reference_clock = gt->info.reference_clock;
		/*
		 * The mem_regions indexes in the mask below need to directly
		 * identify the struct drm_xe_query_mem_regions' instance
		 * constructed at query_mem_regions().
		 *
		 * For our current platforms:
		 * Bit 0 -> System Memory
		 * Bit 1 -> VRAM0 on Tile0
		 * Bit 2 -> VRAM1 on Tile1
		 * However the uAPI is generic and it's userspace's
		 * responsibility to check the mem_class, without any
		 * assumption.
		 */
		if (!IS_DGFX(xe))
			gt_list->gt_list[iter].near_mem_regions = 0x1;
		else
			gt_list->gt_list[iter].near_mem_regions =
				BIT(gt_to_tile(gt)->mem.vram->id) << 1;
		gt_list->gt_list[iter].far_mem_regions = xe->info.mem_region_mask ^
			gt_list->gt_list[iter].near_mem_regions;

		gt_list->gt_list[iter].ip_ver_major =
			REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid);
		gt_list->gt_list[iter].ip_ver_minor =
			REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid);
		gt_list->gt_list[iter].ip_ver_rev =
			REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid);

		iter++;
	}

	if (copy_to_user(query_ptr, gt_list, size)) {
		kfree(gt_list);
		return -EFAULT;
	}
	kfree(gt_list);

	return 0;
}

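/*
 * DRM_XE_DEVICE_QUERY_HWCONFIG: copy out the raw hwconfig table provided
 * by the GuC of the root GT.
 */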
static int query_hwconfig(struct xe_device *xe,
			  struct drm_xe_device_query *query)
{
	struct xe_gt *gt = xe_root_mmio_gt(xe);
	size_t size = xe_guc_hwconfig_size(&gt->uc.guc);
	void __user *query_ptr = u64_to_user_ptr(query->data);
	void *hwconfig;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	hwconfig = kzalloc(size, GFP_KERNEL);
	if (!hwconfig)
		return -ENOMEM;

	xe_guc_hwconfig_copy(&gt->uc.guc, hwconfig);

	if (copy_to_user(query_ptr, hwconfig, size)) {
		kfree(hwconfig);
		return -EFAULT;
	}
	kfree(hwconfig);

	return 0;
}

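/*
 * Size of the topology query payload: per GT, a mask header plus mask data
 * for geometry DSS, compute DSS and EU-per-DSS, and optionally for L3
 * banks when a reliable mask is available.
 */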
static size_t calc_topo_query_size(struct xe_device *xe)
{
	struct xe_gt *gt;
	size_t query_size = 0;
	int id;

	for_each_gt(gt, xe, id) {
		query_size += 3 * sizeof(struct drm_xe_query_topology_mask) +
			sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) +
			sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) +
			sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss);

		/* L3bank mask may not be available for some GTs */
		if (xe_gt_topology_report_l3(gt))
			query_size += sizeof(struct drm_xe_query_topology_mask) +
				sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask);
	}

	return query_size;
}

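/*
 * Write one topology record to the user buffer: the
 * drm_xe_query_topology_mask header followed by mask_size bytes of mask
 * data, advancing *ptr past both.
 */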
static int copy_mask(void __user **ptr,
		     struct drm_xe_query_topology_mask *topo,
		     void *mask, size_t mask_size)
{
	topo->num_bytes = mask_size;

	if (copy_to_user(*ptr, topo, sizeof(*topo)))
		return -EFAULT;
	*ptr += sizeof(*topo);

	if (copy_to_user(*ptr, mask, mask_size))
		return -EFAULT;
	*ptr += mask_size;

	return 0;
}

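/*
 * DRM_XE_DEVICE_QUERY_GT_TOPOLOGY: stream the fused-off topology masks of
 * each GT to userspace, one record per mask type.
 */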
static int query_gt_topology(struct xe_device *xe,
			     struct drm_xe_device_query *query)
{
	void __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = calc_topo_query_size(xe);
	struct drm_xe_query_topology_mask topo;
	struct xe_gt *gt;
	int id;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	for_each_gt(gt, xe, id) {
		int err;

		topo.gt_id = id;

		topo.type = DRM_XE_TOPO_DSS_GEOMETRY;
		err = copy_mask(&query_ptr, &topo, gt->fuse_topo.g_dss_mask,
				sizeof(gt->fuse_topo.g_dss_mask));
		if (err)
			return err;

		topo.type = DRM_XE_TOPO_DSS_COMPUTE;
		err = copy_mask(&query_ptr, &topo, gt->fuse_topo.c_dss_mask,
				sizeof(gt->fuse_topo.c_dss_mask));
		if (err)
			return err;

		/*
		 * If the kernel doesn't have a way to obtain a correct L3bank
		 * mask, then it's better to omit L3 from the query rather than
		 * reporting bogus or zeroed information to userspace.
		 */
		if (xe_gt_topology_report_l3(gt)) {
			topo.type = DRM_XE_TOPO_L3_BANK;
			err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask,
					sizeof(gt->fuse_topo.l3_bank_mask));
			if (err)
				return err;
		}

		topo.type = gt->fuse_topo.eu_type == XE_GT_EU_TYPE_SIMD16 ?
			DRM_XE_TOPO_SIMD16_EU_PER_DSS :
			DRM_XE_TOPO_EU_PER_DSS;
		err = copy_mask(&query_ptr, &topo,
				gt->fuse_topo.eu_mask_per_dss,
				sizeof(gt->fuse_topo.eu_mask_per_dss));
		if (err)
			return err;
	}

	return 0;
}

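/*
 * DRM_XE_DEVICE_QUERY_UC_FW_VERSION: report the firmware version for the
 * requested microcontroller: the GuC submission (compatibility) version,
 * or the HuC release version when the HuC is running.
 */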
static int
query_uc_fw_version(struct xe_device *xe, struct drm_xe_device_query *query)
{
	struct drm_xe_query_uc_fw_version __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = sizeof(struct drm_xe_query_uc_fw_version);
	struct drm_xe_query_uc_fw_version resp;
	struct xe_uc_fw_version *version = NULL;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	if (copy_from_user(&resp, query_ptr, size))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, resp.pad || resp.pad2 || resp.reserved))
		return -EINVAL;

	switch (resp.uc_type) {
	case XE_QUERY_UC_TYPE_GUC_SUBMISSION: {
		struct xe_guc *guc = &xe->tiles[0].primary_gt->uc.guc;

		version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
		break;
	}
	case XE_QUERY_UC_TYPE_HUC: {
		struct xe_gt *media_gt = NULL;
		struct xe_huc *huc;

		if (MEDIA_VER(xe) >= 13) {
			struct xe_tile *tile;
			u8 gt_id;

			for_each_tile(tile, xe, gt_id) {
				if (tile->media_gt) {
					media_gt = tile->media_gt;
					break;
				}
			}
		} else {
			media_gt = xe->tiles[0].primary_gt;
		}

		if (!media_gt)
			break;

		huc = &media_gt->uc.huc;
		if (huc->fw.status == XE_UC_FIRMWARE_RUNNING)
			version = &huc->fw.versions.found[XE_UC_FW_VER_RELEASE];
		break;
	}
	default:
		return -EINVAL;
	}

	if (version) {
		resp.branch_ver = 0;
		resp.major_ver = version->major;
		resp.minor_ver = version->minor;
		resp.patch_ver = version->patch;
	} else {
		return -ENODEV;
	}

	if (copy_to_user(query_ptr, &resp, size))
		return -EFAULT;

	return 0;
}

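/*
 * Size of the OA units query payload: the header plus, per OA unit, a
 * drm_xe_oa_unit record and one engine class instance per attached engine.
 */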
static size_t calc_oa_unit_query_size(struct xe_device *xe)
{
	size_t size = sizeof(struct drm_xe_query_oa_units);
	struct xe_gt *gt;
	int i, id;

	for_each_gt(gt, xe, id) {
		for (i = 0; i < gt->oa.num_oa_units; i++) {
			size += sizeof(struct drm_xe_oa_unit);
			size += gt->oa.oa_unit[i].num_engines *
				sizeof(struct drm_xe_engine_class_instance);
		}
	}

	return size;
}

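/*
 * DRM_XE_DEVICE_QUERY_OA_UNITS: describe each OA unit, its capabilities
 * and the non-reserved engines attached to it. The records are variable
 * sized, so they are packed back to back in the output buffer.
 */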
static int query_oa_units(struct xe_device *xe,
			  struct drm_xe_device_query *query)
{
	void __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = calc_oa_unit_query_size(xe);
	struct drm_xe_query_oa_units *qoa;
	enum xe_hw_engine_id hwe_id;
	struct drm_xe_oa_unit *du;
	struct xe_hw_engine *hwe;
	struct xe_oa_unit *u;
	int gt_id, i, j, ret;
	struct xe_gt *gt;
	u8 *pdu;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	qoa = kzalloc(size, GFP_KERNEL);
	if (!qoa)
		return -ENOMEM;

	pdu = (u8 *)&qoa->oa_units[0];
	for_each_gt(gt, xe, gt_id) {
		for (i = 0; i < gt->oa.num_oa_units; i++) {
			u = &gt->oa.oa_unit[i];
			du = (struct drm_xe_oa_unit *)pdu;

			du->oa_unit_id = u->oa_unit_id;
			du->oa_unit_type = u->type;
			du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt);
			du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS |
					   DRM_XE_OA_CAPS_OA_BUFFER_SIZE |
					   DRM_XE_OA_CAPS_WAIT_NUM_REPORTS |
					   DRM_XE_OA_CAPS_OAM;
			j = 0;
			for_each_hw_engine(hwe, gt, hwe_id) {
				if (!xe_hw_engine_is_reserved(hwe) &&
				    xe_oa_unit_id(hwe) == u->oa_unit_id) {
					du->eci[j].engine_class =
						xe_to_user_engine_class[hwe->class];
					du->eci[j].engine_instance = hwe->logical_instance;
					du->eci[j].gt_id = gt->info.id;
					j++;
				}
			}
			du->num_engines = j;
			pdu += sizeof(*du) + j * sizeof(du->eci[0]);
			qoa->num_oa_units++;
		}
	}

	ret = copy_to_user(query_ptr, qoa, size);
	kfree(qoa);

	return ret ? -EFAULT : 0;
}

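/*
 * DRM_XE_DEVICE_QUERY_PXP_STATUS: report whether PXP is ready for use and
 * which session types are supported.
 */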
static int query_pxp_status(struct xe_device *xe, struct drm_xe_device_query *query)
{
	struct drm_xe_query_pxp_status __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = sizeof(struct drm_xe_query_pxp_status);
	struct drm_xe_query_pxp_status resp = { 0 };
	int ret;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	ret = xe_pxp_get_readiness_status(xe->pxp);
	if (ret < 0)
		return ret;

	resp.status = ret;
	resp.supported_session_types = BIT(DRM_XE_PXP_TYPE_HWDRM);

	if (copy_to_user(query_ptr, &resp, size))
		return -EFAULT;

	return 0;
}

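/*
 * DRM_XE_DEVICE_QUERY_EU_STALL: report EU stall sampling capabilities,
 * record size, per-XeCore buffer size and the supported sampling rates.
 */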
static int query_eu_stall(struct xe_device *xe,
			  struct drm_xe_device_query *query)
{
	void __user *query_ptr = u64_to_user_ptr(query->data);
	struct drm_xe_query_eu_stall *info;
	size_t size, array_size;
	const u64 *rates;
	u32 num_rates;
	int ret;

	if (!xe_eu_stall_supported_on_platform(xe))
		return -ENODEV;

	array_size = xe_eu_stall_get_sampling_rates(&num_rates, &rates);
	size = sizeof(struct drm_xe_query_eu_stall) + array_size;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	info = kzalloc(size, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->num_sampling_rates = num_rates;
	info->capabilities = DRM_XE_EU_STALL_CAPS_BASE;
	info->record_size = xe_eu_stall_data_record_size(xe);
	info->per_xecore_buf_size = xe_eu_stall_get_per_xecore_buf_size();
	memcpy(info->sampling_rates, rates, array_size);

	ret = copy_to_user(query_ptr, info, size);
	kfree(info);

	return ret ? -EFAULT : 0;
}

static int (* const xe_query_funcs[])(struct xe_device *xe,
				      struct drm_xe_device_query *query) = {
	query_engines,
	query_mem_regions,
	query_config,
	query_gt_list,
	query_hwconfig,
	query_gt_topology,
	query_engine_cycles,
	query_uc_fw_version,
	query_oa_units,
	query_pxp_status,
	query_eu_stall,
};

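/**
 * xe_query_ioctl - Handle a DRM_IOCTL_XE_DEVICE_QUERY request
 * @dev: DRM device
 * @data: ioctl argument, a struct drm_xe_device_query
 * @file: DRM file
 *
 * Validates the request and dispatches it to the handler selected by
 * query->query, using array_index_nospec() to avoid speculative
 * out-of-bounds indexing of the function table.
 *
 * Return: 0 on success, negative error code on failure.
 */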
int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct drm_xe_device_query *query = data;
	u32 idx;

	if (XE_IOCTL_DBG(xe, query->extensions) ||
	    XE_IOCTL_DBG(xe, query->reserved[0] || query->reserved[1]))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, query->query >= ARRAY_SIZE(xe_query_funcs)))
		return -EINVAL;

	idx = array_index_nospec(query->query, ARRAY_SIZE(xe_query_funcs));
	if (XE_IOCTL_DBG(xe, !xe_query_funcs[idx]))
		return -EINVAL;

	return xe_query_funcs[idx](xe, query);
}