xref: /linux/drivers/gpu/drm/xe/xe_query.c (revision dd08ebf6c3525a7ea2186e636df064ea47281987)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include <drm/xe_drm.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/nospec.h>

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_macros.h"
#include "xe_query.h"
#include "xe_ggtt.h"
#include "xe_guc_hwconfig.h"

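/* Map the driver-internal engine classes to the values exposed in the uAPI. */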
static const enum xe_engine_class xe_to_user_engine_class[] = {
	[XE_ENGINE_CLASS_RENDER] = DRM_XE_ENGINE_CLASS_RENDER,
	[XE_ENGINE_CLASS_COPY] = DRM_XE_ENGINE_CLASS_COPY,
	[XE_ENGINE_CLASS_VIDEO_DECODE] = DRM_XE_ENGINE_CLASS_VIDEO_DECODE,
	[XE_ENGINE_CLASS_VIDEO_ENHANCE] = DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[XE_ENGINE_CLASS_COMPUTE] = DRM_XE_ENGINE_CLASS_COMPUTE,
};

static size_t calc_hw_engine_info_size(struct xe_device *xe)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_gt *gt;
	u8 gt_id;
	int i = 0;

	for_each_gt(gt, xe, gt_id)
		for_each_hw_engine(hwe, gt, id) {
			if (xe_hw_engine_is_reserved(hwe))
				continue;
			i++;
		}

	return i * sizeof(struct drm_xe_engine_class_instance);
}

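/*
 * query_engines - report all non-reserved hardware engines to userspace
 *
 * Like every query handler below, this follows a two-pass protocol: a
 * call with query->size == 0 only reports the required buffer size, and
 * a second call must pass exactly that size together with a pointer to
 * a buffer that the result is copied into.
 */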
static int query_engines(struct xe_device *xe,
			 struct drm_xe_device_query *query)
{
	size_t size = calc_hw_engine_info_size(xe);
	struct drm_xe_engine_class_instance __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_engine_class_instance *hw_engine_info;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_gt *gt;
	u8 gt_id;
	int i = 0;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_ERR(xe, query->size != size)) {
		return -EINVAL;
	}

	hw_engine_info = kmalloc(size, GFP_KERNEL);
	if (XE_IOCTL_ERR(xe, !hw_engine_info))
		return -ENOMEM;

	for_each_gt(gt, xe, gt_id)
		for_each_hw_engine(hwe, gt, id) {
			if (xe_hw_engine_is_reserved(hwe))
				continue;

			hw_engine_info[i].engine_class =
				xe_to_user_engine_class[hwe->class];
			hw_engine_info[i].engine_instance =
				hwe->logical_instance;
			hw_engine_info[i++].gt_id = gt->info.id;
		}

	if (copy_to_user(query_ptr, hw_engine_info, size)) {
		kfree(hw_engine_info);
		return -EFAULT;
	}
	kfree(hw_engine_info);

	return 0;
}
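
/*
 * Illustrative userspace sketch (not part of the driver): how a client
 * would typically drive the two-pass size negotiation above for the
 * engine query. The ioctl number DRM_IOCTL_XE_DEVICE_QUERY and the
 * query id DRM_XE_DEVICE_QUERY_ENGINES are assumed to come from the
 * uAPI header <drm/xe_drm.h> at this revision; treat the exact names as
 * an assumption rather than a guarantee.
 *
 *	// Assumed includes: <stdint.h>, <stdio.h>, <stdlib.h>,
 *	// <sys/ioctl.h> and the uAPI header <drm/xe_drm.h>.
 *	static int print_engines(int fd)
 *	{
 *		struct drm_xe_device_query q = {
 *			.query = DRM_XE_DEVICE_QUERY_ENGINES,
 *		};
 *		struct drm_xe_engine_class_instance *engines;
 *		unsigned int i;
 *
 *		// First pass: size == 0, the kernel reports the size needed.
 *		if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q))
 *			return -1;
 *
 *		engines = malloc(q.size);
 *		if (!engines)
 *			return -1;
 *		q.data = (uintptr_t)engines;
 *
 *		// Second pass: size must match exactly, data receives the array.
 *		if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q)) {
 *			free(engines);
 *			return -1;
 *		}
 *
 *		for (i = 0; i < q.size / sizeof(*engines); i++)
 *			printf("class %u instance %u gt %u\n",
 *			       engines[i].engine_class,
 *			       engines[i].engine_instance,
 *			       engines[i].gt_id);
 *
 *		free(engines);
 *		return 0;
 *	}
 */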

static size_t calc_memory_usage_size(struct xe_device *xe)
{
	u32 num_managers = 1;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i)
		if (ttm_manager_type(&xe->ttm, i))
			num_managers++;

	return offsetof(struct drm_xe_query_mem_usage, regions[num_managers]);
}

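/*
 * query_memory_usage - report the size and current usage of each memory
 * region. Region 0 is always system memory (the TTM TT manager); any
 * VRAM managers that exist are appended after it.
 */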
static int query_memory_usage(struct xe_device *xe,
			      struct drm_xe_device_query *query)
{
	size_t size = calc_memory_usage_size(xe);
	struct drm_xe_query_mem_usage *usage;
	struct drm_xe_query_mem_usage __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct ttm_resource_manager *man;
	int ret, i;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_ERR(xe, query->size != size)) {
		return -EINVAL;
	}

	usage = kmalloc(size, GFP_KERNEL);
	if (XE_IOCTL_ERR(xe, !usage))
		return -ENOMEM;

	usage->pad = 0;

	man = ttm_manager_type(&xe->ttm, XE_PL_TT);
	usage->regions[0].mem_class = XE_MEM_REGION_CLASS_SYSMEM;
	usage->regions[0].instance = 0;
	usage->regions[0].pad = 0;
	usage->regions[0].min_page_size = PAGE_SIZE;
	usage->regions[0].max_page_size = PAGE_SIZE;
	usage->regions[0].total_size = man->size << PAGE_SHIFT;
	usage->regions[0].used = ttm_resource_manager_usage(man);
	usage->num_regions = 1;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			usage->regions[usage->num_regions].mem_class =
				XE_MEM_REGION_CLASS_VRAM;
			usage->regions[usage->num_regions].instance =
				usage->num_regions;
			usage->regions[usage->num_regions].pad = 0;
			usage->regions[usage->num_regions].min_page_size =
				xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ?
				SZ_64K : PAGE_SIZE;
			usage->regions[usage->num_regions].max_page_size =
				SZ_1G;
			usage->regions[usage->num_regions].total_size =
				man->size;
			usage->regions[usage->num_regions++].used =
				ttm_resource_manager_usage(man);
		}
	}

	if (!copy_to_user(query_ptr, usage, size))
		ret = 0;
	else
		ret = -EFAULT;

	kfree(usage);
	return ret;
}

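/*
 * query_config - report a fixed array of device-wide configuration
 * values, one u64 per XE_QUERY_CONFIG_* index.
 */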
static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
{
	u32 num_params = XE_QUERY_CONFIG_NUM_PARAM;
	size_t size =
		sizeof(struct drm_xe_query_config) + num_params * sizeof(u64);
	struct drm_xe_query_config __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_config *config;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_ERR(xe, query->size != size)) {
		return -EINVAL;
	}

	config = kzalloc(size, GFP_KERNEL);
	if (XE_IOCTL_ERR(xe, !config))
		return -ENOMEM;

	config->num_params = num_params;
	config->info[XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
		xe->info.devid | (xe->info.revid << 16);
	if (to_gt(xe)->mem.vram.size)
		config->info[XE_QUERY_CONFIG_FLAGS] =
			XE_QUERY_CONFIG_FLAGS_HAS_VRAM;
	if (xe->info.enable_guc)
		config->info[XE_QUERY_CONFIG_FLAGS] |=
			XE_QUERY_CONFIG_FLAGS_USE_GUC;
	config->info[XE_QUERY_CONFIG_MIN_ALIGNEMENT] =
		xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
	config->info[XE_QUERY_CONFIG_VA_BITS] = 12 +
		(9 * (xe->info.vm_max_level + 1));
	config->info[XE_QUERY_CONFIG_GT_COUNT] = xe->info.tile_count;
	config->info[XE_QUERY_CONFIG_MEM_REGION_COUNT] =
		hweight_long(xe->info.mem_region_mask);

	if (copy_to_user(query_ptr, config, size)) {
		kfree(config);
		return -EFAULT;
	}
	kfree(config);

	return 0;
}

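/*
 * query_gts - describe each GT: its type, clock frequency, and which
 * memory regions are native (local) to it versus slow (remote).
 */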
static int query_gts(struct xe_device *xe, struct drm_xe_device_query *query)
{
	struct xe_gt *gt;
	size_t size = sizeof(struct drm_xe_query_gts) +
		xe->info.tile_count * sizeof(struct drm_xe_query_gt);
	struct drm_xe_query_gts __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_gts *gts;
	u8 id;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_ERR(xe, query->size != size)) {
		return -EINVAL;
	}

	gts = kzalloc(size, GFP_KERNEL);
	if (XE_IOCTL_ERR(xe, !gts))
		return -ENOMEM;

	gts->num_gt = xe->info.tile_count;
	for_each_gt(gt, xe, id) {
		if (id == 0)
			gts->gts[id].type = XE_QUERY_GT_TYPE_MAIN;
		else if (xe_gt_is_media_type(gt))
			gts->gts[id].type = XE_QUERY_GT_TYPE_MEDIA;
		else
			gts->gts[id].type = XE_QUERY_GT_TYPE_REMOTE;
		gts->gts[id].instance = id;
		gts->gts[id].clock_freq = gt->info.clock_freq;
		if (!IS_DGFX(xe))
			gts->gts[id].native_mem_regions = 0x1;
		else
			gts->gts[id].native_mem_regions =
				BIT(gt->info.vram_id) << 1;
		gts->gts[id].slow_mem_regions = xe->info.mem_region_mask ^
			gts->gts[id].native_mem_regions;
	}

	if (copy_to_user(query_ptr, gts, size)) {
		kfree(gts);
		return -EFAULT;
	}
	kfree(gts);

	return 0;
}

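/*
 * query_hwconfig - copy out the raw GuC hwconfig blob of the primary GT.
 */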
static int query_hwconfig(struct xe_device *xe,
			  struct drm_xe_device_query *query)
{
	struct xe_gt *gt = xe_device_get_gt(xe, 0);
	size_t size = xe_guc_hwconfig_size(&gt->uc.guc);
	void __user *query_ptr = u64_to_user_ptr(query->data);
	void *hwconfig;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_ERR(xe, query->size != size)) {
		return -EINVAL;
	}

	hwconfig = kzalloc(size, GFP_KERNEL);
	if (XE_IOCTL_ERR(xe, !hwconfig))
		return -ENOMEM;

	xe_device_mem_access_get(xe);
	xe_guc_hwconfig_copy(&gt->uc.guc, hwconfig);
	xe_device_mem_access_put(xe);

	if (copy_to_user(query_ptr, hwconfig, size)) {
		kfree(hwconfig);
		return -EFAULT;
	}
	kfree(hwconfig);

	return 0;
}

static size_t calc_topo_query_size(struct xe_device *xe)
{
	return xe->info.tile_count *
		(3 * sizeof(struct drm_xe_query_topology_mask) +
		 sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) +
		 sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) +
		 sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss));
}

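/*
 * copy_mask - append one topology record to the userspace buffer
 *
 * A record is a struct drm_xe_query_topology_mask header followed
 * immediately by num_bytes of raw mask data. On success the returned
 * pointer is advanced past the record; a faulting copy yields an
 * ERR_PTR(-EFAULT) instead.
 */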
static void __user *copy_mask(void __user *ptr,
			      struct drm_xe_query_topology_mask *topo,
			      void *mask, size_t mask_size)
{
	topo->num_bytes = mask_size;

	if (copy_to_user(ptr, topo, sizeof(*topo)))
		return ERR_PTR(-EFAULT);
	ptr += sizeof(*topo);

	if (copy_to_user(ptr, mask, mask_size))
		return ERR_PTR(-EFAULT);
	ptr += mask_size;

	return ptr;
}

static int query_gt_topology(struct xe_device *xe,
			     struct drm_xe_device_query *query)
{
	void __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = calc_topo_query_size(xe);
	struct drm_xe_query_topology_mask topo;
	struct xe_gt *gt;
	int id;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_ERR(xe, query->size != size)) {
		return -EINVAL;
	}

	for_each_gt(gt, xe, id) {
		topo.gt_id = id;

		topo.type = XE_TOPO_DSS_GEOMETRY;
		query_ptr = copy_mask(query_ptr, &topo,
				      gt->fuse_topo.g_dss_mask,
				      sizeof(gt->fuse_topo.g_dss_mask));
		if (IS_ERR(query_ptr))
			return PTR_ERR(query_ptr);

		topo.type = XE_TOPO_DSS_COMPUTE;
		query_ptr = copy_mask(query_ptr, &topo,
				      gt->fuse_topo.c_dss_mask,
				      sizeof(gt->fuse_topo.c_dss_mask));
		if (IS_ERR(query_ptr))
			return PTR_ERR(query_ptr);

		topo.type = XE_TOPO_EU_PER_DSS;
		query_ptr = copy_mask(query_ptr, &topo,
				      gt->fuse_topo.eu_mask_per_dss,
				      sizeof(gt->fuse_topo.eu_mask_per_dss));
		if (IS_ERR(query_ptr))
			return PTR_ERR(query_ptr);
	}

	return 0;
}
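
/*
 * Illustrative userspace sketch (not part of the driver): decoding the
 * packed topology stream produced above. Per GT, three records are
 * emitted back to back, each a struct drm_xe_query_topology_mask header
 * followed by num_bytes of raw mask data. The query id
 * DRM_XE_DEVICE_QUERY_GT_TOPOLOGY is assumed from the uAPI header; the
 * header/mask layout itself follows directly from copy_mask() and
 * query_gt_topology() above.
 *
 *	// Assumed includes: <stdint.h>, <stdio.h>, <stdlib.h>,
 *	// <string.h>, <sys/ioctl.h> and <drm/xe_drm.h>.
 *	static int print_topology(int fd)
 *	{
 *		struct drm_xe_device_query q = {
 *			.query = DRM_XE_DEVICE_QUERY_GT_TOPOLOGY,
 *		};
 *		uint8_t *buf, *p;
 *
 *		// First pass: learn the total size of the packed stream.
 *		if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q))
 *			return -1;
 *
 *		buf = malloc(q.size);
 *		if (!buf)
 *			return -1;
 *		q.data = (uintptr_t)buf;
 *
 *		// Second pass: fetch the records themselves.
 *		if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q)) {
 *			free(buf);
 *			return -1;
 *		}
 *
 *		for (p = buf; p < buf + q.size; ) {
 *			struct drm_xe_query_topology_mask topo;
 *
 *			memcpy(&topo, p, sizeof(topo));
 *			p += sizeof(topo);
 *			printf("gt %u type %u: %u mask bytes\n",
 *			       topo.gt_id, topo.type, topo.num_bytes);
 *			p += topo.num_bytes;	// raw mask follows the header
 *		}
 *
 *		free(buf);
 *		return 0;
 *	}
 */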

static int (* const xe_query_funcs[])(struct xe_device *xe,
				      struct drm_xe_device_query *query) = {
	query_engines,
	query_memory_usage,
	query_config,
	query_gts,
	query_hwconfig,
	query_gt_topology,
};

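/*
 * xe_query_ioctl - entry point for the device query ioctl
 *
 * query->query indexes xe_query_funcs, so the table order above must
 * match the uAPI DRM_XE_DEVICE_QUERY_* numbering. The index is bounds
 * checked and sanitized with array_index_nospec() before the dispatch.
 */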
int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct drm_xe_device_query *query = data;
	u32 idx;

	if (XE_IOCTL_ERR(xe, query->extensions != 0))
		return -EINVAL;

	if (XE_IOCTL_ERR(xe, query->query >= ARRAY_SIZE(xe_query_funcs)))
		return -EINVAL;

	idx = array_index_nospec(query->query, ARRAY_SIZE(xe_query_funcs));
	if (XE_IOCTL_ERR(xe, !xe_query_funcs[idx]))
		return -EINVAL;

	return xe_query_funcs[idx](xe, query);
}