// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_query.h"

#include <linux/nospec.h>

#include <drm/ttm/ttm_placement.h>
#include <drm/xe_drm.h>

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_engine.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc_hwconfig.h"
#include "xe_macros.h"

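/*
 * Every query handler below implements the same two-call protocol: a first
 * ioctl with query->size == 0 only reports the required result size, and a
 * second call with a matching, caller-allocated buffer fills it in. A
 * userspace caller would look roughly like the sketch below (assumptions:
 * the DRM_IOCTL_XE_DEVICE_QUERY wrapper and query numbers from xe_drm.h,
 * an already-open device fd, and no error handling):
 *
 *	struct drm_xe_device_query query = {
 *		.query = DRM_XE_DEVICE_QUERY_ENGINES,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *	query.data = (uintptr_t)malloc(query.size);
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 */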
static const enum xe_engine_class xe_to_user_engine_class[] = {
	[XE_ENGINE_CLASS_RENDER] = DRM_XE_ENGINE_CLASS_RENDER,
	[XE_ENGINE_CLASS_COPY] = DRM_XE_ENGINE_CLASS_COPY,
	[XE_ENGINE_CLASS_VIDEO_DECODE] = DRM_XE_ENGINE_CLASS_VIDEO_DECODE,
	[XE_ENGINE_CLASS_VIDEO_ENHANCE] = DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[XE_ENGINE_CLASS_COMPUTE] = DRM_XE_ENGINE_CLASS_COMPUTE,
};

static size_t calc_hw_engine_info_size(struct xe_device *xe)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_gt *gt;
	u8 gt_id;
	int i = 0;

	for_each_gt(gt, xe, gt_id)
		for_each_hw_engine(hwe, gt, id) {
			if (xe_hw_engine_is_reserved(hwe))
				continue;
			i++;
		}

	return i * sizeof(struct drm_xe_engine_class_instance);
}

static int query_engines(struct xe_device *xe,
			 struct drm_xe_device_query *query)
{
	size_t size = calc_hw_engine_info_size(xe);
	struct drm_xe_engine_class_instance __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_engine_class_instance *hw_engine_info;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_gt *gt;
	u8 gt_id;
	int i = 0;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_ERR(xe, query->size != size)) {
		return -EINVAL;
	}

	hw_engine_info = kmalloc(size, GFP_KERNEL);
	if (XE_IOCTL_ERR(xe, !hw_engine_info))
		return -ENOMEM;

	for_each_gt(gt, xe, gt_id)
		for_each_hw_engine(hwe, gt, id) {
			if (xe_hw_engine_is_reserved(hwe))
				continue;

			hw_engine_info[i].engine_class =
				xe_to_user_engine_class[hwe->class];
			hw_engine_info[i].engine_instance =
				hwe->logical_instance;
			hw_engine_info[i++].gt_id = gt->info.id;
		}

	if (copy_to_user(query_ptr, hw_engine_info, size)) {
		kfree(hw_engine_info);
		return -EFAULT;
	}
	kfree(hw_engine_info);

	return 0;
}

static size_t calc_memory_usage_size(struct xe_device *xe)
{
	u32 num_managers = 1;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i)
		if (ttm_manager_type(&xe->ttm, i))
			num_managers++;

	return offsetof(struct drm_xe_query_mem_usage, regions[num_managers]);
}

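/*
 * Region 0 is always system memory (the TT manager); any VRAM managers
 * that exist follow, one region each.
 */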
static int query_memory_usage(struct xe_device *xe,
			      struct drm_xe_device_query *query)
{
	size_t size = calc_memory_usage_size(xe);
	struct drm_xe_query_mem_usage *usage;
	struct drm_xe_query_mem_usage __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct ttm_resource_manager *man;
	int ret, i;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_ERR(xe, query->size != size)) {
		return -EINVAL;
	}

	usage = kzalloc(size, GFP_KERNEL);
	if (XE_IOCTL_ERR(xe, !usage))
		return -ENOMEM;

	man = ttm_manager_type(&xe->ttm, XE_PL_TT);
	usage->regions[0].mem_class = XE_MEM_REGION_CLASS_SYSMEM;
	usage->regions[0].instance = 0;
	usage->regions[0].min_page_size = PAGE_SIZE;
	usage->regions[0].max_page_size = PAGE_SIZE;
	usage->regions[0].total_size = man->size << PAGE_SHIFT;
	usage->regions[0].used = ttm_resource_manager_usage(man);
	usage->num_regions = 1;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			usage->regions[usage->num_regions].mem_class =
				XE_MEM_REGION_CLASS_VRAM;
			usage->regions[usage->num_regions].instance =
				usage->num_regions;
			usage->regions[usage->num_regions].min_page_size =
				xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ?
				SZ_64K : PAGE_SIZE;
			usage->regions[usage->num_regions].max_page_size =
				SZ_1G;
			usage->regions[usage->num_regions].total_size =
				man->size;
			usage->regions[usage->num_regions++].used =
				ttm_resource_manager_usage(man);
		}
	}

	if (copy_to_user(query_ptr, usage, size))
		ret = -EFAULT;
	else
		ret = 0;

	kfree(usage);
	return ret;
}

static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
{
	u32 num_params = XE_QUERY_CONFIG_NUM_PARAM;
	size_t size =
		sizeof(struct drm_xe_query_config) + num_params * sizeof(u64);
	struct drm_xe_query_config __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_config *config;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_ERR(xe, query->size != size)) {
		return -EINVAL;
	}

	config = kzalloc(size, GFP_KERNEL);
	if (XE_IOCTL_ERR(xe, !config))
		return -ENOMEM;

	config->num_params = num_params;
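	/* PCI device id in the low 16 bits, revision in the next 16. */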
	config->info[XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
		xe->info.devid | (xe->info.revid << 16);
	if (xe_device_get_root_tile(xe)->mem.vram.size)
		config->info[XE_QUERY_CONFIG_FLAGS] =
			XE_QUERY_CONFIG_FLAGS_HAS_VRAM;
	if (xe->info.enable_guc)
		config->info[XE_QUERY_CONFIG_FLAGS] |=
			XE_QUERY_CONFIG_FLAGS_USE_GUC;
	config->info[XE_QUERY_CONFIG_MIN_ALIGNEMENT] =
		xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
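	/*
	 * Each page-table level decodes 9 bits on top of the 4 KiB page
	 * offset, so e.g. vm_max_level == 3 (a four-level table) yields
	 * 12 + 9 * 4 = 48 bits of virtual address space.
	 */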
	config->info[XE_QUERY_CONFIG_VA_BITS] = 12 +
		(9 * (xe->info.vm_max_level + 1));
	config->info[XE_QUERY_CONFIG_GT_COUNT] = xe->info.gt_count;
	config->info[XE_QUERY_CONFIG_MEM_REGION_COUNT] =
		hweight_long(xe->info.mem_region_mask);
	config->info[XE_QUERY_CONFIG_MAX_ENGINE_PRIORITY] =
		xe_engine_device_get_max_priority(xe);

	if (copy_to_user(query_ptr, config, size)) {
		kfree(config);
		return -EFAULT;
	}
	kfree(config);

	return 0;
}

static int query_gts(struct xe_device *xe, struct drm_xe_device_query *query)
{
	struct xe_gt *gt;
	size_t size = sizeof(struct drm_xe_query_gts) +
		xe->info.gt_count * sizeof(struct drm_xe_query_gt);
	struct drm_xe_query_gts __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_gts *gts;
	u8 id;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_ERR(xe, query->size != size)) {
		return -EINVAL;
	}

	gts = kzalloc(size, GFP_KERNEL);
	if (XE_IOCTL_ERR(xe, !gts))
		return -ENOMEM;

	gts->num_gt = xe->info.gt_count;
	for_each_gt(gt, xe, id) {
		if (xe_gt_is_media_type(gt))
			gts->gts[id].type = XE_QUERY_GT_TYPE_MEDIA;
		else if (gt_to_tile(gt)->id > 0)
			gts->gts[id].type = XE_QUERY_GT_TYPE_REMOTE;
		else
			gts->gts[id].type = XE_QUERY_GT_TYPE_MAIN;
		gts->gts[id].instance = id;
		gts->gts[id].clock_freq = gt->info.clock_freq;
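		/*
		 * Bit 0 of these masks is system memory; VRAM instances
		 * start at bit 1, one bit per tile.
		 */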
		if (!IS_DGFX(xe))
			gts->gts[id].native_mem_regions = 0x1;
		else
			gts->gts[id].native_mem_regions =
				BIT(gt_to_tile(gt)->id) << 1;
		gts->gts[id].slow_mem_regions = xe->info.mem_region_mask ^
			gts->gts[id].native_mem_regions;
	}

	if (copy_to_user(query_ptr, gts, size)) {
		kfree(gts);
		return -EFAULT;
	}
	kfree(gts);

	return 0;
}

static int query_hwconfig(struct xe_device *xe,
			  struct drm_xe_device_query *query)
{
	struct xe_gt *gt = xe_root_mmio_gt(xe);
	size_t size = xe_guc_hwconfig_size(&gt->uc.guc);
	void __user *query_ptr = u64_to_user_ptr(query->data);
	void *hwconfig;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_ERR(xe, query->size != size)) {
		return -EINVAL;
	}

	hwconfig = kzalloc(size, GFP_KERNEL);
	if (XE_IOCTL_ERR(xe, !hwconfig))
		return -ENOMEM;

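	/* Keep a memory-access reference while copying out the GuC hwconfig. */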
	xe_device_mem_access_get(xe);
	xe_guc_hwconfig_copy(&gt->uc.guc, hwconfig);
	xe_device_mem_access_put(xe);

	if (copy_to_user(query_ptr, hwconfig, size)) {
		kfree(hwconfig);
		return -EFAULT;
	}
	kfree(hwconfig);

	return 0;
}

static size_t calc_topo_query_size(struct xe_device *xe)
{
	return xe->info.gt_count *
		(3 * sizeof(struct drm_xe_query_topology_mask) +
		 sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) +
		 sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) +
		 sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss));
}

static void __user *copy_mask(void __user *ptr,
			      struct drm_xe_query_topology_mask *topo,
			      void *mask, size_t mask_size)
{
	topo->num_bytes = mask_size;

	if (copy_to_user(ptr, topo, sizeof(*topo)))
		return ERR_PTR(-EFAULT);
	ptr += sizeof(*topo);

	if (copy_to_user(ptr, mask, mask_size))
		return ERR_PTR(-EFAULT);
	ptr += mask_size;

	return ptr;
}

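/*
 * The topology query returns, for each GT, three variable-size records,
 * each a struct drm_xe_query_topology_mask header followed immediately by
 * num_bytes of mask data. A consumer walks the buffer record by record,
 * advancing by sizeof(header) + header.num_bytes each time.
 */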
static int query_gt_topology(struct xe_device *xe,
			     struct drm_xe_device_query *query)
{
	void __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = calc_topo_query_size(xe);
	struct drm_xe_query_topology_mask topo;
	struct xe_gt *gt;
	int id;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_ERR(xe, query->size != size)) {
		return -EINVAL;
	}

	for_each_gt(gt, xe, id) {
		topo.gt_id = id;

		topo.type = XE_TOPO_DSS_GEOMETRY;
		query_ptr = copy_mask(query_ptr, &topo,
				      gt->fuse_topo.g_dss_mask,
				      sizeof(gt->fuse_topo.g_dss_mask));
		if (IS_ERR(query_ptr))
			return PTR_ERR(query_ptr);

		topo.type = XE_TOPO_DSS_COMPUTE;
		query_ptr = copy_mask(query_ptr, &topo,
				      gt->fuse_topo.c_dss_mask,
				      sizeof(gt->fuse_topo.c_dss_mask));
		if (IS_ERR(query_ptr))
			return PTR_ERR(query_ptr);

		topo.type = XE_TOPO_EU_PER_DSS;
		query_ptr = copy_mask(query_ptr, &topo,
				      gt->fuse_topo.eu_mask_per_dss,
				      sizeof(gt->fuse_topo.eu_mask_per_dss));
		if (IS_ERR(query_ptr))
			return PTR_ERR(query_ptr);
	}

	return 0;
}

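/* Indexed by query->query; the order must match the uAPI query numbers. */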
static int (* const xe_query_funcs[])(struct xe_device *xe,
				      struct drm_xe_device_query *query) = {
	query_engines,
	query_memory_usage,
	query_config,
	query_gts,
	query_hwconfig,
	query_gt_topology,
};

int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct drm_xe_device_query *query = data;
	u32 idx;

	if (XE_IOCTL_ERR(xe, query->extensions) ||
	    XE_IOCTL_ERR(xe, query->reserved[0] || query->reserved[1]))
		return -EINVAL;

	if (XE_IOCTL_ERR(xe, query->query >= ARRAY_SIZE(xe_query_funcs)))
		return -EINVAL;

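	/* Clamp the table index under speculation (Spectre v1 hardening). */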
	idx = array_index_nospec(query->query, ARRAY_SIZE(xe_query_funcs));
	if (XE_IOCTL_ERR(xe, !xe_query_funcs[idx]))
		return -EINVAL;

	return xe_query_funcs[idx](xe, query);
}