// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_cccb.h"
#include "pvr_context.h"
#include "pvr_device.h"
#include "pvr_drv.h"
#include "pvr_gem.h"
#include "pvr_job.h"
#include "pvr_power.h"
#include "pvr_rogue_fwif.h"
#include "pvr_rogue_fwif_common.h"
#include "pvr_rogue_fwif_resetframework.h"
#include "pvr_stream.h"
#include "pvr_stream_defs.h"
#include "pvr_vm.h"

#include <drm/drm_auth.h>
#include <drm/drm_managed.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/xarray.h>

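/**
 * remap_priority() - Remap a userspace context priority to a driver priority.
 * @pvr_file: File the context is being created for.
 * @uapi_priority: Priority value passed by userspace.
 * @priority_out: Location to store the remapped priority.
 *
 * %DRM_PVR_CTX_PRIORITY_HIGH is restricted to callers with %CAP_SYS_NICE or
 * DRM master privileges.
 *
 * Return:
 *  * 0 on success,
 *  * -%EACCES if the caller may not use the requested priority, or
 *  * -%EINVAL if the priority value is not recognised.
 */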
static int
remap_priority(struct pvr_file *pvr_file, s32 uapi_priority,
	       enum pvr_context_priority *priority_out)
{
	switch (uapi_priority) {
	case DRM_PVR_CTX_PRIORITY_LOW:
		*priority_out = PVR_CTX_PRIORITY_LOW;
		break;
	case DRM_PVR_CTX_PRIORITY_NORMAL:
		*priority_out = PVR_CTX_PRIORITY_MEDIUM;
		break;
	case DRM_PVR_CTX_PRIORITY_HIGH:
		if (!capable(CAP_SYS_NICE) && !drm_is_current_master(from_pvr_file(pvr_file)))
			return -EACCES;
		*priority_out = PVR_CTX_PRIORITY_HIGH;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

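/**
 * get_fw_obj_size() - Get the size of the FW context object for a context type.
 * @type: Context type.
 *
 * Return:
 *  * The size of the FW context object in bytes, or
 *  * -%EINVAL if @type is not a valid context type.
 */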
static int get_fw_obj_size(enum drm_pvr_ctx_type type)
{
	switch (type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		return sizeof(struct rogue_fwif_fwrendercontext);
	case DRM_PVR_CTX_TYPE_COMPUTE:
		return sizeof(struct rogue_fwif_fwcomputecontext);
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		return sizeof(struct rogue_fwif_fwtransfercontext);
	}

	return -EINVAL;
}

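/**
 * process_static_context_state() - Copy a static context state stream from
 * userspace and process it into a FW context object.
 * @pvr_dev: Target PowerVR device.
 * @cmd_defs: Stream definitions the stream is validated against.
 * @stream_user_ptr: Userspace pointer to the stream.
 * @stream_size: Size of the stream, in bytes.
 * @dest: Destination within the FW context object.
 *
 * Return:
 *  * 0 on success,
 *  * -%ENOMEM if the temporary stream buffer can't be allocated,
 *  * -%EFAULT if the stream can't be copied from userspace, or
 *  * Any error returned by pvr_stream_process().
 */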
static int
process_static_context_state(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs,
			     u64 stream_user_ptr, u32 stream_size, void *dest)
{
	void *stream;
	int err;

	stream = kzalloc(stream_size, GFP_KERNEL);
	if (!stream)
		return -ENOMEM;

	if (copy_from_user(stream, u64_to_user_ptr(stream_user_ptr), stream_size)) {
		err = -EFAULT;
		goto err_free;
	}

	err = pvr_stream_process(pvr_dev, cmd_defs, stream, stream_size, dest);
	if (err)
		goto err_free;

	kfree(stream);

	return 0;

err_free:
	kfree(stream);

	return err;
}

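/* Render contexts require a static context state; process it into the FW object. */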
static int init_render_fw_objs(struct pvr_context *ctx,
			       struct drm_pvr_ioctl_create_context_args *args,
			       void *fw_ctx_map)
{
	struct rogue_fwif_static_rendercontext_state *static_rendercontext_state;
	struct rogue_fwif_fwrendercontext *fw_render_context = fw_ctx_map;

	if (!args->static_context_state_len)
		return -EINVAL;

	static_rendercontext_state = &fw_render_context->static_render_context_state;

	/* Copy static render context state from userspace. */
	return process_static_context_state(ctx->pvr_dev,
					    &pvr_static_render_context_state_stream,
					    args->static_context_state,
					    args->static_context_state_len,
					    &static_rendercontext_state->ctxswitch_regs[0]);
}

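/* Compute contexts require a static context state; process it into the FW object. */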
static int init_compute_fw_objs(struct pvr_context *ctx,
				struct drm_pvr_ioctl_create_context_args *args,
				void *fw_ctx_map)
{
	struct rogue_fwif_fwcomputecontext *fw_compute_context = fw_ctx_map;
	struct rogue_fwif_cdm_registers_cswitch *ctxswitch_regs;

	if (!args->static_context_state_len)
		return -EINVAL;

	ctxswitch_regs = &fw_compute_context->static_compute_context_state.ctxswitch_regs;

	/* Copy static compute context state from userspace. */
	return process_static_context_state(ctx->pvr_dev,
					    &pvr_static_compute_context_state_stream,
					    args->static_context_state,
					    args->static_context_state_len,
					    ctxswitch_regs);
}

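/* Transfer contexts carry no static context state; reject any that is passed. */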
static int init_transfer_fw_objs(struct pvr_context *ctx,
				 struct drm_pvr_ioctl_create_context_args *args,
				 void *fw_ctx_map)
{
	if (args->static_context_state_len)
		return -EINVAL;

	return 0;
}

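/* Dispatch FW object initialization based on the context type. */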
static int init_fw_objs(struct pvr_context *ctx,
			struct drm_pvr_ioctl_create_context_args *args,
			void *fw_ctx_map)
{
	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		return init_render_fw_objs(ctx, args, fw_ctx_map);
	case DRM_PVR_CTX_TYPE_COMPUTE:
		return init_compute_fw_objs(ctx, args, fw_ctx_map);
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		return init_transfer_fw_objs(ctx, args, fw_ctx_map);
	}

	return -EINVAL;
}

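/*
 * Initializer passed to pvr_fw_object_create(): copies the staged context
 * data into the CPU mapping of the FW context object.
 */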
static void
ctx_fw_data_init(void *cpu_ptr, void *priv)
{
	struct pvr_context *ctx = priv;

	memcpy(cpu_ptr, ctx->data, ctx->data_size);
}

/**
 * pvr_context_destroy_queues() - Destroy all queues attached to a context.
 * @ctx: Context to destroy queues on.
 *
 * Should be called when the last reference to a context object is dropped.
 * It releases all resources attached to the queues bound to this context.
 */
static void pvr_context_destroy_queues(struct pvr_context *ctx)
{
	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		pvr_queue_destroy(ctx->queues.fragment);
		pvr_queue_destroy(ctx->queues.geometry);
		break;
	case DRM_PVR_CTX_TYPE_COMPUTE:
		pvr_queue_destroy(ctx->queues.compute);
		break;
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		pvr_queue_destroy(ctx->queues.transfer);
		break;
	}
}

/**
 * pvr_context_create_queues() - Create all queues attached to a context.
 * @ctx: Context to create queues on.
 * @args: Context creation arguments passed by userspace.
 * @fw_ctx_map: CPU mapping of the FW context object.
 *
 * Return:
 *  * 0 on success, or
 *  * A negative error code otherwise.
 */
static int pvr_context_create_queues(struct pvr_context *ctx,
				     struct drm_pvr_ioctl_create_context_args *args,
				     void *fw_ctx_map)
{
	int err;

	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		ctx->queues.geometry = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_GEOMETRY,
							args, fw_ctx_map);
		if (IS_ERR(ctx->queues.geometry)) {
			err = PTR_ERR(ctx->queues.geometry);
			ctx->queues.geometry = NULL;
			goto err_destroy_queues;
		}

		ctx->queues.fragment = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_FRAGMENT,
							args, fw_ctx_map);
		if (IS_ERR(ctx->queues.fragment)) {
			err = PTR_ERR(ctx->queues.fragment);
			ctx->queues.fragment = NULL;
			goto err_destroy_queues;
		}
		return 0;

	case DRM_PVR_CTX_TYPE_COMPUTE:
		ctx->queues.compute = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_COMPUTE,
						       args, fw_ctx_map);
		if (IS_ERR(ctx->queues.compute)) {
			err = PTR_ERR(ctx->queues.compute);
			ctx->queues.compute = NULL;
			goto err_destroy_queues;
		}
		return 0;

	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		ctx->queues.transfer = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_TRANSFER_FRAG,
							args, fw_ctx_map);
		if (IS_ERR(ctx->queues.transfer)) {
			err = PTR_ERR(ctx->queues.transfer);
			ctx->queues.transfer = NULL;
			goto err_destroy_queues;
		}
		return 0;
	}

	return -EINVAL;

err_destroy_queues:
	pvr_context_destroy_queues(ctx);
	return err;
}

/**
 * pvr_context_kill_queues() - Kill queues attached to context.
 * @ctx: Context to kill queues on.
 *
 * Killing the queues makes them unusable for future jobs, while still giving
 * currently submitted jobs a chance to finish. Queue resources will stay
 * around until pvr_context_destroy_queues() is called.
 */
static void pvr_context_kill_queues(struct pvr_context *ctx)
{
	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		pvr_queue_kill(ctx->queues.fragment);
		pvr_queue_kill(ctx->queues.geometry);
		break;
	case DRM_PVR_CTX_TYPE_COMPUTE:
		pvr_queue_kill(ctx->queues.compute);
		break;
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		pvr_queue_kill(ctx->queues.transfer);
		break;
	}
}

/**
 * pvr_context_create() - Create a context.
 * @pvr_file: File to attach the created context to.
 * @args: Context creation arguments.
 *
 * Return:
 *  * 0 on success, or
 *  * A negative error code on failure.
 */
int pvr_context_create(struct pvr_file *pvr_file, struct drm_pvr_ioctl_create_context_args *args)
{
	struct pvr_device *pvr_dev = pvr_file->pvr_dev;
	struct pvr_context *ctx;
	int ctx_size;
	int err;

	/* Context creation flags are currently unused and must be zero. */
	if (args->flags)
		return -EINVAL;

	ctx_size = get_fw_obj_size(args->type);
	if (ctx_size < 0)
		return ctx_size;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->data_size = ctx_size;
	ctx->type = args->type;
	ctx->flags = args->flags;
	ctx->pvr_dev = pvr_dev;
	kref_init(&ctx->ref_count);

	err = remap_priority(pvr_file, args->priority, &ctx->priority);
	if (err)
		goto err_free_ctx;

	ctx->vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
	if (IS_ERR(ctx->vm_ctx)) {
		err = PTR_ERR(ctx->vm_ctx);
		goto err_free_ctx;
	}

	ctx->data = kzalloc(ctx_size, GFP_KERNEL);
	if (!ctx->data) {
		err = -ENOMEM;
		goto err_put_vm;
	}

	err = pvr_context_create_queues(ctx, args, ctx->data);
	if (err)
		goto err_free_ctx_data;

	err = init_fw_objs(ctx, args, ctx->data);
	if (err)
		goto err_destroy_queues;

	err = pvr_fw_object_create(pvr_dev, ctx_size, PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
				   ctx_fw_data_init, ctx, &ctx->fw_obj);
	if (err)
		goto err_destroy_queues;

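	/*
	 * Publish the context in the device-wide ID xarray. From this point
	 * on, other threads may look it up and take references.
	 */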
	err = xa_alloc(&pvr_dev->ctx_ids, &ctx->ctx_id, ctx, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto err_destroy_fw_obj;

	err = xa_alloc(&pvr_file->ctx_handles, &args->handle, ctx, xa_limit_32b, GFP_KERNEL);
	if (err) {
		/*
		 * It's possible that another thread could have taken a reference on the context at
		 * this point as it is in the ctx_ids xarray. Therefore instead of directly
		 * destroying the context, drop a reference instead.
		 */
		pvr_context_put(ctx);
		return err;
	}

	return 0;

err_destroy_fw_obj:
	pvr_fw_object_destroy(ctx->fw_obj);

err_destroy_queues:
	pvr_context_destroy_queues(ctx);

err_free_ctx_data:
	kfree(ctx->data);

err_put_vm:
	pvr_vm_context_put(ctx->vm_ctx);

err_free_ctx:
	kfree(ctx);
	return err;
}

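/*
 * Called when the last reference is dropped: unpublishes the context ID, then
 * tears down the queues, FW object, context data and VM context reference.
 */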
static void
pvr_context_release(struct kref *ref_count)
{
	struct pvr_context *ctx =
		container_of(ref_count, struct pvr_context, ref_count);
	struct pvr_device *pvr_dev = ctx->pvr_dev;

	xa_erase(&pvr_dev->ctx_ids, ctx->ctx_id);
	pvr_context_destroy_queues(ctx);
	pvr_fw_object_destroy(ctx->fw_obj);
	kfree(ctx->data);
	pvr_vm_context_put(ctx->vm_ctx);
	kfree(ctx);
}

/**
 * pvr_context_put() - Release a reference on a context.
 * @ctx: Target context.
 */
void
pvr_context_put(struct pvr_context *ctx)
{
	if (ctx)
		kref_put(&ctx->ref_count, pvr_context_release);
}

/**
 * pvr_context_destroy() - Destroy a context.
 * @pvr_file: Pointer to pvr_file structure.
 * @handle: Userspace context handle.
 *
 * Removes the context from the context list and drops the initial reference.
 * The context will then be destroyed once all outstanding references are
 * dropped.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if the context is not in the context list.
 */
int
pvr_context_destroy(struct pvr_file *pvr_file, u32 handle)
{
	struct pvr_context *ctx = xa_erase(&pvr_file->ctx_handles, handle);

	if (!ctx)
		return -EINVAL;

	/* Make sure nothing can be queued to the queues after this point. */
	pvr_context_kill_queues(ctx);

	/* Release the reference held by the handle set. */
	pvr_context_put(ctx);

	return 0;
}

/**
 * pvr_destroy_contexts_for_file() - Destroy any contexts associated with the given file.
 * @pvr_file: Pointer to pvr_file structure.
 *
 * Removes all contexts associated with @pvr_file from the device context list and drops initial
 * references. Contexts will then be destroyed once all outstanding references are dropped.
 */
void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file)
{
	struct pvr_context *ctx;
	unsigned long handle;

	xa_for_each(&pvr_file->ctx_handles, handle, ctx)
		pvr_context_destroy(pvr_file, handle);
}

/**
 * pvr_context_device_init() - Device-level initialization of context-related resources.
 * @pvr_dev: The device to initialize.
 */
void pvr_context_device_init(struct pvr_device *pvr_dev)
{
	xa_init_flags(&pvr_dev->ctx_ids, XA_FLAGS_ALLOC1);
}

/**
 * pvr_context_device_fini() - Device-level cleanup of context-related resources.
 * @pvr_dev: The device to clean up.
 */
void pvr_context_device_fini(struct pvr_device *pvr_dev)
{
	WARN_ON(!xa_empty(&pvr_dev->ctx_ids));
	xa_destroy(&pvr_dev->ctx_ids);
}