// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_cccb.h"
#include "pvr_context.h"
#include "pvr_device.h"
#include "pvr_drv.h"
#include "pvr_gem.h"
#include "pvr_job.h"
#include "pvr_power.h"
#include "pvr_rogue_fwif.h"
#include "pvr_rogue_fwif_common.h"
#include "pvr_rogue_fwif_resetframework.h"
#include "pvr_stream.h"
#include "pvr_stream_defs.h"
#include "pvr_vm.h"

#include <drm/drm_auth.h>
#include <drm/drm_managed.h>

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/xarray.h>

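/**
 * remap_priority() - Map a userspace context priority onto the internal priority enum.
 * @pvr_file: File the new context will be attached to.
 * @uapi_priority: DRM_PVR_CTX_PRIORITY_* value supplied by userspace.
 * @priority_out: Location to store the remapped priority.
 *
 * High priority is restricted to callers with %CAP_SYS_NICE or DRM master
 * privileges.
 *
 * Return:
 *  * 0 on success,
 *  * -%EACCES if the caller may not request high priority, or
 *  * -%EINVAL if @uapi_priority is not a valid priority value.
 */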
static int
remap_priority(struct pvr_file *pvr_file, s32 uapi_priority,
	       enum pvr_context_priority *priority_out)
{
	switch (uapi_priority) {
	case DRM_PVR_CTX_PRIORITY_LOW:
		*priority_out = PVR_CTX_PRIORITY_LOW;
		break;
	case DRM_PVR_CTX_PRIORITY_NORMAL:
		*priority_out = PVR_CTX_PRIORITY_MEDIUM;
		break;
	case DRM_PVR_CTX_PRIORITY_HIGH:
		if (!capable(CAP_SYS_NICE) && !drm_is_current_master(from_pvr_file(pvr_file)))
			return -EACCES;
		*priority_out = PVR_CTX_PRIORITY_HIGH;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

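/**
 * get_fw_obj_size() - Get the size of the FW context structure for a context type.
 * @type: Context type.
 *
 * Return:
 *  * The size of the matching FW context structure, in bytes, or
 *  * -%EINVAL if @type is not a valid context type.
 */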
static int get_fw_obj_size(enum drm_pvr_ctx_type type)
{
	switch (type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		return sizeof(struct rogue_fwif_fwrendercontext);
	case DRM_PVR_CTX_TYPE_COMPUTE:
		return sizeof(struct rogue_fwif_fwcomputecontext);
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		return sizeof(struct rogue_fwif_fwtransfercontext);
	}

	return -EINVAL;
}

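/**
 * process_static_context_state() - Copy and decode static context state from userspace.
 * @pvr_dev: Target PowerVR device.
 * @cmd_defs: Stream definition used to process the state.
 * @stream_user_ptr: Userspace pointer to the static context state stream.
 * @stream_size: Size of the stream, in bytes.
 * @dest: Destination buffer for the processed state.
 *
 * Return:
 *  * 0 on success, or
 *  * A negative error code on failure.
 */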
static int
process_static_context_state(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs,
			     u64 stream_user_ptr, u32 stream_size, void *dest)
{
	void *stream;
	int err;

	stream = memdup_user(u64_to_user_ptr(stream_user_ptr), stream_size);
	if (IS_ERR(stream))
		return PTR_ERR(stream);

	err = pvr_stream_process(pvr_dev, cmd_defs, stream, stream_size, dest);

	kfree(stream);

	return err;
}

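/* Initialize the FW render context from the static context state supplied by userspace. */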
static int init_render_fw_objs(struct pvr_context *ctx,
			       struct drm_pvr_ioctl_create_context_args *args,
			       void *fw_ctx_map)
{
	struct rogue_fwif_static_rendercontext_state *static_rendercontext_state;
	struct rogue_fwif_fwrendercontext *fw_render_context = fw_ctx_map;

	if (!args->static_context_state_len)
		return -EINVAL;

	static_rendercontext_state = &fw_render_context->static_render_context_state;

	/* Copy static render context state from userspace. */
	return process_static_context_state(ctx->pvr_dev,
					    &pvr_static_render_context_state_stream,
					    args->static_context_state,
					    args->static_context_state_len,
					    &static_rendercontext_state->ctxswitch_regs[0]);
}

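/* Initialize the FW compute context from the static context state supplied by userspace. */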
static int init_compute_fw_objs(struct pvr_context *ctx,
				struct drm_pvr_ioctl_create_context_args *args,
				void *fw_ctx_map)
{
	struct rogue_fwif_fwcomputecontext *fw_compute_context = fw_ctx_map;
	struct rogue_fwif_cdm_registers_cswitch *ctxswitch_regs;

	if (!args->static_context_state_len)
		return -EINVAL;

	ctxswitch_regs = &fw_compute_context->static_compute_context_state.ctxswitch_regs;

	/* Copy static compute context state from userspace. */
	return process_static_context_state(ctx->pvr_dev,
					    &pvr_static_compute_context_state_stream,
					    args->static_context_state,
					    args->static_context_state_len,
					    ctxswitch_regs);
}

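/* Transfer contexts take no static context state, so there is nothing to initialize. */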
static int init_transfer_fw_objs(struct pvr_context *ctx,
				 struct drm_pvr_ioctl_create_context_args *args,
				 void *fw_ctx_map)
{
	if (args->static_context_state_len)
		return -EINVAL;

	return 0;
}

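/**
 * init_fw_objs() - Initialize the FW context structure for a new context.
 * @ctx: Context being created.
 * @args: Context creation arguments passed by userspace.
 * @fw_ctx_map: CPU mapping of the FW context object.
 *
 * Dispatches to the initializer matching @ctx->type.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if @ctx->type is not a valid context type, or
 *  * Any error returned by the type-specific initializer.
 */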
static int init_fw_objs(struct pvr_context *ctx,
			struct drm_pvr_ioctl_create_context_args *args,
			void *fw_ctx_map)
{
	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		return init_render_fw_objs(ctx, args, fw_ctx_map);
	case DRM_PVR_CTX_TYPE_COMPUTE:
		return init_compute_fw_objs(ctx, args, fw_ctx_map);
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		return init_transfer_fw_objs(ctx, args, fw_ctx_map);
	}

	return -EINVAL;
}

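/**
 * ctx_fw_data_init() - Initialization callback for the FW context object.
 * @cpu_ptr: CPU mapping of the newly created FW object.
 * @priv: Opaque pointer to the owning &struct pvr_context.
 *
 * Copies the CPU-side context data into the FW object backing store.
 */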
static void
ctx_fw_data_init(void *cpu_ptr, void *priv)
{
	struct pvr_context *ctx = priv;

	memcpy(cpu_ptr, ctx->data, ctx->data_size);
}

/**
 * pvr_context_destroy_queues() - Destroy all queues attached to a context.
 * @ctx: Context to destroy queues on.
 *
 * Should be called when the last reference to a context object is dropped.
 * It releases all resources attached to the queues bound to this context.
 */
static void pvr_context_destroy_queues(struct pvr_context *ctx)
{
	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		pvr_queue_destroy(ctx->queues.fragment);
		pvr_queue_destroy(ctx->queues.geometry);
		break;
	case DRM_PVR_CTX_TYPE_COMPUTE:
		pvr_queue_destroy(ctx->queues.compute);
		break;
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		pvr_queue_destroy(ctx->queues.transfer);
		break;
	}
}

/**
 * pvr_context_create_queues() - Create all queues attached to a context.
 * @ctx: Context to create queues on.
 * @args: Context creation arguments passed by userspace.
 * @fw_ctx_map: CPU mapping of the FW context object.
 *
 * Return:
 *  * 0 on success, or
 *  * A negative error code otherwise.
 */
static int pvr_context_create_queues(struct pvr_context *ctx,
				     struct drm_pvr_ioctl_create_context_args *args,
				     void *fw_ctx_map)
{
	int err;

	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		ctx->queues.geometry = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_GEOMETRY,
							args, fw_ctx_map);
		if (IS_ERR(ctx->queues.geometry)) {
			err = PTR_ERR(ctx->queues.geometry);
			ctx->queues.geometry = NULL;
			goto err_destroy_queues;
		}

		ctx->queues.fragment = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_FRAGMENT,
							args, fw_ctx_map);
		if (IS_ERR(ctx->queues.fragment)) {
			err = PTR_ERR(ctx->queues.fragment);
			ctx->queues.fragment = NULL;
			goto err_destroy_queues;
		}
		return 0;

	case DRM_PVR_CTX_TYPE_COMPUTE:
		ctx->queues.compute = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_COMPUTE,
						       args, fw_ctx_map);
		if (IS_ERR(ctx->queues.compute)) {
			err = PTR_ERR(ctx->queues.compute);
			ctx->queues.compute = NULL;
			goto err_destroy_queues;
		}
		return 0;

	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		ctx->queues.transfer = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_TRANSFER_FRAG,
							args, fw_ctx_map);
		if (IS_ERR(ctx->queues.transfer)) {
			err = PTR_ERR(ctx->queues.transfer);
			ctx->queues.transfer = NULL;
			goto err_destroy_queues;
		}
		return 0;
	}

	return -EINVAL;

err_destroy_queues:
	pvr_context_destroy_queues(ctx);
	return err;
}

/**
 * pvr_context_kill_queues() - Kill queues attached to context.
 * @ctx: Context to kill queues on.
 *
 * Killing the queues makes them unusable for future jobs, while still giving
 * currently submitted jobs a chance to finish. Queue resources will stay
 * around until pvr_context_destroy_queues() is called.
 */
static void pvr_context_kill_queues(struct pvr_context *ctx)
{
	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		pvr_queue_kill(ctx->queues.fragment);
		pvr_queue_kill(ctx->queues.geometry);
		break;
	case DRM_PVR_CTX_TYPE_COMPUTE:
		pvr_queue_kill(ctx->queues.compute);
		break;
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		pvr_queue_kill(ctx->queues.transfer);
		break;
	}
}

/**
 * pvr_context_create() - Create a context.
 * @pvr_file: File to attach the created context to.
 * @args: Context creation arguments.
 *
 * Return:
 *  * 0 on success, or
 *  * A negative error code on failure.
 */
int pvr_context_create(struct pvr_file *pvr_file, struct drm_pvr_ioctl_create_context_args *args)
{
	struct pvr_device *pvr_dev = pvr_file->pvr_dev;
	struct pvr_context *ctx;
	int ctx_size;
	int err;

	/* Context creation flags are currently unused and must be zero. */
	if (args->flags)
		return -EINVAL;

	ctx_size = get_fw_obj_size(args->type);
	if (ctx_size < 0)
		return ctx_size;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->data_size = ctx_size;
	ctx->type = args->type;
	ctx->flags = args->flags;
	ctx->pvr_dev = pvr_dev;
	kref_init(&ctx->ref_count);

	err = remap_priority(pvr_file, args->priority, &ctx->priority);
	if (err)
		goto err_free_ctx;

	ctx->vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
	if (IS_ERR(ctx->vm_ctx)) {
		err = PTR_ERR(ctx->vm_ctx);
		goto err_free_ctx;
	}

	ctx->data = kzalloc(ctx_size, GFP_KERNEL);
	if (!ctx->data) {
		err = -ENOMEM;
		goto err_put_vm;
	}

	err = pvr_context_create_queues(ctx, args, ctx->data);
	if (err)
		goto err_free_ctx_data;

	err = init_fw_objs(ctx, args, ctx->data);
	if (err)
		goto err_destroy_queues;

	err = pvr_fw_object_create(pvr_dev, ctx_size, PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
				   ctx_fw_data_init, ctx, &ctx->fw_obj);
	if (err)
		goto err_destroy_queues;

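	/*
	 * Registering the context in the device-wide ctx_ids xarray makes it
	 * discoverable outside this function, so it must be fully set up by now.
	 */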
	err = xa_alloc(&pvr_dev->ctx_ids, &ctx->ctx_id, ctx, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto err_destroy_fw_obj;

	err = xa_alloc(&pvr_file->ctx_handles, &args->handle, ctx, xa_limit_32b, GFP_KERNEL);
	if (err) {
		/*
		 * It's possible that another thread could have taken a reference on the context at
		 * this point as it is in the ctx_ids xarray. Therefore instead of directly
		 * destroying the context, drop a reference instead.
		 */
		pvr_context_put(ctx);
		return err;
	}

	spin_lock(&pvr_dev->ctx_list_lock);
	list_add_tail(&ctx->file_link, &pvr_file->contexts);
	spin_unlock(&pvr_dev->ctx_list_lock);

	return 0;

err_destroy_fw_obj:
	pvr_fw_object_destroy(ctx->fw_obj);

err_destroy_queues:
	pvr_context_destroy_queues(ctx);

err_free_ctx_data:
	kfree(ctx->data);

err_put_vm:
	pvr_vm_context_put(ctx->vm_ctx);

err_free_ctx:
	kfree(ctx);
	return err;
}

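/**
 * pvr_context_release() - Release a context once its last reference is dropped.
 * @ref_count: Reference counter embedded in the context.
 *
 * Called via kref_put(). Unlinks the context from the per-file list and the
 * device ID xarray, then frees all resources owned by the context.
 */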
static void
pvr_context_release(struct kref *ref_count)
{
	struct pvr_context *ctx =
		container_of(ref_count, struct pvr_context, ref_count);
	struct pvr_device *pvr_dev = ctx->pvr_dev;

	WARN_ON(in_interrupt());
	spin_lock(&pvr_dev->ctx_list_lock);
	list_del(&ctx->file_link);
	spin_unlock(&pvr_dev->ctx_list_lock);

	xa_erase(&pvr_dev->ctx_ids, ctx->ctx_id);
	pvr_context_destroy_queues(ctx);
	pvr_fw_object_destroy(ctx->fw_obj);
	kfree(ctx->data);
	pvr_vm_context_put(ctx->vm_ctx);
	kfree(ctx);
}

/**
 * pvr_context_put() - Release reference on context
 * @ctx: Target context.
 */
void
pvr_context_put(struct pvr_context *ctx)
{
	if (ctx)
		kref_put(&ctx->ref_count, pvr_context_release);
}

/**
 * pvr_context_destroy() - Destroy context
 * @pvr_file: Pointer to pvr_file structure.
 * @handle: Userspace context handle.
 *
 * Removes context from context list and drops initial reference. Context will
 * then be destroyed once all outstanding references are dropped.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if context not in context list.
 */
int
pvr_context_destroy(struct pvr_file *pvr_file, u32 handle)
{
	struct pvr_context *ctx = xa_erase(&pvr_file->ctx_handles, handle);

	if (!ctx)
		return -EINVAL;

	/* Make sure nothing can be queued to the queues after that point. */
	pvr_context_kill_queues(ctx);

	/* Release the reference held by the handle set. */
	pvr_context_put(ctx);

	return 0;
}

/**
 * pvr_destroy_contexts_for_file() - Destroy any contexts associated with the given file
 * @pvr_file: Pointer to pvr_file structure.
 *
 * Removes all contexts associated with @pvr_file from the device context list and drops initial
 * references. Contexts will then be destroyed once all outstanding references are dropped.
 */
void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file)
{
	struct pvr_device *pvr_dev = pvr_file->pvr_dev;
	struct pvr_context *ctx;
	unsigned long handle;

	xa_for_each(&pvr_file->ctx_handles, handle, ctx)
		pvr_context_destroy(pvr_file, handle);

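	/*
	 * Contexts that are still referenced elsewhere remain on the per-file
	 * list until their last reference is dropped. Tear down the VM
	 * mappings of each remaining entry, dropping the list lock while
	 * doing so, and restart from the list head on every iteration since
	 * the list can change while the lock is released.
	 */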
	spin_lock(&pvr_dev->ctx_list_lock);
	ctx = list_first_entry(&pvr_file->contexts, struct pvr_context, file_link);

	while (!list_entry_is_head(ctx, &pvr_file->contexts, file_link)) {
		list_del_init(&ctx->file_link);

		if (pvr_context_get_if_referenced(ctx)) {
			spin_unlock(&pvr_dev->ctx_list_lock);

			pvr_vm_unmap_all(ctx->vm_ctx);

			pvr_context_put(ctx);
			spin_lock(&pvr_dev->ctx_list_lock);
		}
		ctx = list_first_entry(&pvr_file->contexts, struct pvr_context, file_link);
	}
	spin_unlock(&pvr_dev->ctx_list_lock);
}


/**
 * pvr_context_device_init() - Device level initialization for context related resources.
 * @pvr_dev: The device to initialize.
 */
void pvr_context_device_init(struct pvr_device *pvr_dev)
{
	xa_init_flags(&pvr_dev->ctx_ids, XA_FLAGS_ALLOC1);
	spin_lock_init(&pvr_dev->ctx_list_lock);
}

/**
 * pvr_context_device_fini() - Device level cleanup for context related resources.
 * @pvr_dev: The device to clean up.
 */
void pvr_context_device_fini(struct pvr_device *pvr_dev)
{
	WARN_ON(!xa_empty(&pvr_dev->ctx_ids));
	xa_destroy(&pvr_dev->ctx_ids);
}