// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_cccb.h"
#include "pvr_context.h"
#include "pvr_device.h"
#include "pvr_drv.h"
#include "pvr_gem.h"
#include "pvr_job.h"
#include "pvr_power.h"
#include "pvr_rogue_fwif.h"
#include "pvr_rogue_fwif_common.h"
#include "pvr_rogue_fwif_resetframework.h"
#include "pvr_stream.h"
#include "pvr_stream_defs.h"
#include "pvr_vm.h"

#include <drm/drm_auth.h>
#include <drm/drm_managed.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/xarray.h>

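/**
 * remap_priority() - Map a UAPI context priority to a driver priority level.
 * @pvr_file: File the context is being created for.
 * @uapi_priority: Priority value passed by userspace.
 * @priority_out: Location to store the remapped priority.
 *
 * Requesting a high priority requires CAP_SYS_NICE or DRM master privileges.
 *
 * Return:
 *  * 0 on success,
 *  * -%EACCES if the caller is not allowed to use the requested priority, or
 *  * -%EINVAL if the priority value is not recognised.
 */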
static int
remap_priority(struct pvr_file *pvr_file, s32 uapi_priority,
	       enum pvr_context_priority *priority_out)
{
	switch (uapi_priority) {
	case DRM_PVR_CTX_PRIORITY_LOW:
		*priority_out = PVR_CTX_PRIORITY_LOW;
		break;
	case DRM_PVR_CTX_PRIORITY_NORMAL:
		*priority_out = PVR_CTX_PRIORITY_MEDIUM;
		break;
	case DRM_PVR_CTX_PRIORITY_HIGH:
		if (!capable(CAP_SYS_NICE) && !drm_is_current_master(from_pvr_file(pvr_file)))
			return -EACCES;
		*priority_out = PVR_CTX_PRIORITY_HIGH;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

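/**
 * get_fw_obj_size() - Get the size of the FW context object for a context type.
 * @type: Context type.
 *
 * Return:
 *  * Size of the FW context structure (in bytes) on success, or
 *  * -%EINVAL if the context type is not recognised.
 */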
static int get_fw_obj_size(enum drm_pvr_ctx_type type)
{
	switch (type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		return sizeof(struct rogue_fwif_fwrendercontext);
	case DRM_PVR_CTX_TYPE_COMPUTE:
		return sizeof(struct rogue_fwif_fwcomputecontext);
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		return sizeof(struct rogue_fwif_fwtransfercontext);
	}

	return -EINVAL;
}

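/**
 * process_static_context_state() - Copy and process a static context state stream.
 * @pvr_dev: Device pointer.
 * @cmd_defs: Stream definition used to validate and unpack the stream.
 * @stream_user_ptr: Userspace pointer to the stream.
 * @stream_size: Size of the stream, in bytes.
 * @dest: Destination buffer for the processed stream.
 *
 * Return:
 *  * 0 on success, or
 *  * A negative error code returned by memdup_user() or pvr_stream_process().
 */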
static int
process_static_context_state(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs,
			     u64 stream_user_ptr, u32 stream_size, void *dest)
{
	void *stream;
	int err;

	stream = memdup_user(u64_to_user_ptr(stream_user_ptr), stream_size);
	if (IS_ERR(stream))
		return PTR_ERR(stream);

	err = pvr_stream_process(pvr_dev, cmd_defs, stream, stream_size, dest);

	kfree(stream);

	return err;
}

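/**
 * init_render_fw_objs() - Initialize the FW context object for a render context.
 * @ctx: Context being created.
 * @args: Context creation arguments passed by userspace.
 * @fw_ctx_map: CPU mapping of the FW context object.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if no static context state was supplied, or
 *  * A negative error code returned by process_static_context_state().
 */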
static int init_render_fw_objs(struct pvr_context *ctx,
			       struct drm_pvr_ioctl_create_context_args *args,
			       void *fw_ctx_map)
{
	struct rogue_fwif_static_rendercontext_state *static_rendercontext_state;
	struct rogue_fwif_fwrendercontext *fw_render_context = fw_ctx_map;

	if (!args->static_context_state_len)
		return -EINVAL;

	static_rendercontext_state = &fw_render_context->static_render_context_state;

	/* Copy static render context state from userspace. */
	return process_static_context_state(ctx->pvr_dev,
					    &pvr_static_render_context_state_stream,
					    args->static_context_state,
					    args->static_context_state_len,
					    &static_rendercontext_state->ctxswitch_regs[0]);
}

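/**
 * init_compute_fw_objs() - Initialize the FW context object for a compute context.
 * @ctx: Context being created.
 * @args: Context creation arguments passed by userspace.
 * @fw_ctx_map: CPU mapping of the FW context object.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if no static context state was supplied, or
 *  * A negative error code returned by process_static_context_state().
 */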
static int init_compute_fw_objs(struct pvr_context *ctx,
				struct drm_pvr_ioctl_create_context_args *args,
				void *fw_ctx_map)
{
	struct rogue_fwif_fwcomputecontext *fw_compute_context = fw_ctx_map;
	struct rogue_fwif_cdm_registers_cswitch *ctxswitch_regs;

	if (!args->static_context_state_len)
		return -EINVAL;

	ctxswitch_regs = &fw_compute_context->static_compute_context_state.ctxswitch_regs;

	/* Copy static compute context state from userspace. */
	return process_static_context_state(ctx->pvr_dev,
					    &pvr_static_compute_context_state_stream,
					    args->static_context_state,
					    args->static_context_state_len,
					    ctxswitch_regs);
}

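/**
 * init_transfer_fw_objs() - Initialize the FW context object for a transfer context.
 * @ctx: Context being created.
 * @args: Context creation arguments passed by userspace.
 * @fw_ctx_map: CPU mapping of the FW context object.
 *
 * Transfer contexts carry no static context state, so this only validates that
 * none was supplied.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if static context state was supplied.
 */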
static int init_transfer_fw_objs(struct pvr_context *ctx,
				 struct drm_pvr_ioctl_create_context_args *args,
				 void *fw_ctx_map)
{
	if (args->static_context_state_len)
		return -EINVAL;

	return 0;
}

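/**
 * init_fw_objs() - Initialize the FW context object for a context.
 * @ctx: Context being created.
 * @args: Context creation arguments passed by userspace.
 * @fw_ctx_map: CPU mapping of the FW context object.
 *
 * Dispatches to the type-specific initializer based on @ctx->type.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if the context type is not recognised, or
 *  * Any error returned by the type-specific initializer.
 */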
static int init_fw_objs(struct pvr_context *ctx,
			struct drm_pvr_ioctl_create_context_args *args,
			void *fw_ctx_map)
{
	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		return init_render_fw_objs(ctx, args, fw_ctx_map);
	case DRM_PVR_CTX_TYPE_COMPUTE:
		return init_compute_fw_objs(ctx, args, fw_ctx_map);
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		return init_transfer_fw_objs(ctx, args, fw_ctx_map);
	}

	return -EINVAL;
}

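/**
 * ctx_fw_data_init() - FW object initialization callback for context data.
 * @cpu_ptr: CPU mapping of the newly created FW object.
 * @priv: Context pointer passed to pvr_fw_object_create().
 *
 * Copies the staged context data into the FW-visible object.
 */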
static void
ctx_fw_data_init(void *cpu_ptr, void *priv)
{
	struct pvr_context *ctx = priv;

	memcpy(cpu_ptr, ctx->data, ctx->data_size);
}

/**
 * pvr_context_destroy_queues() - Destroy all queues attached to a context.
 * @ctx: Context to destroy queues on.
 *
 * Should be called when the last reference to a context object is dropped.
 * It releases all resources attached to the queues bound to this context.
 */
static void pvr_context_destroy_queues(struct pvr_context *ctx)
{
	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		pvr_queue_destroy(ctx->queues.fragment);
		pvr_queue_destroy(ctx->queues.geometry);
		break;
	case DRM_PVR_CTX_TYPE_COMPUTE:
		pvr_queue_destroy(ctx->queues.compute);
		break;
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		pvr_queue_destroy(ctx->queues.transfer);
		break;
	}
}

/**
 * pvr_context_create_queues() - Create all queues attached to a context.
 * @ctx: Context to create queues on.
 * @args: Context creation arguments passed by userspace.
 * @fw_ctx_map: CPU mapping of the FW context object.
 *
 * Return:
 *  * 0 on success, or
 *  * A negative error code otherwise.
 */
static int pvr_context_create_queues(struct pvr_context *ctx,
				     struct drm_pvr_ioctl_create_context_args *args,
				     void *fw_ctx_map)
{
	int err;

	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		ctx->queues.geometry = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_GEOMETRY,
							args, fw_ctx_map);
		if (IS_ERR(ctx->queues.geometry)) {
			err = PTR_ERR(ctx->queues.geometry);
			ctx->queues.geometry = NULL;
			goto err_destroy_queues;
		}

		ctx->queues.fragment = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_FRAGMENT,
							args, fw_ctx_map);
		if (IS_ERR(ctx->queues.fragment)) {
			err = PTR_ERR(ctx->queues.fragment);
			ctx->queues.fragment = NULL;
			goto err_destroy_queues;
		}
		return 0;

	case DRM_PVR_CTX_TYPE_COMPUTE:
		ctx->queues.compute = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_COMPUTE,
						       args, fw_ctx_map);
		if (IS_ERR(ctx->queues.compute)) {
			err = PTR_ERR(ctx->queues.compute);
			ctx->queues.compute = NULL;
			goto err_destroy_queues;
		}
		return 0;

	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		ctx->queues.transfer = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_TRANSFER_FRAG,
							args, fw_ctx_map);
		if (IS_ERR(ctx->queues.transfer)) {
			err = PTR_ERR(ctx->queues.transfer);
			ctx->queues.transfer = NULL;
			goto err_destroy_queues;
		}
		return 0;
	}

	return -EINVAL;

err_destroy_queues:
	pvr_context_destroy_queues(ctx);
	return err;
}

/**
 * pvr_context_kill_queues() - Kill queues attached to context.
 * @ctx: Context to kill queues on.
 *
 * Killing the queues makes them unusable for future jobs, while still giving
 * currently submitted jobs a chance to finish. Queue resources stay around
 * until pvr_context_destroy_queues() is called.
 */
static void pvr_context_kill_queues(struct pvr_context *ctx)
{
	switch (ctx->type) {
	case DRM_PVR_CTX_TYPE_RENDER:
		pvr_queue_kill(ctx->queues.fragment);
		pvr_queue_kill(ctx->queues.geometry);
		break;
	case DRM_PVR_CTX_TYPE_COMPUTE:
		pvr_queue_kill(ctx->queues.compute);
		break;
	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
		pvr_queue_kill(ctx->queues.transfer);
		break;
	}
}

/**
 * pvr_context_create() - Create a context.
 * @pvr_file: File to attach the created context to.
 * @args: Context creation arguments.
 *
 * Return:
 *  * 0 on success, or
 *  * A negative error code on failure.
 */
int pvr_context_create(struct pvr_file *pvr_file, struct drm_pvr_ioctl_create_context_args *args)
{
	struct pvr_device *pvr_dev = pvr_file->pvr_dev;
	struct pvr_context *ctx;
	int ctx_size;
	int err;

	/* Context creation flags are currently unused and must be zero. */
	if (args->flags)
		return -EINVAL;

	ctx_size = get_fw_obj_size(args->type);
	if (ctx_size < 0)
		return ctx_size;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->data_size = ctx_size;
	ctx->type = args->type;
	ctx->flags = args->flags;
	ctx->pvr_dev = pvr_dev;
	kref_init(&ctx->ref_count);

	err = remap_priority(pvr_file, args->priority, &ctx->priority);
	if (err)
		goto err_free_ctx;

	ctx->vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
	if (IS_ERR(ctx->vm_ctx)) {
		err = PTR_ERR(ctx->vm_ctx);
		goto err_free_ctx;
	}

	ctx->data = kzalloc(ctx_size, GFP_KERNEL);
	if (!ctx->data) {
		err = -ENOMEM;
		goto err_put_vm;
	}

	err = pvr_context_create_queues(ctx, args, ctx->data);
	if (err)
		goto err_free_ctx_data;

	err = init_fw_objs(ctx, args, ctx->data);
	if (err)
		goto err_destroy_queues;

	err = pvr_fw_object_create(pvr_dev, ctx_size, PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
				   ctx_fw_data_init, ctx, &ctx->fw_obj);
	if (err)
		goto err_destroy_queues;

	err = xa_alloc(&pvr_dev->ctx_ids, &ctx->ctx_id, ctx, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto err_destroy_fw_obj;

	err = xa_alloc(&pvr_file->ctx_handles, &args->handle, ctx, xa_limit_32b, GFP_KERNEL);
	if (err) {
		/*
		 * It's possible that another thread could have taken a reference on the context at
		 * this point as it is in the ctx_ids xarray. Therefore instead of directly
		 * destroying the context, drop a reference instead.
		 */
		pvr_context_put(ctx);
		return err;
	}

	return 0;

err_destroy_fw_obj:
	pvr_fw_object_destroy(ctx->fw_obj);

err_destroy_queues:
	pvr_context_destroy_queues(ctx);

err_free_ctx_data:
	kfree(ctx->data);

err_put_vm:
	pvr_vm_context_put(ctx->vm_ctx);

err_free_ctx:
	kfree(ctx);
	return err;
}

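/**
 * pvr_context_release() - Release all resources held by a context.
 * @ref_count: Reference counter embedded in the context.
 *
 * Called by kref_put() when the last reference to the context is dropped.
 */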
static void
pvr_context_release(struct kref *ref_count)
{
	struct pvr_context *ctx =
		container_of(ref_count, struct pvr_context, ref_count);
	struct pvr_device *pvr_dev = ctx->pvr_dev;

	xa_erase(&pvr_dev->ctx_ids, ctx->ctx_id);
	pvr_context_destroy_queues(ctx);
	pvr_fw_object_destroy(ctx->fw_obj);
	kfree(ctx->data);
	pvr_vm_context_put(ctx->vm_ctx);
	kfree(ctx);
}

/**
 * pvr_context_put() - Release reference on context
 * @ctx: Target context.
 */
void
pvr_context_put(struct pvr_context *ctx)
{
	if (ctx)
		kref_put(&ctx->ref_count, pvr_context_release);
}

/**
 * pvr_context_destroy() - Destroy context
 * @pvr_file: Pointer to pvr_file structure.
 * @handle: Userspace context handle.
 *
 * Removes context from context list and drops initial reference. Context will
 * then be destroyed once all outstanding references are dropped.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if context not in context list.
 */
int
pvr_context_destroy(struct pvr_file *pvr_file, u32 handle)
{
	struct pvr_context *ctx = xa_erase(&pvr_file->ctx_handles, handle);

	if (!ctx)
		return -EINVAL;

	/* Make sure nothing can be queued to the queues after that point. */
	pvr_context_kill_queues(ctx);

	/* Release the reference held by the handle set. */
	pvr_context_put(ctx);

	return 0;
}

/**
 * pvr_destroy_contexts_for_file() - Destroy any contexts associated with the given file
 * @pvr_file: Pointer to pvr_file structure.
 *
 * Removes all contexts associated with @pvr_file from the device context list and drops initial
 * references. Contexts will then be destroyed once all outstanding references are dropped.
 */
void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file)
{
	struct pvr_context *ctx;
	unsigned long handle;

	xa_for_each(&pvr_file->ctx_handles, handle, ctx)
		pvr_context_destroy(pvr_file, handle);
}

/**
 * pvr_context_device_init() - Device level initialization for context related resources.
 * @pvr_dev: The device to initialize.
 */
void pvr_context_device_init(struct pvr_device *pvr_dev)
{
	xa_init_flags(&pvr_dev->ctx_ids, XA_FLAGS_ALLOC1);
}

/**
 * pvr_context_device_fini() - Device level cleanup for context related resources.
 * @pvr_dev: The device to cleanup.
 */
void pvr_context_device_fini(struct pvr_device *pvr_dev)
{
	WARN_ON(!xa_empty(&pvr_dev->ctx_ids));
	xa_destroy(&pvr_dev->ctx_ids);
}
453