// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
 */

#include <linux/kref.h>
#include <linux/uaccess.h>

#include "msm_gpu.h"

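/*
 * Adjust the sysprof (system profiling) state for a context: level 0
 * disables profiling, level 1 takes a reference on gpu->sysprof_active,
 * and level 2 additionally holds a pm_runtime reference so the GPU
 * stays powered up while profiling.
 */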
int msm_context_set_sysprof(struct msm_context *ctx, struct msm_gpu *gpu, int sysprof)
{
	/*
	 * Since pm_runtime and sysprof_active are both refcounts, we
	 * apply the new value first, and then unwind the previous
	 * value.
	 */

	switch (sysprof) {
	default:
		return UERR(EINVAL, gpu->dev, "Invalid sysprof: %d", sysprof);
	case 2:
		pm_runtime_get_sync(&gpu->pdev->dev);
		fallthrough;
	case 1:
		refcount_inc(&gpu->sysprof_active);
		fallthrough;
	case 0:
		break;
	}

	/* unwind old value: */
	switch (ctx->sysprof) {
	case 2:
		pm_runtime_put_autosuspend(&gpu->pdev->dev);
		fallthrough;
	case 1:
		refcount_dec(&gpu->sysprof_active);
		fallthrough;
	case 0:
		break;
	}

	/* Some gpu families require additional setup for sysprof */
	if (gpu->funcs->sysprof_setup)
		gpu->funcs->sysprof_setup(gpu);

	ctx->sysprof = sysprof;

	return 0;
}

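/*
 * kref release function for a msm_context: destroy the per-context
 * scheduler entities, drop the VM reference, and free the cached
 * comm/cmdline strings.
 */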
void __msm_context_destroy(struct kref *kref)
{
	struct msm_context *ctx = container_of(kref,
		struct msm_context, ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
		if (!ctx->entities[i])
			continue;

		drm_sched_entity_destroy(ctx->entities[i]);
		kfree(ctx->entities[i]);
	}

	drm_gpuvm_put(ctx->vm);
	kfree(ctx->comm);
	kfree(ctx->cmdline);
	kfree(ctx);
}

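/*
 * kref release function for a submitqueue.  Only VM_BIND queues own
 * their scheduler entity (embedded in _vm_bind_entity[]); regular
 * queues share the per-context entities that are torn down in
 * __msm_context_destroy().
 */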
void msm_submitqueue_destroy(struct kref *kref)
{
	struct msm_gpu_submitqueue *queue = container_of(kref,
		struct msm_gpu_submitqueue, ref);

	idr_destroy(&queue->fence_idr);

	if (queue->entity == &queue->_vm_bind_entity[0])
		drm_sched_entity_destroy(queue->entity);

	msm_context_put(queue->ctx);

	kfree(queue);
}

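/*
 * Look up a submitqueue by id and take a reference on it.  The caller
 * is responsible for dropping the reference with msm_submitqueue_put().
 */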
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_context *ctx,
		u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return NULL;

	read_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			kref_get(&entry->ref);
			read_unlock(&ctx->queuelock);

			return entry;
		}
	}

	read_unlock(&ctx->queuelock);
	return NULL;
}

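/*
 * Tear down all of a context's submitqueues when the file is closed:
 * flush VM_BIND entities so pending VM updates complete, drop each
 * queue's reference, and finally close the context's VM.
 */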
void msm_submitqueue_close(struct msm_context *ctx)
{
	struct msm_gpu_submitqueue *queue, *tmp;

	if (!ctx)
		return;

	/*
	 * No lock needed in close, since there won't be
	 * any more user ioctls coming our way
	 */
	list_for_each_entry_safe(queue, tmp, &ctx->submitqueues, node) {
		if (queue->entity == &queue->_vm_bind_entity[0])
			drm_sched_entity_flush(queue->entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
		list_del(&queue->node);
		msm_submitqueue_put(queue);
	}

	if (!ctx->vm)
		return;

	msm_gem_vm_close(ctx->vm);
}

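/*
 * Lazily create the scheduler entity for a given (ring, priority)
 * pair.  Entities are shared by all submitqueues of the context that
 * map to the same ring and scheduler priority, and are only destroyed
 * in __msm_context_destroy().
 */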
static struct drm_sched_entity *
get_sched_entity(struct msm_context *ctx, struct msm_ringbuffer *ring,
		 unsigned ring_nr, enum drm_sched_priority sched_prio)
{
	static DEFINE_MUTEX(entity_lock);
	unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;

	/* We should have already validated that the requested priority is
	 * valid by the time we get here.
	 */
	if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
		return ERR_PTR(-EINVAL);

	mutex_lock(&entity_lock);

	if (!ctx->entities[idx]) {
		struct drm_sched_entity *entity;
		struct drm_gpu_scheduler *sched = &ring->sched;
		int ret;

		entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL);
		if (!entity) {
			mutex_unlock(&entity_lock);
			return ERR_PTR(-ENOMEM);
		}

		ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
		if (ret) {
			mutex_unlock(&entity_lock);
			kfree(entity);
			return ERR_PTR(ret);
		}

		ctx->entities[idx] = entity;
	}

	mutex_unlock(&entity_lock);

	return ctx->entities[idx];
}

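/*
 * Create a new submitqueue.  VM_BIND queues get their own
 * kernel-priority entity on the VM's scheduler, while regular queues
 * map the userspace priority onto a (ring, scheduler priority) pair
 * and share the per-context entity for that pair.
 */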
int msm_submitqueue_create(struct drm_device *drm, struct msm_context *ctx,
		u32 prio, u32 flags, u32 *id)
{
	struct msm_drm_private *priv = drm->dev_private;
	struct msm_gpu_submitqueue *queue;
	enum drm_sched_priority sched_prio;
	unsigned ring_nr;
	int ret;

	if (!ctx)
		return -ENODEV;

	if (!priv->gpu)
		return -ENODEV;

	if (flags & MSM_SUBMITQUEUE_VM_BIND) {
		unsigned sz;

		/* Not allowed for kernel managed VMs (ie. kernel allocs VA) */
		if (!msm_context_is_vmbind(ctx))
			return -EINVAL;

		if (prio)
			return -EINVAL;

		/* VM_BIND queues embed their own scheduler entity: */
		sz = struct_size(queue, _vm_bind_entity, 1);
		queue = kzalloc(sz, GFP_KERNEL);
	} else {
		extern int enable_preemption;

		/*
		 * Preemption needs more than one ring.  Reject ALLOW_PREEMPT
		 * when preemption is not actually available, unless it was
		 * explicitly disabled via enable_preemption (in which case
		 * the flag is simply ignored).
		 */
		if ((flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT) &&
		    priv->gpu->nr_rings == 1 && enable_preemption != 0)
			return -EINVAL;

		ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
		if (ret)
			return ret;

		queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	}

	if (!queue)
		return -ENOMEM;

	kref_init(&queue->ref);
	queue->flags = flags;

	if (flags & MSM_SUBMITQUEUE_VM_BIND) {
		struct drm_gpu_scheduler *sched = &to_msm_vm(msm_context_vm(drm, ctx))->sched;

		queue->entity = &queue->_vm_bind_entity[0];

		ret = drm_sched_entity_init(queue->entity, DRM_SCHED_PRIORITY_KERNEL,
					    &sched, 1, NULL);
		if (ret) {
			kfree(queue);
			return ret;
		}
	} else {
		queue->ring_nr = ring_nr;

		queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
						 ring_nr, sched_prio);
	}

	if (IS_ERR(queue->entity)) {
		ret = PTR_ERR(queue->entity);
		kfree(queue);
		return ret;
	}

	write_lock(&ctx->queuelock);

	queue->ctx = msm_context_get(ctx);
	queue->id = ctx->queueid++;

	if (id)
		*id = queue->id;

	idr_init(&queue->fence_idr);
	spin_lock_init(&queue->idr_lock);
	mutex_init(&queue->lock);

	list_add_tail(&queue->node, &ctx->submitqueues);

	write_unlock(&ctx->queuelock);

	return 0;
}

/*
 * Create the default submit-queue (id==0), used for backwards compatibility
 * for userspace that pre-dates the introduction of submitqueues.
 */
int msm_submitqueue_init(struct drm_device *drm, struct msm_context *ctx)
{
	struct msm_drm_private *priv = drm->dev_private;
	int default_prio, max_priority;

	if (!priv->gpu)
		return -ENODEV;

	max_priority = (priv->gpu->nr_rings * NR_SCHED_PRIORITIES) - 1;

	/*
	 * Pick a medium priority level as default.  Lower numeric value is
	 * higher priority, so round-up to pick a priority that is not higher
	 * than the middle priority level.
	 */
	default_prio = DIV_ROUND_UP(max_priority, 2);
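	/*
	 * E.g. assuming 4 rings and NR_SCHED_PRIORITIES == 3, this gives
	 * max_priority = 11 and default_prio = DIV_ROUND_UP(11, 2) = 6.
	 */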

	return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}

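/*
 * Copy the queue's fault counter out to userspace.  A zero-length
 * request is used to query the size of the data without copying it.
 */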
static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
		struct drm_msm_submitqueue_query *args)
{
	size_t size = min_t(size_t, args->len, sizeof(queue->faults));
	int ret;

	/* If a zero length was passed in, return the data size we expect */
	if (!args->len) {
		args->len = sizeof(queue->faults);
		return 0;
	}

	/* Set the length to the actual size of the data */
	args->len = size;

	ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);

	return ret ? -EFAULT : 0;
}

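/*
 * Handler for the submitqueue query ioctl.  Only the FAULTS param is
 * currently implemented; anything else returns -EINVAL.
 */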
int msm_submitqueue_query(struct drm_device *drm, struct msm_context *ctx,
		struct drm_msm_submitqueue_query *args)
{
	struct msm_gpu_submitqueue *queue;
	int ret = -EINVAL;

	if (args->pad)
		return -EINVAL;

	queue = msm_submitqueue_get(ctx, args->id);
	if (!queue)
		return -ENOENT;

	if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
		ret = msm_submitqueue_query_faults(queue, args);

	msm_submitqueue_put(queue);

	return ret;
}

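/*
 * Remove a submitqueue by id.  The queue itself is freed via
 * msm_submitqueue_put() once the last reference is dropped.
 */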
int msm_submitqueue_remove(struct msm_context *ctx, u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return 0;

	/*
	 * id 0 is the "default" queue and can't be destroyed
	 * by the user
	 */
	if (!id)
		return -ENOENT;

	write_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			list_del(&entry->node);
			write_unlock(&ctx->queuelock);

			msm_submitqueue_put(entry);
			return 0;
		}
	}

	write_unlock(&ctx->queuelock);
	return -ENOENT;
}