/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/sync_file.h>

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"

/*
 * Cmdstream submission:
 */

/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID    0x8000   /* is current addr in cmdstream correct/valid? */
#define BO_LOCKED   0x4000
#define BO_PINNED   0x2000

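/* Allocate and initialize the submit object, sized to hold nr_bos bo
 * entries plus nr_cmds cmd entries in a single allocation tail.  Returns
 * NULL on size overflow or allocation failure.
 */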
static struct msm_gem_submit *submit_create(struct drm_device *dev,
		struct msm_gpu *gpu, struct msm_gpu_submitqueue *queue,
		uint32_t nr_bos, uint32_t nr_cmds)
{
	struct msm_gem_submit *submit;
	uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
		((u64)nr_cmds * sizeof(submit->cmd[0]));

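	/* sz is computed in 64 bits, so on 32-bit the check below catches
	 * anything that would overflow size_t before it reaches kmalloc():
	 */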
	if (sz > SIZE_MAX)
		return NULL;

	submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!submit)
		return NULL;

	submit->dev = dev;
	submit->gpu = gpu;
	submit->fence = NULL;
	submit->pid = get_pid(task_pid(current));
	submit->cmd = (void *)&submit->bos[nr_bos];
	submit->queue = queue;
	submit->ring = gpu->rb[queue->prio];

	/* initially, until copy_from_user() and bo lookup succeed: */
	submit->nr_bos = 0;
	submit->nr_cmds = 0;

	INIT_LIST_HEAD(&submit->node);
	INIT_LIST_HEAD(&submit->bo_list);
	ww_acquire_init(&submit->ticket, &reservation_ww_class);

	return submit;
}

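/* Drop the fence, pid and queue references, unlink the submit from its
 * list, and free it:
 */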
void msm_gem_submit_free(struct msm_gem_submit *submit)
{
	dma_fence_put(submit->fence);
	list_del(&submit->node);
	put_pid(submit->pid);
	msm_submitqueue_put(submit->queue);

	kfree(submit);
}

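/* Non-faulting copy_from_user() variant, used for the fast path in
 * submit_lookup_objects() where we copy while holding table_lock with
 * pagefaults disabled:
 */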
static inline unsigned long __must_check
copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		return __copy_from_user_inatomic(to, from, n);
	return -EFAULT;
}

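/* Copy the bo table in from userspace and resolve each handle to a GEM
 * object.  Copies are first attempted atomically under table_lock; if a
 * copy would fault, the lock is dropped for a normal copy_from_user()
 * and then retaken:
 */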
static int submit_lookup_objects(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	int ret = 0;

	spin_lock(&file->table_lock);
	pagefault_disable();

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_msm_gem_submit_bo submit_bo;
		struct drm_gem_object *obj;
		struct msm_gem_object *msm_obj;
		void __user *userptr =
			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

		/* make sure we don't have garbage flags, in case we hit
		 * an error path before flags is initialized:
		 */
		submit->bos[i].flags = 0;

		if (copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo))) {
			pagefault_enable();
			spin_unlock(&file->table_lock);
			if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
				ret = -EFAULT;
				goto out;
			}
			spin_lock(&file->table_lock);
			pagefault_disable();
		}

		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
			!(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
			ret = -EINVAL;
			goto out_unlock;
		}

		submit->bos[i].flags = submit_bo.flags;
		/* in submit_pin_objects() we figure out if this is true: */
		submit->bos[i].iova  = submit_bo.presumed;

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, submit_bo.handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		msm_obj = to_msm_bo(obj);

		if (!list_empty(&msm_obj->submit_entry)) {
			DRM_ERROR("handle %u at index %u already on submit list\n",
					submit_bo.handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		drm_gem_object_reference(obj);

		submit->bos[i].obj = msm_obj;

		list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
	}

out_unlock:
	pagefault_enable();
	spin_unlock(&file->table_lock);

out:
	submit->nr_bos = i;

	return ret;
}

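/* Undo per-bo state: drop the iova pin and reservation lock if held, and
 * on ww-mutex backoff discard any presumed iova that was never validated:
 */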
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
		int i, bool backoff)
{
	struct msm_gem_object *msm_obj = submit->bos[i].obj;

	if (submit->bos[i].flags & BO_PINNED)
		msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);

	if (submit->bos[i].flags & BO_LOCKED)
		ww_mutex_unlock(&msm_obj->resv->lock);

	if (backoff && !(submit->bos[i].flags & BO_VALID))
		submit->bos[i].iova = 0;

	submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
}

/* This is where we make sure all the bo's are reserved (locked), using the
 * ww-mutex protocol to back off and retry on deadlock; pinning happens
 * afterwards in submit_pin_objects():
 */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;

		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
					&submit->ticket);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	for (; i >= 0; i--)
		submit_unlock_unpin_bo(submit, i, true);

	if (slow_locked > 0)
		submit_unlock_unpin_bo(submit, slow_locked, true);

	if (ret == -EDEADLK) {
		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
		/* we lost out in a seqno race, lock and retry.. */
		ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
				&submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}

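/* Reserve a shared-fence slot for bo's we only read, and (unless implicit
 * sync is disabled for this submit) sync against fences from other
 * contexts:
 */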
static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;

		if (!write) {
			/* NOTE: _reserve_shared() must happen before
			 * _add_shared_fence(), which makes this a slightly
			 * strange place to call it.  OTOH this is a
			 * convenient can-fail point to hook it in.
			 */
			ret = reservation_object_reserve_shared(msm_obj->resv);
			if (ret)
				return ret;
		}

		if (no_implicit)
			continue;

		ret = msm_gem_sync_object(&msm_obj->base, submit->ring->fctx,
			write);
		if (ret)
			break;
	}

	return ret;
}

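/* Pin each bo into the GPU address space and compare the resulting iova
 * with the address userspace presumed.  If they all match, the submit is
 * marked valid and reloc processing can be skipped entirely:
 */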
static int submit_pin_objects(struct msm_gem_submit *submit)
{
	int i, ret = 0;

	submit->valid = true;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint64_t iova;

		/* if locking succeeded, pin bo: */
		ret = msm_gem_get_iova(&msm_obj->base,
				submit->gpu->aspace, &iova);

		if (ret)
			break;

		submit->bos[i].flags |= BO_PINNED;

		if (iova == submit->bos[i].iova) {
			submit->bos[i].flags |= BO_VALID;
		} else {
			submit->bos[i].iova = iova;
			/* iova changed, so address in cmdstream is not valid: */
			submit->bos[i].flags &= ~BO_VALID;
			submit->valid = false;
		}
	}

	return ret;
}

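/* Look up the object, iova and validity for the bo at index idx,
 * range-checking idx against nr_bos:
 */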
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
		struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
				idx, submit->nr_bos);
		return -EINVAL;
	}

	if (obj)
		*obj = submit->bos[idx].obj;
	if (iova)
		*iova = submit->bos[idx].iova;
	if (valid)
		*valid = !!(submit->bos[idx].flags & BO_VALID);

	return 0;
}

/* process the reloc's and patch up the cmdstream as needed: */
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
		uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
{
	uint32_t i, last_offset = 0;
	uint32_t *ptr;
	int ret = 0;

	if (offset % 4) {
		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
		return -EINVAL;
	}

	/* For now, just map the entire thing.  Eventually we probably want
	 * to do it page-by-page, w/ kmap() if not vmap()d..
	 */
	ptr = msm_gem_get_vaddr(&obj->base);

	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		DBG("failed to map: %d", ret);
		return ret;
	}

	for (i = 0; i < nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc submit_reloc;
		void __user *userptr =
			u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
		uint32_t off;
		uint64_t iova;
		bool valid;

		if (copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc))) {
			ret = -EFAULT;
			goto out;
		}

		if (submit_reloc.submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
					submit_reloc.submit_offset);
			ret = -EINVAL;
			goto out;
		}

		/* offset in dwords: */
		off = submit_reloc.submit_offset / 4;

		if ((off >= (obj->base.size / 4)) ||
				(off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
		if (ret)
			goto out;

		if (valid)
			continue;

		iova += submit_reloc.reloc_offset;

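		/* a negative shift means shift the patched address right: */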
		if (submit_reloc.shift < 0)
			iova >>= -submit_reloc.shift;
		else
			iova <<= submit_reloc.shift;

		ptr[off] = iova | submit_reloc.or;

		last_offset = off;
	}

out:
	msm_gem_put_vaddr(&obj->base);

	return ret;
}

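/* Unwind per-bo state (pins, locks, submit-list membership and refs) and
 * release the ww acquire ctx.  The submit itself is freed separately via
 * msm_gem_submit_free():
 */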
static void submit_cleanup(struct msm_gem_submit *submit)
{
	unsigned i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		submit_unlock_unpin_bo(submit, i, false);
		list_del_init(&msm_obj->submit_entry);
		drm_gem_object_unreference(&msm_obj->base);
	}

	ww_acquire_fini(&submit->ticket);
}

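/* Main entry point for the SUBMIT ioctl: validate args, look up and lock
 * the bo's, sync and pin them, copy in the cmd table (doing relocs if
 * needed), then hand the submit to the GPU and set up the out-fence:
 */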
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_gem_submit *args = data;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_gem_submit *submit;
	struct msm_gpu *gpu = priv->gpu;
	struct dma_fence *in_fence = NULL;
	struct sync_file *sync_file = NULL;
	struct msm_gpu_submitqueue *queue;
	struct msm_ringbuffer *ring;
	int out_fence_fd = -1;
	unsigned i;
	int ret;

	if (!gpu)
		return -ENXIO;

	/* for now, we just have the 3d pipe.. eventually this would need to
	 * be more clever to dispatch to the appropriate gpu module:
	 */
	if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
		return -EINVAL;

	if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
		return -EINVAL;

	if (args->flags & MSM_SUBMIT_SUDO) {
		if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
		    !capable(CAP_SYS_RAWIO))
			return -EINVAL;
	}

	queue = msm_submitqueue_get(ctx, args->queueid);
	if (!queue)
		return -ENOENT;

	ring = gpu->rb[queue->prio];

	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
		in_fence = sync_file_get_fence(args->fence_fd);

		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		if (!dma_fence_match_context(in_fence, ring->fctx->context)) {
			ret = dma_fence_wait(in_fence, true);
			if (ret)
				return ret;
		}
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto out_unlock;
		}
	}

	submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
	if (!submit) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	if (args->flags & MSM_SUBMIT_SUDO)
		submit->in_rb = true;

	ret = submit_lookup_objects(submit, args, file);
	if (ret)
		goto out;

	ret = submit_lock_objects(submit);
	if (ret)
		goto out;

	ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
	if (ret)
		goto out;

	ret = submit_pin_objects(submit);
	if (ret)
		goto out;

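	/* copy in and validate each cmd entry, resolving its backing bo and
	 * computing the GPU address of the cmdstream:
	 */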
	for (i = 0; i < args->nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd submit_cmd;
		void __user *userptr =
			u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
		struct msm_gem_object *msm_obj;
		uint64_t iova;

		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		/* validate input from userspace: */
		switch (submit_cmd.type) {
		case MSM_SUBMIT_CMD_BUF:
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			break;
		default:
			DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_cmd.submit_idx,
				&msm_obj, &iova, NULL);
		if (ret)
			goto out;

		if (submit_cmd.size % 4) {
			DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
					submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		if (!submit_cmd.size ||
			((submit_cmd.size + submit_cmd.submit_offset) >
				msm_obj->base.size)) {
			DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		submit->cmd[i].type = submit_cmd.type;
		submit->cmd[i].size = submit_cmd.size / 4;
		submit->cmd[i].iova = iova + submit_cmd.submit_offset;
		submit->cmd[i].idx  = submit_cmd.submit_idx;

		if (submit->valid)
			continue;

		ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
				submit_cmd.nr_relocs, submit_cmd.relocs);
		if (ret)
			goto out;
	}

	submit->nr_cmds = i;

	submit->fence = msm_fence_alloc(ring->fctx);
	if (IS_ERR(submit->fence)) {
		ret = PTR_ERR(submit->fence);
		submit->fence = NULL;
		goto out;
	}

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		sync_file = sync_file_create(submit->fence);
		if (!sync_file) {
			ret = -ENOMEM;
			goto out;
		}
	}

	msm_gpu_submit(gpu, submit, ctx);

	args->fence = submit->fence->seqno;

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		fd_install(out_fence_fd, sync_file->file);
		args->fence_fd = out_fence_fd;
	}

out:
	if (in_fence)
		dma_fence_put(in_fence);
	submit_cleanup(submit);
	if (ret)
		msm_gem_submit_free(submit);
out_unlock:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}