xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c (revision 0a94608f0f7de9b1135ffea3546afe68eafef57f)
1 /*
2  * Copyright 2008 Jerome Glisse.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22  * DEALINGS IN THE SOFTWARE.
23  *
24  * Authors:
25  *    Jerome Glisse <glisse@freedesktop.org>
26  */
27 
28 #include <linux/file.h>
29 #include <linux/pagemap.h>
30 #include <linux/sync_file.h>
31 #include <linux/dma-buf.h>
32 
33 #include <drm/amdgpu_drm.h>
34 #include <drm/drm_syncobj.h>
35 #include "amdgpu_cs.h"
36 #include "amdgpu.h"
37 #include "amdgpu_trace.h"
38 #include "amdgpu_gmc.h"
39 #include "amdgpu_gem.h"
40 #include "amdgpu_ras.h"
41 
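/* Handle the AMDGPU_CHUNK_ID_FENCE chunk: look up the GEM object backing
 * the user fence, take a reference and add it to the parser's user fence
 * entry. The BO must be exactly one page, must not be a userptr BO and the
 * 8 byte fence value at data->offset must fit into it; the offset is
 * returned through @offset.
 */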
42 static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
43 				      struct drm_amdgpu_cs_chunk_fence *data,
44 				      uint32_t *offset)
45 {
46 	struct drm_gem_object *gobj;
47 	struct amdgpu_bo *bo;
48 	unsigned long size;
49 	int r;
50 
51 	gobj = drm_gem_object_lookup(p->filp, data->handle);
52 	if (gobj == NULL)
53 		return -EINVAL;
54 
55 	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
56 	p->uf_entry.priority = 0;
57 	p->uf_entry.tv.bo = &bo->tbo;
58 	/* One for TTM and two for the CS job */
59 	p->uf_entry.tv.num_shared = 3;
60 
61 	drm_gem_object_put(gobj);
62 
63 	size = amdgpu_bo_size(bo);
64 	if (size != PAGE_SIZE || (data->offset + 8) > size) {
65 		r = -EINVAL;
66 		goto error_unref;
67 	}
68 
69 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
70 		r = -EINVAL;
71 		goto error_unref;
72 	}
73 
74 	*offset = data->offset;
75 
76 	return 0;
77 
78 error_unref:
79 	amdgpu_bo_unref(&bo);
80 	return r;
81 }
82 
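/* Handle the AMDGPU_CHUNK_ID_BO_HANDLES chunk: build p->bo_list directly
 * from the BO handle array supplied with the submission instead of using a
 * bo_list that was created beforehand with a separate ioctl.
 */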
83 static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
84 				      struct drm_amdgpu_bo_list_in *data)
85 {
86 	int r;
87 	struct drm_amdgpu_bo_list_entry *info = NULL;
88 
89 	r = amdgpu_bo_create_list_entry_array(data, &info);
90 	if (r)
91 		return r;
92 
93 	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
94 				  &p->bo_list);
95 	if (r)
96 		goto error_free;
97 
98 	kvfree(info);
99 	return 0;
100 
101 error_free:
102 	kvfree(info);
103 
104 	return r;
105 }
106 
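/* Copy the chunk pointer array and the data of every chunk from userspace,
 * process the FENCE and BO_HANDLES chunks, count the IB chunks and allocate
 * the job. Also takes the ctx lock, which is dropped again in
 * amdgpu_cs_parser_fini().
 */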
107 static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
108 {
109 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
110 	struct amdgpu_vm *vm = &fpriv->vm;
111 	uint64_t *chunk_array_user;
112 	uint64_t *chunk_array;
113 	unsigned size, num_ibs = 0;
114 	uint32_t uf_offset = 0;
115 	int i;
116 	int ret;
117 
118 	if (cs->in.num_chunks == 0)
119 		return -EINVAL;
120 
121 	chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
122 	if (!chunk_array)
123 		return -ENOMEM;
124 
125 	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
126 	if (!p->ctx) {
127 		ret = -EINVAL;
128 		goto free_chunk;
129 	}
130 
131 	mutex_lock(&p->ctx->lock);
132 
133 	/* skip guilty context job */
134 	if (atomic_read(&p->ctx->guilty) == 1) {
135 		ret = -ECANCELED;
136 		goto free_chunk;
137 	}
138 
139 	/* get chunks */
140 	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
141 	if (copy_from_user(chunk_array, chunk_array_user,
142 			   sizeof(uint64_t)*cs->in.num_chunks)) {
143 		ret = -EFAULT;
144 		goto free_chunk;
145 	}
146 
147 	p->nchunks = cs->in.num_chunks;
148 	p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
149 			    GFP_KERNEL);
150 	if (!p->chunks) {
151 		ret = -ENOMEM;
152 		goto free_chunk;
153 	}
154 
155 	for (i = 0; i < p->nchunks; i++) {
156 		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
157 		struct drm_amdgpu_cs_chunk user_chunk;
158 		uint32_t __user *cdata;
159 
160 		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
161 		if (copy_from_user(&user_chunk, chunk_ptr,
162 				       sizeof(struct drm_amdgpu_cs_chunk))) {
163 			ret = -EFAULT;
164 			i--;
165 			goto free_partial_kdata;
166 		}
167 		p->chunks[i].chunk_id = user_chunk.chunk_id;
168 		p->chunks[i].length_dw = user_chunk.length_dw;
169 
170 		size = p->chunks[i].length_dw;
171 		cdata = u64_to_user_ptr(user_chunk.chunk_data);
172 
173 		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
174 		if (p->chunks[i].kdata == NULL) {
175 			ret = -ENOMEM;
176 			i--;
177 			goto free_partial_kdata;
178 		}
179 		size *= sizeof(uint32_t);
180 		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
181 			ret = -EFAULT;
182 			goto free_partial_kdata;
183 		}
184 
185 		switch (p->chunks[i].chunk_id) {
186 		case AMDGPU_CHUNK_ID_IB:
187 			++num_ibs;
188 			break;
189 
190 		case AMDGPU_CHUNK_ID_FENCE:
191 			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
192 			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
193 				ret = -EINVAL;
194 				goto free_partial_kdata;
195 			}
196 
197 			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
198 							 &uf_offset);
199 			if (ret)
200 				goto free_partial_kdata;
201 
202 			break;
203 
204 		case AMDGPU_CHUNK_ID_BO_HANDLES:
205 			size = sizeof(struct drm_amdgpu_bo_list_in);
206 			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
207 				ret = -EINVAL;
208 				goto free_partial_kdata;
209 			}
210 
211 			ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
212 			if (ret)
213 				goto free_partial_kdata;
214 
215 			break;
216 
217 		case AMDGPU_CHUNK_ID_DEPENDENCIES:
218 		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
219 		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
220 		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
221 		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
222 		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
223 			break;
224 
225 		default:
226 			ret = -EINVAL;
227 			goto free_partial_kdata;
228 		}
229 	}
230 
231 	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
232 	if (ret)
233 		goto free_all_kdata;
234 
235 	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
236 		ret = -ECANCELED;
237 		goto free_all_kdata;
238 	}
239 
240 	if (p->uf_entry.tv.bo)
241 		p->job->uf_addr = uf_offset;
242 	kvfree(chunk_array);
243 
244 	/* Use this opportunity to fill in task info for the vm */
245 	amdgpu_vm_set_task_info(vm);
246 
247 	return 0;
248 
249 free_all_kdata:
250 	i = p->nchunks - 1;
251 free_partial_kdata:
252 	for (; i >= 0; i--)
253 		kvfree(p->chunks[i].kdata);
254 	kvfree(p->chunks);
255 	p->chunks = NULL;
256 	p->nchunks = 0;
257 free_chunk:
258 	kvfree(chunk_array);
259 
260 	return ret;
261 }
262 
263 /* Convert microseconds to bytes. */
264 static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
265 {
266 	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
267 		return 0;
268 
269 	/* Since accum_us is incremented by a million per second, just
270 	 * multiply it by the number of MB/s to get the number of bytes.
271 	 */
272 	return us << adev->mm_stats.log2_max_MBps;
273 }
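/* For example, with log2_max_MBps == 6 (a 64 MB/s budget) one second of
 * accumulated time (1,000,000 us) converts to 1,000,000 << 6 = 64,000,000
 * bytes, i.e. roughly 64 MB worth of allowed buffer moves.
 */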
274 
275 static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
276 {
277 	if (!adev->mm_stats.log2_max_MBps)
278 		return 0;
279 
280 	return bytes >> adev->mm_stats.log2_max_MBps;
281 }
282 
283 /* Returns how many bytes TTM can move right now. If no bytes can be moved,
284  * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
285  * which means it can go over the threshold once. If that happens, the driver
286  * will be in debt and no other buffer migrations can be done until that debt
287  * is repaid.
288  *
289  * This approach allows moving a buffer of any size (it's important to allow
290  * that).
291  *
292  * The currency is simply time in microseconds and it increases as the clock
293  * ticks. The accumulated microseconds (us) are converted to bytes and
294  * returned.
295  */
296 static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
297 					      u64 *max_bytes,
298 					      u64 *max_vis_bytes)
299 {
300 	s64 time_us, increment_us;
301 	u64 free_vram, total_vram, used_vram;
302 	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
303 	 * throttling.
304 	 *
305 	 * It means that in order to get full max MBps, at least 5 IBs per
306 	 * second must be submitted and not more than 200ms apart from each
307 	 * other.
308 	 */
309 	const s64 us_upper_bound = 200000;
310 
311 	if (!adev->mm_stats.log2_max_MBps) {
312 		*max_bytes = 0;
313 		*max_vis_bytes = 0;
314 		return;
315 	}
316 
317 	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
318 	used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
319 	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
320 
321 	spin_lock(&adev->mm_stats.lock);
322 
323 	/* Increase the amount of accumulated us. */
324 	time_us = ktime_to_us(ktime_get());
325 	increment_us = time_us - adev->mm_stats.last_update_us;
326 	adev->mm_stats.last_update_us = time_us;
327 	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
328 				      us_upper_bound);
329 
330 	/* This prevents the short period of low performance when the VRAM
331 	 * usage is low and the driver is in debt or doesn't have enough
332 	 * accumulated us to fill VRAM quickly.
333 	 *
334 	 * The situation can occur in these cases:
335 	 * - a lot of VRAM is freed by userspace
336 	 * - the presence of a big buffer causes a lot of evictions
337 	 *   (solution: split buffers into smaller ones)
338 	 *
339 	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
340 	 * accum_us to a positive number.
341 	 */
342 	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
343 		s64 min_us;
344 
345 		/* Be more aggressive on dGPUs. Try to fill a portion of free
346 		 * VRAM now.
347 		 */
348 		if (!(adev->flags & AMD_IS_APU))
349 			min_us = bytes_to_us(adev, free_vram / 4);
350 		else
351 			min_us = 0; /* Reset accum_us on APUs. */
352 
353 		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
354 	}
355 
356 	/* This is set to 0 if the driver is in debt to disallow (optional)
357 	 * buffer moves.
358 	 */
359 	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
360 
361 	/* Do the same for visible VRAM if half of it is free */
362 	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
363 		u64 total_vis_vram = adev->gmc.visible_vram_size;
364 		u64 used_vis_vram =
365 		  amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
366 
367 		if (used_vis_vram < total_vis_vram) {
368 			u64 free_vis_vram = total_vis_vram - used_vis_vram;
369 			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
370 							  increment_us, us_upper_bound);
371 
372 			if (free_vis_vram >= total_vis_vram / 2)
373 				adev->mm_stats.accum_us_vis =
374 					max(bytes_to_us(adev, free_vis_vram / 2),
375 					    adev->mm_stats.accum_us_vis);
376 		}
377 
378 		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
379 	} else {
380 		*max_vis_bytes = 0;
381 	}
382 
383 	spin_unlock(&adev->mm_stats.lock);
384 }
385 
386 /* Report how many bytes have really been moved for the last command
387  * submission. This can result in a debt that can stop buffer migrations
388  * temporarily.
389  */
390 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
391 				  u64 num_vis_bytes)
392 {
393 	spin_lock(&adev->mm_stats.lock);
394 	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
395 	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
396 	spin_unlock(&adev->mm_stats.lock);
397 }
398 
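/* Per-BO validation callback: place the BO in its preferred domains while
 * the per-submission move budget lasts, and fall back to the allowed
 * domains (retrying once on -ENOMEM) when the budget is exhausted or the
 * BO has dma-buf attachments.
 */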
399 static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
400 {
401 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
402 	struct amdgpu_cs_parser *p = param;
403 	struct ttm_operation_ctx ctx = {
404 		.interruptible = true,
405 		.no_wait_gpu = false,
406 		.resv = bo->tbo.base.resv
407 	};
408 	uint32_t domain;
409 	int r;
410 
411 	if (bo->tbo.pin_count)
412 		return 0;
413 
414 	/* Don't move this buffer if we have depleted our allowance
415 	 * to move it. Don't move anything if the threshold is zero.
416 	 */
417 	if (p->bytes_moved < p->bytes_moved_threshold &&
418 	    (!bo->tbo.base.dma_buf ||
419 	    list_empty(&bo->tbo.base.dma_buf->attachments))) {
420 		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
421 		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
422 			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
423 			 * visible VRAM if we've depleted our allowance to do
424 			 * that.
425 			 */
426 			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
427 				domain = bo->preferred_domains;
428 			else
429 				domain = bo->allowed_domains;
430 		} else {
431 			domain = bo->preferred_domains;
432 		}
433 	} else {
434 		domain = bo->allowed_domains;
435 	}
436 
437 retry:
438 	amdgpu_bo_placement_from_domain(bo, domain);
439 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
440 
441 	p->bytes_moved += ctx.bytes_moved;
442 	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
443 	    amdgpu_bo_in_cpu_visible_vram(bo))
444 		p->bytes_moved_vis += ctx.bytes_moved;
445 
446 	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
447 		domain = bo->allowed_domains;
448 		goto retry;
449 	}
450 
451 	return r;
452 }
453 
454 static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
455 			    struct list_head *validated)
456 {
457 	struct ttm_operation_ctx ctx = { true, false };
458 	struct amdgpu_bo_list_entry *lobj;
459 	int r;
460 
461 	list_for_each_entry(lobj, validated, tv.head) {
462 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
463 		struct mm_struct *usermm;
464 
465 		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
466 		if (usermm && usermm != current->mm)
467 			return -EPERM;
468 
469 		if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
470 		    lobj->user_invalidated && lobj->user_pages) {
471 			amdgpu_bo_placement_from_domain(bo,
472 							AMDGPU_GEM_DOMAIN_CPU);
473 			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
474 			if (r)
475 				return r;
476 
477 			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
478 						     lobj->user_pages);
479 		}
480 
481 		r = amdgpu_cs_bo_validate(p, bo);
482 		if (r)
483 			return r;
484 
485 		kvfree(lobj->user_pages);
486 		lobj->user_pages = NULL;
487 	}
488 	return 0;
489 }
490 
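/* Reserve and validate all buffers referenced by the submission: resolve
 * or create the bo_list, get the userptr backing pages, reserve everything
 * together with the VM page directory, validate the BOs within the move
 * budget and record the GDS/GWS/OA objects and the user fence GART address
 * in the job.
 */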
491 static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
492 				union drm_amdgpu_cs *cs)
493 {
494 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
495 	struct amdgpu_vm *vm = &fpriv->vm;
496 	struct amdgpu_bo_list_entry *e;
497 	struct list_head duplicates;
498 	struct amdgpu_bo *gds;
499 	struct amdgpu_bo *gws;
500 	struct amdgpu_bo *oa;
501 	int r;
502 
503 	INIT_LIST_HEAD(&p->validated);
504 
505 	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
506 	if (cs->in.bo_list_handle) {
507 		if (p->bo_list)
508 			return -EINVAL;
509 
510 		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
511 				       &p->bo_list);
512 		if (r)
513 			return r;
514 	} else if (!p->bo_list) {
515 		/* Create an empty bo_list when no handle is provided */
516 		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
517 					  &p->bo_list);
518 		if (r)
519 			return r;
520 	}
521 
522 	/* One for TTM and one for the CS job */
523 	amdgpu_bo_list_for_each_entry(e, p->bo_list)
524 		e->tv.num_shared = 2;
525 
526 	amdgpu_bo_list_get_list(p->bo_list, &p->validated);
527 
528 	INIT_LIST_HEAD(&duplicates);
529 	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
530 
531 	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
532 		list_add(&p->uf_entry.tv.head, &p->validated);
533 
534 	/* Get the userptr backing pages. If the pages were updated after they
535 	 * were registered in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate()
536 	 * will do amdgpu_ttm_backend_bind() to flush and invalidate the new pages.
537 	 */
538 	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
539 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
540 		bool userpage_invalidated = false;
541 		int i;
542 
543 		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
544 					sizeof(struct page *),
545 					GFP_KERNEL | __GFP_ZERO);
546 		if (!e->user_pages) {
547 			DRM_ERROR("kvmalloc_array failure\n");
548 			r = -ENOMEM;
549 			goto out_free_user_pages;
550 		}
551 
552 		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
553 		if (r) {
554 			kvfree(e->user_pages);
555 			e->user_pages = NULL;
556 			goto out_free_user_pages;
557 		}
558 
559 		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
560 			if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
561 				userpage_invalidated = true;
562 				break;
563 			}
564 		}
565 		e->user_invalidated = userpage_invalidated;
566 	}
567 
568 	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
569 				   &duplicates);
570 	if (unlikely(r != 0)) {
571 		if (r != -ERESTARTSYS)
572 			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
573 		goto out_free_user_pages;
574 	}
575 
576 	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
577 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
578 
579 		e->bo_va = amdgpu_vm_bo_find(vm, bo);
580 	}
581 
582 	/* The wait for the previous fence happens after the reservation lock
583 	 * of the PD root has been taken, so no ctx mutex lock is needed here.
584 	 */
585 	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entity);
586 	if (unlikely(r != 0)) {
587 		if (r != -ERESTARTSYS)
588 			DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
589 		goto error_validate;
590 	}
591 
592 	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
593 					  &p->bytes_moved_vis_threshold);
594 	p->bytes_moved = 0;
595 	p->bytes_moved_vis = 0;
596 
597 	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
598 				      amdgpu_cs_bo_validate, p);
599 	if (r) {
600 		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
601 		goto error_validate;
602 	}
603 
604 	r = amdgpu_cs_list_validate(p, &duplicates);
605 	if (r)
606 		goto error_validate;
607 
608 	r = amdgpu_cs_list_validate(p, &p->validated);
609 	if (r)
610 		goto error_validate;
611 
612 	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
613 				     p->bytes_moved_vis);
614 
615 	gds = p->bo_list->gds_obj;
616 	gws = p->bo_list->gws_obj;
617 	oa = p->bo_list->oa_obj;
618 
619 	if (gds) {
620 		p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
621 		p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
622 	}
623 	if (gws) {
624 		p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
625 		p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
626 	}
627 	if (oa) {
628 		p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
629 		p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
630 	}
631 
632 	if (!r && p->uf_entry.tv.bo) {
633 		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);
634 
635 		r = amdgpu_ttm_alloc_gart(&uf->tbo);
636 		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
637 	}
638 
639 error_validate:
640 	if (r)
641 		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
642 
643 out_free_user_pages:
644 	if (r) {
645 		amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
646 			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
647 
648 			if (!e->user_pages)
649 				continue;
650 			amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
651 			kvfree(e->user_pages);
652 			e->user_pages = NULL;
653 		}
654 	}
655 	return r;
656 }
657 
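/* Add implicit synchronization: pull the relevant fences from the
 * reservation object of every validated BO into the job's sync object,
 * honouring per-BO explicit synchronization.
 */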
658 static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
659 {
660 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
661 	struct amdgpu_bo_list_entry *e;
662 	int r;
663 
664 	list_for_each_entry(e, &p->validated, tv.head) {
665 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
666 		struct dma_resv *resv = bo->tbo.base.resv;
667 		enum amdgpu_sync_mode sync_mode;
668 
669 		sync_mode = amdgpu_bo_explicit_sync(bo) ?
670 			AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
671 		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
672 				     &fpriv->vm);
673 		if (r)
674 			return r;
675 	}
676 	return 0;
677 }
678 
679 /**
680  * amdgpu_cs_parser_fini() - clean parser states
681  * @parser:	parser structure holding parsing context.
682  * @error:	error number
683  * @backoff:	indicator to backoff the reservation
684  *
685  * If error is set then back off the buffer reservations, otherwise just
686  * free the memory used by the parsing context.
687  **/
688 static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
689 				  bool backoff)
690 {
691 	unsigned i;
692 
693 	if (error && backoff)
694 		ttm_eu_backoff_reservation(&parser->ticket,
695 					   &parser->validated);
696 
697 	for (i = 0; i < parser->num_post_deps; i++) {
698 		drm_syncobj_put(parser->post_deps[i].syncobj);
699 		kfree(parser->post_deps[i].chain);
700 	}
701 	kfree(parser->post_deps);
702 
703 	dma_fence_put(parser->fence);
704 
705 	if (parser->ctx) {
706 		mutex_unlock(&parser->ctx->lock);
707 		amdgpu_ctx_put(parser->ctx);
708 	}
709 	if (parser->bo_list)
710 		amdgpu_bo_list_put(parser->bo_list);
711 
712 	for (i = 0; i < parser->nchunks; i++)
713 		kvfree(parser->chunks[i].kdata);
714 	kvfree(parser->chunks);
715 	if (parser->job)
716 		amdgpu_job_free(parser->job);
717 	if (parser->uf_entry.tv.bo) {
718 		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
719 
720 		amdgpu_bo_unref(&uf);
721 	}
722 }
723 
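/* For UVD/VCE rings the IBs are copied or patched in place for VM
 * emulation. Afterwards the VM is brought up to date: freed mappings are
 * cleared, the PRT, CSA and per-BO virtual addresses are updated where
 * needed, the page directories are written and all resulting fences are
 * added to the job before the rings are synced.
 */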
724 static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
725 {
726 	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
727 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
728 	struct amdgpu_device *adev = p->adev;
729 	struct amdgpu_vm *vm = &fpriv->vm;
730 	struct amdgpu_bo_list_entry *e;
731 	struct amdgpu_bo_va *bo_va;
732 	struct amdgpu_bo *bo;
733 	int r;
734 
735 	/* Only for UVD/VCE VM emulation */
736 	if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
737 		unsigned i, j;
738 
739 		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
740 			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
741 			struct amdgpu_bo_va_mapping *m;
742 			struct amdgpu_bo *aobj = NULL;
743 			struct amdgpu_cs_chunk *chunk;
744 			uint64_t offset, va_start;
745 			struct amdgpu_ib *ib;
746 			uint8_t *kptr;
747 
748 			chunk = &p->chunks[i];
749 			ib = &p->job->ibs[j];
750 			chunk_ib = chunk->kdata;
751 
752 			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
753 				continue;
754 
755 			va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
756 			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
757 			if (r) {
758 				DRM_ERROR("IB va_start is invalid\n");
759 				return r;
760 			}
761 
762 			if ((va_start + chunk_ib->ib_bytes) >
763 			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
764 				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
765 				return -EINVAL;
766 			}
767 
768 			/* the IB should be reserved at this point */
769 			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
770 			if (r)
771 				return r;
773 
774 			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
775 			kptr += va_start - offset;
776 
777 			if (ring->funcs->parse_cs) {
778 				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
779 				amdgpu_bo_kunmap(aobj);
780 
781 				r = amdgpu_ring_parse_cs(ring, p, p->job, ib);
782 				if (r)
783 					return r;
784 			} else {
785 				ib->ptr = (uint32_t *)kptr;
786 				r = amdgpu_ring_patch_cs_in_place(ring, p, p->job, ib);
787 				amdgpu_bo_kunmap(aobj);
788 				if (r)
789 					return r;
790 			}
791 
792 			j++;
793 		}
794 	}
795 
796 	if (!p->job->vm)
797 		return amdgpu_cs_sync_rings(p);
798 
799 
800 	r = amdgpu_vm_clear_freed(adev, vm, NULL);
801 	if (r)
802 		return r;
803 
804 	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
805 	if (r)
806 		return r;
807 
808 	r = amdgpu_sync_fence(&p->job->sync, fpriv->prt_va->last_pt_update);
809 	if (r)
810 		return r;
811 
812 	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
813 		bo_va = fpriv->csa_va;
814 		BUG_ON(!bo_va);
815 		r = amdgpu_vm_bo_update(adev, bo_va, false);
816 		if (r)
817 			return r;
818 
819 		r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
820 		if (r)
821 			return r;
822 	}
823 
824 	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
825 		/* ignore duplicates */
826 		bo = ttm_to_amdgpu_bo(e->tv.bo);
827 		if (!bo)
828 			continue;
829 
830 		bo_va = e->bo_va;
831 		if (bo_va == NULL)
832 			continue;
833 
834 		r = amdgpu_vm_bo_update(adev, bo_va, false);
835 		if (r)
836 			return r;
837 
838 		r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
839 		if (r)
840 			return r;
841 	}
842 
843 	r = amdgpu_vm_handle_moved(adev, vm);
844 	if (r)
845 		return r;
846 
847 	r = amdgpu_vm_update_pdes(adev, vm, false);
848 	if (r)
849 		return r;
850 
851 	r = amdgpu_sync_fence(&p->job->sync, vm->last_update);
852 	if (r)
853 		return r;
854 
855 	p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
856 
857 	if (amdgpu_vm_debug) {
858 		/* Invalidate all BOs to test for userspace bugs */
859 		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
860 			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
861 
862 			/* ignore duplicates */
863 			if (!bo)
864 				continue;
865 
866 			amdgpu_vm_bo_invalidate(adev, bo, false);
867 		}
868 	}
869 
870 	return amdgpu_cs_sync_rings(p);
871 }
872 
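/* Translate the IB chunks into amdgpu_ib structures: look up the scheduler
 * entity for the requested IP type, instance and ring, enforce that a GFX
 * submission carries at most one preemptible CE and one preemptible DE IB,
 * and reject user fences on rings that do not support them.
 */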
873 static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
874 			     struct amdgpu_cs_parser *parser)
875 {
876 	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
877 	struct amdgpu_vm *vm = &fpriv->vm;
878 	int r, ce_preempt = 0, de_preempt = 0;
879 	struct amdgpu_ring *ring;
880 	int i, j;
881 
882 	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
883 		struct amdgpu_cs_chunk *chunk;
884 		struct amdgpu_ib *ib;
885 		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
886 		struct drm_sched_entity *entity;
887 
888 		chunk = &parser->chunks[i];
889 		ib = &parser->job->ibs[j];
890 		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
891 
892 		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
893 			continue;
894 
895 		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
896 		    (amdgpu_mcbp || amdgpu_sriov_vf(adev))) {
897 			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
898 				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
899 					ce_preempt++;
900 				else
901 					de_preempt++;
902 			}
903 
904 			/* each GFX submission allows at most one preemptible IB each for CE and DE */
905 			if (ce_preempt > 1 || de_preempt > 1)
906 				return -EINVAL;
907 		}
908 
909 		r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
910 					  chunk_ib->ip_instance, chunk_ib->ring,
911 					  &entity);
912 		if (r)
913 			return r;
914 
915 		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
916 			parser->job->preamble_status |=
917 				AMDGPU_PREAMBLE_IB_PRESENT;
918 
919 		if (parser->entity && parser->entity != entity)
920 			return -EINVAL;
921 
922 		/* Return if there is no run queue associated with this entity.
923 		 * Possibly because of a disabled HW IP. */
924 		if (entity->rq == NULL)
925 			return -EINVAL;
926 
927 		parser->entity = entity;
928 
929 		ring = to_amdgpu_ring(entity->rq->sched);
930 		r =  amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
931 				   chunk_ib->ib_bytes : 0,
932 				   AMDGPU_IB_POOL_DELAYED, ib);
933 		if (r) {
934 			DRM_ERROR("Failed to get ib !\n");
935 			return r;
936 		}
937 
938 		ib->gpu_addr = chunk_ib->va_start;
939 		ib->length_dw = chunk_ib->ib_bytes / 4;
940 		ib->flags = chunk_ib->flags;
941 
942 		j++;
943 	}
944 
945 	/* MM engine doesn't support user fences */
946 	ring = to_amdgpu_ring(parser->entity->rq->sched);
947 	if (parser->job->uf_addr && ring->funcs->no_user_fence)
948 		return -EINVAL;
949 
950 	return 0;
951 }
952 
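/* Handle DEPENDENCIES and SCHEDULED_DEPENDENCIES chunks. The chunk payload
 * is an array of struct drm_amdgpu_cs_chunk_dep entries, so the number of
 * dependencies is length_dw * 4 / sizeof(struct drm_amdgpu_cs_chunk_dep);
 * with the 24 byte uapi layout a chunk of 6 dwords describes one
 * dependency. For SCHEDULED_DEPENDENCIES the job only waits for the
 * dependency to be scheduled, not for it to finish.
 */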
953 static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
954 				       struct amdgpu_cs_chunk *chunk)
955 {
956 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
957 	unsigned num_deps;
958 	int i, r;
959 	struct drm_amdgpu_cs_chunk_dep *deps;
960 
961 	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
962 	num_deps = chunk->length_dw * 4 /
963 		sizeof(struct drm_amdgpu_cs_chunk_dep);
964 
965 	for (i = 0; i < num_deps; ++i) {
966 		struct amdgpu_ctx *ctx;
967 		struct drm_sched_entity *entity;
968 		struct dma_fence *fence;
969 
970 		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
971 		if (ctx == NULL)
972 			return -EINVAL;
973 
974 		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
975 					  deps[i].ip_instance,
976 					  deps[i].ring, &entity);
977 		if (r) {
978 			amdgpu_ctx_put(ctx);
979 			return r;
980 		}
981 
982 		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
983 		amdgpu_ctx_put(ctx);
984 
985 		if (IS_ERR(fence))
986 			return PTR_ERR(fence);
987 		else if (!fence)
988 			continue;
989 
990 		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
991 			struct drm_sched_fence *s_fence;
992 			struct dma_fence *old = fence;
993 
994 			s_fence = to_drm_sched_fence(fence);
995 			fence = dma_fence_get(&s_fence->scheduled);
996 			dma_fence_put(old);
997 		}
998 
999 		r = amdgpu_sync_fence(&p->job->sync, fence);
1000 		dma_fence_put(fence);
1001 		if (r)
1002 			return r;
1003 	}
1004 	return 0;
1005 }
1006 
1007 static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
1008 						 uint32_t handle, u64 point,
1009 						 u64 flags)
1010 {
1011 	struct dma_fence *fence;
1012 	int r;
1013 
1014 	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
1015 	if (r) {
1016 		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
1017 			  handle, point, r);
1018 		return r;
1019 	}
1020 
1021 	r = amdgpu_sync_fence(&p->job->sync, fence);
1022 	dma_fence_put(fence);
1023 
1024 	return r;
1025 }
1026 
1027 static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
1028 					    struct amdgpu_cs_chunk *chunk)
1029 {
1030 	struct drm_amdgpu_cs_chunk_sem *deps;
1031 	unsigned num_deps;
1032 	int i, r;
1033 
1034 	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
1035 	num_deps = chunk->length_dw * 4 /
1036 		sizeof(struct drm_amdgpu_cs_chunk_sem);
1037 	for (i = 0; i < num_deps; ++i) {
1038 		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
1039 							  0, 0);
1040 		if (r)
1041 			return r;
1042 	}
1043 
1044 	return 0;
1045 }
1046 
1047 
1048 static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
1049 						     struct amdgpu_cs_chunk *chunk)
1050 {
1051 	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
1052 	unsigned num_deps;
1053 	int i, r;
1054 
1055 	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
1056 	num_deps = chunk->length_dw * 4 /
1057 		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
1058 	for (i = 0; i < num_deps; ++i) {
1059 		r = amdgpu_syncobj_lookup_and_add_to_sync(p,
1060 							  syncobj_deps[i].handle,
1061 							  syncobj_deps[i].point,
1062 							  syncobj_deps[i].flags);
1063 		if (r)
1064 			return r;
1065 	}
1066 
1067 	return 0;
1068 }
1069 
1070 static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
1071 					     struct amdgpu_cs_chunk *chunk)
1072 {
1073 	struct drm_amdgpu_cs_chunk_sem *deps;
1074 	unsigned num_deps;
1075 	int i;
1076 
1077 	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
1078 	num_deps = chunk->length_dw * 4 /
1079 		sizeof(struct drm_amdgpu_cs_chunk_sem);
1080 
1081 	if (p->post_deps)
1082 		return -EINVAL;
1083 
1084 	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
1085 				     GFP_KERNEL);
1086 	p->num_post_deps = 0;
1087 
1088 	if (!p->post_deps)
1089 		return -ENOMEM;
1090 
1091 
1092 	for (i = 0; i < num_deps; ++i) {
1093 		p->post_deps[i].syncobj =
1094 			drm_syncobj_find(p->filp, deps[i].handle);
1095 		if (!p->post_deps[i].syncobj)
1096 			return -EINVAL;
1097 		p->post_deps[i].chain = NULL;
1098 		p->post_deps[i].point = 0;
1099 		p->num_post_deps++;
1100 	}
1101 
1102 	return 0;
1103 }
1104 
1105 
1106 static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
1107 						      struct amdgpu_cs_chunk *chunk)
1108 {
1109 	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
1110 	unsigned num_deps;
1111 	int i;
1112 
1113 	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
1114 	num_deps = chunk->length_dw * 4 /
1115 		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
1116 
1117 	if (p->post_deps)
1118 		return -EINVAL;
1119 
1120 	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
1121 				     GFP_KERNEL);
1122 	p->num_post_deps = 0;
1123 
1124 	if (!p->post_deps)
1125 		return -ENOMEM;
1126 
1127 	for (i = 0; i < num_deps; ++i) {
1128 		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
1129 
1130 		dep->chain = NULL;
1131 		if (syncobj_deps[i].point) {
1132 			dep->chain = dma_fence_chain_alloc();
1133 			if (!dep->chain)
1134 				return -ENOMEM;
1135 		}
1136 
1137 		dep->syncobj = drm_syncobj_find(p->filp,
1138 						syncobj_deps[i].handle);
1139 		if (!dep->syncobj) {
1140 			dma_fence_chain_free(dep->chain);
1141 			return -EINVAL;
1142 		}
1143 		dep->point = syncobj_deps[i].point;
1144 		p->num_post_deps++;
1145 	}
1146 
1147 	return 0;
1148 }
1149 
1150 static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
1151 				  struct amdgpu_cs_parser *p)
1152 {
1153 	int i, r;
1154 
1155 	/* TODO: Investigate why we still need the context lock */
1156 	mutex_unlock(&p->ctx->lock);
1157 
1158 	for (i = 0; i < p->nchunks; ++i) {
1159 		struct amdgpu_cs_chunk *chunk;
1160 
1161 		chunk = &p->chunks[i];
1162 
1163 		switch (chunk->chunk_id) {
1164 		case AMDGPU_CHUNK_ID_DEPENDENCIES:
1165 		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
1166 			r = amdgpu_cs_process_fence_dep(p, chunk);
1167 			if (r)
1168 				goto out;
1169 			break;
1170 		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
1171 			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
1172 			if (r)
1173 				goto out;
1174 			break;
1175 		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
1176 			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
1177 			if (r)
1178 				goto out;
1179 			break;
1180 		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
1181 			r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
1182 			if (r)
1183 				goto out;
1184 			break;
1185 		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
1186 			r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
1187 			if (r)
1188 				goto out;
1189 			break;
1190 		}
1191 	}
1192 
1193 out:
1194 	mutex_lock(&p->ctx->lock);
1195 	return r;
1196 }
1197 
1198 static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
1199 {
1200 	int i;
1201 
1202 	for (i = 0; i < p->num_post_deps; ++i) {
1203 		if (p->post_deps[i].chain && p->post_deps[i].point) {
1204 			drm_syncobj_add_point(p->post_deps[i].syncobj,
1205 					      p->post_deps[i].chain,
1206 					      p->fence, p->post_deps[i].point);
1207 			p->post_deps[i].chain = NULL;
1208 		} else {
1209 			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
1210 						  p->fence);
1211 		}
1212 	}
1213 }
1214 
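/* Final step of the submission: initialize and arm the scheduler job,
 * re-check the userptr pages under the notifier lock (returning -EAGAIN so
 * libdrm restarts the ioctl if they changed), publish the finished fence
 * through the context and the post-dependencies and push the job to the
 * scheduler.
 */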
1215 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1216 			    union drm_amdgpu_cs *cs)
1217 {
1218 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1219 	struct drm_sched_entity *entity = p->entity;
1220 	struct amdgpu_bo_list_entry *e;
1221 	struct amdgpu_job *job;
1222 	uint64_t seq;
1223 	int r;
1224 
1225 	job = p->job;
1226 	p->job = NULL;
1227 
1228 	r = drm_sched_job_init(&job->base, entity, &fpriv->vm);
1229 	if (r)
1230 		goto error_unlock;
1231 
1232 	drm_sched_job_arm(&job->base);
1233 
1234 	/* No memory allocation is allowed while holding the notifier lock.
1235 	 * The lock is held until amdgpu_cs_submit() is finished and the fence
1236 	 * is added to the BOs.
1237 	 */
1238 	mutex_lock(&p->adev->notifier_lock);
1239 
1240 	/* If the userptr pages were invalidated after amdgpu_cs_parser_bos(),
1241 	 * return -EAGAIN; drmIoctl() in libdrm will restart the amdgpu_cs_ioctl.
1242 	 */
1243 	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
1244 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
1245 
1246 		r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1247 	}
1248 	if (r) {
1249 		r = -EAGAIN;
1250 		goto error_abort;
1251 	}
1252 
1253 	p->fence = dma_fence_get(&job->base.s_fence->finished);
1254 
1255 	seq = amdgpu_ctx_add_fence(p->ctx, entity, p->fence);
1256 	amdgpu_cs_post_dependencies(p);
1257 
1258 	if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
1259 	    !p->ctx->preamble_presented) {
1260 		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
1261 		p->ctx->preamble_presented = true;
1262 	}
1263 
1264 	cs->out.handle = seq;
1265 	job->uf_sequence = seq;
1266 
1267 	amdgpu_job_free_resources(job);
1268 
1269 	trace_amdgpu_cs_ioctl(job);
1270 	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
1271 	drm_sched_entity_push_job(&job->base);
1272 
1273 	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
1274 
1275 	/* Make sure all BOs are remembered as writers */
1276 	amdgpu_bo_list_for_each_entry(e, p->bo_list)
1277 		e->tv.num_shared = 0;
1278 
1279 	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
1280 	mutex_unlock(&p->adev->notifier_lock);
1281 
1282 	return 0;
1283 
1284 error_abort:
1285 	drm_sched_job_cleanup(&job->base);
1286 	mutex_unlock(&p->adev->notifier_lock);
1287 
1288 error_unlock:
1289 	amdgpu_job_free(job);
1290 	return r;
1291 }
1292 
1293 static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *parser)
1294 {
1295 	int i;
1296 
1297 	if (!trace_amdgpu_cs_enabled())
1298 		return;
1299 
1300 	for (i = 0; i < parser->job->num_ibs; i++)
1301 		trace_amdgpu_cs(parser, i);
1302 }
1303 
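/**
 * amdgpu_cs_ioctl - submit a command buffer
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Parse the chunks, resolve the dependencies, reserve and validate the
 * buffers and push the resulting job to the GPU scheduler.
 */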
1304 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1305 {
1306 	struct amdgpu_device *adev = drm_to_adev(dev);
1307 	union drm_amdgpu_cs *cs = data;
1308 	struct amdgpu_cs_parser parser = {};
1309 	bool reserved_buffers = false;
1310 	int r;
1311 
1312 	if (amdgpu_ras_intr_triggered())
1313 		return -EHWPOISON;
1314 
1315 	if (!adev->accel_working)
1316 		return -EBUSY;
1317 
1318 	parser.adev = adev;
1319 	parser.filp = filp;
1320 
1321 	r = amdgpu_cs_parser_init(&parser, data);
1322 	if (r) {
1323 		if (printk_ratelimit())
1324 			DRM_ERROR("Failed to initialize parser %d!\n", r);
1325 		goto out;
1326 	}
1327 
1328 	r = amdgpu_cs_ib_fill(adev, &parser);
1329 	if (r)
1330 		goto out;
1331 
1332 	r = amdgpu_cs_dependencies(adev, &parser);
1333 	if (r) {
1334 		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
1335 		goto out;
1336 	}
1337 
1338 	r = amdgpu_cs_parser_bos(&parser, data);
1339 	if (r) {
1340 		if (r == -ENOMEM)
1341 			DRM_ERROR("Not enough memory for command submission!\n");
1342 		else if (r != -ERESTARTSYS && r != -EAGAIN)
1343 			DRM_ERROR("Failed to process the buffer list %d!\n", r);
1344 		goto out;
1345 	}
1346 
1347 	reserved_buffers = true;
1348 
1349 	trace_amdgpu_cs_ibs(&parser);
1350 
1351 	r = amdgpu_cs_vm_handling(&parser);
1352 	if (r)
1353 		goto out;
1354 
1355 	r = amdgpu_cs_submit(&parser, cs);
1356 
1357 out:
1358 	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
1359 
1360 	return r;
1361 }
1362 
1363 /**
1364  * amdgpu_cs_wait_ioctl - wait for a command submission to finish
1365  *
1366  * @dev: drm device
1367  * @data: data from userspace
1368  * @filp: file private
1369  *
1370  * Wait for the command submission identified by handle to finish.
1371  */
1372 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
1373 			 struct drm_file *filp)
1374 {
1375 	union drm_amdgpu_wait_cs *wait = data;
1376 	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
1377 	struct drm_sched_entity *entity;
1378 	struct amdgpu_ctx *ctx;
1379 	struct dma_fence *fence;
1380 	long r;
1381 
1382 	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
1383 	if (ctx == NULL)
1384 		return -EINVAL;
1385 
1386 	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
1387 				  wait->in.ring, &entity);
1388 	if (r) {
1389 		amdgpu_ctx_put(ctx);
1390 		return r;
1391 	}
1392 
1393 	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
1394 	if (IS_ERR(fence))
1395 		r = PTR_ERR(fence);
1396 	else if (fence) {
1397 		r = dma_fence_wait_timeout(fence, true, timeout);
1398 		if (r > 0 && fence->error)
1399 			r = fence->error;
1400 		dma_fence_put(fence);
1401 	} else
1402 		r = 1;
1403 
1404 	amdgpu_ctx_put(ctx);
1405 	if (r < 0)
1406 		return r;
1407 
1408 	memset(wait, 0, sizeof(*wait));
1409 	wait->out.status = (r == 0);
1410 
1411 	return 0;
1412 }
1413 
1414 /**
1415  * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
1416  *
1417  * @adev: amdgpu device
1418  * @filp: file private
1419  * @user: drm_amdgpu_fence copied from user space
1420  */
1421 static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
1422 					     struct drm_file *filp,
1423 					     struct drm_amdgpu_fence *user)
1424 {
1425 	struct drm_sched_entity *entity;
1426 	struct amdgpu_ctx *ctx;
1427 	struct dma_fence *fence;
1428 	int r;
1429 
1430 	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
1431 	if (ctx == NULL)
1432 		return ERR_PTR(-EINVAL);
1433 
1434 	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
1435 				  user->ring, &entity);
1436 	if (r) {
1437 		amdgpu_ctx_put(ctx);
1438 		return ERR_PTR(r);
1439 	}
1440 
1441 	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
1442 	amdgpu_ctx_put(ctx);
1443 
1444 	return fence;
1445 }
1446 
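/**
 * amdgpu_cs_fence_to_handle_ioctl - convert a CS fence into a handle
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Convert the fence of a previous command submission into a syncobj
 * handle, a syncobj file descriptor or a sync_file file descriptor.
 */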
1447 int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
1448 				    struct drm_file *filp)
1449 {
1450 	struct amdgpu_device *adev = drm_to_adev(dev);
1451 	union drm_amdgpu_fence_to_handle *info = data;
1452 	struct dma_fence *fence;
1453 	struct drm_syncobj *syncobj;
1454 	struct sync_file *sync_file;
1455 	int fd, r;
1456 
1457 	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
1458 	if (IS_ERR(fence))
1459 		return PTR_ERR(fence);
1460 
1461 	if (!fence)
1462 		fence = dma_fence_get_stub();
1463 
1464 	switch (info->in.what) {
1465 	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
1466 		r = drm_syncobj_create(&syncobj, 0, fence);
1467 		dma_fence_put(fence);
1468 		if (r)
1469 			return r;
1470 		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
1471 		drm_syncobj_put(syncobj);
1472 		return r;
1473 
1474 	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
1475 		r = drm_syncobj_create(&syncobj, 0, fence);
1476 		dma_fence_put(fence);
1477 		if (r)
1478 			return r;
1479 		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
1480 		drm_syncobj_put(syncobj);
1481 		return r;
1482 
1483 	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
1484 		fd = get_unused_fd_flags(O_CLOEXEC);
1485 		if (fd < 0) {
1486 			dma_fence_put(fence);
1487 			return fd;
1488 		}
1489 
1490 		sync_file = sync_file_create(fence);
1491 		dma_fence_put(fence);
1492 		if (!sync_file) {
1493 			put_unused_fd(fd);
1494 			return -ENOMEM;
1495 		}
1496 
1497 		fd_install(fd, sync_file->file);
1498 		info->out.handle = fd;
1499 		return 0;
1500 
1501 	default:
1502 		dma_fence_put(fence);
1503 		return -EINVAL;
1504 	}
1505 }
1506 
1507 /**
1508  * amdgpu_cs_wait_all_fences - wait on all fences to signal
1509  *
1510  * @adev: amdgpu device
1511  * @filp: file private
1512  * @wait: wait parameters
1513  * @fences: array of drm_amdgpu_fence
1514  */
1515 static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1516 				     struct drm_file *filp,
1517 				     union drm_amdgpu_wait_fences *wait,
1518 				     struct drm_amdgpu_fence *fences)
1519 {
1520 	uint32_t fence_count = wait->in.fence_count;
1521 	unsigned int i;
1522 	long r = 1;
1523 
1524 	for (i = 0; i < fence_count; i++) {
1525 		struct dma_fence *fence;
1526 		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1527 
1528 		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1529 		if (IS_ERR(fence))
1530 			return PTR_ERR(fence);
1531 		else if (!fence)
1532 			continue;
1533 
1534 		r = dma_fence_wait_timeout(fence, true, timeout);
1535 		dma_fence_put(fence);
1536 		if (r < 0)
1537 			return r;
1538 
1539 		if (r == 0)
1540 			break;
1541 
1542 		if (fence->error)
1543 			return fence->error;
1544 	}
1545 
1546 	memset(wait, 0, sizeof(*wait));
1547 	wait->out.status = (r > 0);
1548 
1549 	return 0;
1550 }
1551 
1552 /**
1553  * amdgpu_cs_wait_any_fence - wait on any fence to signal
1554  *
1555  * @adev: amdgpu device
1556  * @filp: file private
1557  * @wait: wait parameters
1558  * @fences: array of drm_amdgpu_fence
1559  */
1560 static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
1561 				    struct drm_file *filp,
1562 				    union drm_amdgpu_wait_fences *wait,
1563 				    struct drm_amdgpu_fence *fences)
1564 {
1565 	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1566 	uint32_t fence_count = wait->in.fence_count;
1567 	uint32_t first = ~0;
1568 	struct dma_fence **array;
1569 	unsigned int i;
1570 	long r;
1571 
1572 	/* Prepare the fence array */
1573 	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
1574 
1575 	if (array == NULL)
1576 		return -ENOMEM;
1577 
1578 	for (i = 0; i < fence_count; i++) {
1579 		struct dma_fence *fence;
1580 
1581 		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1582 		if (IS_ERR(fence)) {
1583 			r = PTR_ERR(fence);
1584 			goto err_free_fence_array;
1585 		} else if (fence) {
1586 			array[i] = fence;
1587 		} else { /* NULL, the fence has been already signaled */
1588 			r = 1;
1589 			first = i;
1590 			goto out;
1591 		}
1592 	}
1593 
1594 	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
1595 				       &first);
1596 	if (r < 0)
1597 		goto err_free_fence_array;
1598 
1599 out:
1600 	memset(wait, 0, sizeof(*wait));
1601 	wait->out.status = (r > 0);
1602 	wait->out.first_signaled = first;
1603 
1604 	if (first < fence_count && array[first])
1605 		r = array[first]->error;
1606 	else
1607 		r = 0;
1608 
1609 err_free_fence_array:
1610 	for (i = 0; i < fence_count; i++)
1611 		dma_fence_put(array[i]);
1612 	kfree(array);
1613 
1614 	return r;
1615 }
1616 
1617 /**
1618  * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
1619  *
1620  * @dev: drm device
1621  * @data: data from userspace
1622  * @filp: file private
1623  */
1624 int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1625 				struct drm_file *filp)
1626 {
1627 	struct amdgpu_device *adev = drm_to_adev(dev);
1628 	union drm_amdgpu_wait_fences *wait = data;
1629 	uint32_t fence_count = wait->in.fence_count;
1630 	struct drm_amdgpu_fence *fences_user;
1631 	struct drm_amdgpu_fence *fences;
1632 	int r;
1633 
1634 	/* Get the fences from userspace */
1635 	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
1636 			GFP_KERNEL);
1637 	if (fences == NULL)
1638 		return -ENOMEM;
1639 
1640 	fences_user = u64_to_user_ptr(wait->in.fences);
1641 	if (copy_from_user(fences, fences_user,
1642 		sizeof(struct drm_amdgpu_fence) * fence_count)) {
1643 		r = -EFAULT;
1644 		goto err_free_fences;
1645 	}
1646 
1647 	if (wait->in.wait_all)
1648 		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
1649 	else
1650 		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
1651 
1652 err_free_fences:
1653 	kfree(fences);
1654 
1655 	return r;
1656 }
1657 
1658 /**
1659  * amdgpu_cs_find_mapping - find bo_va for VM address
1660  *
1661  * @parser: command submission parser context
1662  * @addr: VM address
1663  * @bo: resulting BO of the mapping found
1664  * @map: Placeholder to return found BO mapping
1665  *
1666  * Search the buffer objects in the command submission context for a certain
1667  * virtual memory address. Returns 0 and fills in @bo and @map when the
1668  * mapping is found, -EINVAL otherwise.
1669  */
1670 int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1671 			   uint64_t addr, struct amdgpu_bo **bo,
1672 			   struct amdgpu_bo_va_mapping **map)
1673 {
1674 	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
1675 	struct ttm_operation_ctx ctx = { false, false };
1676 	struct amdgpu_vm *vm = &fpriv->vm;
1677 	struct amdgpu_bo_va_mapping *mapping;
1678 	int r;
1679 
1680 	addr /= AMDGPU_GPU_PAGE_SIZE;
1681 
1682 	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
1683 	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
1684 		return -EINVAL;
1685 
1686 	*bo = mapping->bo_va->base.bo;
1687 	*map = mapping;
1688 
1689 	/* Double check that the BO is reserved by this CS */
1690 	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
1691 		return -EINVAL;
1692 
1693 	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
1694 		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1695 		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
1696 		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
1697 		if (r)
1698 			return r;
1699 	}
1700 
1701 	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
1702 }
1703