1 /*
2 * Copyright 2008 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Jerome Glisse <glisse@freedesktop.org>
26 */
27
28 #include <linux/file.h>
29 #include <linux/pagemap.h>
30 #include <linux/sync_file.h>
31 #include <linux/dma-buf.h>
32 #include <linux/hmm.h>
33
34 #include <drm/amdgpu_drm.h>
35 #include <drm/drm_syncobj.h>
36 #include <drm/ttm/ttm_tt.h>
37
38 #include "amdgpu_cs.h"
39 #include "amdgpu.h"
40 #include "amdgpu_trace.h"
41 #include "amdgpu_gmc.h"
42 #include "amdgpu_gem.h"
43 #include "amdgpu_ras.h"
44
45 static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
46 struct amdgpu_device *adev,
47 struct drm_file *filp,
48 union drm_amdgpu_cs *cs)
49 {
50 struct amdgpu_fpriv *fpriv = filp->driver_priv;
51
52 if (cs->in.num_chunks == 0)
53 return -EINVAL;
54
55 memset(p, 0, sizeof(*p));
56 p->adev = adev;
57 p->filp = filp;
58
59 p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
60 if (!p->ctx)
61 return -EINVAL;
62
63 if (atomic_read(&p->ctx->guilty)) {
64 amdgpu_ctx_put(p->ctx);
65 return -ECANCELED;
66 }
67
68 amdgpu_sync_create(&p->sync);
69 drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
70 DRM_EXEC_IGNORE_DUPLICATES, 0);
71 return 0;
72 }
73
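/* Find the gang job index for the scheduler entity of an IB chunk, adding a
 * new job slot if the entity isn't part of the gang yet.
 */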
74 static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
75 struct drm_amdgpu_cs_chunk_ib *chunk_ib)
76 {
77 struct drm_sched_entity *entity;
78 unsigned int i;
79 int r;
80
81 r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
82 chunk_ib->ip_instance,
83 chunk_ib->ring, &entity);
84 if (r)
85 return r;
86
87 /*
88 * Abort if there is no run queue associated with this entity.
89 * Possibly because of disabled HW IP.
90 */
91 if (entity->rq == NULL)
92 return -EINVAL;
93
94 /* Check if we can add this IB to some existing job */
95 for (i = 0; i < p->gang_size; ++i)
96 if (p->entities[i] == entity)
97 return i;
98
99 /* If not, increase the gang size if possible */
100 if (i == AMDGPU_CS_GANG_SIZE)
101 return -EINVAL;
102
103 p->entities[i] = entity;
104 p->gang_size = i + 1;
105 return i;
106 }
107
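/* First pass over an IB chunk: account the IB against the per-ring IB limit
 * of its job and remember that job as the gang leader candidate.
 */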
108 static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
109 struct drm_amdgpu_cs_chunk_ib *chunk_ib,
110 unsigned int *num_ibs)
111 {
112 int r;
113
114 r = amdgpu_cs_job_idx(p, chunk_ib);
115 if (r < 0)
116 return r;
117
118 if (num_ibs[r] >= amdgpu_ring_max_ibs(chunk_ib->ip_type))
119 return -EINVAL;
120
121 ++(num_ibs[r]);
122 p->gang_leader_idx = r;
123 return 0;
124 }
125
126 static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
127 struct drm_amdgpu_cs_chunk_fence *data,
128 uint32_t *offset)
129 {
130 struct drm_gem_object *gobj;
131 unsigned long size;
132
133 gobj = drm_gem_object_lookup(p->filp, data->handle);
134 if (gobj == NULL)
135 return -EINVAL;
136
137 p->uf_bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
138 drm_gem_object_put(gobj);
139
140 size = amdgpu_bo_size(p->uf_bo);
141 if (size != PAGE_SIZE || data->offset > (size - 8))
142 return -EINVAL;
143
144 if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm))
145 return -EINVAL;
146
147 *offset = data->offset;
148 return 0;
149 }
150
151 static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
152 struct drm_amdgpu_bo_list_in *data)
153 {
154 struct drm_amdgpu_bo_list_entry *info;
155 int r;
156
157 r = amdgpu_bo_create_list_entry_array(data, &info);
158 if (r)
159 return r;
160
161 r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
162 &p->bo_list);
163 if (r)
164 goto error_free;
165
166 kvfree(info);
167 return 0;
168
169 error_free:
170 kvfree(info);
171
172 return r;
173 }
174
175 /* Copy the data from userspace and go over it the first time */
176 static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
177 union drm_amdgpu_cs *cs)
178 {
179 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
180 unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
181 struct amdgpu_vm *vm = &fpriv->vm;
182 uint64_t *chunk_array;
183 uint32_t uf_offset = 0;
184 size_t size;
185 int ret;
186 int i;
187
188 chunk_array = memdup_array_user(u64_to_user_ptr(cs->in.chunks),
189 cs->in.num_chunks,
190 sizeof(uint64_t));
191 if (IS_ERR(chunk_array))
192 return PTR_ERR(chunk_array);
193
194 p->nchunks = cs->in.num_chunks;
195 p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
196 GFP_KERNEL);
197 if (!p->chunks) {
198 ret = -ENOMEM;
199 goto free_chunk;
200 }
201
202 for (i = 0; i < p->nchunks; i++) {
203 struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
204 struct drm_amdgpu_cs_chunk user_chunk;
205
206 chunk_ptr = u64_to_user_ptr(chunk_array[i]);
207 if (copy_from_user(&user_chunk, chunk_ptr,
208 sizeof(struct drm_amdgpu_cs_chunk))) {
209 ret = -EFAULT;
210 i--;
211 goto free_partial_kdata;
212 }
213 p->chunks[i].chunk_id = user_chunk.chunk_id;
214 p->chunks[i].length_dw = user_chunk.length_dw;
215
216 size = p->chunks[i].length_dw;
217
218 p->chunks[i].kdata = vmemdup_array_user(u64_to_user_ptr(user_chunk.chunk_data),
219 size,
220 sizeof(uint32_t));
221 if (IS_ERR(p->chunks[i].kdata)) {
222 ret = PTR_ERR(p->chunks[i].kdata);
223 i--;
224 goto free_partial_kdata;
225 }
226 size *= sizeof(uint32_t);
227
228 /* Assume the worst on the following checks */
229 ret = -EINVAL;
230 switch (p->chunks[i].chunk_id) {
231 case AMDGPU_CHUNK_ID_IB:
232 if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
233 goto free_partial_kdata;
234
235 ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
236 if (ret)
237 goto free_partial_kdata;
238 break;
239
240 case AMDGPU_CHUNK_ID_FENCE:
241 if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
242 goto free_partial_kdata;
243
244 ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
245 &uf_offset);
246 if (ret)
247 goto free_partial_kdata;
248 break;
249
250 case AMDGPU_CHUNK_ID_BO_HANDLES:
251 if (size < sizeof(struct drm_amdgpu_bo_list_in))
252 goto free_partial_kdata;
253
254 /* Only a single BO list is allowed to simplify handling. */
255 if (p->bo_list)
256 goto free_partial_kdata;
257
258 ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
259 if (ret)
260 goto free_partial_kdata;
261 break;
262
263 case AMDGPU_CHUNK_ID_DEPENDENCIES:
264 case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
265 case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
266 case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
267 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
268 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
269 case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
270 break;
271
272 default:
273 goto free_partial_kdata;
274 }
275 }
276
277 if (!p->gang_size || (amdgpu_sriov_vf(p->adev) && p->gang_size > 1)) {
278 ret = -EINVAL;
279 goto free_all_kdata;
280 }
281
282 for (i = 0; i < p->gang_size; ++i) {
283 ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
284 num_ibs[i], &p->jobs[i],
285 p->filp->client_id);
286 if (ret)
287 goto free_all_kdata;
288 switch (p->adev->enforce_isolation[fpriv->xcp_id]) {
289 case AMDGPU_ENFORCE_ISOLATION_DISABLE:
290 default:
291 p->jobs[i]->enforce_isolation = false;
292 p->jobs[i]->run_cleaner_shader = false;
293 break;
294 case AMDGPU_ENFORCE_ISOLATION_ENABLE:
295 p->jobs[i]->enforce_isolation = true;
296 p->jobs[i]->run_cleaner_shader = true;
297 break;
298 case AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY:
299 p->jobs[i]->enforce_isolation = true;
300 p->jobs[i]->run_cleaner_shader = false;
301 break;
302 case AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER:
303 p->jobs[i]->enforce_isolation = true;
304 p->jobs[i]->run_cleaner_shader = false;
305 break;
306 }
307 }
308 p->gang_leader = p->jobs[p->gang_leader_idx];
309
310 if (p->ctx->generation != p->gang_leader->generation) {
311 ret = -ECANCELED;
312 goto free_all_kdata;
313 }
314
315 if (p->uf_bo)
316 p->gang_leader->uf_addr = uf_offset;
317 kvfree(chunk_array);
318
319 /* Use this opportunity to fill in task info for the vm */
320 amdgpu_vm_set_task_info(vm);
321
322 return 0;
323
324 free_all_kdata:
325 i = p->nchunks - 1;
326 free_partial_kdata:
327 for (; i >= 0; i--)
328 kvfree(p->chunks[i].kdata);
329 kvfree(p->chunks);
330 p->chunks = NULL;
331 p->nchunks = 0;
332 free_chunk:
333 kvfree(chunk_array);
334
335 return ret;
336 }
337
338 static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
339 struct amdgpu_cs_chunk *chunk,
340 unsigned int *ce_preempt,
341 unsigned int *de_preempt)
342 {
343 struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
344 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
345 struct amdgpu_vm *vm = &fpriv->vm;
346 struct amdgpu_ring *ring;
347 struct amdgpu_job *job;
348 struct amdgpu_ib *ib;
349 int r;
350
351 r = amdgpu_cs_job_idx(p, chunk_ib);
352 if (r < 0)
353 return r;
354
355 job = p->jobs[r];
356 ring = amdgpu_job_ring(job);
357 ib = &job->ibs[job->num_ibs++];
358
359 /* submissions to kernel queues are disabled */
360 if (ring->no_user_submission)
361 return -EINVAL;
362
363 /* MM engine doesn't support user fences */
364 if (p->uf_bo && ring->funcs->no_user_fence)
365 return -EINVAL;
366
367 if (!p->adev->debug_enable_ce_cs &&
368 chunk_ib->flags & AMDGPU_IB_FLAG_CE) {
369 dev_err_ratelimited(p->adev->dev, "CE CS is blocked, use debug=0x400 to override\n");
370 return -EINVAL;
371 }
372
373 if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
374 chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
375 if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
376 (*ce_preempt)++;
377 else
378 (*de_preempt)++;
379
380 /* Each GFX command submission allows at most one preemptible
381 * IB each for CE and DE */
382 if (*ce_preempt > 1 || *de_preempt > 1)
383 return -EINVAL;
384 }
385
386 if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
387 job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
388
389 r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
390 chunk_ib->ib_bytes : 0,
391 AMDGPU_IB_POOL_DELAYED, ib);
392 if (r) {
393 drm_err(adev_to_drm(p->adev), "Failed to get ib !\n");
394 return r;
395 }
396
397 ib->gpu_addr = chunk_ib->va_start;
398 ib->length_dw = chunk_ib->ib_bytes / 4;
399 ib->flags = chunk_ib->flags;
400 return 0;
401 }
402
403 static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
404 struct amdgpu_cs_chunk *chunk)
405 {
406 struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
407 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
408 unsigned int num_deps;
409 int i, r;
410
411 num_deps = chunk->length_dw * 4 /
412 sizeof(struct drm_amdgpu_cs_chunk_dep);
413
414 for (i = 0; i < num_deps; ++i) {
415 struct amdgpu_ctx *ctx;
416 struct drm_sched_entity *entity;
417 struct dma_fence *fence;
418
419 ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
420 if (ctx == NULL)
421 return -EINVAL;
422
423 r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
424 deps[i].ip_instance,
425 deps[i].ring, &entity);
426 if (r) {
427 amdgpu_ctx_put(ctx);
428 return r;
429 }
430
431 fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
432 amdgpu_ctx_put(ctx);
433
434 if (IS_ERR(fence))
435 return PTR_ERR(fence);
436 else if (!fence)
437 continue;
438
439 if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
440 struct drm_sched_fence *s_fence;
441 struct dma_fence *old = fence;
442
443 s_fence = to_drm_sched_fence(fence);
444 fence = dma_fence_get(&s_fence->scheduled);
445 dma_fence_put(old);
446 }
447
448 r = amdgpu_sync_fence(&p->sync, fence, GFP_KERNEL);
449 dma_fence_put(fence);
450 if (r)
451 return r;
452 }
453 return 0;
454 }
455
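/* Resolve a syncobj handle (and optional timeline point) to a fence and add
 * it to the CS sync object as a dependency.
 */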
456 static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
457 uint32_t handle, u64 point,
458 u64 flags)
459 {
460 struct dma_fence *fence;
461 int r;
462
463 r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
464 if (r) {
465 drm_err(adev_to_drm(p->adev), "syncobj %u failed to find fence @ %llu (%d)!\n",
466 handle, point, r);
467 return r;
468 }
469
470 r = amdgpu_sync_fence(&p->sync, fence, GFP_KERNEL);
471 dma_fence_put(fence);
472 return r;
473 }
474
475 static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
476 struct amdgpu_cs_chunk *chunk)
477 {
478 struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
479 unsigned int num_deps;
480 int i, r;
481
482 num_deps = chunk->length_dw * 4 /
483 sizeof(struct drm_amdgpu_cs_chunk_sem);
484 for (i = 0; i < num_deps; ++i) {
485 r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);
486 if (r)
487 return r;
488 }
489
490 return 0;
491 }
492
493 static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
494 struct amdgpu_cs_chunk *chunk)
495 {
496 struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
497 unsigned int num_deps;
498 int i, r;
499
500 num_deps = chunk->length_dw * 4 /
501 sizeof(struct drm_amdgpu_cs_chunk_syncobj);
502 for (i = 0; i < num_deps; ++i) {
503 r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
504 syncobj_deps[i].point,
505 syncobj_deps[i].flags);
506 if (r)
507 return r;
508 }
509
510 return 0;
511 }
512
513 static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
514 struct amdgpu_cs_chunk *chunk)
515 {
516 struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
517 unsigned int num_deps;
518 int i;
519
520 num_deps = chunk->length_dw * 4 /
521 sizeof(struct drm_amdgpu_cs_chunk_sem);
522
523 if (p->post_deps)
524 return -EINVAL;
525
526 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
527 GFP_KERNEL);
528 p->num_post_deps = 0;
529
530 if (!p->post_deps)
531 return -ENOMEM;
532
533
534 for (i = 0; i < num_deps; ++i) {
535 p->post_deps[i].syncobj =
536 drm_syncobj_find(p->filp, deps[i].handle);
537 if (!p->post_deps[i].syncobj)
538 return -EINVAL;
539 p->post_deps[i].chain = NULL;
540 p->post_deps[i].point = 0;
541 p->num_post_deps++;
542 }
543
544 return 0;
545 }
546
547 static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
548 struct amdgpu_cs_chunk *chunk)
549 {
550 struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
551 unsigned int num_deps;
552 int i;
553
554 num_deps = chunk->length_dw * 4 /
555 sizeof(struct drm_amdgpu_cs_chunk_syncobj);
556
557 if (p->post_deps)
558 return -EINVAL;
559
560 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
561 GFP_KERNEL);
562 p->num_post_deps = 0;
563
564 if (!p->post_deps)
565 return -ENOMEM;
566
567 for (i = 0; i < num_deps; ++i) {
568 struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
569
570 dep->chain = NULL;
571 if (syncobj_deps[i].point) {
572 dep->chain = dma_fence_chain_alloc();
573 if (!dep->chain)
574 return -ENOMEM;
575 }
576
577 dep->syncobj = drm_syncobj_find(p->filp,
578 syncobj_deps[i].handle);
579 if (!dep->syncobj) {
580 dma_fence_chain_free(dep->chain);
581 return -EINVAL;
582 }
583 dep->point = syncobj_deps[i].point;
584 p->num_post_deps++;
585 }
586
587 return 0;
588 }
589
590 static int amdgpu_cs_p2_shadow(struct amdgpu_cs_parser *p,
591 struct amdgpu_cs_chunk *chunk)
592 {
593 struct drm_amdgpu_cs_chunk_cp_gfx_shadow *shadow = chunk->kdata;
594 int i;
595
596 if (shadow->flags & ~AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW)
597 return -EINVAL;
598
599 for (i = 0; i < p->gang_size; ++i) {
600 p->jobs[i]->shadow_va = shadow->shadow_va;
601 p->jobs[i]->csa_va = shadow->csa_va;
602 p->jobs[i]->gds_va = shadow->gds_va;
603 p->jobs[i]->init_shadow =
604 shadow->flags & AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW;
605 }
606
607 return 0;
608 }
609
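/* Second pass over the copied chunks: fill the IBs and collect dependencies,
 * syncobj waits/signals and the CP GFX shadow setup.
 */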
610 static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
611 {
612 unsigned int ce_preempt = 0, de_preempt = 0;
613 int i, r;
614
615 for (i = 0; i < p->nchunks; ++i) {
616 struct amdgpu_cs_chunk *chunk;
617
618 chunk = &p->chunks[i];
619
620 switch (chunk->chunk_id) {
621 case AMDGPU_CHUNK_ID_IB:
622 r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);
623 if (r)
624 return r;
625 break;
626 case AMDGPU_CHUNK_ID_DEPENDENCIES:
627 case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
628 r = amdgpu_cs_p2_dependencies(p, chunk);
629 if (r)
630 return r;
631 break;
632 case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
633 r = amdgpu_cs_p2_syncobj_in(p, chunk);
634 if (r)
635 return r;
636 break;
637 case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
638 r = amdgpu_cs_p2_syncobj_out(p, chunk);
639 if (r)
640 return r;
641 break;
642 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
643 r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);
644 if (r)
645 return r;
646 break;
647 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
648 r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
649 if (r)
650 return r;
651 break;
652 case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
653 r = amdgpu_cs_p2_shadow(p, chunk);
654 if (r)
655 return r;
656 break;
657 }
658 }
659
660 return 0;
661 }
662
663 /* Convert microseconds to bytes. */
664 static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
665 {
666 if (us <= 0 || !adev->mm_stats.log2_max_MBps)
667 return 0;
668
669 /* Since accum_us is incremented by a million per second, just
670 * multiply it by the number of MB/s to get the number of bytes.
671 */
672 return us << adev->mm_stats.log2_max_MBps;
673 }
674
675 static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
676 {
677 if (!adev->mm_stats.log2_max_MBps)
678 return 0;
679
680 return bytes >> adev->mm_stats.log2_max_MBps;
681 }
682
683 /* Returns how many bytes TTM can move right now. If no bytes can be moved,
684 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
685 * which means it can go over the threshold once. If that happens, the driver
686 * will be in debt and no other buffer migrations can be done until that debt
687 * is repaid.
688 *
689 * This approach allows moving a buffer of any size (it's important to allow
690 * that).
691 *
692 * The currency is simply time in microseconds and it increases as the clock
693 * ticks. The accumulated microseconds (us) are converted to bytes and
694 * returned.
695 */
696 static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
697 u64 *max_bytes,
698 u64 *max_vis_bytes)
699 {
700 s64 time_us, increment_us;
701 u64 free_vram, total_vram, used_vram;
702 /* Allow a maximum of 200 accumulated ms. This is basically per-IB
703 * throttling.
704 *
705 * It means that in order to get full max MBps, at least 5 IBs per
706 * second must be submitted and not more than 200ms apart from each
707 * other.
708 */
709 const s64 us_upper_bound = 200000;
710
711 if ((!adev->mm_stats.log2_max_MBps) || !ttm_resource_manager_used(&adev->mman.vram_mgr.manager)) {
712 *max_bytes = 0;
713 *max_vis_bytes = 0;
714 return;
715 }
716
717 total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
718 used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
719 free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
720
721 spin_lock(&adev->mm_stats.lock);
722
723 /* Increase the amount of accumulated us. */
724 time_us = ktime_to_us(ktime_get());
725 increment_us = time_us - adev->mm_stats.last_update_us;
726 adev->mm_stats.last_update_us = time_us;
727 adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
728 us_upper_bound);
729
730 /* This prevents the short period of low performance when the VRAM
731 * usage is low and the driver is in debt or doesn't have enough
732 * accumulated us to fill VRAM quickly.
733 *
734 * The situation can occur in these cases:
735 * - a lot of VRAM is freed by userspace
736 * - the presence of a big buffer causes a lot of evictions
737 * (solution: split buffers into smaller ones)
738 *
739 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
740 * accum_us to a positive number.
741 */
742 if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
743 s64 min_us;
744
745 /* Be more aggressive on dGPUs. Try to fill a portion of free
746 * VRAM now.
747 */
748 if (!(adev->flags & AMD_IS_APU))
749 min_us = bytes_to_us(adev, free_vram / 4);
750 else
751 min_us = 0; /* Reset accum_us on APUs. */
752
753 adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
754 }
755
756 /* This is set to 0 if the driver is in debt to disallow (optional)
757 * buffer moves.
758 */
759 *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
760
761 /* Do the same for visible VRAM if half of it is free */
762 if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
763 u64 total_vis_vram = adev->gmc.visible_vram_size;
764 u64 used_vis_vram =
765 amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
766
767 if (used_vis_vram < total_vis_vram) {
768 u64 free_vis_vram = total_vis_vram - used_vis_vram;
769
770 adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
771 increment_us, us_upper_bound);
772
773 if (free_vis_vram >= total_vis_vram / 2)
774 adev->mm_stats.accum_us_vis =
775 max(bytes_to_us(adev, free_vis_vram / 2),
776 adev->mm_stats.accum_us_vis);
777 }
778
779 *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
780 } else {
781 *max_vis_bytes = 0;
782 }
783
784 spin_unlock(&adev->mm_stats.lock);
785 }
786
787 /* Report how many bytes have really been moved for the last command
788 * submission. This can result in a debt that can stop buffer migrations
789 * temporarily.
790 */
791 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
792 u64 num_vis_bytes)
793 {
794 spin_lock(&adev->mm_stats.lock);
795 adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
796 adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
797 spin_unlock(&adev->mm_stats.lock);
798 }
799
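/* Validate a BO for command submission, preferring its preferred domains only
 * while the per-submission byte-move budget has not been exhausted.
 */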
800 static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
801 {
802 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
803 struct amdgpu_cs_parser *p = param;
804 struct ttm_operation_ctx ctx = {
805 .interruptible = true,
806 .no_wait_gpu = false,
807 .resv = bo->tbo.base.resv
808 };
809 uint32_t domain;
810 int r;
811
812 if (bo->tbo.pin_count)
813 return 0;
814
815 /* Don't move this buffer if we have depleted our allowance
816 * to move it. Don't move anything if the threshold is zero.
817 */
818 if (p->bytes_moved < p->bytes_moved_threshold &&
819 (!bo->tbo.base.dma_buf ||
820 list_empty(&bo->tbo.base.dma_buf->attachments))) {
821 if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
822 (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
823 /* And don't move a CPU_ACCESS_REQUIRED BO to limited
824 * visible VRAM if we've depleted our allowance to do
825 * that.
826 */
827 if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
828 domain = bo->preferred_domains;
829 else
830 domain = bo->allowed_domains;
831 } else {
832 domain = bo->preferred_domains;
833 }
834 } else {
835 domain = bo->allowed_domains;
836 }
837
838 retry:
839 amdgpu_bo_placement_from_domain(bo, domain);
840 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
841
842 p->bytes_moved += ctx.bytes_moved;
843 if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
844 amdgpu_res_cpu_visible(adev, bo->tbo.resource))
845 p->bytes_moved_vis += ctx.bytes_moved;
846
847 if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
848 domain = bo->allowed_domains;
849 goto retry;
850 }
851
852 return r;
853 }
854
855 static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
856 union drm_amdgpu_cs *cs)
857 {
858 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
859 struct ttm_operation_ctx ctx = { true, false };
860 struct amdgpu_vm *vm = &fpriv->vm;
861 struct amdgpu_bo_list_entry *e;
862 struct drm_gem_object *obj;
863 unsigned long index;
864 unsigned int i;
865 int r;
866
867 /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
868 if (cs->in.bo_list_handle) {
869 if (p->bo_list)
870 return -EINVAL;
871
872 r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
873 &p->bo_list);
874 if (r)
875 return r;
876 } else if (!p->bo_list) {
877 /* Create an empty bo_list when no handle is provided */
878 r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
879 &p->bo_list);
880 if (r)
881 return r;
882 }
883
884 mutex_lock(&p->bo_list->bo_list_mutex);
885
886 /* Get userptr backing pages. If pages are updated after being registered
887 * in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do
888 * amdgpu_ttm_backend_bind() to flush and invalidate new pages
889 */
890 amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
891 bool userpage_invalidated = false;
892 struct amdgpu_bo *bo = e->bo;
893
894 r = amdgpu_ttm_tt_get_user_pages(bo, &e->range);
895 if (r)
896 goto out_free_user_pages;
897
898 for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
899 if (bo->tbo.ttm->pages[i] != hmm_pfn_to_page(e->range->hmm_pfns[i])) {
900 userpage_invalidated = true;
901 break;
902 }
903 }
904 e->user_invalidated = userpage_invalidated;
905 }
906
907 drm_exec_until_all_locked(&p->exec) {
908 r = amdgpu_vm_lock_pd(&fpriv->vm, &p->exec, 1 + p->gang_size);
909 drm_exec_retry_on_contention(&p->exec);
910 if (unlikely(r))
911 goto out_free_user_pages;
912
913 amdgpu_bo_list_for_each_entry(e, p->bo_list) {
914 /* One fence for TTM and one for each CS job */
915 r = drm_exec_prepare_obj(&p->exec, &e->bo->tbo.base,
916 1 + p->gang_size);
917 drm_exec_retry_on_contention(&p->exec);
918 if (unlikely(r))
919 goto out_free_user_pages;
920
921 e->bo_va = amdgpu_vm_bo_find(vm, e->bo);
922 }
923
924 if (p->uf_bo) {
925 r = drm_exec_prepare_obj(&p->exec, &p->uf_bo->tbo.base,
926 1 + p->gang_size);
927 drm_exec_retry_on_contention(&p->exec);
928 if (unlikely(r))
929 goto out_free_user_pages;
930 }
931 }
932
933 amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
934 struct mm_struct *usermm;
935
936 usermm = amdgpu_ttm_tt_get_usermm(e->bo->tbo.ttm);
937 if (usermm && usermm != current->mm) {
938 r = -EPERM;
939 goto out_free_user_pages;
940 }
941
942 if (amdgpu_ttm_tt_is_userptr(e->bo->tbo.ttm) &&
943 e->user_invalidated) {
944 amdgpu_bo_placement_from_domain(e->bo,
945 AMDGPU_GEM_DOMAIN_CPU);
946 r = ttm_bo_validate(&e->bo->tbo, &e->bo->placement,
947 &ctx);
948 if (r)
949 goto out_free_user_pages;
950
951 amdgpu_ttm_tt_set_user_pages(e->bo->tbo.ttm,
952 e->range);
953 }
954 }
955
956 amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
957 &p->bytes_moved_vis_threshold);
958 p->bytes_moved = 0;
959 p->bytes_moved_vis = 0;
960
961 r = amdgpu_vm_validate(p->adev, &fpriv->vm, NULL,
962 amdgpu_cs_bo_validate, p);
963 if (r) {
964 drm_err(adev_to_drm(p->adev), "amdgpu_vm_validate() failed.\n");
965 goto out_free_user_pages;
966 }
967
968 drm_exec_for_each_locked_object(&p->exec, index, obj) {
969 r = amdgpu_cs_bo_validate(p, gem_to_amdgpu_bo(obj));
970 if (unlikely(r))
971 goto out_free_user_pages;
972 }
973
974 if (p->uf_bo) {
975 r = amdgpu_ttm_alloc_gart(&p->uf_bo->tbo);
976 if (unlikely(r))
977 goto out_free_user_pages;
978
979 p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(p->uf_bo);
980 }
981
982 amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
983 p->bytes_moved_vis);
984
985 for (i = 0; i < p->gang_size; ++i)
986 amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,
987 p->bo_list->gws_obj,
988 p->bo_list->oa_obj);
989 return 0;
990
991 out_free_user_pages:
992 amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
993 struct amdgpu_bo *bo = e->bo;
994
995 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
996 e->range = NULL;
997 }
998 mutex_unlock(&p->bo_list->bo_list_mutex);
999 return r;
1000 }
1001
1002 static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
1003 {
1004 int i, j;
1005
1006 if (!trace_amdgpu_cs_enabled())
1007 return;
1008
1009 for (i = 0; i < p->gang_size; ++i) {
1010 struct amdgpu_job *job = p->jobs[i];
1011
1012 for (j = 0; j < job->num_ibs; ++j)
1013 trace_amdgpu_cs(p, job, &job->ibs[j]);
1014 }
1015 }
1016
1017 static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
1018 struct amdgpu_job *job)
1019 {
1020 struct amdgpu_ring *ring = amdgpu_job_ring(job);
1021 unsigned int i;
1022 int r;
1023
1024 /* Only for UVD/VCE VM emulation */
1025 if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
1026 return 0;
1027
1028 for (i = 0; i < job->num_ibs; ++i) {
1029 struct amdgpu_ib *ib = &job->ibs[i];
1030 struct amdgpu_bo_va_mapping *m;
1031 struct amdgpu_bo *aobj;
1032 uint64_t va_start;
1033 uint8_t *kptr;
1034
1035 va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
1036 r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
1037 if (r) {
1038 drm_err(adev_to_drm(p->adev), "IB va_start is invalid\n");
1039 return r;
1040 }
1041
1042 if ((va_start + ib->length_dw * 4) >
1043 (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
1044 drm_err(adev_to_drm(p->adev), "IB va_start+ib_bytes is invalid\n");
1045 return -EINVAL;
1046 }
1047
1048 /* the IB should be reserved at this point */
1049 r = amdgpu_bo_kmap(aobj, (void **)&kptr);
1050 if (r)
1051 return r;
1052
1053 kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);
1054
1055 if (ring->funcs->parse_cs) {
1056 memcpy(ib->ptr, kptr, ib->length_dw * 4);
1057 amdgpu_bo_kunmap(aobj);
1058
1059 r = amdgpu_ring_parse_cs(ring, p, job, ib);
1060 if (r)
1061 return r;
1062
1063 if (ib->sa_bo)
1064 ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
1065 } else {
1066 ib->ptr = (uint32_t *)kptr;
1067 r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
1068 amdgpu_bo_kunmap(aobj);
1069 if (r)
1070 return r;
1071 }
1072 }
1073
1074 return 0;
1075 }
1076
1077 static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
1078 {
1079 unsigned int i;
1080 int r;
1081
1082 for (i = 0; i < p->gang_size; ++i) {
1083 r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
1084 if (r)
1085 return r;
1086 }
1087 return 0;
1088 }
1089
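/* Update the page tables for all BOs used by this submission and add the
 * resulting page table updates as dependencies of the jobs.
 */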
1090 static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
1091 {
1092 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1093 struct amdgpu_job *job = p->gang_leader;
1094 struct amdgpu_device *adev = p->adev;
1095 struct amdgpu_vm *vm = &fpriv->vm;
1096 struct amdgpu_bo_list_entry *e;
1097 struct amdgpu_bo_va *bo_va;
1098 unsigned int i;
1099 int r;
1100
1101 /*
1102 * We can't use gang submit with reserved VMIDs when the VM changes
1103 * can't be invalidated by more than one engine at the same time.
1104 */
1105 if (p->gang_size > 1 && !adev->vm_manager.concurrent_flush) {
1106 for (i = 0; i < p->gang_size; ++i) {
1107 struct drm_sched_entity *entity = p->entities[i];
1108 struct drm_gpu_scheduler *sched = entity->rq->sched;
1109 struct amdgpu_ring *ring = to_amdgpu_ring(sched);
1110
1111 if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub))
1112 return -EINVAL;
1113 }
1114 }
1115
1116 if (!amdgpu_vm_ready(vm))
1117 return -EINVAL;
1118
1119 r = amdgpu_vm_clear_freed(adev, vm, NULL);
1120 if (r)
1121 return r;
1122
1123 r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
1124 if (r)
1125 return r;
1126
1127 r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update,
1128 GFP_KERNEL);
1129 if (r)
1130 return r;
1131
1132 if (fpriv->csa_va) {
1133 bo_va = fpriv->csa_va;
1134 BUG_ON(!bo_va);
1135 r = amdgpu_vm_bo_update(adev, bo_va, false);
1136 if (r)
1137 return r;
1138
1139 r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update,
1140 GFP_KERNEL);
1141 if (r)
1142 return r;
1143 }
1144
1145 /* FIXME: In theory this loop shouldn't be needed any more when
1146 * amdgpu_vm_handle_moved handles all moved BOs that are reserved
1147 * with p->ticket. But removing it caused test regressions, so I'm
1148 * leaving it here for now.
1149 */
1150 amdgpu_bo_list_for_each_entry(e, p->bo_list) {
1151 bo_va = e->bo_va;
1152 if (bo_va == NULL)
1153 continue;
1154
1155 r = amdgpu_vm_bo_update(adev, bo_va, false);
1156 if (r)
1157 return r;
1158
1159 r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update,
1160 GFP_KERNEL);
1161 if (r)
1162 return r;
1163 }
1164
1165 r = amdgpu_vm_handle_moved(adev, vm, &p->exec.ticket);
1166 if (r)
1167 return r;
1168
1169 r = amdgpu_vm_update_pdes(adev, vm, false);
1170 if (r)
1171 return r;
1172
1173 r = amdgpu_sync_fence(&p->sync, vm->last_update, GFP_KERNEL);
1174 if (r)
1175 return r;
1176
1177 for (i = 0; i < p->gang_size; ++i) {
1178 job = p->jobs[i];
1179
1180 if (!job->vm)
1181 continue;
1182
1183 job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
1184 }
1185
1186 if (adev->debug_vm) {
1187 /* Invalidate all BOs to test for userspace bugs */
1188 amdgpu_bo_list_for_each_entry(e, p->bo_list) {
1189 struct amdgpu_bo *bo = e->bo;
1190
1191 /* ignore duplicates */
1192 if (!bo)
1193 continue;
1194
1195 amdgpu_vm_bo_invalidate(bo, false);
1196 }
1197 }
1198
1199 return 0;
1200 }
1201
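/* Wait for the previous submission on this context and make the jobs depend
 * on the reservation objects of all locked BOs.
 */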
1202 static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
1203 {
1204 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1205 struct drm_gpu_scheduler *sched;
1206 struct drm_gem_object *obj;
1207 struct dma_fence *fence;
1208 unsigned long index;
1209 unsigned int i;
1210 int r;
1211
1212 r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
1213 if (r) {
1214 if (r != -ERESTARTSYS)
1215 drm_err(adev_to_drm(p->adev), "amdgpu_ctx_wait_prev_fence failed.\n");
1216 return r;
1217 }
1218
1219 drm_exec_for_each_locked_object(&p->exec, index, obj) {
1220 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
1221
1222 struct dma_resv *resv = bo->tbo.base.resv;
1223 enum amdgpu_sync_mode sync_mode;
1224
1225 sync_mode = amdgpu_bo_explicit_sync(bo) ?
1226 AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
1227 r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode,
1228 &fpriv->vm);
1229 if (r)
1230 return r;
1231 }
1232
1233 for (i = 0; i < p->gang_size; ++i) {
1234 r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]);
1235 if (r)
1236 return r;
1237 }
1238
1239 sched = p->gang_leader->base.entity->rq->sched;
1240 while ((fence = amdgpu_sync_get_fence(&p->sync))) {
1241 struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
1242
1243 /*
1244 * When we have a dependency it might be necessary to insert a
1245 * pipeline sync to make sure that all caches etc are flushed and the
1246 * next job actually sees the results from the previous one
1247 * before we start executing on the same scheduler ring.
1248 */
1249 if (!s_fence || s_fence->sched != sched) {
1250 dma_fence_put(fence);
1251 continue;
1252 }
1253
1254 r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence,
1255 GFP_KERNEL);
1256 dma_fence_put(fence);
1257 if (r)
1258 return r;
1259 }
1260 return 0;
1261 }
1262
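/* Install the submission fence into the syncobjs requested by the SYNCOBJ_OUT
 * and TIMELINE_SIGNAL chunks.
 */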
1263 static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
1264 {
1265 int i;
1266
1267 for (i = 0; i < p->num_post_deps; ++i) {
1268 if (p->post_deps[i].chain && p->post_deps[i].point) {
1269 drm_syncobj_add_point(p->post_deps[i].syncobj,
1270 p->post_deps[i].chain,
1271 p->fence, p->post_deps[i].point);
1272 p->post_deps[i].chain = NULL;
1273 } else {
1274 drm_syncobj_replace_fence(p->post_deps[i].syncobj,
1275 p->fence);
1276 }
1277 }
1278 }
1279
1280 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1281 union drm_amdgpu_cs *cs)
1282 {
1283 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1284 struct amdgpu_job *leader = p->gang_leader;
1285 struct amdgpu_bo_list_entry *e;
1286 struct drm_gem_object *gobj;
1287 unsigned long index;
1288 unsigned int i;
1289 uint64_t seq;
1290 int r;
1291
1292 for (i = 0; i < p->gang_size; ++i)
1293 drm_sched_job_arm(&p->jobs[i]->base);
1294
1295 for (i = 0; i < p->gang_size; ++i) {
1296 struct dma_fence *fence;
1297
1298 if (p->jobs[i] == leader)
1299 continue;
1300
1301 fence = &p->jobs[i]->base.s_fence->scheduled;
1302 dma_fence_get(fence);
1303 r = drm_sched_job_add_dependency(&leader->base, fence);
1304 if (r) {
1305 dma_fence_put(fence);
1306 return r;
1307 }
1308 }
1309
1310 if (p->gang_size > 1) {
1311 for (i = 0; i < p->gang_size; ++i)
1312 amdgpu_job_set_gang_leader(p->jobs[i], leader);
1313 }
1314
1315 /* No memory allocation is allowed while holding the notifier lock.
1316 * The lock is held until amdgpu_cs_submit is finished and fence is
1317 * added to BOs.
1318 */
1319 mutex_lock(&p->adev->notifier_lock);
1320
1321 /* If userptrs are invalidated after amdgpu_cs_parser_bos(), return
1322 * -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
1323 */
1324 r = 0;
1325 amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
1326 r |= !amdgpu_ttm_tt_get_user_pages_done(e->bo->tbo.ttm,
1327 e->range);
1328 e->range = NULL;
1329 }
1330 if (r) {
1331 r = -EAGAIN;
1332 mutex_unlock(&p->adev->notifier_lock);
1333 return r;
1334 }
1335
1336 p->fence = dma_fence_get(&leader->base.s_fence->finished);
1337 drm_exec_for_each_locked_object(&p->exec, index, gobj) {
1338
1339 ttm_bo_move_to_lru_tail_unlocked(&gem_to_amdgpu_bo(gobj)->tbo);
1340
1341 /* Everybody except for the gang leader uses READ */
1342 for (i = 0; i < p->gang_size; ++i) {
1343 if (p->jobs[i] == leader)
1344 continue;
1345
1346 dma_resv_add_fence(gobj->resv,
1347 &p->jobs[i]->base.s_fence->finished,
1348 DMA_RESV_USAGE_READ);
1349 }
1350
1351 /* The gang leader is remembered as the writer */
1352 dma_resv_add_fence(gobj->resv, p->fence, DMA_RESV_USAGE_WRITE);
1353 }
1354
1355 seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
1356 p->fence);
1357 amdgpu_cs_post_dependencies(p);
1358
1359 if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
1360 !p->ctx->preamble_presented) {
1361 leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
1362 p->ctx->preamble_presented = true;
1363 }
1364
1365 cs->out.handle = seq;
1366 leader->uf_sequence = seq;
1367
1368 amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->exec.ticket);
1369 for (i = 0; i < p->gang_size; ++i) {
1370 amdgpu_job_free_resources(p->jobs[i]);
1371 trace_amdgpu_cs_ioctl(p->jobs[i]);
1372 drm_sched_entity_push_job(&p->jobs[i]->base);
1373 p->jobs[i] = NULL;
1374 }
1375
1376 amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
1377
1378 mutex_unlock(&p->adev->notifier_lock);
1379 mutex_unlock(&p->bo_list->bo_list_mutex);
1380 return 0;
1381 }
1382
1383 /* Cleanup the parser structure */
1384 static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
1385 {
1386 unsigned int i;
1387
1388 amdgpu_sync_free(&parser->sync);
1389 drm_exec_fini(&parser->exec);
1390
1391 for (i = 0; i < parser->num_post_deps; i++) {
1392 drm_syncobj_put(parser->post_deps[i].syncobj);
1393 kfree(parser->post_deps[i].chain);
1394 }
1395 kfree(parser->post_deps);
1396
1397 dma_fence_put(parser->fence);
1398
1399 if (parser->ctx)
1400 amdgpu_ctx_put(parser->ctx);
1401 if (parser->bo_list)
1402 amdgpu_bo_list_put(parser->bo_list);
1403
1404 for (i = 0; i < parser->nchunks; i++)
1405 kvfree(parser->chunks[i].kdata);
1406 kvfree(parser->chunks);
1407 for (i = 0; i < parser->gang_size; ++i) {
1408 if (parser->jobs[i])
1409 amdgpu_job_free(parser->jobs[i]);
1410 }
1411 amdgpu_bo_unref(&parser->uf_bo);
1412 }
1413
1414 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1415 {
1416 struct amdgpu_device *adev = drm_to_adev(dev);
1417 struct amdgpu_cs_parser parser;
1418 int r;
1419
1420 if (amdgpu_ras_intr_triggered())
1421 return -EHWPOISON;
1422
1423 if (!adev->accel_working)
1424 return -EBUSY;
1425
1426 r = amdgpu_cs_parser_init(&parser, adev, filp, data);
1427 if (r) {
1428 drm_err_ratelimited(dev, "Failed to initialize parser %d!\n", r);
1429 return r;
1430 }
1431
1432 r = amdgpu_cs_pass1(&parser, data);
1433 if (r)
1434 goto error_fini;
1435
1436 r = amdgpu_cs_pass2(&parser);
1437 if (r)
1438 goto error_fini;
1439
1440 r = amdgpu_cs_parser_bos(&parser, data);
1441 if (r) {
1442 if (r == -ENOMEM)
1443 drm_err(dev, "Not enough memory for command submission!\n");
1444 else if (r != -ERESTARTSYS && r != -EAGAIN)
1445 drm_dbg(dev, "Failed to process the buffer list %d!\n", r);
1446 goto error_fini;
1447 }
1448
1449 r = amdgpu_cs_patch_jobs(&parser);
1450 if (r)
1451 goto error_backoff;
1452
1453 r = amdgpu_cs_vm_handling(&parser);
1454 if (r)
1455 goto error_backoff;
1456
1457 r = amdgpu_cs_sync_rings(&parser);
1458 if (r)
1459 goto error_backoff;
1460
1461 trace_amdgpu_cs_ibs(&parser);
1462
1463 r = amdgpu_cs_submit(&parser, data);
1464 if (r)
1465 goto error_backoff;
1466
1467 amdgpu_cs_parser_fini(&parser);
1468 return 0;
1469
1470 error_backoff:
1471 mutex_unlock(&parser.bo_list->bo_list_mutex);
1472
1473 error_fini:
1474 amdgpu_cs_parser_fini(&parser);
1475 return r;
1476 }
1477
1478 /**
1479 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
1480 *
1481 * @dev: drm device
1482 * @data: data from userspace
1483 * @filp: file private
1484 *
1485 * Wait for the command submission identified by handle to finish.
1486 */
1487 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
1488 struct drm_file *filp)
1489 {
1490 union drm_amdgpu_wait_cs *wait = data;
1491 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
1492 struct drm_sched_entity *entity;
1493 struct amdgpu_ctx *ctx;
1494 struct dma_fence *fence;
1495 long r;
1496
1497 ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
1498 if (ctx == NULL)
1499 return -EINVAL;
1500
1501 r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
1502 wait->in.ring, &entity);
1503 if (r) {
1504 amdgpu_ctx_put(ctx);
1505 return r;
1506 }
1507
1508 fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
1509 if (IS_ERR(fence))
1510 r = PTR_ERR(fence);
1511 else if (fence) {
1512 r = dma_fence_wait_timeout(fence, true, timeout);
1513 if (r > 0 && fence->error)
1514 r = fence->error;
1515 dma_fence_put(fence);
1516 } else
1517 r = 1;
1518
1519 amdgpu_ctx_put(ctx);
1520 if (r < 0)
1521 return r;
1522
1523 memset(wait, 0, sizeof(*wait));
1524 wait->out.status = (r == 0);
1525
1526 return 0;
1527 }
1528
1529 /**
1530 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
1531 *
1532 * @adev: amdgpu device
1533 * @filp: file private
1534 * @user: drm_amdgpu_fence copied from user space
1535 */
1536 static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
1537 struct drm_file *filp,
1538 struct drm_amdgpu_fence *user)
1539 {
1540 struct drm_sched_entity *entity;
1541 struct amdgpu_ctx *ctx;
1542 struct dma_fence *fence;
1543 int r;
1544
1545 ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
1546 if (ctx == NULL)
1547 return ERR_PTR(-EINVAL);
1548
1549 r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
1550 user->ring, &entity);
1551 if (r) {
1552 amdgpu_ctx_put(ctx);
1553 return ERR_PTR(r);
1554 }
1555
1556 fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
1557 amdgpu_ctx_put(ctx);
1558
1559 return fence;
1560 }
1561
1562 int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
1563 struct drm_file *filp)
1564 {
1565 struct amdgpu_device *adev = drm_to_adev(dev);
1566 union drm_amdgpu_fence_to_handle *info = data;
1567 struct dma_fence *fence;
1568 struct drm_syncobj *syncobj;
1569 struct sync_file *sync_file;
1570 int fd, r;
1571
1572 fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
1573 if (IS_ERR(fence))
1574 return PTR_ERR(fence);
1575
1576 if (!fence)
1577 fence = dma_fence_get_stub();
1578
1579 switch (info->in.what) {
1580 case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
1581 r = drm_syncobj_create(&syncobj, 0, fence);
1582 dma_fence_put(fence);
1583 if (r)
1584 return r;
1585 r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
1586 drm_syncobj_put(syncobj);
1587 return r;
1588
1589 case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
1590 r = drm_syncobj_create(&syncobj, 0, fence);
1591 dma_fence_put(fence);
1592 if (r)
1593 return r;
1594 r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
1595 drm_syncobj_put(syncobj);
1596 return r;
1597
1598 case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
1599 fd = get_unused_fd_flags(O_CLOEXEC);
1600 if (fd < 0) {
1601 dma_fence_put(fence);
1602 return fd;
1603 }
1604
1605 sync_file = sync_file_create(fence);
1606 dma_fence_put(fence);
1607 if (!sync_file) {
1608 put_unused_fd(fd);
1609 return -ENOMEM;
1610 }
1611
1612 fd_install(fd, sync_file->file);
1613 info->out.handle = fd;
1614 return 0;
1615
1616 default:
1617 dma_fence_put(fence);
1618 return -EINVAL;
1619 }
1620 }
1621
1622 /**
1623 * amdgpu_cs_wait_all_fences - wait on all fences to signal
1624 *
1625 * @adev: amdgpu device
1626 * @filp: file private
1627 * @wait: wait parameters
1628 * @fences: array of drm_amdgpu_fence
1629 */
1630 static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1631 struct drm_file *filp,
1632 union drm_amdgpu_wait_fences *wait,
1633 struct drm_amdgpu_fence *fences)
1634 {
1635 uint32_t fence_count = wait->in.fence_count;
1636 unsigned int i;
1637 long r = 1;
1638
1639 for (i = 0; i < fence_count; i++) {
1640 struct dma_fence *fence;
1641 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1642
1643 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1644 if (IS_ERR(fence))
1645 return PTR_ERR(fence);
1646 else if (!fence)
1647 continue;
1648
1649 r = dma_fence_wait_timeout(fence, true, timeout);
1650 if (r > 0 && fence->error)
1651 r = fence->error;
1652
1653 dma_fence_put(fence);
1654 if (r < 0)
1655 return r;
1656
1657 if (r == 0)
1658 break;
1659 }
1660
1661 memset(wait, 0, sizeof(*wait));
1662 wait->out.status = (r > 0);
1663
1664 return 0;
1665 }
1666
1667 /**
1668 * amdgpu_cs_wait_any_fence - wait on any fence to signal
1669 *
1670 * @adev: amdgpu device
1671 * @filp: file private
1672 * @wait: wait parameters
1673 * @fences: array of drm_amdgpu_fence
1674 */
1675 static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
1676 struct drm_file *filp,
1677 union drm_amdgpu_wait_fences *wait,
1678 struct drm_amdgpu_fence *fences)
1679 {
1680 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1681 uint32_t fence_count = wait->in.fence_count;
1682 uint32_t first = ~0;
1683 struct dma_fence **array;
1684 unsigned int i;
1685 long r;
1686
1687 /* Prepare the fence array */
1688 array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
1689
1690 if (array == NULL)
1691 return -ENOMEM;
1692
1693 for (i = 0; i < fence_count; i++) {
1694 struct dma_fence *fence;
1695
1696 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1697 if (IS_ERR(fence)) {
1698 r = PTR_ERR(fence);
1699 goto err_free_fence_array;
1700 } else if (fence) {
1701 array[i] = fence;
1702 } else { /* NULL, the fence has been already signaled */
1703 r = 1;
1704 first = i;
1705 goto out;
1706 }
1707 }
1708
1709 r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
1710 &first);
1711 if (r < 0)
1712 goto err_free_fence_array;
1713
1714 out:
1715 memset(wait, 0, sizeof(*wait));
1716 wait->out.status = (r > 0);
1717 wait->out.first_signaled = first;
1718
1719 if (first < fence_count && array[first])
1720 r = array[first]->error;
1721 else
1722 r = 0;
1723
1724 err_free_fence_array:
1725 for (i = 0; i < fence_count; i++)
1726 dma_fence_put(array[i]);
1727 kfree(array);
1728
1729 return r;
1730 }
1731
1732 /**
1733 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
1734 *
1735 * @dev: drm device
1736 * @data: data from userspace
1737 * @filp: file private
1738 */
1739 int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1740 struct drm_file *filp)
1741 {
1742 struct amdgpu_device *adev = drm_to_adev(dev);
1743 union drm_amdgpu_wait_fences *wait = data;
1744 struct drm_amdgpu_fence *fences;
1745 int r;
1746
1747 /* Get the fences from userspace */
1748 fences = memdup_array_user(u64_to_user_ptr(wait->in.fences),
1749 wait->in.fence_count,
1750 sizeof(struct drm_amdgpu_fence));
1751 if (IS_ERR(fences))
1752 return PTR_ERR(fences);
1753
1754 if (wait->in.wait_all)
1755 r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
1756 else
1757 r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
1758
1759 kfree(fences);
1760
1761 return r;
1762 }
1763
1764 /**
1765 * amdgpu_cs_find_mapping - find bo_va for VM address
1766 *
1767 * @parser: command submission parser context
1768 * @addr: VM address
1769 * @bo: resulting BO of the mapping found
1770 * @map: Placeholder to return found BO mapping
1771 *
1772 * Search the buffer objects in the command submission context for a certain
1773 * virtual memory address. Returns 0 on success with @bo and @map filled
1774 * in, a negative error code otherwise.
1775 */
1776 int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1777 uint64_t addr, struct amdgpu_bo **bo,
1778 struct amdgpu_bo_va_mapping **map)
1779 {
1780 struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
1781 struct ttm_operation_ctx ctx = { false, false };
1782 struct amdgpu_vm *vm = &fpriv->vm;
1783 struct amdgpu_bo_va_mapping *mapping;
1784 int i, r;
1785
1786 addr /= AMDGPU_GPU_PAGE_SIZE;
1787
1788 mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
1789 if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
1790 return -EINVAL;
1791
1792 *bo = mapping->bo_va->base.bo;
1793 *map = mapping;
1794
1795 /* Double check that the BO is reserved by this CS */
1796 if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
1797 return -EINVAL;
1798
1799 /* Make sure VRAM is allocated contiguously */
1800 (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1801 if ((*bo)->tbo.resource->mem_type == TTM_PL_VRAM &&
1802 !((*bo)->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS)) {
1803
1804 amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
1805 for (i = 0; i < (*bo)->placement.num_placement; i++)
1806 (*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
1807 r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
1808 if (r)
1809 return r;
1810 }
1811
1812 return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
1813 }
1814