/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */

#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include <drm/ttm/ttm_tt.h>

#include "amdgpu_cs.h"
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"
#include "amdgpu_hmm.h"

static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
				 struct amdgpu_device *adev,
				 struct drm_file *filp,
				 union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	if (cs->in.num_chunks == 0)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->adev = adev;
	p->filp = filp;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx)
		return -EINVAL;

	if (atomic_read(&p->ctx->guilty)) {
		amdgpu_ctx_put(p->ctx);
		return -ECANCELED;
	}

	amdgpu_sync_create(&p->sync);
	drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
		      DRM_EXEC_IGNORE_DUPLICATES, 0);
	return 0;
}
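
/*
 * Map an IB chunk to a job slot within the gang: look up the scheduler
 * entity for the requested IP type/instance/ring and either reuse the job
 * that already targets this entity or claim the next free gang slot for it.
 */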

static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
			     struct drm_amdgpu_cs_chunk_ib *chunk_ib)
{
	struct drm_sched_entity *entity;
	unsigned int i;
	int r;

	r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
				  chunk_ib->ip_instance,
				  chunk_ib->ring, &entity);
	if (r)
		return r;

	/*
	 * Abort if there is no run queue associated with this entity.
	 * Possibly because of disabled HW IP.
	 */
	if (entity->rq == NULL)
		return -EINVAL;

	/* Check if we can add this IB to some existing job */
	for (i = 0; i < p->gang_size; ++i)
		if (p->entities[i] == entity)
			return i;

	/* If not, increase the gang size if possible */
	if (i == AMDGPU_CS_GANG_SIZE)
		return -EINVAL;

	p->entities[i] = entity;
	p->gang_size = i + 1;
	return i;
}

static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
			   struct drm_amdgpu_cs_chunk_ib *chunk_ib,
			   unsigned int *num_ibs)
{
	int r;

	r = amdgpu_cs_job_idx(p, chunk_ib);
	if (r < 0)
		return r;

	if (num_ibs[r] >= amdgpu_ring_max_ibs(chunk_ib->ip_type))
		return -EINVAL;

	++(num_ibs[r]);
	p->gang_leader_idx = r;
	return 0;
}

static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
				   struct drm_amdgpu_cs_chunk_fence *data,
				   uint32_t *offset)
{
	struct drm_gem_object *gobj;
	unsigned long size;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	p->uf_bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	drm_gem_object_put(gobj);

	size = amdgpu_bo_size(p->uf_bo);
	if (size != PAGE_SIZE || data->offset > (size - 8))
		return -EINVAL;

	if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm))
		return -EINVAL;

	*offset = data->offset;
	return 0;
}

static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
				   struct drm_amdgpu_bo_list_in *data)
{
	struct drm_amdgpu_bo_list_entry *info;
	int r;

	r = amdgpu_bo_create_list_entry_array(data, &info);
	if (r)
		return r;

	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
				  &p->bo_list);
	if (r)
		goto error_free;

	kvfree(info);
	return 0;

error_free:
	kvfree(info);

	return r;
}
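
/*
 * Input layout consumed by the first pass below: cs->in.chunks points to an
 * array of cs->in.num_chunks user addresses, each referring to a
 * struct drm_amdgpu_cs_chunk whose chunk_data in turn points to length_dw
 * dwords of payload (e.g. a drm_amdgpu_cs_chunk_ib for AMDGPU_CHUNK_ID_IB).
 * A rough, illustrative userspace view (not verbatim libdrm code):
 *
 *	struct drm_amdgpu_cs_chunk_ib ib = { .ip_type = AMDGPU_HW_IP_GFX, ... };
 *	struct drm_amdgpu_cs_chunk chunk = {
 *		.chunk_id   = AMDGPU_CHUNK_ID_IB,
 *		.length_dw  = sizeof(ib) / 4,
 *		.chunk_data = (uintptr_t)&ib,
 *	};
 *	uint64_t chunks[] = { (uintptr_t)&chunk };
 *	cs.in.chunks = (uintptr_t)chunks;
 *	cs.in.num_chunks = 1;
 */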

/* Copy the data from userspace and go over it the first time */
static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
			   union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
	struct amdgpu_vm *vm = &fpriv->vm;
	uint64_t *chunk_array;
	uint32_t uf_offset = 0;
	size_t size;
	int ret;
	int i;

	chunk_array = memdup_array_user(u64_to_user_ptr(cs->in.chunks),
					cs->in.num_chunks,
					sizeof(uint64_t));
	if (IS_ERR(chunk_array))
		return PTR_ERR(chunk_array);

	p->nchunks = cs->in.num_chunks;
	p->chunks = kvmalloc_objs(struct amdgpu_cs_chunk, p->nchunks);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto free_chunk;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;

		p->chunks[i].kdata = vmemdup_array_user(u64_to_user_ptr(user_chunk.chunk_data),
							size,
							sizeof(uint32_t));
		if (IS_ERR(p->chunks[i].kdata)) {
			ret = PTR_ERR(p->chunks[i].kdata);
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);

		/* Assume the worst on the following checks */
		ret = -EINVAL;
		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
				goto free_partial_kdata;

			ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
			if (ret)
				goto free_partial_kdata;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
				goto free_partial_kdata;

			ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
						      &uf_offset);
			if (ret)
				goto free_partial_kdata;
			break;

		case AMDGPU_CHUNK_ID_BO_HANDLES:
			if (size < sizeof(struct drm_amdgpu_bo_list_in))
				goto free_partial_kdata;

			/* Only a single BO list is allowed to simplify handling. */
			if (p->bo_list)
				goto free_partial_kdata;

			ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
			if (ret)
				goto free_partial_kdata;
			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
		case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
			break;

		default:
			goto free_partial_kdata;
		}
	}

	if (!p->gang_size || (amdgpu_sriov_vf(p->adev) && p->gang_size > 1)) {
		ret = -EINVAL;
		goto free_all_kdata;
	}

	for (i = 0; i < p->gang_size; ++i) {
		ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
				       num_ibs[i], &p->jobs[i],
				       p->filp->client_id);
		if (ret)
			goto free_all_kdata;
		switch (p->adev->enforce_isolation[fpriv->xcp_id]) {
		case AMDGPU_ENFORCE_ISOLATION_DISABLE:
		default:
			p->jobs[i]->enforce_isolation = false;
			p->jobs[i]->run_cleaner_shader = false;
			break;
		case AMDGPU_ENFORCE_ISOLATION_ENABLE:
			p->jobs[i]->enforce_isolation = true;
			p->jobs[i]->run_cleaner_shader = true;
			break;
		case AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY:
			p->jobs[i]->enforce_isolation = true;
			p->jobs[i]->run_cleaner_shader = false;
			break;
		case AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER:
			p->jobs[i]->enforce_isolation = true;
			p->jobs[i]->run_cleaner_shader = false;
			break;
		}
	}
	p->gang_leader = p->jobs[p->gang_leader_idx];

	if (p->ctx->generation != p->gang_leader->generation) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_bo)
		p->gang_leader->uf_addr = uf_offset;
	kvfree(chunk_array);

	/* Use this opportunity to fill in task info for the vm */
	amdgpu_vm_set_task_info(vm);

	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kvfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kvfree(chunk_array);

	return ret;
}
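
/*
 * Second pass over the IB chunks: now that the gang jobs have been allocated
 * in pass 1, fill in the actual amdgpu_ib structures and validate the
 * per-ring restrictions (user fences, CE/DE preemption, kernel queues).
 */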

static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
			   struct amdgpu_cs_chunk *chunk,
			   unsigned int *ce_preempt,
			   unsigned int *de_preempt)
{
	struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_ring *ring;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	int r;

	r = amdgpu_cs_job_idx(p, chunk_ib);
	if (r < 0)
		return r;

	job = p->jobs[r];
	ring = amdgpu_job_ring(job);
	ib = &job->ibs[job->num_ibs++];

	/* submissions to kernel queues are disabled */
	if (ring->no_user_submission)
		return -EINVAL;

	/* MM engine doesn't support user fences */
	if (p->uf_bo && ring->funcs->no_user_fence)
		return -EINVAL;

	if (!p->adev->debug_enable_ce_cs &&
	    chunk_ib->flags & AMDGPU_IB_FLAG_CE) {
		dev_err_ratelimited(p->adev->dev, "CE CS is blocked, use debug=0x400 to override\n");
		return -EINVAL;
	}

	if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
	    chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
		if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
			(*ce_preempt)++;
		else
			(*de_preempt)++;

		/* Each GFX command submission allows at most one preemptible
		 * IB each for CE and DE.
		 */
		if (*ce_preempt > 1 || *de_preempt > 1)
			return -EINVAL;
	}

	if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;

	r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
			  chunk_ib->ib_bytes : 0,
			  AMDGPU_IB_POOL_DELAYED, ib);
	if (r) {
		drm_err(adev_to_drm(p->adev), "Failed to get ib !\n");
		return r;
	}

	ib->gpu_addr = chunk_ib->va_start;
	ib->length_dw = chunk_ib->ib_bytes / 4;
	ib->flags = chunk_ib->flags;
	return 0;
}

static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
				     struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned int num_deps;
	int i, r;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ctx *ctx;
		struct drm_sched_entity *entity;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;

		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
					  deps[i].ip_instance,
					  deps[i].ring, &entity);
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}

		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
		amdgpu_ctx_put(ctx);

		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
			struct drm_sched_fence *s_fence;
			struct dma_fence *old = fence;

			s_fence = to_drm_sched_fence(fence);
			fence = dma_fence_get(&s_fence->scheduled);
			dma_fence_put(old);
		}

		r = amdgpu_sync_fence(&p->sync, fence, GFP_KERNEL);
		dma_fence_put(fence);
		if (r)
			return r;
	}
	return 0;
}
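
/*
 * Resolve a syncobj handle (optionally at a timeline point) to a fence and
 * record it as a dependency of this submission.
 */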

static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
					 uint32_t handle, u64 point,
					 u64 flags)
{
	struct dma_fence *fence;
	int r;

	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
	if (r) {
		drm_err(adev_to_drm(p->adev), "syncobj %u failed to find fence @ %llu (%d)!\n",
			handle, point, r);
		return r;
	}

	r = amdgpu_sync_fence(&p->sync, fence, GFP_KERNEL);
	dma_fence_put(fence);
	return r;
}

static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
				   struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
	unsigned int num_deps;
	int i, r;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);
		if (r)
			return r;
	}

	return 0;
}

static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
					      struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
	unsigned int num_deps;
	int i, r;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
						  syncobj_deps[i].point,
						  syncobj_deps[i].flags);
		if (r)
			return r;
	}

	return 0;
}

static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
				    struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
	unsigned int num_deps;
	int i;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_objs(*p->post_deps, num_deps);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		p->post_deps[i].syncobj =
			drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_deps[i].syncobj)
			return -EINVAL;
		p->post_deps[i].chain = NULL;
		p->post_deps[i].point = 0;
		p->num_post_deps++;
	}

	return 0;
}

static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
						struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
	unsigned int num_deps;
	int i;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_objs(*p->post_deps, num_deps);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];

		dep->chain = NULL;
		if (syncobj_deps[i].point) {
			dep->chain = dma_fence_chain_alloc();
			if (!dep->chain)
				return -ENOMEM;
		}

		dep->syncobj = drm_syncobj_find(p->filp,
						syncobj_deps[i].handle);
		if (!dep->syncobj) {
			dma_fence_chain_free(dep->chain);
			return -EINVAL;
		}
		dep->point = syncobj_deps[i].point;
		p->num_post_deps++;
	}

	return 0;
}

static int amdgpu_cs_p2_shadow(struct amdgpu_cs_parser *p,
			       struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_cp_gfx_shadow *shadow = chunk->kdata;
	int i;

	if (shadow->flags & ~AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW)
		return -EINVAL;

	for (i = 0; i < p->gang_size; ++i) {
		p->jobs[i]->shadow_va = shadow->shadow_va;
		p->jobs[i]->csa_va = shadow->csa_va;
		p->jobs[i]->gds_va = shadow->gds_va;
		p->jobs[i]->init_shadow =
			shadow->flags & AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW;
	}

	return 0;
}
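
/*
 * Second parser pass: dispatch each chunk copied in pass 1 to its handler
 * now that the gang jobs exist.
 */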

static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
{
	unsigned int ce_preempt = 0, de_preempt = 0;
	int i, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		switch (chunk->chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
			r = amdgpu_cs_p2_dependencies(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
			r = amdgpu_cs_p2_syncobj_in(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
			r = amdgpu_cs_p2_syncobj_out(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
			r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
			r = amdgpu_cs_p2_shadow(p, chunk);
			if (r)
				return r;
			break;
		}
	}

	return 0;
}

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}
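
/*
 * Illustrative numbers (assuming log2_max_MBps = 6, i.e. roughly 64 MB/s):
 * accum_us grows by about one million per second, so the 200 ms accumulation
 * cap used below corresponds to us_to_bytes(adev, 200000) = 200000 << 6
 * bytes, i.e. roughly 12.8 MB of buffer moves that a single submission may
 * trigger before throttling kicks in.
 */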

/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;
	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if ((!adev->mm_stats.log2_max_MBps) || !ttm_resource_manager_used(&adev->mman.vram_mgr.manager)) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
	used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;

			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
}
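
/*
 * Per-BO validation callback, used both as the amdgpu_vm_validate() callback
 * and for every BO locked by the parser: place the buffer in its preferred
 * domain while the move budget computed above allows it, otherwise fall back
 * to the allowed domains.
 */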

static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_cs_parser *p = param;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->tbo.base.resv
	};
	uint32_t domain;
	int r;

	if (bo->tbo.pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold &&
	    (!bo->tbo.base.dma_buf ||
	     list_empty(&bo->tbo.base.dma_buf->attachments))) {
		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance to do
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	p->bytes_moved += ctx.bytes_moved;
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_res_cpu_visible(adev, bo->tbo.resource))
		p->bytes_moved_vis += ctx.bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}
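
/*
 * Lock and validate all buffers referenced by the submission: resolve the
 * BO list, grab the userptr pages, lock everything with drm_exec and then
 * validate the BOs against the move thresholds computed above.
 */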

static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct drm_gem_object *obj;
	unsigned long index;
	unsigned int i;
	int r;

	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
	if (cs->in.bo_list_handle) {
		if (p->bo_list)
			return -EINVAL;

		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
				       &p->bo_list);
		if (r)
			return r;
	} else if (!p->bo_list) {
		/* Create an empty bo_list when no handle is provided */
		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
					  &p->bo_list);
		if (r)
			return r;
	}

	mutex_lock(&p->bo_list->bo_list_mutex);

	/* Get userptr backing pages. If pages are updated after being
	 * registered in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate()
	 * will do amdgpu_ttm_backend_bind() to flush and invalidate new pages
	 */
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		bool userpage_invalidated = false;
		struct amdgpu_bo *bo = e->bo;

		e->range = amdgpu_hmm_range_alloc(NULL);
		if (unlikely(!e->range)) {
			r = -ENOMEM;
			goto out_free_user_pages;
		}

		r = amdgpu_ttm_tt_get_user_pages(bo, e->range);
		if (r)
			goto out_free_user_pages;

		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
			if (bo->tbo.ttm->pages[i] !=
			    hmm_pfn_to_page(e->range->hmm_range.hmm_pfns[i])) {
				userpage_invalidated = true;
				break;
			}
		}
		e->user_invalidated = userpage_invalidated;
	}

	drm_exec_until_all_locked(&p->exec) {
		r = amdgpu_vm_lock_pd(&fpriv->vm, &p->exec, 1 + p->gang_size);
		drm_exec_retry_on_contention(&p->exec);
		if (unlikely(r))
			goto out_free_user_pages;

		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			/* One fence for TTM and one for each CS job */
			r = drm_exec_prepare_obj(&p->exec, &e->bo->tbo.base,
						 1 + p->gang_size);
			drm_exec_retry_on_contention(&p->exec);
			if (unlikely(r))
				goto out_free_user_pages;

			e->bo_va = amdgpu_vm_bo_find(vm, e->bo);
		}

		if (p->uf_bo) {
			r = drm_exec_prepare_obj(&p->exec, &p->uf_bo->tbo.base,
						 1 + p->gang_size);
			drm_exec_retry_on_contention(&p->exec);
			if (unlikely(r))
				goto out_free_user_pages;
		}
	}

	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(e->bo->tbo.ttm);
		if (usermm && usermm != current->mm) {
			r = -EPERM;
			goto out_free_user_pages;
		}

		if (amdgpu_ttm_tt_is_userptr(e->bo->tbo.ttm) &&
		    e->user_invalidated) {
			amdgpu_bo_placement_from_domain(e->bo,
							AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&e->bo->tbo, &e->bo->placement,
					    &ctx);
			if (r)
				goto out_free_user_pages;

			amdgpu_ttm_tt_set_user_pages(e->bo->tbo.ttm,
						     e->range);
		}
	}

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved = 0;
	p->bytes_moved_vis = 0;

	r = amdgpu_vm_validate(p->adev, &fpriv->vm, NULL,
			       amdgpu_cs_bo_validate, p);
	if (r) {
		drm_err(adev_to_drm(p->adev), "amdgpu_vm_validate() failed.\n");
		goto out_free_user_pages;
	}

	drm_exec_for_each_locked_object(&p->exec, index, obj) {
		r = amdgpu_cs_bo_validate(p, gem_to_amdgpu_bo(obj));
		if (unlikely(r))
			goto out_free_user_pages;
	}

	if (p->uf_bo) {
		r = amdgpu_ttm_alloc_gart(&p->uf_bo->tbo);
		if (unlikely(r))
			goto out_free_user_pages;

		p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(p->uf_bo);
	}

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
				     p->bytes_moved_vis);

	for (i = 0; i < p->gang_size; ++i)
		amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,
					 p->bo_list->gws_obj,
					 p->bo_list->oa_obj);
	return 0;

out_free_user_pages:
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		amdgpu_hmm_range_free(e->range);
		e->range = NULL;
	}
	mutex_unlock(&p->bo_list->bo_list_mutex);
	return r;
}

static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
{
	int i, j;

	if (!trace_amdgpu_cs_enabled())
		return;

	for (i = 0; i < p->gang_size; ++i) {
		struct amdgpu_job *job = p->jobs[i];

		for (j = 0; j < job->num_ibs; ++j)
			trace_amdgpu_cs(p, job, &job->ibs[j]);
	}
}

static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
			       struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = amdgpu_job_ring(job);
	struct amdgpu_device *adev = ring->adev;
	unsigned int i;
	int r;

	/* Only for UVD/VCE VM emulation */
	if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
		return 0;

	for (i = 0; i < job->num_ibs; ++i) {
		struct amdgpu_ib *ib = &job->ibs[i];
		struct amdgpu_bo_va_mapping *m;
		struct amdgpu_bo *aobj;
		uint64_t va_start;
		uint8_t *kptr;

		va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
		r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
		if (r) {
			drm_err(adev_to_drm(p->adev), "IB va_start is invalid\n");
			return r;
		}

		if ((va_start + ib->length_dw * 4) >
		    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
			drm_err(adev_to_drm(p->adev), "IB va_start+ib_bytes is invalid\n");
			return -EINVAL;
		}

		/* the IB should be reserved at this point */
		r = amdgpu_bo_kmap(aobj, (void **)&kptr);
		if (r)
			return r;

		kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);

		if (ring->funcs->parse_cs) {
			memcpy(ib->ptr, kptr, ib->length_dw * 4);
			amdgpu_bo_kunmap(aobj);

			r = amdgpu_ring_parse_cs(ring, p, job, ib);
			if (r)
				return r;

			if (ib->sa_bo)
				ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
		} else {
			ib->ptr = (uint32_t *)kptr;
			r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
			amdgpu_bo_kunmap(aobj);
			if (r)
				return r;
		}
	}

	return 0;
}

static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
{
	unsigned int i;
	int r;

	for (i = 0; i < p->gang_size; ++i) {
		r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
		if (r)
			return r;
	}
	return 0;
}
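
/*
 * Bring the page tables of the VM up to date for this submission and add the
 * resulting page table updates as dependencies of the jobs.
 */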

static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_job *job = p->gang_leader;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_bo_va *bo_va;
	unsigned int i;
	int r;

	/*
	 * We can't use gang submit together with reserved VMIDs when the VM
	 * changes can't be invalidated by more than one engine at the same
	 * time.
	 */
	if (p->gang_size > 1 && !adev->vm_manager.concurrent_flush) {
		for (i = 0; i < p->gang_size; ++i) {
			struct drm_sched_entity *entity = p->entities[i];
			struct drm_gpu_scheduler *sched = entity->rq->sched;
			struct amdgpu_ring *ring = to_amdgpu_ring(sched);

			if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub))
				return -EINVAL;
		}
	}

	if (!amdgpu_vm_ready(vm))
		return -EINVAL;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update,
			      GFP_KERNEL);
	if (r)
		return r;

	if (fpriv->csa_va) {
		bo_va = fpriv->csa_va;
		BUG_ON(!bo_va);
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update,
				      GFP_KERNEL);
		if (r)
			return r;
	}

	/* FIXME: In theory this loop shouldn't be needed any more when
	 * amdgpu_vm_handle_moved handles all moved BOs that are reserved
	 * with p->ticket. But removing it caused test regressions, so I'm
	 * leaving it here for now.
	 */
	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		bo_va = e->bo_va;
		if (bo_va == NULL)
			continue;

		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update,
				      GFP_KERNEL);
		if (r)
			return r;
	}

	r = amdgpu_vm_handle_moved(adev, vm, &p->exec.ticket);
	if (r)
		return r;

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(&p->sync, vm->last_update, GFP_KERNEL);
	if (r)
		return r;

	for (i = 0; i < p->gang_size; ++i) {
		job = p->jobs[i];

		if (!job->vm)
			continue;

		job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
	}

	if (adev->debug_vm) {
		/* Invalidate all BOs to test for userspace bugs */
		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = e->bo;

			/* ignore duplicates */
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(bo, false);
		}
	}

	return 0;
}
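
/*
 * Wait for the previous submission of this context entity, sync the jobs to
 * the reservations of all locked BOs and keep dependencies that run on the
 * gang leader's own scheduler as explicit sync so a pipeline sync can be
 * emitted for them.
 */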

static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct drm_gpu_scheduler *sched;
	struct drm_gem_object *obj;
	struct dma_fence *fence;
	unsigned long index;
	unsigned int i;
	int r;

	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
	if (r) {
		if (r != -ERESTARTSYS)
			drm_err(adev_to_drm(p->adev), "amdgpu_ctx_wait_prev_fence failed.\n");
		return r;
	}

	drm_exec_for_each_locked_object(&p->exec, index, obj) {
		struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

		struct dma_resv *resv = bo->tbo.base.resv;
		enum amdgpu_sync_mode sync_mode;

		sync_mode = amdgpu_bo_explicit_sync(bo) ?
			AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
		r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode,
				     &fpriv->vm);
		if (r)
			return r;
	}

	for (i = 0; i < p->gang_size; ++i) {
		r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]);
		if (r)
			return r;
	}

	sched = p->gang_leader->base.entity->rq->sched;
	while ((fence = amdgpu_sync_get_fence(&p->sync))) {
		struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);

		/*
		 * When we have a dependency it might be necessary to insert a
		 * pipeline sync to make sure that all caches etc. are flushed and
		 * the next job actually sees the results from the previous one
		 * before we start executing on the same scheduler ring.
		 */
		if (!s_fence || s_fence->sched != sched) {
			dma_fence_put(fence);
			continue;
		}

		r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence,
				      GFP_KERNEL);
		dma_fence_put(fence);
		if (r)
			return r;
	}
	return 0;
}

static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
	int i;

	for (i = 0; i < p->num_post_deps; ++i) {
		if (p->post_deps[i].chain && p->post_deps[i].point) {
			drm_syncobj_add_point(p->post_deps[i].syncobj,
					      p->post_deps[i].chain,
					      p->fence, p->post_deps[i].point);
			p->post_deps[i].chain = NULL;
		} else {
			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
						  p->fence);
		}
	}
}

static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_job *leader = p->gang_leader;
	struct amdgpu_bo_list_entry *e;
	struct drm_gem_object *gobj;
	unsigned long index;
	unsigned int i;
	uint64_t seq;
	int r;

	for (i = 0; i < p->gang_size; ++i)
		drm_sched_job_arm(&p->jobs[i]->base);

	for (i = 0; i < p->gang_size; ++i) {
		struct dma_fence *fence;

		if (p->jobs[i] == leader)
			continue;

		fence = &p->jobs[i]->base.s_fence->scheduled;
		dma_fence_get(fence);
		r = drm_sched_job_add_dependency(&leader->base, fence);
		if (r) {
			dma_fence_put(fence);
			return r;
		}
	}

	if (p->gang_size > 1) {
		for (i = 0; i < p->gang_size; ++i)
			amdgpu_job_set_gang_leader(p->jobs[i], leader);
	}

	/* No memory allocation is allowed while holding the notifier lock.
	 * The lock is held until amdgpu_cs_submit is finished and fence is
	 * added to BOs.
	 */
	mutex_lock(&p->adev->notifier_lock);

	/* If the userptrs were invalidated after amdgpu_cs_parser_bos(),
	 * return -EAGAIN; drmIoctl() in libdrm will restart the
	 * amdgpu_cs_ioctl.
	 */
	r = 0;
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		r |= !amdgpu_hmm_range_valid(e->range);
		amdgpu_hmm_range_free(e->range);
		e->range = NULL;
	}
	if (r) {
		r = -EAGAIN;
		mutex_unlock(&p->adev->notifier_lock);
		return r;
	}

	p->fence = dma_fence_get(&leader->base.s_fence->finished);
	drm_exec_for_each_locked_object(&p->exec, index, gobj) {

		ttm_bo_move_to_lru_tail_unlocked(&gem_to_amdgpu_bo(gobj)->tbo);

		/* Everybody except for the gang leader uses READ */
		for (i = 0; i < p->gang_size; ++i) {
			if (p->jobs[i] == leader)
				continue;

			dma_resv_add_fence(gobj->resv,
					   &p->jobs[i]->base.s_fence->finished,
					   DMA_RESV_USAGE_READ);
		}

		/* The gang leader is remembered as writer */
		dma_resv_add_fence(gobj->resv, p->fence, DMA_RESV_USAGE_WRITE);
	}

	seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
				   p->fence);
	amdgpu_cs_post_dependencies(p);

	if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
	    !p->ctx->preamble_presented) {
		leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
		p->ctx->preamble_presented = true;
	}

	cs->out.handle = seq;
	leader->uf_sequence = seq;

	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->exec.ticket);
	for (i = 0; i < p->gang_size; ++i) {
		amdgpu_job_free_resources(p->jobs[i]);
		trace_amdgpu_cs_ioctl(p->jobs[i]);
		drm_sched_entity_push_job(&p->jobs[i]->base);
		p->jobs[i] = NULL;
	}

	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);

	mutex_unlock(&p->adev->notifier_lock);
	mutex_unlock(&p->bo_list->bo_list_mutex);
	return 0;
}

/* Cleanup the parser structure */
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
{
	unsigned int i;

	amdgpu_sync_free(&parser->sync);
	drm_exec_fini(&parser->exec);

	for (i = 0; i < parser->num_post_deps; i++) {
		drm_syncobj_put(parser->post_deps[i].syncobj);
		kfree(parser->post_deps[i].chain);
	}
	kfree(parser->post_deps);

	dma_fence_put(parser->fence);

	if (parser->ctx)
		amdgpu_ctx_put(parser->ctx);
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kvfree(parser->chunks);
	for (i = 0; i < parser->gang_size; ++i) {
		if (parser->jobs[i])
			amdgpu_job_free(parser->jobs[i]);
	}
	amdgpu_bo_unref(&parser->uf_bo);
}
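
/*
 * Main entry point for the CS ioctl. The submission is processed in stages:
 * pass 1 copies and validates the chunks and allocates the gang jobs, pass 2
 * fills in per-chunk state, amdgpu_cs_parser_bos() locks and validates the
 * buffers, the IBs are patched for VM emulation where needed, the VM is
 * brought up to date, the dependencies are collected and finally the jobs
 * are pushed to the scheduler in amdgpu_cs_submit().
 */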

int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_cs_parser parser;
	int r;

	if (amdgpu_ras_intr_triggered())
		return -EHWPOISON;

	if (!adev->accel_working)
		return -EBUSY;

	r = amdgpu_cs_parser_init(&parser, adev, filp, data);
	if (r) {
		drm_err_ratelimited(dev, "Failed to initialize parser %d!\n", r);
		return r;
	}

	r = amdgpu_cs_pass1(&parser, data);
	if (r)
		goto error_fini;

	r = amdgpu_cs_pass2(&parser);
	if (r)
		goto error_fini;

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			drm_err(dev, "Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS && r != -EAGAIN)
			drm_dbg(dev, "Failed to process the buffer list %d!\n", r);
		goto error_fini;
	}

	r = amdgpu_cs_patch_jobs(&parser);
	if (r)
		goto error_backoff;

	r = amdgpu_cs_vm_handling(&parser);
	if (r)
		goto error_backoff;

	r = amdgpu_cs_sync_rings(&parser);
	if (r)
		goto error_backoff;

	trace_amdgpu_cs_ibs(&parser);

	r = amdgpu_cs_submit(&parser, data);
	if (r)
		goto error_backoff;

	amdgpu_cs_parser_fini(&parser);
	return 0;

error_backoff:
	mutex_unlock(&parser.bo_list->bo_list_mutex);

error_fini:
	amdgpu_cs_parser_fini(&parser);
	return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
				  wait->in.ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
				  user->ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return ERR_PTR(r);
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}

int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;
	int fd, r;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	if (!fence)
		fence = dma_fence_get_stub();

	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0) {
			dma_fence_put(fence);
			return fd;
		}

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		info->out.handle = fd;
		return 0;

	default:
		dma_fence_put(fence);
		return -EINVAL;
	}
}

/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;

		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kzalloc_objs(struct dma_fence *, fence_count);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			first = i;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;

	if (first < fence_count && array[first])
		r = array[first]->error;
	else
		r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}

/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	union drm_amdgpu_wait_fences *wait = data;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = memdup_array_user(u64_to_user_ptr(wait->in.fences),
				   wait->in.fence_count,
				   sizeof(struct drm_amdgpu_fence));
	if (IS_ERR(fences))
		return PTR_ERR(fences);

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

	kfree(fences);

	return r;
}

/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: Placeholder to return found BO mapping
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns 0 and fills in @bo and @map when a mapping
 * is found, a negative error code otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	int i, r;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	*bo = mapping->bo_va->base.bo;
	*map = mapping;

	/* Double check that the BO is reserved by this CS */
	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
		return -EINVAL;

	/* Make sure VRAM is allocated contiguously */
	(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	if ((*bo)->tbo.resource->mem_type == TTM_PL_VRAM &&
	    !((*bo)->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS)) {

		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
		for (i = 0; i < (*bo)->placement.num_placement; i++)
			(*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
		if (r)
			return r;
	}

	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}