/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */

#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <linux/dma-buf.h>
#include <linux/hmm.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include <drm/ttm/ttm_tt.h>

#include "amdgpu_cs.h"
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"

static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
				 struct amdgpu_device *adev,
				 struct drm_file *filp,
				 union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	if (cs->in.num_chunks == 0)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->adev = adev;
	p->filp = filp;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx)
		return -EINVAL;

	if (atomic_read(&p->ctx->guilty)) {
		amdgpu_ctx_put(p->ctx);
		return -ECANCELED;
	}

	amdgpu_sync_create(&p->sync);
	drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
		      DRM_EXEC_IGNORE_DUPLICATES, 0);
	return 0;
}

static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
			     struct drm_amdgpu_cs_chunk_ib *chunk_ib)
{
	struct drm_sched_entity *entity;
	unsigned int i;
	int r;

	r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
				  chunk_ib->ip_instance,
				  chunk_ib->ring, &entity);
	if (r)
		return r;

	/*
	 * Abort if there is no run queue associated with this entity.
	 * Possibly because of disabled HW IP.
	 */
	if (entity->rq == NULL)
		return -EINVAL;

	/* Check if we can add this IB to some existing job */
	for (i = 0; i < p->gang_size; ++i)
		if (p->entities[i] == entity)
			return i;

	/* If not, increase the gang size if possible */
	if (i == AMDGPU_CS_GANG_SIZE)
		return -EINVAL;

	p->entities[i] = entity;
	p->gang_size = i + 1;
	return i;
}

static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
			   struct drm_amdgpu_cs_chunk_ib *chunk_ib,
			   unsigned int *num_ibs)
{
	int r;

	r = amdgpu_cs_job_idx(p, chunk_ib);
	if (r < 0)
		return r;

	if (num_ibs[r] >= amdgpu_ring_max_ibs(chunk_ib->ip_type))
		return -EINVAL;

	++(num_ibs[r]);
	p->gang_leader_idx = r;
	return 0;
}

static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
				   struct drm_amdgpu_cs_chunk_fence *data,
				   uint32_t *offset)
{
	struct drm_gem_object *gobj;
	unsigned long size;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	p->uf_bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	drm_gem_object_put(gobj);

	size = amdgpu_bo_size(p->uf_bo);
	if (size != PAGE_SIZE || data->offset > (size - 8))
		return -EINVAL;

	if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm))
		return -EINVAL;

	*offset = data->offset;
	return 0;
}

static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
				   struct drm_amdgpu_bo_list_in *data)
{
	struct drm_amdgpu_bo_list_entry *info;
	int r;

	r = amdgpu_bo_create_list_entry_array(data, &info);
	if (r)
		return r;

	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
				  &p->bo_list);
	if (r)
		goto error_free;

	kvfree(info);
	return 0;

error_free:
	kvfree(info);

	return r;
}

/* Copy the data from userspace and go over it the first time */
static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
			   union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
	struct amdgpu_vm *vm = &fpriv->vm;
	uint64_t *chunk_array;
	uint32_t uf_offset = 0;
	size_t size;
	int ret;
	int i;

	chunk_array = memdup_array_user(u64_to_user_ptr(cs->in.chunks),
					cs->in.num_chunks,
					sizeof(uint64_t));
	if (IS_ERR(chunk_array))
		return PTR_ERR(chunk_array);

	p->nchunks = cs->in.num_chunks;
	p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				   GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto free_chunk;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;

		p->chunks[i].kdata = vmemdup_array_user(u64_to_user_ptr(user_chunk.chunk_data),
							size,
							sizeof(uint32_t));
		if (IS_ERR(p->chunks[i].kdata)) {
			ret = PTR_ERR(p->chunks[i].kdata);
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);

		/* Assume the worst on the following checks */
		ret = -EINVAL;
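
		/* First pass: the IB, fence and BO-list chunks are
		 * size-checked and recorded here (per-entity IB counts, the
		 * user fence BO and the BO list). Dependency, syncobj and
		 * shadow chunks are merely accepted at this point and are
		 * processed in amdgpu_cs_pass2().
		 */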
		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
				goto free_partial_kdata;

			ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
			if (ret)
				goto free_partial_kdata;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
				goto free_partial_kdata;

			ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
						      &uf_offset);
			if (ret)
				goto free_partial_kdata;
			break;

		case AMDGPU_CHUNK_ID_BO_HANDLES:
			if (size < sizeof(struct drm_amdgpu_bo_list_in))
				goto free_partial_kdata;

			/* Only a single BO list is allowed to simplify handling. */
			if (p->bo_list)
				goto free_partial_kdata;

			ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
			if (ret)
				goto free_partial_kdata;
			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
		case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
			break;

		default:
			goto free_partial_kdata;
		}
	}

	if (!p->gang_size || (amdgpu_sriov_vf(p->adev) && p->gang_size > 1)) {
		ret = -EINVAL;
		goto free_all_kdata;
	}

	for (i = 0; i < p->gang_size; ++i) {
		ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
				       num_ibs[i], &p->jobs[i],
				       p->filp->client_id);
		if (ret)
			goto free_all_kdata;
		switch (p->adev->enforce_isolation[fpriv->xcp_id]) {
		case AMDGPU_ENFORCE_ISOLATION_DISABLE:
		default:
			p->jobs[i]->enforce_isolation = false;
			p->jobs[i]->run_cleaner_shader = false;
			break;
		case AMDGPU_ENFORCE_ISOLATION_ENABLE:
			p->jobs[i]->enforce_isolation = true;
			p->jobs[i]->run_cleaner_shader = true;
			break;
		case AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY:
			p->jobs[i]->enforce_isolation = true;
			p->jobs[i]->run_cleaner_shader = false;
			break;
		case AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER:
			p->jobs[i]->enforce_isolation = true;
			p->jobs[i]->run_cleaner_shader = false;
			break;
		}
	}
	p->gang_leader = p->jobs[p->gang_leader_idx];

	if (p->ctx->generation != p->gang_leader->generation) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_bo)
		p->gang_leader->uf_addr = uf_offset;
	kvfree(chunk_array);

	/* Use this opportunity to fill in task info for the vm */
	amdgpu_vm_set_task_info(vm);

	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kvfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kvfree(chunk_array);

	return ret;
}

static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
			   struct amdgpu_cs_chunk *chunk,
			   unsigned int *ce_preempt,
			   unsigned int *de_preempt)
{
	struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_ring *ring;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	int r;

	r = amdgpu_cs_job_idx(p, chunk_ib);
	if (r < 0)
		return r;

	job = p->jobs[r];
	ring = amdgpu_job_ring(job);
	ib = &job->ibs[job->num_ibs++];

	/* submissions to kernel queues are disabled */
	if (ring->no_user_submission)
		return -EINVAL;

	/* MM engine doesn't support user fences */
	if (p->uf_bo && ring->funcs->no_user_fence)
		return -EINVAL;

	if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
	    chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
		if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
			(*ce_preempt)++;
		else
			(*de_preempt)++;

		/* Each GFX command submit allows only 1 IB max
		 * preemptible for CE & DE */
		if (*ce_preempt > 1 || *de_preempt > 1)
			return -EINVAL;
	}

	if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;

	r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
			  chunk_ib->ib_bytes : 0,
			  AMDGPU_IB_POOL_DELAYED, ib);
	if (r) {
		drm_err(adev_to_drm(p->adev), "Failed to get ib !\n");
		return r;
	}

	ib->gpu_addr = chunk_ib->va_start;
	ib->length_dw = chunk_ib->ib_bytes / 4;
	ib->flags = chunk_ib->flags;
	return 0;
}

static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
				     struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned int num_deps;
	int i, r;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ctx *ctx;
		struct drm_sched_entity *entity;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;

		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
					  deps[i].ip_instance,
					  deps[i].ring, &entity);
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}

		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
		amdgpu_ctx_put(ctx);

		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
			struct drm_sched_fence *s_fence;
			struct dma_fence *old = fence;

			s_fence = to_drm_sched_fence(fence);
			fence = dma_fence_get(&s_fence->scheduled);
			dma_fence_put(old);
		}

		r = amdgpu_sync_fence(&p->sync, fence, GFP_KERNEL);
		dma_fence_put(fence);
		if (r)
			return r;
	}
	return 0;
}

static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
					 uint32_t handle, u64 point,
					 u64 flags)
{
	struct dma_fence *fence;
	int r;

	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
	if (r) {
		drm_err(adev_to_drm(p->adev), "syncobj %u failed to find fence @ %llu (%d)!\n",
			handle, point, r);
		return r;
	}

	r = amdgpu_sync_fence(&p->sync, fence, GFP_KERNEL);
	dma_fence_put(fence);
	return r;
}

static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
				   struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
	unsigned int num_deps;
	int i, r;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);
		if (r)
			return r;
	}

	return 0;
}

static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
					      struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
	unsigned int num_deps;
	int i, r;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
						  syncobj_deps[i].point,
						  syncobj_deps[i].flags);
		if (r)
			return r;
	}

	return 0;
}

static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
				    struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
	unsigned int num_deps;
	int i;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		p->post_deps[i].syncobj =
			drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_deps[i].syncobj)
			return -EINVAL;
		p->post_deps[i].chain = NULL;
		p->post_deps[i].point = 0;
		p->num_post_deps++;
	}

	return 0;
}

static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
						struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
	unsigned int num_deps;
	int i;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];

		dep->chain = NULL;
		if (syncobj_deps[i].point) {
			dep->chain = dma_fence_chain_alloc();
			if (!dep->chain)
				return -ENOMEM;
		}

		dep->syncobj = drm_syncobj_find(p->filp,
						syncobj_deps[i].handle);
		if (!dep->syncobj) {
			dma_fence_chain_free(dep->chain);
			return -EINVAL;
		}
		dep->point = syncobj_deps[i].point;
		p->num_post_deps++;
	}

	return 0;
}

static int amdgpu_cs_p2_shadow(struct amdgpu_cs_parser *p,
			       struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_cp_gfx_shadow *shadow = chunk->kdata;
	int i;

	if (shadow->flags & ~AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW)
		return -EINVAL;

	for (i = 0; i < p->gang_size; ++i) {
		p->jobs[i]->shadow_va = shadow->shadow_va;
		p->jobs[i]->csa_va = shadow->csa_va;
		p->jobs[i]->gds_va = shadow->gds_va;
		p->jobs[i]->init_shadow =
			shadow->flags & AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW;
	}

	return 0;
}

static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
{
	unsigned int ce_preempt = 0, de_preempt = 0;
	int i, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		switch (chunk->chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
			r = amdgpu_cs_p2_dependencies(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
			r = amdgpu_cs_p2_syncobj_in(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
			r = amdgpu_cs_p2_syncobj_out(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
			r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
			r = amdgpu_cs_p2_shadow(p, chunk);
			if (r)
				return r;
			break;
		}
	}

	return 0;
}

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 * E.g. with log2_max_MBps == 6 (64 MB/s), one second of accumulated
	 * time (1000000 us) converts to 64000000 bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}

/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;
	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
	used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;

			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
}

static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_cs_parser *p = param;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->tbo.base.resv
	};
	uint32_t domain;
	int r;

	if (bo->tbo.pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold &&
	    (!bo->tbo.base.dma_buf ||
	     list_empty(&bo->tbo.base.dma_buf->attachments))) {
		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance to do
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	p->bytes_moved += ctx.bytes_moved;
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_res_cpu_visible(adev, bo->tbo.resource))
		p->bytes_moved_vis += ctx.bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct drm_gem_object *obj;
	unsigned long index;
	unsigned int i;
	int r;

	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
	if (cs->in.bo_list_handle) {
		if (p->bo_list)
			return -EINVAL;

		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
				       &p->bo_list);
		if (r)
			return r;
	} else if (!p->bo_list) {
		/* Create an empty bo_list when no handle is provided */
		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
					  &p->bo_list);
		if (r)
			return r;
	}

	mutex_lock(&p->bo_list->bo_list_mutex);

	/* Get userptr backing pages. If pages are updated after they were
	 * registered in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate()
	 * will do amdgpu_ttm_backend_bind() to flush and invalidate new pages.
	 */
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		bool userpage_invalidated = false;
		struct amdgpu_bo *bo = e->bo;
		int i;

		r = amdgpu_ttm_tt_get_user_pages(bo, &e->range);
		if (r)
			goto out_free_user_pages;

		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
			if (bo->tbo.ttm->pages[i] != hmm_pfn_to_page(e->range->hmm_pfns[i])) {
				userpage_invalidated = true;
				break;
			}
		}
		e->user_invalidated = userpage_invalidated;
	}

	drm_exec_until_all_locked(&p->exec) {
		r = amdgpu_vm_lock_pd(&fpriv->vm, &p->exec, 1 + p->gang_size);
		drm_exec_retry_on_contention(&p->exec);
		if (unlikely(r))
			goto out_free_user_pages;

		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			/* One fence for TTM and one for each CS job */
			r = drm_exec_prepare_obj(&p->exec, &e->bo->tbo.base,
						 1 + p->gang_size);
			drm_exec_retry_on_contention(&p->exec);
			if (unlikely(r))
				goto out_free_user_pages;

			e->bo_va = amdgpu_vm_bo_find(vm, e->bo);
		}

		if (p->uf_bo) {
			r = drm_exec_prepare_obj(&p->exec, &p->uf_bo->tbo.base,
						 1 + p->gang_size);
			drm_exec_retry_on_contention(&p->exec);
			if (unlikely(r))
				goto out_free_user_pages;
		}
	}

	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(e->bo->tbo.ttm);
		if (usermm && usermm != current->mm) {
			r = -EPERM;
			goto out_free_user_pages;
		}

		if (amdgpu_ttm_tt_is_userptr(e->bo->tbo.ttm) &&
		    e->user_invalidated) {
			amdgpu_bo_placement_from_domain(e->bo,
							AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&e->bo->tbo, &e->bo->placement,
					    &ctx);
			if (r)
				goto out_free_user_pages;

			amdgpu_ttm_tt_set_user_pages(e->bo->tbo.ttm,
						     e->range);
		}
	}

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved = 0;
	p->bytes_moved_vis = 0;

	r = amdgpu_vm_validate(p->adev, &fpriv->vm, NULL,
			       amdgpu_cs_bo_validate, p);
	if (r) {
		drm_err(adev_to_drm(p->adev), "amdgpu_vm_validate() failed.\n");
		goto out_free_user_pages;
	}

	drm_exec_for_each_locked_object(&p->exec, index, obj) {
		r = amdgpu_cs_bo_validate(p, gem_to_amdgpu_bo(obj));
		if (unlikely(r))
			goto out_free_user_pages;
	}

	if (p->uf_bo) {
		r = amdgpu_ttm_alloc_gart(&p->uf_bo->tbo);
		if (unlikely(r))
			goto out_free_user_pages;

		p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(p->uf_bo);
	}

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
				     p->bytes_moved_vis);

	for (i = 0; i < p->gang_size; ++i)
		amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,
					 p->bo_list->gws_obj,
					 p->bo_list->oa_obj);
	return 0;

out_free_user_pages:
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = e->bo;

		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
		e->range = NULL;
	}
	mutex_unlock(&p->bo_list->bo_list_mutex);
	return r;
}

static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
{
	int i, j;

	if (!trace_amdgpu_cs_enabled())
		return;

	for (i = 0; i < p->gang_size; ++i) {
		struct amdgpu_job *job = p->jobs[i];

		for (j = 0; j < job->num_ibs; ++j)
			trace_amdgpu_cs(p, job, &job->ibs[j]);
	}
}

static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
			       struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = amdgpu_job_ring(job);
	unsigned int i;
	int r;

	/* Only for UVD/VCE VM emulation */
	if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
		return 0;

	for (i = 0; i < job->num_ibs; ++i) {
		struct amdgpu_ib *ib = &job->ibs[i];
		struct amdgpu_bo_va_mapping *m;
		struct amdgpu_bo *aobj;
		uint64_t va_start;
		uint8_t *kptr;

		va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
		r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
		if (r) {
			drm_err(adev_to_drm(p->adev), "IB va_start is invalid\n");
			return r;
		}

		if ((va_start + ib->length_dw * 4) >
		    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
			drm_err(adev_to_drm(p->adev), "IB va_start+ib_bytes is invalid\n");
			return -EINVAL;
		}

		/* the IB should be reserved at this point */
		r = amdgpu_bo_kmap(aobj, (void **)&kptr);
		if (r)
			return r;

		kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);

		if (ring->funcs->parse_cs) {
			memcpy(ib->ptr, kptr, ib->length_dw * 4);
			amdgpu_bo_kunmap(aobj);

			r = amdgpu_ring_parse_cs(ring, p, job, ib);
			if (r)
				return r;

			if (ib->sa_bo)
				ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
		} else {
			ib->ptr = (uint32_t *)kptr;
			r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
			amdgpu_bo_kunmap(aobj);
			if (r)
				return r;
		}
	}

	return 0;
}

static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
{
	unsigned int i;
	int r;

	for (i = 0; i < p->gang_size; ++i) {
		r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
		if (r)
			return r;
	}
	return 0;
}

static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_job *job = p->gang_leader;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_bo_va *bo_va;
	unsigned int i;
	int r;

	/*
	 * We can't use gang submit with reserved VMIDs when the VM changes
	 * can't be invalidated by more than one engine at the same time.
	 */
	if (p->gang_size > 1 && !adev->vm_manager.concurrent_flush) {
		for (i = 0; i < p->gang_size; ++i) {
			struct drm_sched_entity *entity = p->entities[i];
			struct drm_gpu_scheduler *sched = entity->rq->sched;
			struct amdgpu_ring *ring = to_amdgpu_ring(sched);

			if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub))
				return -EINVAL;
		}
	}

	if (!amdgpu_vm_ready(vm))
		return -EINVAL;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update,
			      GFP_KERNEL);
	if (r)
		return r;

	if (fpriv->csa_va) {
		bo_va = fpriv->csa_va;
		BUG_ON(!bo_va);
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update,
				      GFP_KERNEL);
		if (r)
			return r;
	}

	/* FIXME: In theory this loop shouldn't be needed any more when
	 * amdgpu_vm_handle_moved handles all moved BOs that are reserved
	 * with p->ticket. But removing it caused test regressions, so I'm
	 * leaving it here for now.
	 */
	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		bo_va = e->bo_va;
		if (bo_va == NULL)
			continue;

		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update,
				      GFP_KERNEL);
		if (r)
			return r;
	}

	r = amdgpu_vm_handle_moved(adev, vm, &p->exec.ticket);
	if (r)
		return r;

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(&p->sync, vm->last_update, GFP_KERNEL);
	if (r)
		return r;

	for (i = 0; i < p->gang_size; ++i) {
		job = p->jobs[i];

		if (!job->vm)
			continue;

		job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
	}

	if (adev->debug_vm) {
		/* Invalidate all BOs to test for userspace bugs */
		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = e->bo;

			/* ignore duplicates */
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(bo, false);
		}
	}

	return 0;
}

static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct drm_gpu_scheduler *sched;
	struct drm_gem_object *obj;
	struct dma_fence *fence;
	unsigned long index;
	unsigned int i;
	int r;

	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
	if (r) {
		if (r != -ERESTARTSYS)
			drm_err(adev_to_drm(p->adev), "amdgpu_ctx_wait_prev_fence failed.\n");
		return r;
	}

	drm_exec_for_each_locked_object(&p->exec, index, obj) {
		struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

		struct dma_resv *resv = bo->tbo.base.resv;
		enum amdgpu_sync_mode sync_mode;

		sync_mode = amdgpu_bo_explicit_sync(bo) ?
			AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
		r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode,
				     &fpriv->vm);
		if (r)
			return r;
	}

	for (i = 0; i < p->gang_size; ++i) {
		r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]);
		if (r)
			return r;
	}

	sched = p->gang_leader->base.entity->rq->sched;
	while ((fence = amdgpu_sync_get_fence(&p->sync))) {
		struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);

		/*
		 * When we have a dependency it might be necessary to insert a
		 * pipeline sync to make sure that all caches etc are flushed and the
		 * next job actually sees the results from the previous one
		 * before we start executing on the same scheduler ring.
		 */
		if (!s_fence || s_fence->sched != sched) {
			dma_fence_put(fence);
			continue;
		}

		r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence,
				      GFP_KERNEL);
		dma_fence_put(fence);
		if (r)
			return r;
	}
	return 0;
}

static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
	int i;

	for (i = 0; i < p->num_post_deps; ++i) {
		if (p->post_deps[i].chain && p->post_deps[i].point) {
			drm_syncobj_add_point(p->post_deps[i].syncobj,
					      p->post_deps[i].chain,
					      p->fence, p->post_deps[i].point);
			p->post_deps[i].chain = NULL;
		} else {
			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
						  p->fence);
		}
	}
}

static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_job *leader = p->gang_leader;
	struct amdgpu_bo_list_entry *e;
	struct drm_gem_object *gobj;
	unsigned long index;
	unsigned int i;
	uint64_t seq;
	int r;

	for (i = 0; i < p->gang_size; ++i)
		drm_sched_job_arm(&p->jobs[i]->base);

	for (i = 0; i < p->gang_size; ++i) {
		struct dma_fence *fence;

		if (p->jobs[i] == leader)
			continue;

		fence = &p->jobs[i]->base.s_fence->scheduled;
		dma_fence_get(fence);
		r = drm_sched_job_add_dependency(&leader->base, fence);
		if (r) {
			dma_fence_put(fence);
			return r;
		}
	}

	if (p->gang_size > 1) {
		for (i = 0; i < p->gang_size; ++i)
			amdgpu_job_set_gang_leader(p->jobs[i], leader);
	}

	/* No memory allocation is allowed while holding the notifier lock.
	 * The lock is held until amdgpu_cs_submit is finished and fence is
	 * added to BOs.
	 */
	mutex_lock(&p->adev->notifier_lock);

	/* If userptrs are invalidated after amdgpu_cs_parser_bos(), return
	 * -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
	 */
	r = 0;
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		r |= !amdgpu_ttm_tt_get_user_pages_done(e->bo->tbo.ttm,
							e->range);
		e->range = NULL;
	}
	if (r) {
		r = -EAGAIN;
		mutex_unlock(&p->adev->notifier_lock);
		return r;
	}

	p->fence = dma_fence_get(&leader->base.s_fence->finished);
	drm_exec_for_each_locked_object(&p->exec, index, gobj) {

		ttm_bo_move_to_lru_tail_unlocked(&gem_to_amdgpu_bo(gobj)->tbo);

		/* Everybody except for the gang leader uses READ */
		for (i = 0; i < p->gang_size; ++i) {
			if (p->jobs[i] == leader)
				continue;

			dma_resv_add_fence(gobj->resv,
					   &p->jobs[i]->base.s_fence->finished,
					   DMA_RESV_USAGE_READ);
		}

		/* The gang leader is remembered as writer */
		dma_resv_add_fence(gobj->resv, p->fence, DMA_RESV_USAGE_WRITE);
	}

	seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
				   p->fence);
	amdgpu_cs_post_dependencies(p);

	if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
	    !p->ctx->preamble_presented) {
		leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
		p->ctx->preamble_presented = true;
	}

	cs->out.handle = seq;
	leader->uf_sequence = seq;

	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->exec.ticket);
	for (i = 0; i < p->gang_size; ++i) {
		amdgpu_job_free_resources(p->jobs[i]);
		trace_amdgpu_cs_ioctl(p->jobs[i]);
		drm_sched_entity_push_job(&p->jobs[i]->base);
		p->jobs[i] = NULL;
	}

	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);

	mutex_unlock(&p->adev->notifier_lock);
	mutex_unlock(&p->bo_list->bo_list_mutex);
	return 0;
}

/* Cleanup the parser structure */
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
{
	unsigned int i;

	amdgpu_sync_free(&parser->sync);
	drm_exec_fini(&parser->exec);

	for (i = 0; i < parser->num_post_deps; i++) {
		drm_syncobj_put(parser->post_deps[i].syncobj);
		kfree(parser->post_deps[i].chain);
	}
	kfree(parser->post_deps);

	dma_fence_put(parser->fence);

	if (parser->ctx)
		amdgpu_ctx_put(parser->ctx);
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kvfree(parser->chunks);
	for (i = 0; i < parser->gang_size; ++i) {
		if (parser->jobs[i])
			amdgpu_job_free(parser->jobs[i]);
	}
	amdgpu_bo_unref(&parser->uf_bo);
}

int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_cs_parser parser;
	int r;

	if (amdgpu_ras_intr_triggered())
		return -EHWPOISON;

	if (!adev->accel_working)
		return -EBUSY;

	r = amdgpu_cs_parser_init(&parser, adev, filp, data);
	if (r) {
		drm_err_ratelimited(dev, "Failed to initialize parser %d!\n", r);
		return r;
	}

	r = amdgpu_cs_pass1(&parser, data);
	if (r)
		goto error_fini;

	r = amdgpu_cs_pass2(&parser);
	if (r)
		goto error_fini;

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			drm_err(dev, "Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS && r != -EAGAIN)
			drm_dbg(dev, "Failed to process the buffer list %d!\n", r);
		goto error_fini;
	}
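
	/* With the BO list locked and validated, the remaining stages run
	 * under that reservation: patch UVD/VCE IBs that must be parsed on
	 * the CPU, update the VM page tables, gather the sync fences and
	 * finally hand the jobs over to the scheduler.
	 */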
	r = amdgpu_cs_patch_jobs(&parser);
	if (r)
		goto error_backoff;

	r = amdgpu_cs_vm_handling(&parser);
	if (r)
		goto error_backoff;

	r = amdgpu_cs_sync_rings(&parser);
	if (r)
		goto error_backoff;

	trace_amdgpu_cs_ibs(&parser);

	r = amdgpu_cs_submit(&parser, data);
	if (r)
		goto error_backoff;

	amdgpu_cs_parser_fini(&parser);
	return 0;

error_backoff:
	mutex_unlock(&parser.bo_list->bo_list_mutex);

error_fini:
	amdgpu_cs_parser_fini(&parser);
	return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
				  wait->in.ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
				  user->ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return ERR_PTR(r);
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}

int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;
	int fd, r;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	if (!fence)
		fence = dma_fence_get_stub();

	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0) {
			dma_fence_put(fence);
			return fd;
		}

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		info->out.handle = fd;
		return 0;

	default:
		dma_fence_put(fence);
		return -EINVAL;
	}
}

/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;

		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has already been signaled */
			r = 1;
			first = i;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;

	if (first < fence_count && array[first])
		r = array[first]->error;
	else
		r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}

/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	union drm_amdgpu_wait_fences *wait = data;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = memdup_array_user(u64_to_user_ptr(wait->in.fences),
				   wait->in.fence_count,
				   sizeof(struct drm_amdgpu_fence));
	if (IS_ERR(fences))
		return PTR_ERR(fences);

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

	kfree(fences);

	return r;
}

/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: Placeholder to return found BO mapping
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns 0 and fills in @bo and @map when the
 * mapping is found, a negative error code otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	int i, r;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	*bo = mapping->bo_va->base.bo;
	*map = mapping;

	/* Double check that the BO is reserved by this CS */
	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
		return -EINVAL;

	/* Make sure VRAM is allocated contiguously */
	(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	if ((*bo)->tbo.resource->mem_type == TTM_PL_VRAM &&
	    !((*bo)->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS)) {

		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
		for (i = 0; i < (*bo)->placement.num_placement; i++)
			(*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
		if (r)
			return r;
	}

	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}