// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2014-2018 Broadcom
 * Copyright (C) 2023 Raspberry Pi
 */

#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

/* Takes the reservation lock on all the BOs being referenced, so that
 * we can attach fences and update the reservations after pushing the job
 * to the queue.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on render->unref_list). They're entirely private
 * to v3d, so we don't attach dma-buf fences to them.
 */
static int
v3d_lock_bo_reservations(struct v3d_job *job,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int i, ret;

	ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
	if (ret)
		return ret;

	for (i = 0; i < job->bo_count; i++) {
		ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
		if (ret)
			goto fail;

		ret = drm_sched_job_add_implicit_dependencies(&job->base,
							      job->bo[i], true);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
	return ret;
}

/**
 * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @job: V3D job being set up
 * @bo_handles: GEM handles
 * @bo_count: Number of GEM handles passed in
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list. This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at `v3d_job_free()`.
 */
static int
v3d_lookup_bos(struct drm_device *dev,
	       struct drm_file *file_priv,
	       struct v3d_job *job,
	       u64 bo_handles,
	       u32 bo_count)
{
	job->bo_count = bo_count;

	if (!job->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		drm_warn(dev, "Rendering requires BOs\n");
		return -EINVAL;
	}

	return drm_gem_objects_lookup(file_priv,
				      (void __user *)(uintptr_t)bo_handles,
				      job->bo_count, &job->bo);
}
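
/* Release callback for the job refcount: drops the references held on the
 * job's BOs, fences and perfmon, then frees the job itself.
 */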
static void
v3d_job_free(struct kref *ref)
{
	struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
	int i;

	if (job->bo) {
		for (i = 0; i < job->bo_count; i++)
			drm_gem_object_put(job->bo[i]);
		kvfree(job->bo);
	}

	dma_fence_put(job->irq_fence);
	dma_fence_put(job->done_fence);

	if (job->perfmon)
		v3d_perfmon_put(job->perfmon);

	kfree(job);
}

static void
v3d_render_job_free(struct kref *ref)
{
	struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
						  base.refcount);
	struct v3d_bo *bo, *save;

	list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
		drm_gem_object_put(&bo->base.base);
	}

	v3d_job_free(ref);
}

void v3d_job_cleanup(struct v3d_job *job)
{
	if (!job)
		return;

	drm_sched_job_cleanup(&job->base);
	v3d_job_put(job);
}

void v3d_job_put(struct v3d_job *job)
{
	if (!job)
		return;

	kref_put(&job->refcount, job->free);
}

static int
v3d_job_allocate(struct v3d_dev *v3d, void **container, size_t size)
{
	*container = kcalloc(1, size, GFP_KERNEL);
	if (!*container) {
		drm_err(&v3d->drm, "Cannot allocate memory for V3D job.\n");
		return -ENOMEM;
	}

	return 0;
}

static void
v3d_job_deallocate(void **container)
{
	kfree(*container);
	*container = NULL;
}
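
/* Common initialization for every job type: binds the job to its scheduler
 * entity, registers the syncobj wait dependencies (either the single in_sync
 * handle or the per-queue list from the multisync extension) and takes the
 * initial reference on the job.
 */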
static int
v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
	     struct v3d_job *job, void (*free)(struct kref *ref),
	     u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue)
{
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
	int ret, i;

	job->v3d = v3d;
	job->free = free;
	job->file_priv = v3d_priv;

	ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
				 1, v3d_priv, file_priv->client_id);
	if (ret)
		return ret;

	if (has_multisync) {
		if (se->in_sync_count && se->wait_stage == queue) {
			struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs);

			for (i = 0; i < se->in_sync_count; i++) {
				struct drm_v3d_sem in;

				if (copy_from_user(&in, handle++, sizeof(in))) {
					ret = -EFAULT;
					drm_dbg(&v3d->drm, "Failed to copy wait dep handle.\n");
					goto fail_deps;
				}
				ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0);

				// TODO: Investigate why this was filtered out for the IOCTL.
				if (ret && ret != -ENOENT)
					goto fail_deps;
			}
		}
	} else {
		ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0);

		// TODO: Investigate why this was filtered out for the IOCTL.
		if (ret && ret != -ENOENT)
			goto fail_deps;
	}

	kref_init(&job->refcount);

	return 0;

fail_deps:
	drm_sched_job_cleanup(&job->base);
	return ret;
}

/* Arms the scheduler job, takes the reference that the scheduler's
 * completion path will drop, and queues the job on its entity.
 */
static void
v3d_push_job(struct v3d_job *job)
{
	drm_sched_job_arm(&job->base);

	job->done_fence = dma_fence_get(&job->base.s_fence->finished);

	/* put by scheduler job completion */
	kref_get(&job->refcount);

	drm_sched_entity_push_job(&job->base);
}

/* Attaches the job's done fence to every BO reservation, drops the
 * reservation locks and installs @done_fence in the userspace-visible
 * out syncobj(s).
 */
static void
v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
					 struct v3d_job *job,
					 struct ww_acquire_ctx *acquire_ctx,
					 u32 out_sync,
					 struct v3d_submit_ext *se,
					 struct dma_fence *done_fence)
{
	struct drm_syncobj *sync_out;
	bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
	int i;

	for (i = 0; i < job->bo_count; i++) {
		/* XXX: Use shared fences for read-only objects. */
		dma_resv_add_fence(job->bo[i]->resv, job->done_fence,
				   DMA_RESV_USAGE_WRITE);
	}

	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);

	/* Update the return sync object for the job */
	/* If it only supports a single signal semaphore */
	if (!has_multisync) {
		sync_out = drm_syncobj_find(file_priv, out_sync);
		if (sync_out) {
			drm_syncobj_replace_fence(sync_out, done_fence);
			drm_syncobj_put(sync_out);
		}
		return;
	}

	/* If multiple semaphores extension is supported */
	if (se->out_sync_count) {
		for (i = 0; i < se->out_sync_count; i++) {
			drm_syncobj_replace_fence(se->out_syncs[i].syncobj,
						  done_fence);
			drm_syncobj_put(se->out_syncs[i].syncobj);
		}
		kvfree(se->out_syncs);
	}
}

/* Allocates and initializes a CSD job plus its trailing cache-clean job,
 * then looks up and locks the BOs referenced by the submission.
 */
static int
v3d_setup_csd_jobs_and_bos(struct drm_file *file_priv,
			   struct v3d_dev *v3d,
			   struct drm_v3d_submit_csd *args,
			   struct v3d_csd_job **job,
			   struct v3d_job **clean_job,
			   struct v3d_submit_ext *se,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int ret;

	ret = v3d_job_allocate(v3d, (void *)job, sizeof(**job));
	if (ret)
		return ret;

	ret = v3d_job_init(v3d, file_priv, &(*job)->base,
			   v3d_job_free, args->in_sync, se, V3D_CSD);
	if (ret) {
		v3d_job_deallocate((void *)job);
		return ret;
	}

	ret = v3d_job_allocate(v3d, (void *)clean_job, sizeof(**clean_job));
	if (ret)
		return ret;

	ret = v3d_job_init(v3d, file_priv, *clean_job,
			   v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
	if (ret) {
		v3d_job_deallocate((void *)clean_job);
		return ret;
	}

	(*job)->args = *args;

	ret = v3d_lookup_bos(&v3d->drm, file_priv, *clean_job,
			     args->bo_handles, args->bo_handle_count);
	if (ret)
		return ret;

	return v3d_lock_bo_reservations(*clean_job, acquire_ctx);
}

/* Drops the syncobj references taken by v3d_get_multisync_post_deps(). */
static void
v3d_put_multisync_post_deps(struct v3d_submit_ext *se)
{
	unsigned int i;

	if (!(se && se->out_sync_count))
		return;

	for (i = 0; i < se->out_sync_count; i++)
		drm_syncobj_put(se->out_syncs[i].syncobj);
	kvfree(se->out_syncs);
}
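
/* Copies the post-signal semaphore handles from userspace and takes a
 * reference on each syncobj; the references are dropped again either by
 * v3d_put_multisync_post_deps() or once the fences are installed in
 * v3d_attach_fences_and_unlock_reservation().
 */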
static int
v3d_get_multisync_post_deps(struct drm_file *file_priv,
			    struct v3d_submit_ext *se,
			    u32 count, u64 handles)
{
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct v3d_dev *v3d = v3d_priv->v3d;
	struct drm_v3d_sem __user *post_deps;
	int i, ret;

	if (!count)
		return 0;

	se->out_syncs = (struct v3d_submit_outsync *)
			kvmalloc_objs(struct v3d_submit_outsync, count,
				      GFP_KERNEL);
	if (!se->out_syncs)
		return -ENOMEM;

	post_deps = u64_to_user_ptr(handles);

	for (i = 0; i < count; i++) {
		struct drm_v3d_sem out;

		if (copy_from_user(&out, post_deps++, sizeof(out))) {
			ret = -EFAULT;
			drm_dbg(&v3d->drm, "Failed to copy post dep handles\n");
			goto fail;
		}

		se->out_syncs[i].syncobj = drm_syncobj_find(file_priv,
							    out.handle);
		if (!se->out_syncs[i].syncobj) {
			ret = -EINVAL;
			goto fail;
		}
	}
	se->out_sync_count = count;

	return 0;

fail:
	for (i--; i >= 0; i--)
		drm_syncobj_put(se->out_syncs[i].syncobj);
	kvfree(se->out_syncs);

	return ret;
}

/* Get data for multiple binary semaphores synchronization. Parse syncobj
 * to be signaled when job completes (out_sync).
 */
static int
v3d_get_multisync_submit_deps(struct drm_file *file_priv,
			      struct drm_v3d_extension __user *ext,
			      struct v3d_submit_ext *se)
{
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct v3d_dev *v3d = v3d_priv->v3d;
	struct drm_v3d_multi_sync multisync;
	int ret;

	if (se->in_sync_count || se->out_sync_count) {
		drm_dbg(&v3d->drm, "Two multisync extensions were added to the same job.");
		return -EINVAL;
	}

	if (copy_from_user(&multisync, ext, sizeof(multisync)))
		return -EFAULT;

	if (multisync.pad)
		return -EINVAL;

	ret = v3d_get_multisync_post_deps(file_priv, se, multisync.out_sync_count,
					  multisync.out_syncs);
	if (ret)
		return ret;

	se->in_sync_count = multisync.in_sync_count;
	se->in_syncs = multisync.in_syncs;
	se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC;
	se->wait_stage = multisync.wait_stage;

	return 0;
}

/* Returns false if the CPU job has an invalid configuration. */
static bool
v3d_validate_cpu_job(struct drm_file *file_priv, struct v3d_cpu_job *job)
{
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct v3d_dev *v3d = v3d_priv->v3d;

	if (!job) {
		drm_dbg(&v3d->drm, "CPU job extension was attached to a GPU job.\n");
		return false;
	}

	if (job->job_type) {
		drm_dbg(&v3d->drm, "Two CPU job extensions were added to the same CPU job.\n");
		return false;
	}

	return true;
}

/* Get data for the indirect CSD job submission. */
static int
v3d_get_cpu_indirect_csd_params(struct drm_file *file_priv,
				struct drm_v3d_extension __user *ext,
				struct v3d_cpu_job *job)
{
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct v3d_dev *v3d = v3d_priv->v3d;
	struct drm_v3d_indirect_csd indirect_csd;
	struct v3d_indirect_csd_info *info = &job->indirect_csd;

	if (!v3d_validate_cpu_job(file_priv, job))
		return -EINVAL;

	if (copy_from_user(&indirect_csd, ext, sizeof(indirect_csd)))
		return -EFAULT;

	if (!v3d_has_csd(v3d)) {
		drm_warn(&v3d->drm, "Attempting CSD submit on non-CSD hardware.\n");
		return -EINVAL;
	}

	job->job_type = V3D_CPU_JOB_TYPE_INDIRECT_CSD;
	info->offset = indirect_csd.offset;
	info->wg_size = indirect_csd.wg_size;
	memcpy(&info->wg_uniform_offsets, &indirect_csd.wg_uniform_offsets,
	       sizeof(indirect_csd.wg_uniform_offsets));

	info->indirect = drm_gem_object_lookup(file_priv, indirect_csd.indirect);

	return v3d_setup_csd_jobs_and_bos(file_priv, v3d, &indirect_csd.submit,
					  &info->job, &info->clean_job,
					  NULL, &info->acquire_ctx);
}

/* Get data for the query timestamp job submission. */
static int
v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
				   struct drm_v3d_extension __user *ext,
				   struct v3d_cpu_job *job)
{
	u32 __user *offsets, *syncs;
	struct drm_v3d_timestamp_query timestamp;
	struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
	unsigned int i;
	int err;

	if (!v3d_validate_cpu_job(file_priv, job))
		return -EINVAL;

	if (copy_from_user(&timestamp, ext, sizeof(timestamp)))
		return -EFAULT;

	if (timestamp.pad)
		return -EINVAL;

	job->job_type = V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY;

	query_info->queries = kvmalloc_objs(struct v3d_timestamp_query,
					    timestamp.count, GFP_KERNEL);
	if (!query_info->queries)
		return -ENOMEM;

	offsets = u64_to_user_ptr(timestamp.offsets);
	syncs = u64_to_user_ptr(timestamp.syncs);

	for (i = 0; i < timestamp.count; i++) {
		u32 offset, sync;

		if (get_user(offset, offsets++)) {
			err = -EFAULT;
			goto error;
		}

		query_info->queries[i].offset = offset;

		if (get_user(sync, syncs++)) {
			err = -EFAULT;
			goto error;
		}

		query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
								  sync);
		if (!query_info->queries[i].syncobj) {
			err = -ENOENT;
			goto error;
		}
	}
	query_info->count = timestamp.count;

	return 0;

error:
	v3d_timestamp_query_info_free(&job->timestamp_query, i);
	return err;
}
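
/* Get data for the reset timestamp query job submission. */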
static int
v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
				   struct drm_v3d_extension __user *ext,
				   struct v3d_cpu_job *job)
{
	u32 __user *syncs;
	struct drm_v3d_reset_timestamp_query reset;
	struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
	unsigned int i;
	int err;

	if (!v3d_validate_cpu_job(file_priv, job))
		return -EINVAL;

	if (copy_from_user(&reset, ext, sizeof(reset)))
		return -EFAULT;

	job->job_type = V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY;

	query_info->queries = kvmalloc_objs(struct v3d_timestamp_query,
					    reset.count, GFP_KERNEL);
	if (!query_info->queries)
		return -ENOMEM;

	syncs = u64_to_user_ptr(reset.syncs);

	for (i = 0; i < reset.count; i++) {
		u32 sync;

		query_info->queries[i].offset = reset.offset + 8 * i;

		if (get_user(sync, syncs++)) {
			err = -EFAULT;
			goto error;
		}

		query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
								  sync);
		if (!query_info->queries[i].syncobj) {
			err = -ENOENT;
			goto error;
		}
	}
	query_info->count = reset.count;

	return 0;

error:
	v3d_timestamp_query_info_free(&job->timestamp_query, i);
	return err;
}

/* Get data for the copy timestamp query results job submission. */
static int
v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
				      struct drm_v3d_extension __user *ext,
				      struct v3d_cpu_job *job)
{
	u32 __user *offsets, *syncs;
	struct drm_v3d_copy_timestamp_query copy;
	struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
	unsigned int i;
	int err;

	if (!v3d_validate_cpu_job(file_priv, job))
		return -EINVAL;

	if (copy_from_user(&copy, ext, sizeof(copy)))
		return -EFAULT;

	if (copy.pad)
		return -EINVAL;

	job->job_type = V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY;

	query_info->queries = kvmalloc_objs(struct v3d_timestamp_query,
					    copy.count, GFP_KERNEL);
	if (!query_info->queries)
		return -ENOMEM;

	offsets = u64_to_user_ptr(copy.offsets);
	syncs = u64_to_user_ptr(copy.syncs);

	for (i = 0; i < copy.count; i++) {
		u32 offset, sync;

		if (get_user(offset, offsets++)) {
			err = -EFAULT;
			goto error;
		}

		query_info->queries[i].offset = offset;

		if (get_user(sync, syncs++)) {
			err = -EFAULT;
			goto error;
		}

		query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
								  sync);
		if (!query_info->queries[i].syncobj) {
			err = -ENOENT;
			goto error;
		}
	}
	query_info->count = copy.count;

	job->copy.do_64bit = copy.do_64bit;
	job->copy.do_partial = copy.do_partial;
	job->copy.availability_bit = copy.availability_bit;
	job->copy.offset = copy.offset;
	job->copy.stride = copy.stride;

	return 0;

error:
	v3d_timestamp_query_info_free(&job->timestamp_query, i);
	return err;
}

/* Copies the per-query syncobj handles and kperfmon ID arrays from
 * userspace; shared by the reset and copy performance query CPU jobs.
 */
static int
v3d_copy_query_info(struct v3d_performance_query_info *query_info,
		    unsigned int count,
		    unsigned int nperfmons,
		    u32 __user *syncs,
		    u64 __user *kperfmon_ids,
		    struct drm_file *file_priv)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < count; i++) {
		struct v3d_performance_query *query = &query_info->queries[i];
		u32 __user *ids_pointer;
		u32 sync, id;
		u64 ids;

		if (get_user(sync, syncs++)) {
			err = -EFAULT;
			goto error;
		}

		if (get_user(ids, kperfmon_ids++)) {
			err = -EFAULT;
			goto error;
		}

		query->kperfmon_ids =
			kvmalloc_array(nperfmons,
				       sizeof(struct v3d_performance_query *),
				       GFP_KERNEL);
		if (!query->kperfmon_ids) {
			err = -ENOMEM;
			goto error;
		}

		ids_pointer = u64_to_user_ptr(ids);

		for (j = 0; j < nperfmons; j++) {
			if (get_user(id, ids_pointer++)) {
				kvfree(query->kperfmon_ids);
				err = -EFAULT;
				goto error;
			}

			query->kperfmon_ids[j] = id;
		}

		query->syncobj = drm_syncobj_find(file_priv, sync);
		if (!query->syncobj) {
			kvfree(query->kperfmon_ids);
			err = -ENOENT;
			goto error;
		}
	}

	return 0;

error:
	v3d_performance_query_info_free(query_info, i);
	return err;
}
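
/* Get data for the reset performance query job submission. */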
static int
v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
				     struct drm_v3d_extension __user *ext,
				     struct v3d_cpu_job *job)
{
	struct v3d_performance_query_info *query_info = &job->performance_query;
	struct drm_v3d_reset_performance_query reset;
	int err;

	if (!v3d_validate_cpu_job(file_priv, job))
		return -EINVAL;

	if (copy_from_user(&reset, ext, sizeof(reset)))
		return -EFAULT;

	job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY;

	query_info->queries =
		kvmalloc_objs(struct v3d_performance_query, reset.count,
			      GFP_KERNEL);
	if (!query_info->queries)
		return -ENOMEM;

	err = v3d_copy_query_info(query_info,
				  reset.count,
				  reset.nperfmons,
				  u64_to_user_ptr(reset.syncs),
				  u64_to_user_ptr(reset.kperfmon_ids),
				  file_priv);
	if (err)
		return err;

	query_info->count = reset.count;
	query_info->nperfmons = reset.nperfmons;

	return 0;
}

/* Get data for the copy performance query results job submission. */
static int
v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
					  struct drm_v3d_extension __user *ext,
					  struct v3d_cpu_job *job)
{
	struct v3d_performance_query_info *query_info = &job->performance_query;
	struct drm_v3d_copy_performance_query copy;
	int err;

	if (!v3d_validate_cpu_job(file_priv, job))
		return -EINVAL;

	if (copy_from_user(&copy, ext, sizeof(copy)))
		return -EFAULT;

	if (copy.pad)
		return -EINVAL;

	job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY;

	query_info->queries =
		kvmalloc_objs(struct v3d_performance_query, copy.count,
			      GFP_KERNEL);
	if (!query_info->queries)
		return -ENOMEM;

	err = v3d_copy_query_info(query_info,
				  copy.count,
				  copy.nperfmons,
				  u64_to_user_ptr(copy.syncs),
				  u64_to_user_ptr(copy.kperfmon_ids),
				  file_priv);
	if (err)
		return err;

	query_info->count = copy.count;
	query_info->nperfmons = copy.nperfmons;
	query_info->ncounters = copy.ncounters;

	job->copy.do_64bit = copy.do_64bit;
	job->copy.do_partial = copy.do_partial;
	job->copy.availability_bit = copy.availability_bit;
	job->copy.offset = copy.offset;
	job->copy.stride = copy.stride;

	return 0;
}

/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
 * according to the extension id (name).
 */
static int
v3d_get_extensions(struct drm_file *file_priv,
		   u64 ext_handles,
		   struct v3d_submit_ext *se,
		   struct v3d_cpu_job *job)
{
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct v3d_dev *v3d = v3d_priv->v3d;
	struct drm_v3d_extension __user *user_ext;
	int ret;

	user_ext = u64_to_user_ptr(ext_handles);
	while (user_ext) {
		struct drm_v3d_extension ext;

		if (copy_from_user(&ext, user_ext, sizeof(ext))) {
			drm_dbg(&v3d->drm, "Failed to copy submit extension\n");
			return -EFAULT;
		}

		switch (ext.id) {
		case DRM_V3D_EXT_ID_MULTI_SYNC:
			ret = v3d_get_multisync_submit_deps(file_priv, user_ext, se);
			break;
		case DRM_V3D_EXT_ID_CPU_INDIRECT_CSD:
			ret = v3d_get_cpu_indirect_csd_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY:
			ret = v3d_get_cpu_timestamp_query_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY:
			ret = v3d_get_cpu_reset_timestamp_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY:
			ret = v3d_get_cpu_copy_query_results_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY:
			ret = v3d_get_cpu_reset_performance_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY:
			ret = v3d_get_cpu_copy_performance_query_params(file_priv, user_ext, job);
			break;
		default:
			drm_dbg(&v3d->drm, "Unknown V3D extension ID: %d\n", ext.id);
			return -EINVAL;
		}

		if (ret)
			return ret;

		user_ext = u64_to_user_ptr(ext.next);
	}

	return 0;
}
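
/* Illustrative sketch only (not compiled as part of the driver): roughly how
 * userspace could chain a multisync extension into a CL submission.  Struct
 * layouts follow include/uapi/drm/v3d_drm.h; the "base" header member name
 * and the exact field set are assumptions made for this example.
 *
 *	struct drm_v3d_sem in = { .handle = wait_syncobj_handle };
 *	struct drm_v3d_sem out = { .handle = signal_syncobj_handle };
 *	struct drm_v3d_multi_sync ms = {
 *		.base = { .id = DRM_V3D_EXT_ID_MULTI_SYNC },
 *		.in_syncs = (__u64)(uintptr_t)&in,
 *		.out_syncs = (__u64)(uintptr_t)&out,
 *		.in_sync_count = 1,
 *		.out_sync_count = 1,
 *		.wait_stage = 0,	(the v3d_queue whose job waits on in_syncs)
 *	};
 *	struct drm_v3d_submit_cl submit = {
 *		.flags = DRM_V3D_SUBMIT_EXTENSION,
 *		.extensions = (__u64)(uintptr_t)&ms,
 *		...
 *	};
 */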

/**
 * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU. Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_cl *args = data;
	struct v3d_submit_ext se = {0};
	struct v3d_bin_job *bin = NULL;
	struct v3d_render_job *render = NULL;
	struct v3d_job *clean_job = NULL;
	struct v3d_job *last_job;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);

	if (args->pad)
		return -EINVAL;

	if (args->flags &&
	    args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE |
			    DRM_V3D_SUBMIT_EXTENSION)) {
		drm_dbg(dev, "invalid flags: %d\n", args->flags);
		return -EINVAL;
	}

	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
		ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
		if (ret) {
			drm_dbg(dev, "Failed to get extensions.\n");
			return ret;
		}
	}

	ret = v3d_job_allocate(v3d, (void *)&render, sizeof(*render));
	if (ret)
		return ret;

	ret = v3d_job_init(v3d, file_priv, &render->base,
			   v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
	if (ret) {
		v3d_job_deallocate((void *)&render);
		goto fail;
	}

	render->start = args->rcl_start;
	render->end = args->rcl_end;
	INIT_LIST_HEAD(&render->unref_list);

	if (args->bcl_start != args->bcl_end) {
		ret = v3d_job_allocate(v3d, (void *)&bin, sizeof(*bin));
		if (ret)
			goto fail;

		ret = v3d_job_init(v3d, file_priv, &bin->base,
				   v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
		if (ret) {
			v3d_job_deallocate((void *)&bin);
			goto fail;
		}

		bin->start = args->bcl_start;
		bin->end = args->bcl_end;
		bin->qma = args->qma;
		bin->qms = args->qms;
		bin->qts = args->qts;
		bin->render = render;
	}

	if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
		ret = v3d_job_allocate(v3d, (void *)&clean_job, sizeof(*clean_job));
		if (ret)
			goto fail;

		ret = v3d_job_init(v3d, file_priv, clean_job,
				   v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
		if (ret) {
			v3d_job_deallocate((void *)&clean_job);
			goto fail;
		}

		last_job = clean_job;
	} else {
		last_job = &render->base;
	}

	ret = v3d_lookup_bos(dev, file_priv, last_job,
			     args->bo_handles, args->bo_handle_count);
	if (ret)
		goto fail;

	ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
	if (ret)
		goto fail;

	if (args->perfmon_id) {
		if (v3d->global_perfmon) {
			ret = -EAGAIN;
			goto fail_perfmon;
		}

		render->base.perfmon = v3d_perfmon_find(v3d_priv,
							args->perfmon_id);

		if (!render->base.perfmon) {
			ret = -ENOENT;
			goto fail_perfmon;
		}
	}
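
	/* Push the jobs in pipeline order (bin -> render -> optional cache
	 * clean), chaining each stage to the previous stage's done fence;
	 * sched_lock serializes job pushes across the queues.
	 */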
	mutex_lock(&v3d->sched_lock);
	if (bin) {
		bin->base.perfmon = render->base.perfmon;
		v3d_perfmon_get(bin->base.perfmon);
		v3d_push_job(&bin->base);

		ret = drm_sched_job_add_dependency(&render->base.base,
						   dma_fence_get(bin->base.done_fence));
		if (ret)
			goto fail_unreserve;
	}

	v3d_push_job(&render->base);

	if (clean_job) {
		struct dma_fence *render_fence =
			dma_fence_get(render->base.done_fence);
		ret = drm_sched_job_add_dependency(&clean_job->base,
						   render_fence);
		if (ret)
			goto fail_unreserve;
		clean_job->perfmon = render->base.perfmon;
		v3d_perfmon_get(clean_job->perfmon);
		v3d_push_job(clean_job);
	}

	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 last_job,
						 &acquire_ctx,
						 args->out_sync,
						 &se,
						 last_job->done_fence);

	v3d_job_put(&bin->base);
	v3d_job_put(&render->base);
	v3d_job_put(clean_job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
fail_perfmon:
	drm_gem_unlock_reservations(last_job->bo,
				    last_job->bo_count, &acquire_ctx);
fail:
	v3d_job_cleanup((void *)bin);
	v3d_job_cleanup((void *)render);
	v3d_job_cleanup(clean_job);
	v3d_put_multisync_post_deps(&se);

	return ret;
}

/**
 * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace provides the register setup for the TFU, which we don't
 * need to validate since the TFU is behind the MMU.
 */
int
v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct drm_v3d_submit_tfu *args = data;
	struct v3d_submit_ext se = {0};
	struct v3d_tfu_job *job = NULL;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);

	if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
		drm_dbg(dev, "invalid flags: %d\n", args->flags);
		return -EINVAL;
	}

	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
		ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
		if (ret) {
			drm_dbg(dev, "Failed to get extensions.\n");
			return ret;
		}
	}

	ret = v3d_job_allocate(v3d, (void *)&job, sizeof(*job));
	if (ret)
		return ret;

	ret = v3d_job_init(v3d, file_priv, &job->base,
			   v3d_job_free, args->in_sync, &se, V3D_TFU);
	if (ret) {
		v3d_job_deallocate((void *)&job);
		goto fail;
	}

	job->base.bo = kzalloc_objs(*job->base.bo, ARRAY_SIZE(args->bo_handles),
				    GFP_KERNEL);
	if (!job->base.bo) {
		ret = -ENOMEM;
		goto fail;
	}

	job->args = *args;

	for (job->base.bo_count = 0;
	     job->base.bo_count < ARRAY_SIZE(args->bo_handles);
	     job->base.bo_count++) {
		struct drm_gem_object *bo;

		if (!args->bo_handles[job->base.bo_count])
			break;

		bo = drm_gem_object_lookup(file_priv, args->bo_handles[job->base.bo_count]);
		if (!bo) {
			drm_dbg(dev, "Failed to look up GEM BO %d: %d\n",
				job->base.bo_count,
				args->bo_handles[job->base.bo_count]);
			ret = -ENOENT;
			goto fail;
		}
		job->base.bo[job->base.bo_count] = bo;
	}

	ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
	if (ret)
		goto fail;

	mutex_lock(&v3d->sched_lock);
	v3d_push_job(&job->base);
	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 &job->base, &acquire_ctx,
						 args->out_sync,
						 &se,
						 job->base.done_fence);

	v3d_job_put(&job->base);

	return 0;

fail:
	v3d_job_cleanup((void *)job);
	v3d_put_multisync_post_deps(&se);

	return ret;
}

/**
 * v3d_submit_csd_ioctl() - Submits a CSD (compute shader) job to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace provides the register setup for the CSD, which we don't
 * need to validate since the CSD is behind the MMU.
 */
int
v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_csd *args = data;
	struct v3d_submit_ext se = {0};
	struct v3d_csd_job *job = NULL;
	struct v3d_job *clean_job = NULL;
	struct ww_acquire_ctx acquire_ctx;
	int ret;

	trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);

	if (args->pad)
		return -EINVAL;

	if (!v3d_has_csd(v3d)) {
		drm_warn(dev, "Attempting CSD submit on non-CSD hardware\n");
		return -EINVAL;
	}

	if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
		drm_dbg(dev, "invalid flags: %d\n", args->flags);
		return -EINVAL;
	}

	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
		ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
		if (ret) {
			drm_dbg(dev, "Failed to get extensions.\n");
			return ret;
		}
	}

	ret = v3d_setup_csd_jobs_and_bos(file_priv, v3d, args,
					 &job, &clean_job, &se,
					 &acquire_ctx);
	if (ret)
		goto fail;

	if (args->perfmon_id) {
		if (v3d->global_perfmon) {
			ret = -EAGAIN;
			goto fail_perfmon;
		}

		job->base.perfmon = v3d_perfmon_find(v3d_priv,
						     args->perfmon_id);
		if (!job->base.perfmon) {
			ret = -ENOENT;
			goto fail_perfmon;
		}
	}

	mutex_lock(&v3d->sched_lock);
	v3d_push_job(&job->base);

	ret = drm_sched_job_add_dependency(&clean_job->base,
					   dma_fence_get(job->base.done_fence));
	if (ret)
		goto fail_unreserve;

	v3d_push_job(clean_job);
	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 clean_job,
						 &acquire_ctx,
						 args->out_sync,
						 &se,
						 clean_job->done_fence);

	v3d_job_put(&job->base);
	v3d_job_put(clean_job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
fail_perfmon:
	drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
				    &acquire_ctx);
fail:
	v3d_job_cleanup((void *)job);
	v3d_job_cleanup(clean_job);
	v3d_put_multisync_post_deps(&se);

	return ret;
}
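
/* Number of BOs userspace is expected to pass for each CPU job type;
 * enforced against args->bo_handle_count in v3d_submit_cpu_ioctl().
 */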
static const unsigned int cpu_job_bo_handle_count[] = {
	[V3D_CPU_JOB_TYPE_INDIRECT_CSD] = 1,
	[V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = 1,
	[V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = 1,
	[V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = 2,
	[V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = 0,
	[V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = 1,
};

/**
 * v3d_submit_cpu_ioctl() - Submits a CPU job to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace specifies the CPU job type and data required to perform its
 * operations through the drm_v3d_extension struct.
 */
int
v3d_submit_cpu_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct drm_v3d_submit_cpu *args = data;
	struct v3d_submit_ext se = {0};
	struct v3d_submit_ext *out_se = NULL;
	struct v3d_cpu_job *cpu_job = NULL;
	struct v3d_csd_job *csd_job = NULL;
	struct v3d_job *clean_job = NULL;
	struct ww_acquire_ctx acquire_ctx;
	int ret;

	if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
		drm_dbg(dev, "Invalid flags: %d\n", args->flags);
		return -EINVAL;
	}

	ret = v3d_job_allocate(v3d, (void *)&cpu_job, sizeof(*cpu_job));
	if (ret)
		return ret;

	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
		ret = v3d_get_extensions(file_priv, args->extensions, &se, cpu_job);
		if (ret) {
			drm_dbg(dev, "Failed to get extensions.\n");
			goto fail;
		}
	}

	/* Every CPU job must have a CPU job user extension */
	if (!cpu_job->job_type) {
		drm_dbg(dev, "CPU job must have a CPU job user extension.\n");
		ret = -EINVAL;
		goto fail;
	}

	if (args->bo_handle_count != cpu_job_bo_handle_count[cpu_job->job_type]) {
		drm_dbg(dev, "This CPU job was not submitted with the proper number of BOs.\n");
		ret = -EINVAL;
		goto fail;
	}

	trace_v3d_submit_cpu_ioctl(&v3d->drm, cpu_job->job_type);

	ret = v3d_job_init(v3d, file_priv, &cpu_job->base,
			   v3d_job_free, 0, &se, V3D_CPU);
	if (ret) {
		v3d_job_deallocate((void *)&cpu_job);
		goto fail;
	}

	clean_job = cpu_job->indirect_csd.clean_job;
	csd_job = cpu_job->indirect_csd.job;

	if (args->bo_handle_count) {
		ret = v3d_lookup_bos(dev, file_priv, &cpu_job->base,
				     args->bo_handles, args->bo_handle_count);
		if (ret)
			goto fail;

		ret = v3d_lock_bo_reservations(&cpu_job->base, &acquire_ctx);
		if (ret)
			goto fail;
	}

	mutex_lock(&v3d->sched_lock);
	v3d_push_job(&cpu_job->base);

	switch (cpu_job->job_type) {
	case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
		ret = drm_sched_job_add_dependency(&csd_job->base.base,
						   dma_fence_get(cpu_job->base.done_fence));
		if (ret)
			goto fail_unreserve;

		v3d_push_job(&csd_job->base);

		ret = drm_sched_job_add_dependency(&clean_job->base,
						   dma_fence_get(csd_job->base.done_fence));
		if (ret)
			goto fail_unreserve;

		v3d_push_job(clean_job);

		break;
	default:
		break;
	}
	mutex_unlock(&v3d->sched_lock);
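
	/* An indirect CSD job signals userspace from the clean job's done
	 * fence (attached to the CSD submission's BOs below), so the CPU
	 * job's own reservation update passes out_se == NULL.
	 */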
	out_se = (cpu_job->job_type == V3D_CPU_JOB_TYPE_INDIRECT_CSD) ?
		 NULL : &se;

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 &cpu_job->base,
						 &acquire_ctx, 0,
						 out_se, cpu_job->base.done_fence);

	switch (cpu_job->job_type) {
	case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
		v3d_attach_fences_and_unlock_reservation(file_priv,
							 clean_job,
							 &cpu_job->indirect_csd.acquire_ctx,
							 0, &se, clean_job->done_fence);
		break;
	default:
		break;
	}

	v3d_job_put(&cpu_job->base);
	v3d_job_put(&csd_job->base);
	v3d_job_put(clean_job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);

	drm_gem_unlock_reservations(cpu_job->base.bo, cpu_job->base.bo_count,
				    &acquire_ctx);

	drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
				    &cpu_job->indirect_csd.acquire_ctx);

fail:
	v3d_job_cleanup((void *)cpu_job);
	v3d_job_cleanup((void *)csd_job);
	v3d_job_cleanup(clean_job);
	v3d_put_multisync_post_deps(&se);
	kvfree(cpu_job->timestamp_query.queries);
	kvfree(cpu_job->performance_query.queries);

	return ret;
}