1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */ 3 /* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */ 4 /* Copyright 2019 Collabora ltd. */ 5 6 #ifdef CONFIG_ARM_ARCH_TIMER 7 #include <asm/arch_timer.h> 8 #endif 9 10 #include <linux/module.h> 11 #include <linux/of.h> 12 #include <linux/pagemap.h> 13 #include <linux/platform_device.h> 14 #include <linux/pm_runtime.h> 15 #include <drm/panfrost_drm.h> 16 #include <drm/drm_debugfs.h> 17 #include <drm/drm_drv.h> 18 #include <drm/drm_ioctl.h> 19 #include <drm/drm_syncobj.h> 20 #include <drm/drm_utils.h> 21 22 #include "panfrost_device.h" 23 #include "panfrost_gem.h" 24 #include "panfrost_mmu.h" 25 #include "panfrost_job.h" 26 #include "panfrost_gpu.h" 27 #include "panfrost_perfcnt.h" 28 29 #define JOB_REQUIREMENTS (PANFROST_JD_REQ_FS | PANFROST_JD_REQ_CYCLE_COUNT) 30 31 static bool unstable_ioctls; 32 module_param_unsafe(unstable_ioctls, bool, 0600); 33 34 static int panfrost_ioctl_query_timestamp(struct panfrost_device *pfdev, 35 u64 *arg) 36 { 37 int ret; 38 39 ret = pm_runtime_resume_and_get(pfdev->dev); 40 if (ret) 41 return ret; 42 43 panfrost_cycle_counter_get(pfdev); 44 *arg = panfrost_timestamp_read(pfdev); 45 panfrost_cycle_counter_put(pfdev); 46 47 pm_runtime_put(pfdev->dev); 48 return 0; 49 } 50 51 static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file) 52 { 53 struct drm_panfrost_get_param *param = data; 54 struct panfrost_device *pfdev = ddev->dev_private; 55 int ret; 56 57 if (param->pad != 0) 58 return -EINVAL; 59 60 #define PANFROST_FEATURE(name, member) \ 61 case DRM_PANFROST_PARAM_ ## name: \ 62 param->value = pfdev->features.member; \ 63 break 64 #define PANFROST_FEATURE_ARRAY(name, member, max) \ 65 case DRM_PANFROST_PARAM_ ## name ## 0 ... 
\ 66 DRM_PANFROST_PARAM_ ## name ## max: \ 67 param->value = pfdev->features.member[param->param - \ 68 DRM_PANFROST_PARAM_ ## name ## 0]; \ 69 break 70 71 switch (param->param) { 72 PANFROST_FEATURE(GPU_PROD_ID, id); 73 PANFROST_FEATURE(GPU_REVISION, revision); 74 PANFROST_FEATURE(SHADER_PRESENT, shader_present); 75 PANFROST_FEATURE(TILER_PRESENT, tiler_present); 76 PANFROST_FEATURE(L2_PRESENT, l2_present); 77 PANFROST_FEATURE(STACK_PRESENT, stack_present); 78 PANFROST_FEATURE(AS_PRESENT, as_present); 79 PANFROST_FEATURE(JS_PRESENT, js_present); 80 PANFROST_FEATURE(L2_FEATURES, l2_features); 81 PANFROST_FEATURE(CORE_FEATURES, core_features); 82 PANFROST_FEATURE(TILER_FEATURES, tiler_features); 83 PANFROST_FEATURE(MEM_FEATURES, mem_features); 84 PANFROST_FEATURE(MMU_FEATURES, mmu_features); 85 PANFROST_FEATURE(THREAD_FEATURES, thread_features); 86 PANFROST_FEATURE(MAX_THREADS, max_threads); 87 PANFROST_FEATURE(THREAD_MAX_WORKGROUP_SZ, 88 thread_max_workgroup_sz); 89 PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ, 90 thread_max_barrier_sz); 91 PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features); 92 PANFROST_FEATURE(AFBC_FEATURES, afbc_features); 93 PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3); 94 PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15); 95 PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups); 96 PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc); 97 98 case DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP: 99 ret = panfrost_ioctl_query_timestamp(pfdev, ¶m->value); 100 if (ret) 101 return ret; 102 break; 103 104 case DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP_FREQUENCY: 105 #ifdef CONFIG_ARM_ARCH_TIMER 106 param->value = arch_timer_get_cntfrq(); 107 #else 108 param->value = 0; 109 #endif 110 break; 111 112 case DRM_PANFROST_PARAM_ALLOWED_JM_CTX_PRIORITIES: 113 param->value = BIT(PANFROST_JM_CTX_PRIORITY_LOW) | 114 BIT(PANFROST_JM_CTX_PRIORITY_MEDIUM); 115 116 if (panfrost_high_prio_allowed(file)) 117 param->value |= BIT(PANFROST_JM_CTX_PRIORITY_HIGH); 118 break; 
119 120 default: 121 return -EINVAL; 122 } 123 124 return 0; 125 } 126 127 static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data, 128 struct drm_file *file) 129 { 130 struct panfrost_file_priv *priv = file->driver_priv; 131 struct panfrost_gem_object *bo; 132 struct drm_panfrost_create_bo *args = data; 133 struct panfrost_gem_mapping *mapping; 134 int ret; 135 136 if (!args->size || args->pad || 137 (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP))) 138 return -EINVAL; 139 140 /* Heaps should never be executable */ 141 if ((args->flags & PANFROST_BO_HEAP) && 142 !(args->flags & PANFROST_BO_NOEXEC)) 143 return -EINVAL; 144 145 bo = panfrost_gem_create(dev, args->size, args->flags); 146 if (IS_ERR(bo)) 147 return PTR_ERR(bo); 148 149 ret = drm_gem_handle_create(file, &bo->base.base, &args->handle); 150 if (ret) 151 goto out; 152 153 mapping = panfrost_gem_mapping_get(bo, priv); 154 if (mapping) { 155 args->offset = mapping->mmnode.start << PAGE_SHIFT; 156 panfrost_gem_mapping_put(mapping); 157 } else { 158 /* This can only happen if the handle from 159 * drm_gem_handle_create() has already been guessed and freed 160 * by user space 161 */ 162 ret = -EINVAL; 163 } 164 165 out: 166 drm_gem_object_put(&bo->base.base); 167 return ret; 168 } 169 170 /** 171 * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects 172 * referenced by the job. 173 * @dev: DRM device 174 * @file_priv: DRM file for this fd 175 * @args: IOCTL args 176 * @job: job being set up 177 * 178 * Resolve handles from userspace to BOs and attach them to job. 179 * 180 * Note that this function doesn't need to unreference the BOs on 181 * failure, because that will happen at panfrost_job_cleanup() time. 
182 */ 183 static int 184 panfrost_lookup_bos(struct drm_device *dev, 185 struct drm_file *file_priv, 186 struct drm_panfrost_submit *args, 187 struct panfrost_job *job) 188 { 189 struct panfrost_file_priv *priv = file_priv->driver_priv; 190 struct panfrost_gem_object *bo; 191 unsigned int i; 192 int ret; 193 194 job->bo_count = args->bo_handle_count; 195 196 if (!job->bo_count) 197 return 0; 198 199 ret = drm_gem_objects_lookup(file_priv, 200 (void __user *)(uintptr_t)args->bo_handles, 201 job->bo_count, &job->bos); 202 if (ret) 203 return ret; 204 205 job->mappings = kvmalloc_array(job->bo_count, 206 sizeof(struct panfrost_gem_mapping *), 207 GFP_KERNEL | __GFP_ZERO); 208 if (!job->mappings) 209 return -ENOMEM; 210 211 for (i = 0; i < job->bo_count; i++) { 212 struct panfrost_gem_mapping *mapping; 213 214 bo = to_panfrost_bo(job->bos[i]); 215 mapping = panfrost_gem_mapping_get(bo, priv); 216 if (!mapping) { 217 ret = -EINVAL; 218 break; 219 } 220 221 atomic_inc(&bo->gpu_usecount); 222 job->mappings[i] = mapping; 223 } 224 225 return ret; 226 } 227 228 /** 229 * panfrost_copy_in_sync() - Sets up job->deps with the sync objects 230 * referenced by the job. 231 * @dev: DRM device 232 * @file_priv: DRM file for this fd 233 * @args: IOCTL args 234 * @job: job being set up 235 * 236 * Resolve syncobjs from userspace to fences and attach them to job. 237 * 238 * Note that this function doesn't need to unreference the fences on 239 * failure, because that will happen at panfrost_job_cleanup() time. 
240 */ 241 static int 242 panfrost_copy_in_sync(struct drm_device *dev, 243 struct drm_file *file_priv, 244 struct drm_panfrost_submit *args, 245 struct panfrost_job *job) 246 { 247 u32 *handles; 248 int ret = 0; 249 int i, in_fence_count; 250 251 in_fence_count = args->in_sync_count; 252 253 if (!in_fence_count) 254 return 0; 255 256 handles = kvmalloc_array(in_fence_count, sizeof(u32), GFP_KERNEL); 257 if (!handles) { 258 ret = -ENOMEM; 259 DRM_DEBUG("Failed to allocate incoming syncobj handles\n"); 260 goto fail; 261 } 262 263 if (copy_from_user(handles, 264 (void __user *)(uintptr_t)args->in_syncs, 265 in_fence_count * sizeof(u32))) { 266 ret = -EFAULT; 267 DRM_DEBUG("Failed to copy in syncobj handles\n"); 268 goto fail; 269 } 270 271 for (i = 0; i < in_fence_count; i++) { 272 ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, 273 handles[i], 0); 274 if (ret) 275 goto fail; 276 } 277 278 fail: 279 kvfree(handles); 280 return ret; 281 } 282 283 static int panfrost_ioctl_submit(struct drm_device *dev, void *data, 284 struct drm_file *file) 285 { 286 struct panfrost_device *pfdev = dev->dev_private; 287 struct panfrost_file_priv *file_priv = file->driver_priv; 288 struct drm_panfrost_submit *args = data; 289 struct drm_syncobj *sync_out = NULL; 290 struct panfrost_jm_ctx *jm_ctx; 291 struct panfrost_job *job; 292 int ret = 0, slot; 293 294 if (args->pad) 295 return -EINVAL; 296 297 if (!args->jc) 298 return -EINVAL; 299 300 if (args->requirements & ~JOB_REQUIREMENTS) 301 return -EINVAL; 302 303 if (args->out_sync > 0) { 304 sync_out = drm_syncobj_find(file, args->out_sync); 305 if (!sync_out) 306 return -ENODEV; 307 } 308 309 jm_ctx = panfrost_jm_ctx_from_handle(file, args->jm_ctx_handle); 310 if (!jm_ctx) { 311 ret = -EINVAL; 312 goto out_put_syncout; 313 } 314 315 job = kzalloc(sizeof(*job), GFP_KERNEL); 316 if (!job) { 317 ret = -ENOMEM; 318 goto out_put_jm_ctx; 319 } 320 321 kref_init(&job->refcount); 322 323 job->pfdev = pfdev; 324 job->jc = 
args->jc; 325 job->requirements = args->requirements; 326 job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev); 327 job->mmu = file_priv->mmu; 328 job->ctx = panfrost_jm_ctx_get(jm_ctx); 329 job->engine_usage = &file_priv->engine_usage; 330 331 slot = panfrost_job_get_slot(job); 332 333 ret = drm_sched_job_init(&job->base, 334 &jm_ctx->slot_entity[slot], 335 1, NULL, file->client_id); 336 if (ret) 337 goto out_put_job; 338 339 ret = panfrost_copy_in_sync(dev, file, args, job); 340 if (ret) 341 goto out_cleanup_job; 342 343 ret = panfrost_lookup_bos(dev, file, args, job); 344 if (ret) 345 goto out_cleanup_job; 346 347 ret = panfrost_job_push(job); 348 if (ret) 349 goto out_cleanup_job; 350 351 /* Update the return sync object for the job */ 352 if (sync_out) 353 drm_syncobj_replace_fence(sync_out, job->render_done_fence); 354 355 out_cleanup_job: 356 if (ret) 357 drm_sched_job_cleanup(&job->base); 358 out_put_job: 359 panfrost_job_put(job); 360 out_put_jm_ctx: 361 panfrost_jm_ctx_put(jm_ctx); 362 out_put_syncout: 363 if (sync_out) 364 drm_syncobj_put(sync_out); 365 366 return ret; 367 } 368 369 static int 370 panfrost_ioctl_wait_bo(struct drm_device *dev, void *data, 371 struct drm_file *file_priv) 372 { 373 long ret; 374 struct drm_panfrost_wait_bo *args = data; 375 struct drm_gem_object *gem_obj; 376 unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns); 377 378 if (args->pad) 379 return -EINVAL; 380 381 gem_obj = drm_gem_object_lookup(file_priv, args->handle); 382 if (!gem_obj) 383 return -ENOENT; 384 385 ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_READ, 386 true, timeout); 387 if (!ret) 388 ret = timeout ? 
-ETIMEDOUT : -EBUSY; 389 390 drm_gem_object_put(gem_obj); 391 392 return ret; 393 } 394 395 static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data, 396 struct drm_file *file_priv) 397 { 398 struct drm_panfrost_mmap_bo *args = data; 399 struct drm_gem_object *gem_obj; 400 int ret; 401 402 if (args->flags != 0) { 403 DRM_INFO("unknown mmap_bo flags: %d\n", args->flags); 404 return -EINVAL; 405 } 406 407 gem_obj = drm_gem_object_lookup(file_priv, args->handle); 408 if (!gem_obj) { 409 DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); 410 return -ENOENT; 411 } 412 413 /* Don't allow mmapping of heap objects as pages are not pinned. */ 414 if (to_panfrost_bo(gem_obj)->is_heap) { 415 ret = -EINVAL; 416 goto out; 417 } 418 419 ret = drm_gem_create_mmap_offset(gem_obj); 420 if (ret == 0) 421 args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); 422 423 out: 424 drm_gem_object_put(gem_obj); 425 return ret; 426 } 427 428 static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data, 429 struct drm_file *file_priv) 430 { 431 struct panfrost_file_priv *priv = file_priv->driver_priv; 432 struct drm_panfrost_get_bo_offset *args = data; 433 struct panfrost_gem_mapping *mapping; 434 struct drm_gem_object *gem_obj; 435 struct panfrost_gem_object *bo; 436 437 gem_obj = drm_gem_object_lookup(file_priv, args->handle); 438 if (!gem_obj) { 439 DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); 440 return -ENOENT; 441 } 442 bo = to_panfrost_bo(gem_obj); 443 444 mapping = panfrost_gem_mapping_get(bo, priv); 445 drm_gem_object_put(gem_obj); 446 447 if (!mapping) 448 return -EINVAL; 449 450 args->offset = mapping->mmnode.start << PAGE_SHIFT; 451 panfrost_gem_mapping_put(mapping); 452 return 0; 453 } 454 455 static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, 456 struct drm_file *file_priv) 457 { 458 struct panfrost_file_priv *priv = file_priv->driver_priv; 459 struct drm_panfrost_madvise *args = data; 460 struct 
panfrost_device *pfdev = dev->dev_private; 461 struct drm_gem_object *gem_obj; 462 struct panfrost_gem_object *bo; 463 int ret = 0; 464 465 gem_obj = drm_gem_object_lookup(file_priv, args->handle); 466 if (!gem_obj) { 467 DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); 468 return -ENOENT; 469 } 470 471 bo = to_panfrost_bo(gem_obj); 472 473 ret = dma_resv_lock_interruptible(bo->base.base.resv, NULL); 474 if (ret) 475 goto out_put_object; 476 477 mutex_lock(&pfdev->shrinker_lock); 478 mutex_lock(&bo->mappings.lock); 479 if (args->madv == PANFROST_MADV_DONTNEED) { 480 struct panfrost_gem_mapping *first; 481 482 first = list_first_entry(&bo->mappings.list, 483 struct panfrost_gem_mapping, 484 node); 485 486 /* 487 * If we want to mark the BO purgeable, there must be only one 488 * user: the caller FD. 489 * We could do something smarter and mark the BO purgeable only 490 * when all its users have marked it purgeable, but globally 491 * visible/shared BOs are likely to never be marked purgeable 492 * anyway, so let's not bother. 
493 */ 494 if (!list_is_singular(&bo->mappings.list) || 495 WARN_ON_ONCE(first->mmu != priv->mmu)) { 496 ret = -EINVAL; 497 goto out_unlock_mappings; 498 } 499 } 500 501 args->retained = drm_gem_shmem_madvise_locked(&bo->base, args->madv); 502 503 if (args->retained) { 504 if (args->madv == PANFROST_MADV_DONTNEED) 505 list_move_tail(&bo->base.madv_list, 506 &pfdev->shrinker_list); 507 else if (args->madv == PANFROST_MADV_WILLNEED) 508 list_del_init(&bo->base.madv_list); 509 } 510 511 out_unlock_mappings: 512 mutex_unlock(&bo->mappings.lock); 513 mutex_unlock(&pfdev->shrinker_lock); 514 dma_resv_unlock(bo->base.base.resv); 515 out_put_object: 516 drm_gem_object_put(gem_obj); 517 return ret; 518 } 519 520 static int panfrost_ioctl_set_label_bo(struct drm_device *ddev, void *data, 521 struct drm_file *file) 522 { 523 struct drm_panfrost_set_label_bo *args = data; 524 struct drm_gem_object *obj; 525 const char *label = NULL; 526 int ret = 0; 527 528 if (args->pad) 529 return -EINVAL; 530 531 obj = drm_gem_object_lookup(file, args->handle); 532 if (!obj) 533 return -ENOENT; 534 535 if (args->label) { 536 label = strndup_user(u64_to_user_ptr(args->label), 537 PANFROST_BO_LABEL_MAXLEN); 538 if (IS_ERR(label)) { 539 ret = PTR_ERR(label); 540 if (ret == -EINVAL) 541 ret = -E2BIG; 542 goto err_put_obj; 543 } 544 } 545 546 /* 547 * We treat passing a label of length 0 and passing a NULL label 548 * differently, because even though they might seem conceptually 549 * similar, future uses of the BO label might expect a different 550 * behaviour in each case. 
551 */ 552 panfrost_gem_set_label(obj, label); 553 554 err_put_obj: 555 drm_gem_object_put(obj); 556 557 return ret; 558 } 559 560 static int panfrost_ioctl_jm_ctx_create(struct drm_device *dev, void *data, 561 struct drm_file *file) 562 { 563 return panfrost_jm_ctx_create(file, data); 564 } 565 566 static int panfrost_ioctl_jm_ctx_destroy(struct drm_device *dev, void *data, 567 struct drm_file *file) 568 { 569 const struct drm_panfrost_jm_ctx_destroy *args = data; 570 571 if (args->pad) 572 return -EINVAL; 573 574 /* We can't destroy the default context created when the file is opened. */ 575 if (!args->handle) 576 return -EINVAL; 577 578 return panfrost_jm_ctx_destroy(file, args->handle); 579 } 580 581 int panfrost_unstable_ioctl_check(void) 582 { 583 if (!unstable_ioctls) 584 return -ENOSYS; 585 586 return 0; 587 } 588 589 static int 590 panfrost_open(struct drm_device *dev, struct drm_file *file) 591 { 592 int ret; 593 struct panfrost_device *pfdev = dev->dev_private; 594 struct panfrost_file_priv *panfrost_priv; 595 596 panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL); 597 if (!panfrost_priv) 598 return -ENOMEM; 599 600 panfrost_priv->pfdev = pfdev; 601 file->driver_priv = panfrost_priv; 602 603 panfrost_priv->mmu = panfrost_mmu_ctx_create(pfdev); 604 if (IS_ERR(panfrost_priv->mmu)) { 605 ret = PTR_ERR(panfrost_priv->mmu); 606 goto err_free; 607 } 608 609 ret = panfrost_job_open(file); 610 if (ret) 611 goto err_job; 612 613 return 0; 614 615 err_job: 616 panfrost_mmu_ctx_put(panfrost_priv->mmu); 617 err_free: 618 kfree(panfrost_priv); 619 return ret; 620 } 621 622 static void 623 panfrost_postclose(struct drm_device *dev, struct drm_file *file) 624 { 625 struct panfrost_file_priv *panfrost_priv = file->driver_priv; 626 627 panfrost_perfcnt_close(file); 628 panfrost_job_close(file); 629 630 panfrost_mmu_ctx_put(panfrost_priv->mmu); 631 kfree(panfrost_priv); 632 } 633 634 static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = { 635 #define 
PANFROST_IOCTL(n, func, flags) \ 636 DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags) 637 638 PANFROST_IOCTL(SUBMIT, submit, DRM_RENDER_ALLOW), 639 PANFROST_IOCTL(WAIT_BO, wait_bo, DRM_RENDER_ALLOW), 640 PANFROST_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW), 641 PANFROST_IOCTL(MMAP_BO, mmap_bo, DRM_RENDER_ALLOW), 642 PANFROST_IOCTL(GET_PARAM, get_param, DRM_RENDER_ALLOW), 643 PANFROST_IOCTL(GET_BO_OFFSET, get_bo_offset, DRM_RENDER_ALLOW), 644 PANFROST_IOCTL(PERFCNT_ENABLE, perfcnt_enable, DRM_RENDER_ALLOW), 645 PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW), 646 PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW), 647 PANFROST_IOCTL(SET_LABEL_BO, set_label_bo, DRM_RENDER_ALLOW), 648 PANFROST_IOCTL(JM_CTX_CREATE, jm_ctx_create, DRM_RENDER_ALLOW), 649 PANFROST_IOCTL(JM_CTX_DESTROY, jm_ctx_destroy, DRM_RENDER_ALLOW), 650 }; 651 652 static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev, 653 struct panfrost_file_priv *panfrost_priv, 654 struct drm_printer *p) 655 { 656 int i; 657 658 /* 659 * IMPORTANT NOTE: drm-cycles and drm-engine measurements are not 660 * accurate, as they only provide a rough estimation of the number of 661 * GPU cycles and CPU time spent in a given context. This is due to two 662 * different factors: 663 * - Firstly, we must consider the time the CPU and then the kernel 664 * takes to process the GPU interrupt, which means additional time and 665 * GPU cycles will be added in excess to the real figure. 666 * - Secondly, the pipelining done by the Job Manager (2 job slots per 667 * engine) implies there is no way to know exactly how much time each 668 * job spent on the GPU. 
669 */ 670 671 static const char * const engine_names[] = { 672 "fragment", "vertex-tiler", "compute-only" 673 }; 674 675 BUILD_BUG_ON(ARRAY_SIZE(engine_names) != NUM_JOB_SLOTS); 676 677 for (i = 0; i < NUM_JOB_SLOTS - 1; i++) { 678 if (pfdev->profile_mode) { 679 drm_printf(p, "drm-engine-%s:\t%llu ns\n", 680 engine_names[i], panfrost_priv->engine_usage.elapsed_ns[i]); 681 drm_printf(p, "drm-cycles-%s:\t%llu\n", 682 engine_names[i], panfrost_priv->engine_usage.cycles[i]); 683 } 684 drm_printf(p, "drm-maxfreq-%s:\t%lu Hz\n", 685 engine_names[i], pfdev->pfdevfreq.fast_rate); 686 drm_printf(p, "drm-curfreq-%s:\t%lu Hz\n", 687 engine_names[i], pfdev->pfdevfreq.current_frequency); 688 } 689 } 690 691 static void panfrost_show_fdinfo(struct drm_printer *p, struct drm_file *file) 692 { 693 struct drm_device *dev = file->minor->dev; 694 struct panfrost_device *pfdev = dev->dev_private; 695 696 panfrost_gpu_show_fdinfo(pfdev, file->driver_priv, p); 697 698 drm_show_memory_stats(p, file); 699 } 700 701 static const struct file_operations panfrost_drm_driver_fops = { 702 .owner = THIS_MODULE, 703 DRM_GEM_FOPS, 704 .show_fdinfo = drm_show_fdinfo, 705 }; 706 707 #ifdef CONFIG_DEBUG_FS 708 static int panthor_gems_show(struct seq_file *m, void *data) 709 { 710 struct drm_info_node *node = m->private; 711 struct drm_device *dev = node->minor->dev; 712 struct panfrost_device *pfdev = dev->dev_private; 713 714 panfrost_gem_debugfs_print_bos(pfdev, m); 715 716 return 0; 717 } 718 719 static void show_panfrost_jm_ctx(struct panfrost_jm_ctx *jm_ctx, u32 handle, 720 struct seq_file *m) 721 { 722 struct drm_device *ddev = ((struct drm_info_node *)m->private)->minor->dev; 723 const char *prio = "UNKNOWN"; 724 725 static const char * const prios[] = { 726 [DRM_SCHED_PRIORITY_HIGH] = "HIGH", 727 [DRM_SCHED_PRIORITY_NORMAL] = "NORMAL", 728 [DRM_SCHED_PRIORITY_LOW] = "LOW", 729 }; 730 731 if (jm_ctx->slot_entity[0].priority != 732 jm_ctx->slot_entity[1].priority) 733 drm_warn(ddev, "Slot 
priorities should be the same in a single context"); 734 735 if (jm_ctx->slot_entity[0].priority < ARRAY_SIZE(prios)) 736 prio = prios[jm_ctx->slot_entity[0].priority]; 737 738 seq_printf(m, " JM context %u: priority %s\n", handle, prio); 739 } 740 741 static int show_file_jm_ctxs(struct panfrost_file_priv *pfile, 742 struct seq_file *m) 743 { 744 struct panfrost_jm_ctx *jm_ctx; 745 unsigned long i; 746 747 xa_lock(&pfile->jm_ctxs); 748 xa_for_each(&pfile->jm_ctxs, i, jm_ctx) { 749 jm_ctx = panfrost_jm_ctx_get(jm_ctx); 750 xa_unlock(&pfile->jm_ctxs); 751 show_panfrost_jm_ctx(jm_ctx, i, m); 752 panfrost_jm_ctx_put(jm_ctx); 753 xa_lock(&pfile->jm_ctxs); 754 } 755 xa_unlock(&pfile->jm_ctxs); 756 757 return 0; 758 } 759 760 static struct drm_info_list panthor_debugfs_list[] = { 761 {"gems", panthor_gems_show, 0, NULL}, 762 }; 763 764 static int panthor_gems_debugfs_init(struct drm_minor *minor) 765 { 766 drm_debugfs_create_files(panthor_debugfs_list, 767 ARRAY_SIZE(panthor_debugfs_list), 768 minor->debugfs_root, minor); 769 770 return 0; 771 } 772 773 static int show_each_file(struct seq_file *m, void *arg) 774 { 775 struct drm_info_node *node = (struct drm_info_node *)m->private; 776 struct drm_device *ddev = node->minor->dev; 777 int (*show)(struct panfrost_file_priv *, struct seq_file *) = 778 node->info_ent->data; 779 struct drm_file *file; 780 int ret; 781 782 ret = mutex_lock_interruptible(&ddev->filelist_mutex); 783 if (ret) 784 return ret; 785 786 list_for_each_entry(file, &ddev->filelist, lhead) { 787 struct task_struct *task; 788 struct panfrost_file_priv *pfile = file->driver_priv; 789 struct pid *pid; 790 791 /* 792 * Although we have a valid reference on file->pid, that does 793 * not guarantee that the task_struct who called get_pid() is 794 * still alive (e.g. get_pid(current) => fork() => exit()). 795 * Therefore, we need to protect this ->comm access using RCU. 
796 */ 797 rcu_read_lock(); 798 pid = rcu_dereference(file->pid); 799 task = pid_task(pid, PIDTYPE_TGID); 800 seq_printf(m, "client_id %8llu pid %8d command %s:\n", 801 file->client_id, pid_nr(pid), 802 task ? task->comm : "<unknown>"); 803 rcu_read_unlock(); 804 805 ret = show(pfile, m); 806 if (ret < 0) 807 break; 808 809 seq_puts(m, "\n"); 810 } 811 812 mutex_unlock(&ddev->filelist_mutex); 813 return ret; 814 } 815 816 static struct drm_info_list panfrost_sched_debugfs_list[] = { 817 { "sched_ctxs", show_each_file, 0, show_file_jm_ctxs }, 818 }; 819 820 static void panfrost_sched_debugfs_init(struct drm_minor *minor) 821 { 822 drm_debugfs_create_files(panfrost_sched_debugfs_list, 823 ARRAY_SIZE(panfrost_sched_debugfs_list), 824 minor->debugfs_root, minor); 825 } 826 827 static void panfrost_debugfs_init(struct drm_minor *minor) 828 { 829 panthor_gems_debugfs_init(minor); 830 panfrost_sched_debugfs_init(minor); 831 } 832 #endif 833 834 /* 835 * Panfrost driver version: 836 * - 1.0 - initial interface 837 * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO 838 * - 1.2 - adds AFBC_FEATURES query 839 * - 1.3 - adds JD_REQ_CYCLE_COUNT job requirement for SUBMIT 840 * - adds SYSTEM_TIMESTAMP and SYSTEM_TIMESTAMP_FREQUENCY queries 841 * - 1.4 - adds SET_LABEL_BO 842 * - 1.5 - adds JM_CTX_{CREATE,DESTROY} ioctls and extend SUBMIT to allow 843 * context creation with configurable priorities/affinity 844 */ 845 static const struct drm_driver panfrost_drm_driver = { 846 .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ, 847 .open = panfrost_open, 848 .postclose = panfrost_postclose, 849 .show_fdinfo = panfrost_show_fdinfo, 850 .ioctls = panfrost_drm_driver_ioctls, 851 .num_ioctls = ARRAY_SIZE(panfrost_drm_driver_ioctls), 852 .fops = &panfrost_drm_driver_fops, 853 .name = "panfrost", 854 .desc = "panfrost DRM", 855 .major = 1, 856 .minor = 5, 857 858 .gem_create_object = panfrost_gem_create_object, 859 .gem_prime_import_sg_table = 
panfrost_gem_prime_import_sg_table, 860 #ifdef CONFIG_DEBUG_FS 861 .debugfs_init = panfrost_debugfs_init, 862 #endif 863 }; 864 865 static int panfrost_probe(struct platform_device *pdev) 866 { 867 struct panfrost_device *pfdev; 868 struct drm_device *ddev; 869 int err; 870 871 pfdev = devm_kzalloc(&pdev->dev, sizeof(*pfdev), GFP_KERNEL); 872 if (!pfdev) 873 return -ENOMEM; 874 875 pfdev->pdev = pdev; 876 pfdev->dev = &pdev->dev; 877 878 platform_set_drvdata(pdev, pfdev); 879 880 pfdev->comp = of_device_get_match_data(&pdev->dev); 881 if (!pfdev->comp) 882 return -ENODEV; 883 884 pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT; 885 886 /* Allocate and initialize the DRM device. */ 887 ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev); 888 if (IS_ERR(ddev)) 889 return PTR_ERR(ddev); 890 891 ddev->dev_private = pfdev; 892 pfdev->ddev = ddev; 893 894 mutex_init(&pfdev->shrinker_lock); 895 INIT_LIST_HEAD(&pfdev->shrinker_list); 896 897 err = panfrost_device_init(pfdev); 898 if (err) { 899 if (err != -EPROBE_DEFER) 900 dev_err(&pdev->dev, "Fatal error during GPU init\n"); 901 goto err_out0; 902 } 903 904 pm_runtime_set_active(pfdev->dev); 905 pm_runtime_mark_last_busy(pfdev->dev); 906 pm_runtime_enable(pfdev->dev); 907 pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */ 908 pm_runtime_use_autosuspend(pfdev->dev); 909 910 /* 911 * Register the DRM device with the core and the connectors with 912 * sysfs 913 */ 914 err = drm_dev_register(ddev, 0); 915 if (err < 0) 916 goto err_out1; 917 918 err = panfrost_gem_shrinker_init(ddev); 919 if (err) 920 goto err_out2; 921 922 return 0; 923 924 err_out2: 925 drm_dev_unregister(ddev); 926 err_out1: 927 pm_runtime_disable(pfdev->dev); 928 panfrost_device_fini(pfdev); 929 pm_runtime_set_suspended(pfdev->dev); 930 err_out0: 931 drm_dev_put(ddev); 932 return err; 933 } 934 935 static void panfrost_remove(struct platform_device *pdev) 936 { 937 struct panfrost_device *pfdev = 
platform_get_drvdata(pdev); 938 struct drm_device *ddev = pfdev->ddev; 939 940 drm_dev_unregister(ddev); 941 panfrost_gem_shrinker_cleanup(ddev); 942 943 pm_runtime_get_sync(pfdev->dev); 944 pm_runtime_disable(pfdev->dev); 945 panfrost_device_fini(pfdev); 946 pm_runtime_set_suspended(pfdev->dev); 947 948 drm_dev_put(ddev); 949 } 950 951 static ssize_t profiling_show(struct device *dev, 952 struct device_attribute *attr, char *buf) 953 { 954 struct panfrost_device *pfdev = dev_get_drvdata(dev); 955 956 return sysfs_emit(buf, "%d\n", pfdev->profile_mode); 957 } 958 959 static ssize_t profiling_store(struct device *dev, 960 struct device_attribute *attr, 961 const char *buf, size_t len) 962 { 963 struct panfrost_device *pfdev = dev_get_drvdata(dev); 964 bool value; 965 int err; 966 967 err = kstrtobool(buf, &value); 968 if (err) 969 return err; 970 971 pfdev->profile_mode = value; 972 973 return len; 974 } 975 976 static DEVICE_ATTR_RW(profiling); 977 978 static struct attribute *panfrost_attrs[] = { 979 &dev_attr_profiling.attr, 980 NULL, 981 }; 982 983 ATTRIBUTE_GROUPS(panfrost); 984 985 /* 986 * The OPP core wants the supply names to be NULL terminated, but we need the 987 * correct num_supplies value for regulator core. Hence, we NULL terminate here 988 * and then initialize num_supplies with ARRAY_SIZE - 1. 
989 */ 990 static const char * const default_supplies[] = { "mali", NULL }; 991 static const struct panfrost_compatible default_data = { 992 .num_supplies = ARRAY_SIZE(default_supplies) - 1, 993 .supply_names = default_supplies, 994 .num_pm_domains = 1, /* optional */ 995 .pm_domain_names = NULL, 996 }; 997 998 static const struct panfrost_compatible allwinner_h616_data = { 999 .num_supplies = ARRAY_SIZE(default_supplies) - 1, 1000 .supply_names = default_supplies, 1001 .num_pm_domains = 1, 1002 .pm_features = BIT(GPU_PM_RT), 1003 }; 1004 1005 static const struct panfrost_compatible amlogic_data = { 1006 .num_supplies = ARRAY_SIZE(default_supplies) - 1, 1007 .supply_names = default_supplies, 1008 .vendor_quirk = panfrost_gpu_amlogic_quirk, 1009 }; 1010 1011 static const char * const mediatek_pm_domains[] = { "core0", "core1", "core2", 1012 "core3", "core4" }; 1013 /* 1014 * The old data with two power supplies for MT8183 is here only to 1015 * keep retro-compatibility with older devicetrees, as DVFS will 1016 * not work with this one. 1017 * 1018 * On new devicetrees please use the _b variant with a single and 1019 * coupled regulators instead. 
1020 */ 1021 static const char * const legacy_supplies[] = { "mali", "sram", NULL }; 1022 static const struct panfrost_compatible mediatek_mt8183_data = { 1023 .num_supplies = ARRAY_SIZE(legacy_supplies) - 1, 1024 .supply_names = legacy_supplies, 1025 .num_pm_domains = 3, 1026 .pm_domain_names = mediatek_pm_domains, 1027 }; 1028 1029 static const struct panfrost_compatible mediatek_mt8183_b_data = { 1030 .num_supplies = ARRAY_SIZE(default_supplies) - 1, 1031 .supply_names = default_supplies, 1032 .num_pm_domains = 3, 1033 .pm_domain_names = mediatek_pm_domains, 1034 .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF), 1035 }; 1036 1037 static const struct panfrost_compatible mediatek_mt8186_data = { 1038 .num_supplies = ARRAY_SIZE(default_supplies) - 1, 1039 .supply_names = default_supplies, 1040 .num_pm_domains = 2, 1041 .pm_domain_names = mediatek_pm_domains, 1042 .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF), 1043 }; 1044 1045 static const struct panfrost_compatible mediatek_mt8188_data = { 1046 .num_supplies = ARRAY_SIZE(default_supplies) - 1, 1047 .supply_names = default_supplies, 1048 .num_pm_domains = 3, 1049 .pm_domain_names = mediatek_pm_domains, 1050 .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF), 1051 .gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE), 1052 }; 1053 1054 static const struct panfrost_compatible mediatek_mt8192_data = { 1055 .num_supplies = ARRAY_SIZE(default_supplies) - 1, 1056 .supply_names = default_supplies, 1057 .num_pm_domains = 5, 1058 .pm_domain_names = mediatek_pm_domains, 1059 .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF), 1060 .gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE), 1061 }; 1062 1063 static const struct panfrost_compatible mediatek_mt8370_data = { 1064 .num_supplies = ARRAY_SIZE(default_supplies) - 1, 1065 .supply_names = default_supplies, 1066 .num_pm_domains = 2, 1067 .pm_domain_names = mediatek_pm_domains, 1068 .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF), 1069 
.gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE), 1070 }; 1071 1072 static const struct of_device_id dt_match[] = { 1073 /* Set first to probe before the generic compatibles */ 1074 { .compatible = "amlogic,meson-gxm-mali", 1075 .data = &amlogic_data, }, 1076 { .compatible = "amlogic,meson-g12a-mali", 1077 .data = &amlogic_data, }, 1078 { .compatible = "arm,mali-t604", .data = &default_data, }, 1079 { .compatible = "arm,mali-t624", .data = &default_data, }, 1080 { .compatible = "arm,mali-t628", .data = &default_data, }, 1081 { .compatible = "arm,mali-t720", .data = &default_data, }, 1082 { .compatible = "arm,mali-t760", .data = &default_data, }, 1083 { .compatible = "arm,mali-t820", .data = &default_data, }, 1084 { .compatible = "arm,mali-t830", .data = &default_data, }, 1085 { .compatible = "arm,mali-t860", .data = &default_data, }, 1086 { .compatible = "arm,mali-t880", .data = &default_data, }, 1087 { .compatible = "arm,mali-bifrost", .data = &default_data, }, 1088 { .compatible = "arm,mali-valhall-jm", .data = &default_data, }, 1089 { .compatible = "mediatek,mt8183-mali", .data = &mediatek_mt8183_data }, 1090 { .compatible = "mediatek,mt8183b-mali", .data = &mediatek_mt8183_b_data }, 1091 { .compatible = "mediatek,mt8186-mali", .data = &mediatek_mt8186_data }, 1092 { .compatible = "mediatek,mt8188-mali", .data = &mediatek_mt8188_data }, 1093 { .compatible = "mediatek,mt8192-mali", .data = &mediatek_mt8192_data }, 1094 { .compatible = "mediatek,mt8370-mali", .data = &mediatek_mt8370_data }, 1095 { .compatible = "allwinner,sun50i-h616-mali", .data = &allwinner_h616_data }, 1096 {} 1097 }; 1098 MODULE_DEVICE_TABLE(of, dt_match); 1099 1100 static struct platform_driver panfrost_driver = { 1101 .probe = panfrost_probe, 1102 .remove = panfrost_remove, 1103 .driver = { 1104 .name = "panfrost", 1105 .pm = pm_ptr(&panfrost_pm_ops), 1106 .of_match_table = dt_match, 1107 .dev_groups = panfrost_groups, 1108 }, 1109 }; 1110 module_platform_driver(panfrost_driver); 1111 

/* Module metadata. The softdep asks for the simple_ondemand devfreq
 * governor module to be loaded before this driver (used for GPU DVFS).
 */
MODULE_AUTHOR("Panfrost Project Developers");
MODULE_DESCRIPTION("Panfrost DRM Driver");
MODULE_LICENSE("GPL v2");
MODULE_SOFTDEP("pre: governor_simpleondemand");