// SPDX-License-Identifier: GPL-2.0
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */

#ifdef CONFIG_ARM_ARCH_TIMER
#include <asm/arch_timer.h>
#endif

#include <linux/module.h>
#include <linux/of.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/panfrost_drm.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
#include "panfrost_job.h"
#include "panfrost_gpu.h"
#include "panfrost_perfcnt.h"

#define JOB_REQUIREMENTS (PANFROST_JD_REQ_FS | PANFROST_JD_REQ_CYCLE_COUNT)

static bool unstable_ioctls;
module_param_unsafe(unstable_ioctls, bool, 0600);

static int panfrost_ioctl_query_timestamp(struct panfrost_device *pfdev,
					  u64 *arg)
{
	int ret;

	ret = pm_runtime_resume_and_get(pfdev->base.dev);
	if (ret)
		return ret;

	panfrost_cycle_counter_get(pfdev);
	*arg = panfrost_timestamp_read(pfdev);
	panfrost_cycle_counter_put(pfdev);

	pm_runtime_put(pfdev->base.dev);
	return 0;
}

static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file)
{
	struct drm_panfrost_get_param *param = data;
	struct panfrost_device *pfdev = to_panfrost_device(ddev);
	int ret;

	if (param->pad != 0)
		return -EINVAL;

#define PANFROST_FEATURE(name, member)			\
	case DRM_PANFROST_PARAM_ ## name:		\
		param->value = pfdev->features.member;	\
		break
#define PANFROST_FEATURE_ARRAY(name, member, max)			\
	case DRM_PANFROST_PARAM_ ## name ## 0 ...			\
	     DRM_PANFROST_PARAM_ ## name ## max:			\
		param->value = pfdev->features.member[param->param -	\
			DRM_PANFROST_PARAM_ ## name ## 0];		\
		break
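
	/*
	 * For reference, PANFROST_FEATURE(GPU_PROD_ID, id) expands to:
	 *
	 *	case DRM_PANFROST_PARAM_GPU_PROD_ID:
	 *		param->value = pfdev->features.id;
	 *		break;
	 *
	 * and PANFROST_FEATURE_ARRAY() emits a case range, so one entry covers
	 * an array-backed parameter, e.g. TEXTURE_FEATURES0..TEXTURE_FEATURES3
	 * indexing pfdev->features.texture_features[].
	 */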

	switch (param->param) {
		PANFROST_FEATURE(GPU_PROD_ID, id);
		PANFROST_FEATURE(GPU_REVISION, revision);
		PANFROST_FEATURE(SHADER_PRESENT, shader_present);
		PANFROST_FEATURE(TILER_PRESENT, tiler_present);
		PANFROST_FEATURE(L2_PRESENT, l2_present);
		PANFROST_FEATURE(STACK_PRESENT, stack_present);
		PANFROST_FEATURE(AS_PRESENT, as_present);
		PANFROST_FEATURE(JS_PRESENT, js_present);
		PANFROST_FEATURE(L2_FEATURES, l2_features);
		PANFROST_FEATURE(CORE_FEATURES, core_features);
		PANFROST_FEATURE(TILER_FEATURES, tiler_features);
		PANFROST_FEATURE(MEM_FEATURES, mem_features);
		PANFROST_FEATURE(MMU_FEATURES, mmu_features);
		PANFROST_FEATURE(THREAD_FEATURES, thread_features);
		PANFROST_FEATURE(MAX_THREADS, max_threads);
		PANFROST_FEATURE(THREAD_MAX_WORKGROUP_SZ,
				 thread_max_workgroup_sz);
		PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
				 thread_max_barrier_sz);
		PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
		PANFROST_FEATURE(AFBC_FEATURES, afbc_features);
		PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
		PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
		PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
		PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc);

	case DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP:
		ret = panfrost_ioctl_query_timestamp(pfdev, &param->value);
		if (ret)
			return ret;
		break;

	case DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP_FREQUENCY:
#ifdef CONFIG_ARM_ARCH_TIMER
		param->value = arch_timer_get_cntfrq();
#else
		param->value = 0;
#endif
		break;

	case DRM_PANFROST_PARAM_ALLOWED_JM_CTX_PRIORITIES:
		param->value = BIT(PANFROST_JM_CTX_PRIORITY_LOW) |
			       BIT(PANFROST_JM_CTX_PRIORITY_MEDIUM);

		if (panfrost_high_prio_allowed(file))
			param->value |= BIT(PANFROST_JM_CTX_PRIORITY_HIGH);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct panfrost_file_priv *priv = file->driver_priv;
	struct panfrost_gem_object *bo;
	struct drm_panfrost_create_bo *args = data;
	struct panfrost_gem_mapping *mapping;
	int ret;

	if (!args->size || args->pad ||
	    (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
		return -EINVAL;

	/* Heaps should never be executable */
	if ((args->flags & PANFROST_BO_HEAP) &&
	    !(args->flags & PANFROST_BO_NOEXEC))
		return -EINVAL;

	bo = panfrost_gem_create(dev, args->size, args->flags);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
	if (ret)
		goto out;

	mapping = panfrost_gem_mapping_get(bo, priv);
	if (mapping) {
		args->offset = mapping->mmnode.start << PAGE_SHIFT;
		panfrost_gem_mapping_put(mapping);
	} else {
		/* This can only happen if the handle from
		 * drm_gem_handle_create() has already been guessed and freed
		 * by user space
		 */
		ret = -EINVAL;
	}

out:
	drm_gem_object_put(&bo->base.base);
	return ret;
}

/**
 * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve handles from userspace to BOs and attach them to job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_lookup_bos(struct drm_device *dev,
		    struct drm_file *file_priv,
		    struct drm_panfrost_submit *args,
		    struct panfrost_job *job)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_object *bo;
	unsigned int i;
	int ret;

	job->bo_count = args->bo_handle_count;

	if (!job->bo_count)
		return 0;

	ret = drm_gem_objects_lookup(file_priv,
				     (void __user *)(uintptr_t)args->bo_handles,
				     job->bo_count, &job->bos);
	if (ret)
		return ret;

	job->mappings = kvmalloc_array(job->bo_count,
				       sizeof(struct panfrost_gem_mapping *),
				       GFP_KERNEL | __GFP_ZERO);
	if (!job->mappings)
		return -ENOMEM;

	for (i = 0; i < job->bo_count; i++) {
		struct panfrost_gem_mapping *mapping;

		bo = to_panfrost_bo(job->bos[i]);
		mapping = panfrost_gem_mapping_get(bo, priv);
		if (!mapping) {
			ret = -EINVAL;
			break;
		}

		atomic_inc(&bo->gpu_usecount);
		job->mappings[i] = mapping;
	}

	return ret;
}

/**
 * panfrost_copy_in_sync() - Sets up job->deps with the sync objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve syncobjs from userspace to fences and attach them to job.
 *
 * Note that this function doesn't need to unreference the fences on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_copy_in_sync(struct drm_device *dev,
		      struct drm_file *file_priv,
		      struct drm_panfrost_submit *args,
		      struct panfrost_job *job)
{
	u32 *handles;
	int ret = 0;
	int i, in_fence_count;

	in_fence_count = args->in_sync_count;

	if (!in_fence_count)
		return 0;

	handles = kvmalloc_array(in_fence_count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_DEBUG("Failed to allocate incoming syncobj handles\n");
		goto fail;
	}

	if (copy_from_user(handles,
			   (void __user *)(uintptr_t)args->in_syncs,
			   in_fence_count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in syncobj handles\n");
		goto fail;
	}

	for (i = 0; i < in_fence_count; i++) {
		ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv,
							   handles[i], 0);
		if (ret)
			goto fail;
	}

fail:
	kvfree(handles);
	return ret;
}

static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct panfrost_device *pfdev = to_panfrost_device(dev);
	struct panfrost_file_priv *file_priv = file->driver_priv;
	struct drm_panfrost_submit *args = data;
	struct drm_syncobj *sync_out = NULL;
	struct panfrost_jm_ctx *jm_ctx;
	struct panfrost_job *job;
	int ret = 0, slot;

	if (args->pad)
		return -EINVAL;

	if (!args->jc)
		return -EINVAL;

	if (args->requirements & ~JOB_REQUIREMENTS)
		return -EINVAL;

	if (args->out_sync > 0) {
		sync_out = drm_syncobj_find(file, args->out_sync);
		if (!sync_out)
			return -ENODEV;
	}

	jm_ctx = panfrost_jm_ctx_from_handle(file, args->jm_ctx_handle);
	if (!jm_ctx) {
		ret = -EINVAL;
		goto out_put_syncout;
	}

	job = kzalloc(sizeof(*job), GFP_KERNEL);
	if (!job) {
		ret = -ENOMEM;
		goto out_put_jm_ctx;
	}

	kref_init(&job->refcount);

	job->pfdev = pfdev;
	job->jc = args->jc;
	job->requirements = args->requirements;
	job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
	job->mmu = file_priv->mmu;
	job->ctx = panfrost_jm_ctx_get(jm_ctx);
	job->engine_usage = &file_priv->engine_usage;

	slot = panfrost_job_get_slot(job);

	ret = drm_sched_job_init(&job->base,
				 &jm_ctx->slot_entity[slot],
				 1, NULL, file->client_id);
	if (ret)
		goto out_put_job;

	ret = panfrost_copy_in_sync(dev, file, args, job);
	if (ret)
		goto out_cleanup_job;

	ret = panfrost_lookup_bos(dev, file, args, job);
	if (ret)
		goto out_cleanup_job;

	ret = panfrost_job_push(job);
	if (ret)
		goto out_cleanup_job;

	/* Update the return sync object for the job */
	if (sync_out)
		drm_syncobj_replace_fence(sync_out, job->render_done_fence);

out_cleanup_job:
	if (ret)
		drm_sched_job_cleanup(&job->base);
out_put_job:
	panfrost_job_put(job);
out_put_jm_ctx:
	panfrost_jm_ctx_put(jm_ctx);
out_put_syncout:
	if (sync_out)
		drm_syncobj_put(sync_out);

	return ret;
}

static int
panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	long ret;
	struct drm_panfrost_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);

	if (args->pad)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj)
		return -ENOENT;

	/*
	 * dma_resv_wait_timeout() returns the remaining timeout in jiffies on
	 * success and 0 if the wait timed out, so map a zero result to
	 * -ETIMEDOUT (or -EBUSY when userspace passed a zero timeout and is
	 * merely polling the BO's reservation object).
	 */
	ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_READ,
				    true, timeout);
	if (!ret)
		ret = timeout ? -ETIMEDOUT : -EBUSY;

	drm_gem_object_put(gem_obj);

	return ret;
}

static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct drm_panfrost_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;
	int ret;

	if (args->flags != 0) {
		DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	/* Don't allow mmapping of heap objects as pages are not pinned. */
	if (to_panfrost_bo(gem_obj)->is_heap) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret == 0)
		args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

out:
	drm_gem_object_put(gem_obj);
	return ret;
}

static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
					struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct drm_panfrost_get_bo_offset *args = data;
	struct panfrost_gem_mapping *mapping;
	struct drm_gem_object *gem_obj;
	struct panfrost_gem_object *bo;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_panfrost_bo(gem_obj);

	mapping = panfrost_gem_mapping_get(bo, priv);
	drm_gem_object_put(gem_obj);

	if (!mapping)
		return -EINVAL;

	args->offset = mapping->mmnode.start << PAGE_SHIFT;
	panfrost_gem_mapping_put(mapping);
	return 0;
}

static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct drm_panfrost_madvise *args = data;
	struct panfrost_device *pfdev = to_panfrost_device(dev);
	struct drm_gem_object *gem_obj;
	struct panfrost_gem_object *bo;
	int ret = 0;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	bo = to_panfrost_bo(gem_obj);

	ret = dma_resv_lock_interruptible(bo->base.base.resv, NULL);
	if (ret)
		goto out_put_object;

	mutex_lock(&pfdev->shrinker_lock);
	mutex_lock(&bo->mappings.lock);
	if (args->madv == PANFROST_MADV_DONTNEED) {
		struct panfrost_gem_mapping *first;

		first = list_first_entry(&bo->mappings.list,
					 struct panfrost_gem_mapping,
					 node);

		/*
		 * If we want to mark the BO purgeable, there must be only one
		 * user: the caller FD.
		 * We could do something smarter and mark the BO purgeable only
		 * when all its users have marked it purgeable, but globally
		 * visible/shared BOs are likely to never be marked purgeable
		 * anyway, so let's not bother.
		 */
		if (!list_is_singular(&bo->mappings.list) ||
		    WARN_ON_ONCE(first->mmu != priv->mmu)) {
			ret = -EINVAL;
			goto out_unlock_mappings;
		}
	}

	args->retained = drm_gem_shmem_madvise_locked(&bo->base, args->madv);

	if (args->retained) {
		if (args->madv == PANFROST_MADV_DONTNEED)
			list_move_tail(&bo->base.madv_list,
				       &pfdev->shrinker_list);
		else if (args->madv == PANFROST_MADV_WILLNEED)
			list_del_init(&bo->base.madv_list);
	}

out_unlock_mappings:
	mutex_unlock(&bo->mappings.lock);
	mutex_unlock(&pfdev->shrinker_lock);
	dma_resv_unlock(bo->base.base.resv);
out_put_object:
	drm_gem_object_put(gem_obj);
	return ret;
}

static int panfrost_ioctl_set_label_bo(struct drm_device *ddev, void *data,
				       struct drm_file *file)
{
	struct drm_panfrost_set_label_bo *args = data;
	struct drm_gem_object *obj;
	const char *label = NULL;
	int ret = 0;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->label) {
		label = strndup_user(u64_to_user_ptr(args->label),
				     PANFROST_BO_LABEL_MAXLEN);
		if (IS_ERR(label)) {
			ret = PTR_ERR(label);
			if (ret == -EINVAL)
				ret = -E2BIG;
			goto err_put_obj;
		}
	}

	/*
	 * We treat passing a label of length 0 and passing a NULL label
	 * differently, because even though they might seem conceptually
	 * similar, future uses of the BO label might expect a different
	 * behaviour in each case.
	 */
	panfrost_gem_set_label(obj, label);

err_put_obj:
	drm_gem_object_put(obj);

	return ret;
}

static int panfrost_ioctl_jm_ctx_create(struct drm_device *dev, void *data,
					struct drm_file *file)
{
	return panfrost_jm_ctx_create(file, data);
}

static int panfrost_ioctl_jm_ctx_destroy(struct drm_device *dev, void *data,
					 struct drm_file *file)
{
	const struct drm_panfrost_jm_ctx_destroy *args = data;

	if (args->pad)
		return -EINVAL;

	/* We can't destroy the default context created when the file is opened. */
	if (!args->handle)
		return -EINVAL;

	return panfrost_jm_ctx_destroy(file, args->handle);
}

int panfrost_unstable_ioctl_check(void)
{
	if (!unstable_ioctls)
		return -ENOSYS;

	return 0;
}

static int
panfrost_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;
	struct panfrost_device *pfdev = to_panfrost_device(dev);
	struct panfrost_file_priv *panfrost_priv;

	panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL);
	if (!panfrost_priv)
		return -ENOMEM;

	panfrost_priv->pfdev = pfdev;
	file->driver_priv = panfrost_priv;

	panfrost_priv->mmu = panfrost_mmu_ctx_create(pfdev);
	if (IS_ERR(panfrost_priv->mmu)) {
		ret = PTR_ERR(panfrost_priv->mmu);
		goto err_free;
	}

	ret = panfrost_jm_open(file);
	if (ret)
		goto err_job;

	return 0;

err_job:
	panfrost_mmu_ctx_put(panfrost_priv->mmu);
err_free:
	kfree(panfrost_priv);
	return ret;
}

static void
panfrost_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct panfrost_file_priv *panfrost_priv = file->driver_priv;

	panfrost_perfcnt_close(file);
	panfrost_jm_close(file);

	panfrost_mmu_ctx_put(panfrost_priv->mmu);
	kfree(panfrost_priv);
}

static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
#define PANFROST_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)

	PANFROST_IOCTL(SUBMIT, submit, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(WAIT_BO, wait_bo, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(MMAP_BO, mmap_bo, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(GET_PARAM, get_param, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(GET_BO_OFFSET, get_bo_offset, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(PERFCNT_ENABLE, perfcnt_enable, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(SET_LABEL_BO, set_label_bo, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(JM_CTX_CREATE, jm_ctx_create, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(JM_CTX_DESTROY, jm_ctx_destroy, DRM_RENDER_ALLOW),
};

static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev,
				     struct panfrost_file_priv *panfrost_priv,
				     struct drm_printer *p)
{
	int i;

	/*
	 * IMPORTANT NOTE: drm-cycles and drm-engine measurements are not
	 * accurate, as they only provide a rough estimation of the number of
	 * GPU cycles and CPU time spent in a given context. This is due to two
	 * different factors:
	 * - Firstly, we must consider the time the CPU and then the kernel
	 *   takes to process the GPU interrupt, which means additional time and
	 *   GPU cycles will be added in excess to the real figure.
	 * - Secondly, the pipelining done by the Job Manager (2 job slots per
	 *   engine) implies there is no way to know exactly how much time each
	 *   job spent on the GPU.
	 */

	for (i = 0; i < NUM_JOB_SLOTS - 1; i++) {
		if (pfdev->profile_mode) {
			drm_printf(p, "drm-engine-%s:\t%llu ns\n",
				   panfrost_engine_names[i],
				   panfrost_priv->engine_usage.elapsed_ns[i]);
			drm_printf(p, "drm-cycles-%s:\t%llu\n",
				   panfrost_engine_names[i],
				   panfrost_priv->engine_usage.cycles[i]);
		}
		drm_printf(p, "drm-maxfreq-%s:\t%lu Hz\n",
			   panfrost_engine_names[i], pfdev->pfdevfreq.fast_rate);
		drm_printf(p, "drm-curfreq-%s:\t%lu Hz\n",
			   panfrost_engine_names[i], pfdev->pfdevfreq.current_frequency);
	}
}

static void panfrost_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
	struct panfrost_device *pfdev = to_panfrost_device(file->minor->dev);

	panfrost_gpu_show_fdinfo(pfdev, file->driver_priv, p);

	drm_show_memory_stats(p, file);
}

static const struct file_operations panfrost_drm_driver_fops = {
	.owner = THIS_MODULE,
	DRM_GEM_FOPS,
	.show_fdinfo = drm_show_fdinfo,
};

#ifdef CONFIG_DEBUG_FS
static int panthor_gems_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct panfrost_device *pfdev = to_panfrost_device(node->minor->dev);

	panfrost_gem_debugfs_print_bos(pfdev, m);

	return 0;
}

static void show_panfrost_jm_ctx(struct panfrost_jm_ctx *jm_ctx, u32 handle,
				 struct seq_file *m)
{
	struct drm_device *ddev = ((struct drm_info_node *)m->private)->minor->dev;
	const char *prio = "UNKNOWN";

	static const char * const prios[] = {
		[DRM_SCHED_PRIORITY_HIGH] = "HIGH",
		[DRM_SCHED_PRIORITY_NORMAL] = "NORMAL",
		[DRM_SCHED_PRIORITY_LOW] = "LOW",
	};

	if (jm_ctx->slot_entity[0].priority !=
	    jm_ctx->slot_entity[1].priority)
		drm_warn(ddev, "Slot priorities should be the same in a single context");

	if (jm_ctx->slot_entity[0].priority < ARRAY_SIZE(prios))
		prio = prios[jm_ctx->slot_entity[0].priority];

	seq_printf(m, " JM context %u: priority %s\n", handle, prio);
}

static int show_file_jm_ctxs(struct panfrost_file_priv *pfile,
			     struct seq_file *m)
{
	struct panfrost_jm_ctx *jm_ctx;
	unsigned long i;

	xa_lock(&pfile->jm_ctxs);
	xa_for_each(&pfile->jm_ctxs, i, jm_ctx) {
		jm_ctx = panfrost_jm_ctx_get(jm_ctx);
		xa_unlock(&pfile->jm_ctxs);
		show_panfrost_jm_ctx(jm_ctx, i, m);
		panfrost_jm_ctx_put(jm_ctx);
		xa_lock(&pfile->jm_ctxs);
	}
	xa_unlock(&pfile->jm_ctxs);

	return 0;
}

static struct drm_info_list panthor_debugfs_list[] = {
	{"gems",
	 panthor_gems_show, 0, NULL},
};

static int panthor_gems_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(panthor_debugfs_list,
				 ARRAY_SIZE(panthor_debugfs_list),
				 minor->debugfs_root, minor);

	return 0;
}

static int show_each_file(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *ddev = node->minor->dev;
	int (*show)(struct panfrost_file_priv *, struct seq_file *) =
		node->info_ent->data;
	struct drm_file *file;
	int ret;

	ret = mutex_lock_interruptible(&ddev->filelist_mutex);
	if (ret)
		return ret;

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct task_struct *task;
		struct panfrost_file_priv *pfile = file->driver_priv;
		struct pid *pid;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		pid = rcu_dereference(file->pid);
		task = pid_task(pid, PIDTYPE_TGID);
		seq_printf(m, "client_id %8llu pid %8d command %s:\n",
			   file->client_id, pid_nr(pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		ret = show(pfile, m);
		if (ret < 0)
			break;

		seq_puts(m, "\n");
	}

	mutex_unlock(&ddev->filelist_mutex);
	return ret;
}

static struct drm_info_list panfrost_sched_debugfs_list[] = {
	{ "sched_ctxs", show_each_file, 0, show_file_jm_ctxs },
};

static void panfrost_sched_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(panfrost_sched_debugfs_list,
				 ARRAY_SIZE(panfrost_sched_debugfs_list),
				 minor->debugfs_root, minor);
}

static void panfrost_debugfs_init(struct drm_minor *minor)
{
	panthor_gems_debugfs_init(minor);
	panfrost_sched_debugfs_init(minor);
}
#endif

/*
 * Panfrost driver version:
 * - 1.0 - initial interface
 * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
 * - 1.2 - adds AFBC_FEATURES query
 * - 1.3 - adds JD_REQ_CYCLE_COUNT job requirement for SUBMIT
 *       - adds SYSTEM_TIMESTAMP and SYSTEM_TIMESTAMP_FREQUENCY queries
 * - 1.4 - adds SET_LABEL_BO
 * - 1.5 - adds JM_CTX_{CREATE,DESTROY} ioctls and extend SUBMIT to allow
 *         context creation with configurable priorities/affinity
 */
static const struct drm_driver panfrost_drm_driver = {
	.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
	.open = panfrost_open,
	.postclose = panfrost_postclose,
	.show_fdinfo = panfrost_show_fdinfo,
	.ioctls = panfrost_drm_driver_ioctls,
	.num_ioctls = ARRAY_SIZE(panfrost_drm_driver_ioctls),
	.fops = &panfrost_drm_driver_fops,
	.name = "panfrost",
	.desc = "panfrost DRM",
	.major = 1,
	.minor = 5,

	.gem_create_object = panfrost_gem_create_object,
	.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = panfrost_debugfs_init,
#endif
};

static int panfrost_probe(struct platform_device *pdev)
{
	struct panfrost_device *pfdev;
	int err;

	pfdev = devm_drm_dev_alloc(&pdev->dev, &panfrost_drm_driver,
				   struct panfrost_device, base);
	if (IS_ERR(pfdev))
		return PTR_ERR(pfdev);

	platform_set_drvdata(pdev, pfdev);

	pfdev->comp = of_device_get_match_data(&pdev->dev);
	if (!pfdev->comp)
		return -ENODEV;

	pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;

	mutex_init(&pfdev->shrinker_lock);
	INIT_LIST_HEAD(&pfdev->shrinker_list);

	err = panfrost_device_init(pfdev);
	if (err) {
		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Fatal error during GPU init\n");
		goto err_out0;
	}

	pm_runtime_set_active(pfdev->base.dev);
	pm_runtime_mark_last_busy(pfdev->base.dev);
	pm_runtime_enable(pfdev->base.dev);
	pm_runtime_set_autosuspend_delay(pfdev->base.dev, 50); /* ~3 frames */
	pm_runtime_use_autosuspend(pfdev->base.dev);

	/*
	 * Register the DRM device with the core and the connectors with
	 * sysfs
	 */
	err = drm_dev_register(&pfdev->base, 0);
	if (err < 0)
		goto err_out1;

	err = panfrost_gem_shrinker_init(&pfdev->base);
	if (err)
		goto err_out2;

	return 0;

err_out2:
	drm_dev_unregister(&pfdev->base);
err_out1:
	pm_runtime_disable(pfdev->base.dev);
	panfrost_device_fini(pfdev);
	pm_runtime_set_suspended(pfdev->base.dev);
err_out0:
	return err;
}

static void panfrost_remove(struct platform_device *pdev)
{
	struct panfrost_device *pfdev = platform_get_drvdata(pdev);

	drm_dev_unregister(&pfdev->base);
	panfrost_gem_shrinker_cleanup(&pfdev->base);

	pm_runtime_get_sync(pfdev->base.dev);
	pm_runtime_disable(pfdev->base.dev);
	panfrost_device_fini(pfdev);
	pm_runtime_set_suspended(pfdev->base.dev);
}

static ssize_t profiling_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct panfrost_device *pfdev = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", pfdev->profile_mode);
}

static ssize_t profiling_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct panfrost_device *pfdev = dev_get_drvdata(dev);
	bool value;
	int err;

	err = kstrtobool(buf, &value);
	if (err)
		return err;

	pfdev->profile_mode = value;

	return len;
}

static DEVICE_ATTR_RW(profiling);

static struct attribute *panfrost_attrs[] = {
	&dev_attr_profiling.attr,
	NULL,
};

ATTRIBUTE_GROUPS(panfrost);

/*
 * The OPP core wants the supply names to be NULL terminated, but we need the
 * correct num_supplies value for regulator core. Hence, we NULL terminate here
 * and then initialize num_supplies with ARRAY_SIZE - 1.
 */
static const char * const default_supplies[] = { "mali", NULL };
static const struct panfrost_compatible default_data = {
	.num_supplies = ARRAY_SIZE(default_supplies) - 1,
	.supply_names = default_supplies,
	.num_pm_domains = 1, /* optional */
	.pm_domain_names = NULL,
};

static const struct panfrost_compatible allwinner_h616_data = {
	.num_supplies = ARRAY_SIZE(default_supplies) - 1,
	.supply_names = default_supplies,
	.num_pm_domains = 1,
	.pm_features = BIT(GPU_PM_RT),
};

static const struct panfrost_compatible amlogic_data = {
	.num_supplies = ARRAY_SIZE(default_supplies) - 1,
	.supply_names = default_supplies,
	.vendor_quirk = panfrost_gpu_amlogic_quirk,
};

static const char * const mediatek_pm_domains[] = { "core0", "core1", "core2",
						    "core3", "core4" };
/*
 * The old data with two power supplies for MT8183 is here only to
 * keep retro-compatibility with older devicetrees, as DVFS will
 * not work with this one.
 *
 * On new devicetrees please use the _b variant with a single and
 * coupled regulators instead.
 */
static const char * const legacy_supplies[] = { "mali", "sram", NULL };
static const struct panfrost_compatible mediatek_mt8183_data = {
	.num_supplies = ARRAY_SIZE(legacy_supplies) - 1,
	.supply_names = legacy_supplies,
	.num_pm_domains = 3,
	.pm_domain_names = mediatek_pm_domains,
};

static const struct panfrost_compatible mediatek_mt8183_b_data = {
	.num_supplies = ARRAY_SIZE(default_supplies) - 1,
	.supply_names = default_supplies,
	.num_pm_domains = 3,
	.pm_domain_names = mediatek_pm_domains,
	.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
};

static const struct panfrost_compatible mediatek_mt8186_data = {
	.num_supplies = ARRAY_SIZE(default_supplies) - 1,
	.supply_names = default_supplies,
	.num_pm_domains = 2,
	.pm_domain_names = mediatek_pm_domains,
	.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
};

static const struct panfrost_compatible mediatek_mt8188_data = {
	.num_supplies = ARRAY_SIZE(default_supplies) - 1,
	.supply_names = default_supplies,
	.num_pm_domains = 3,
	.pm_domain_names = mediatek_pm_domains,
	.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
	.gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
};

static const struct panfrost_compatible mediatek_mt8192_data = {
	.num_supplies = ARRAY_SIZE(default_supplies) - 1,
	.supply_names = default_supplies,
	.num_pm_domains = 5,
	.pm_domain_names = mediatek_pm_domains,
	.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
	.gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
};

static const struct panfrost_compatible mediatek_mt8370_data = {
	.num_supplies = ARRAY_SIZE(default_supplies) - 1,
	.supply_names = default_supplies,
	.num_pm_domains = 2,
	.pm_domain_names = mediatek_pm_domains,
	.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
	.gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
};

static const struct of_device_id dt_match[] = {
	/* Set first to probe before the generic compatibles */
	{ .compatible = "amlogic,meson-gxm-mali",
	  .data = &amlogic_data, },
	{ .compatible = "amlogic,meson-g12a-mali",
	  .data = &amlogic_data, },
	{ .compatible = "arm,mali-t604", .data = &default_data, },
	{ .compatible = "arm,mali-t624", .data = &default_data, },
	{ .compatible = "arm,mali-t628", .data = &default_data, },
	{ .compatible = "arm,mali-t720", .data = &default_data, },
	{ .compatible = "arm,mali-t760", .data = &default_data, },
	{ .compatible = "arm,mali-t820", .data = &default_data, },
	{ .compatible = "arm,mali-t830", .data = &default_data, },
	{ .compatible = "arm,mali-t860", .data = &default_data, },
	{ .compatible = "arm,mali-t880", .data = &default_data, },
	{ .compatible = "arm,mali-bifrost", .data = &default_data, },
	{ .compatible = "arm,mali-valhall-jm", .data = &default_data, },
	{ .compatible = "mediatek,mt8183-mali", .data = &mediatek_mt8183_data },
	{ .compatible = "mediatek,mt8183b-mali", .data = &mediatek_mt8183_b_data },
	{ .compatible = "mediatek,mt8186-mali", .data = &mediatek_mt8186_data },
	{ .compatible = "mediatek,mt8188-mali", .data = &mediatek_mt8188_data },
	{ .compatible = "mediatek,mt8192-mali", .data = &mediatek_mt8192_data },
	{ .compatible = "mediatek,mt8370-mali", .data = &mediatek_mt8370_data },
	{ .compatible = "allwinner,sun50i-h616-mali", .data = &allwinner_h616_data },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver panfrost_driver = {
	.probe = panfrost_probe,
	.remove = panfrost_remove,
	.driver = {
		.name = "panfrost",
		.pm = pm_ptr(&panfrost_pm_ops),
		.of_match_table = dt_match,
		.dev_groups = panfrost_groups,
	},
};
module_platform_driver(panfrost_driver);

MODULE_AUTHOR("Panfrost Project Developers");
MODULE_DESCRIPTION("Panfrost DRM Driver");
MODULE_LICENSE("GPL v2");
MODULE_SOFTDEP("pre: governor_simpleondemand");