xref: /linux/drivers/gpu/drm/panfrost/panfrost_drv.c (revision face6a3615a649456eb4549f6d474221d877d604)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
3 /* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
4 /* Copyright 2019 Collabora ltd. */
5 
6 #ifdef CONFIG_ARM_ARCH_TIMER
7 #include <asm/arch_timer.h>
8 #endif
9 
10 #include <linux/module.h>
11 #include <linux/of.h>
12 #include <linux/pagemap.h>
13 #include <linux/platform_device.h>
14 #include <linux/pm_runtime.h>
15 #include <drm/panfrost_drm.h>
16 #include <drm/drm_debugfs.h>
17 #include <drm/drm_drv.h>
18 #include <drm/drm_ioctl.h>
19 #include <drm/drm_syncobj.h>
20 #include <drm/drm_utils.h>
21 
22 #include "panfrost_device.h"
23 #include "panfrost_gem.h"
24 #include "panfrost_mmu.h"
25 #include "panfrost_job.h"
26 #include "panfrost_gpu.h"
27 #include "panfrost_perfcnt.h"
28 
29 #define JOB_REQUIREMENTS (PANFROST_JD_REQ_FS | PANFROST_JD_REQ_CYCLE_COUNT)
30 
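/*
 * Unstable ioctls (currently the perfcnt ones) are rejected with -ENOSYS
 * unless this module parameter is set; see panfrost_unstable_ioctl_check().
 */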
31 static bool unstable_ioctls;
32 module_param_unsafe(unstable_ioctls, bool, 0600);
33 
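/*
 * Resume the GPU and hold a cycle-counter reference around the timestamp
 * read, then drop both again. Used by the SYSTEM_TIMESTAMP GET_PARAM query.
 */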
34 static int panfrost_ioctl_query_timestamp(struct panfrost_device *pfdev,
35 					  u64 *arg)
36 {
37 	int ret;
38 
39 	ret = pm_runtime_resume_and_get(pfdev->base.dev);
40 	if (ret)
41 		return ret;
42 
43 	panfrost_cycle_counter_get(pfdev);
44 	*arg = panfrost_timestamp_read(pfdev);
45 	panfrost_cycle_counter_put(pfdev);
46 
47 	pm_runtime_put(pfdev->base.dev);
48 	return 0;
49 }
50 
51 static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file)
52 {
53 	struct drm_panfrost_get_param *param = data;
54 	struct panfrost_device *pfdev = to_panfrost_device(ddev);
55 	int ret;
56 
57 	if (param->pad != 0)
58 		return -EINVAL;
59 
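/*
 * Map DRM_PANFROST_PARAM_* queries onto fields of pfdev->features. The
 * _ARRAY variant handles indexed parameters such as TEXTURE_FEATURES0..3
 * and JS_FEATURES0..15.
 */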
60 #define PANFROST_FEATURE(name, member)			\
61 	case DRM_PANFROST_PARAM_ ## name:		\
62 		param->value = pfdev->features.member;	\
63 		break
64 #define PANFROST_FEATURE_ARRAY(name, member, max)			\
65 	case DRM_PANFROST_PARAM_ ## name ## 0 ...			\
66 		DRM_PANFROST_PARAM_ ## name ## max:			\
67 		param->value = pfdev->features.member[param->param -	\
68 			DRM_PANFROST_PARAM_ ## name ## 0];		\
69 		break
70 
71 	switch (param->param) {
72 		PANFROST_FEATURE(GPU_PROD_ID, id);
73 		PANFROST_FEATURE(GPU_REVISION, revision);
74 		PANFROST_FEATURE(SHADER_PRESENT, shader_present);
75 		PANFROST_FEATURE(TILER_PRESENT, tiler_present);
76 		PANFROST_FEATURE(L2_PRESENT, l2_present);
77 		PANFROST_FEATURE(STACK_PRESENT, stack_present);
78 		PANFROST_FEATURE(AS_PRESENT, as_present);
79 		PANFROST_FEATURE(JS_PRESENT, js_present);
80 		PANFROST_FEATURE(L2_FEATURES, l2_features);
81 		PANFROST_FEATURE(CORE_FEATURES, core_features);
82 		PANFROST_FEATURE(TILER_FEATURES, tiler_features);
83 		PANFROST_FEATURE(MEM_FEATURES, mem_features);
84 		PANFROST_FEATURE(MMU_FEATURES, mmu_features);
85 		PANFROST_FEATURE(THREAD_FEATURES, thread_features);
86 		PANFROST_FEATURE(MAX_THREADS, max_threads);
87 		PANFROST_FEATURE(THREAD_MAX_WORKGROUP_SZ,
88 				thread_max_workgroup_sz);
89 		PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
90 				thread_max_barrier_sz);
91 		PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
92 		PANFROST_FEATURE(AFBC_FEATURES, afbc_features);
93 		PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
94 		PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
95 		PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
96 		PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc);
97 
98 	case DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP:
99 		ret = panfrost_ioctl_query_timestamp(pfdev, &param->value);
100 		if (ret)
101 			return ret;
102 		break;
103 
104 	case DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP_FREQUENCY:
105 #ifdef CONFIG_ARM_ARCH_TIMER
106 		param->value = arch_timer_get_cntfrq();
107 #else
108 		param->value = 0;
109 #endif
110 		break;
111 
112 	case DRM_PANFROST_PARAM_ALLOWED_JM_CTX_PRIORITIES:
113 		param->value = BIT(PANFROST_JM_CTX_PRIORITY_LOW) |
114 			       BIT(PANFROST_JM_CTX_PRIORITY_MEDIUM);
115 
116 		if (panfrost_high_prio_allowed(file))
117 			param->value |= BIT(PANFROST_JM_CTX_PRIORITY_HIGH);
118 		break;
119 
120 	default:
121 		return -EINVAL;
122 	}
123 
124 	return 0;
125 }
126 
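/*
 * Allocate a GEM object of the requested size, create a handle for it and
 * report the start of its GPU mapping in args->offset. HEAP BOs must also
 * be flagged NOEXEC, since heaps should never be executable.
 */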
127 static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
128 		struct drm_file *file)
129 {
130 	struct panfrost_file_priv *priv = file->driver_priv;
131 	struct panfrost_gem_object *bo;
132 	struct drm_panfrost_create_bo *args = data;
133 	struct panfrost_gem_mapping *mapping;
134 	int ret;
135 
136 	if (!args->size || args->pad ||
137 	    (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
138 		return -EINVAL;
139 
140 	/* Heaps should never be executable */
141 	if ((args->flags & PANFROST_BO_HEAP) &&
142 	    !(args->flags & PANFROST_BO_NOEXEC))
143 		return -EINVAL;
144 
145 	bo = panfrost_gem_create(dev, args->size, args->flags);
146 	if (IS_ERR(bo))
147 		return PTR_ERR(bo);
148 
149 	ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
150 	if (ret)
151 		goto out;
152 
153 	mapping = panfrost_gem_mapping_get(bo, priv);
154 	if (mapping) {
155 		args->offset = mapping->mmnode.start << PAGE_SHIFT;
156 		panfrost_gem_mapping_put(mapping);
157 	} else {
158 		/* This can only happen if the handle from
159 		 * drm_gem_handle_create() has already been guessed and freed
160 		 * by user space
161 		 */
162 		ret = -EINVAL;
163 	}
164 
165 out:
166 	drm_gem_object_put(&bo->base.base);
167 	return ret;
168 }
169 
170 /**
171  * panfrost_lookup_bos() - Sets up job->bos[] with the GEM objects
172  * referenced by the job.
173  * @dev: DRM device
174  * @file_priv: DRM file for this fd
175  * @args: IOCTL args
176  * @job: job being set up
177  *
178  * Resolve handles from userspace to BOs and attach them to job.
179  *
180  * Note that this function doesn't need to unreference the BOs on
181  * failure, because that will happen at panfrost_job_cleanup() time.
182  */
183 static int
184 panfrost_lookup_bos(struct drm_device *dev,
185 		  struct drm_file *file_priv,
186 		  struct drm_panfrost_submit *args,
187 		  struct panfrost_job *job)
188 {
189 	struct panfrost_file_priv *priv = file_priv->driver_priv;
190 	struct panfrost_gem_object *bo;
191 	unsigned int i;
192 	int ret;
193 
194 	job->bo_count = args->bo_handle_count;
195 
196 	if (!job->bo_count)
197 		return 0;
198 
199 	ret = drm_gem_objects_lookup(file_priv,
200 				     (void __user *)(uintptr_t)args->bo_handles,
201 				     job->bo_count, &job->bos);
202 	if (ret)
203 		return ret;
204 
205 	job->mappings = kvmalloc_array(job->bo_count,
206 				       sizeof(struct panfrost_gem_mapping *),
207 				       GFP_KERNEL | __GFP_ZERO);
208 	if (!job->mappings)
209 		return -ENOMEM;
210 
211 	for (i = 0; i < job->bo_count; i++) {
212 		struct panfrost_gem_mapping *mapping;
213 
214 		bo = to_panfrost_bo(job->bos[i]);
215 		mapping = panfrost_gem_mapping_get(bo, priv);
216 		if (!mapping) {
217 			ret = -EINVAL;
218 			break;
219 		}
220 
221 		atomic_inc(&bo->gpu_usecount);
222 		job->mappings[i] = mapping;
223 	}
224 
225 	return ret;
226 }
227 
228 /**
229  * panfrost_copy_in_sync() - Sets up the scheduler job's dependencies with
230  * the sync objects referenced by the job.
231  * @dev: DRM device
232  * @file_priv: DRM file for this fd
233  * @args: IOCTL args
234  * @job: job being set up
235  *
236  * Resolve syncobjs from userspace to fences and attach them to job.
237  *
238  * Note that this function doesn't need to unreference the fences on
239  * failure, because that will happen at panfrost_job_cleanup() time.
240  */
241 static int
242 panfrost_copy_in_sync(struct drm_device *dev,
243 		  struct drm_file *file_priv,
244 		  struct drm_panfrost_submit *args,
245 		  struct panfrost_job *job)
246 {
247 	u32 *handles;
248 	int ret = 0;
249 	int i, in_fence_count;
250 
251 	in_fence_count = args->in_sync_count;
252 
253 	if (!in_fence_count)
254 		return 0;
255 
256 	handles = kvmalloc_array(in_fence_count, sizeof(u32), GFP_KERNEL);
257 	if (!handles) {
258 		ret = -ENOMEM;
259 		DRM_DEBUG("Failed to allocate incoming syncobj handles\n");
260 		goto fail;
261 	}
262 
263 	if (copy_from_user(handles,
264 			   (void __user *)(uintptr_t)args->in_syncs,
265 			   in_fence_count * sizeof(u32))) {
266 		ret = -EFAULT;
267 		DRM_DEBUG("Failed to copy in syncobj handles\n");
268 		goto fail;
269 	}
270 
271 	for (i = 0; i < in_fence_count; i++) {
272 		ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv,
273 							   handles[i], 0);
274 		if (ret)
275 			goto fail;
276 	}
277 
278 fail:
279 	kvfree(handles);
280 	return ret;
281 }
282 
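/*
 * Main job submission path: validate the arguments, look up the optional
 * output syncobj and the JM context, build a panfrost_job, attach input
 * sync dependencies and BO mappings, then push the job to the scheduler.
 * On success the output syncobj (if any) is replaced with the job's
 * render-done fence.
 */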
283 static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
284 		struct drm_file *file)
285 {
286 	struct panfrost_device *pfdev = to_panfrost_device(dev);
287 	struct panfrost_file_priv *file_priv = file->driver_priv;
288 	struct drm_panfrost_submit *args = data;
289 	struct drm_syncobj *sync_out = NULL;
290 	struct panfrost_jm_ctx *jm_ctx;
291 	struct panfrost_job *job;
292 	int ret = 0, slot;
293 
294 	if (args->pad)
295 		return -EINVAL;
296 
297 	if (!args->jc)
298 		return -EINVAL;
299 
300 	if (args->requirements & ~JOB_REQUIREMENTS)
301 		return -EINVAL;
302 
303 	if (args->out_sync > 0) {
304 		sync_out = drm_syncobj_find(file, args->out_sync);
305 		if (!sync_out)
306 			return -ENODEV;
307 	}
308 
309 	jm_ctx = panfrost_jm_ctx_from_handle(file, args->jm_ctx_handle);
310 	if (!jm_ctx) {
311 		ret = -EINVAL;
312 		goto out_put_syncout;
313 	}
314 
315 	job = kzalloc(sizeof(*job), GFP_KERNEL);
316 	if (!job) {
317 		ret = -ENOMEM;
318 		goto out_put_jm_ctx;
319 	}
320 
321 	kref_init(&job->refcount);
322 
323 	job->pfdev = pfdev;
324 	job->jc = args->jc;
325 	job->requirements = args->requirements;
326 	job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
327 	job->mmu = file_priv->mmu;
328 	job->ctx = panfrost_jm_ctx_get(jm_ctx);
329 	job->engine_usage = &file_priv->engine_usage;
330 
331 	slot = panfrost_job_get_slot(job);
332 
333 	ret = drm_sched_job_init(&job->base,
334 				 &jm_ctx->slot_entity[slot],
335 				 1, NULL, file->client_id);
336 	if (ret)
337 		goto out_put_job;
338 
339 	ret = panfrost_copy_in_sync(dev, file, args, job);
340 	if (ret)
341 		goto out_cleanup_job;
342 
343 	ret = panfrost_lookup_bos(dev, file, args, job);
344 	if (ret)
345 		goto out_cleanup_job;
346 
347 	ret = panfrost_job_push(job);
348 	if (ret)
349 		goto out_cleanup_job;
350 
351 	/* Update the return sync object for the job */
352 	if (sync_out)
353 		drm_syncobj_replace_fence(sync_out, job->render_done_fence);
354 
355 out_cleanup_job:
356 	if (ret)
357 		drm_sched_job_cleanup(&job->base);
358 out_put_job:
359 	panfrost_job_put(job);
360 out_put_jm_ctx:
361 	panfrost_jm_ctx_put(jm_ctx);
362 out_put_syncout:
363 	if (sync_out)
364 		drm_syncobj_put(sync_out);
365 
366 	return ret;
367 }
368 
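/*
 * Block until the fences attached to the BO's reservation object (up to
 * DMA_RESV_USAGE_READ) signal, or the timeout expires. The timeout is an
 * absolute timestamp in ns; if it has already expired, this becomes a
 * non-blocking check that returns -EBUSY while the BO is still busy.
 */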
369 static int
370 panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
371 		       struct drm_file *file_priv)
372 {
373 	long ret;
374 	struct drm_panfrost_wait_bo *args = data;
375 	struct drm_gem_object *gem_obj;
376 	unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
377 
378 	if (args->pad)
379 		return -EINVAL;
380 
381 	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
382 	if (!gem_obj)
383 		return -ENOENT;
384 
385 	ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_READ,
386 				    true, timeout);
387 	if (!ret)
388 		ret = timeout ? -ETIMEDOUT : -EBUSY;
389 
390 	drm_gem_object_put(gem_obj);
391 
392 	return ret;
393 }
394 
395 static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
396 		      struct drm_file *file_priv)
397 {
398 	struct drm_panfrost_mmap_bo *args = data;
399 	struct drm_gem_object *gem_obj;
400 	int ret;
401 
402 	if (args->flags != 0) {
403 		DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
404 		return -EINVAL;
405 	}
406 
407 	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
408 	if (!gem_obj) {
409 		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
410 		return -ENOENT;
411 	}
412 
413 	/* Don't allow mmapping of heap objects as pages are not pinned. */
414 	if (to_panfrost_bo(gem_obj)->is_heap) {
415 		ret = -EINVAL;
416 		goto out;
417 	}
418 
419 	ret = drm_gem_create_mmap_offset(gem_obj);
420 	if (ret == 0)
421 		args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
422 
423 out:
424 	drm_gem_object_put(gem_obj);
425 	return ret;
426 }
427 
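/*
 * Look up the GPU VA at which the BO is mapped in the calling file's address
 * space; this matches the offset returned by CREATE_BO.
 */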
428 static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
429 			    struct drm_file *file_priv)
430 {
431 	struct panfrost_file_priv *priv = file_priv->driver_priv;
432 	struct drm_panfrost_get_bo_offset *args = data;
433 	struct panfrost_gem_mapping *mapping;
434 	struct drm_gem_object *gem_obj;
435 	struct panfrost_gem_object *bo;
436 
437 	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
438 	if (!gem_obj) {
439 		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
440 		return -ENOENT;
441 	}
442 	bo = to_panfrost_bo(gem_obj);
443 
444 	mapping = panfrost_gem_mapping_get(bo, priv);
445 	drm_gem_object_put(gem_obj);
446 
447 	if (!mapping)
448 		return -EINVAL;
449 
450 	args->offset = mapping->mmnode.start << PAGE_SHIFT;
451 	panfrost_gem_mapping_put(mapping);
452 	return 0;
453 }
454 
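/*
 * Update the madvise state of a BO for the shmem shrinker. Marking a BO
 * DONTNEED (purgeable) is only allowed while the calling FD is its sole
 * user; retained purgeable BOs are placed on the device shrinker list, and
 * WILLNEED takes them off again.
 */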
455 static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
456 				  struct drm_file *file_priv)
457 {
458 	struct panfrost_file_priv *priv = file_priv->driver_priv;
459 	struct drm_panfrost_madvise *args = data;
460 	struct panfrost_device *pfdev = to_panfrost_device(dev);
461 	struct drm_gem_object *gem_obj;
462 	struct panfrost_gem_object *bo;
463 	int ret = 0;
464 
465 	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
466 	if (!gem_obj) {
467 		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
468 		return -ENOENT;
469 	}
470 
471 	bo = to_panfrost_bo(gem_obj);
472 
473 	ret = dma_resv_lock_interruptible(bo->base.base.resv, NULL);
474 	if (ret)
475 		goto out_put_object;
476 
477 	mutex_lock(&pfdev->shrinker_lock);
478 	mutex_lock(&bo->mappings.lock);
479 	if (args->madv == PANFROST_MADV_DONTNEED) {
480 		struct panfrost_gem_mapping *first;
481 
482 		first = list_first_entry(&bo->mappings.list,
483 					 struct panfrost_gem_mapping,
484 					 node);
485 
486 		/*
487 		 * If we want to mark the BO purgeable, there must be only one
488 		 * user: the caller FD.
489 		 * We could do something smarter and mark the BO purgeable only
490 		 * when all its users have marked it purgeable, but globally
491 		 * visible/shared BOs are likely to never be marked purgeable
492 		 * anyway, so let's not bother.
493 		 */
494 		if (!list_is_singular(&bo->mappings.list) ||
495 		    WARN_ON_ONCE(first->mmu != priv->mmu)) {
496 			ret = -EINVAL;
497 			goto out_unlock_mappings;
498 		}
499 	}
500 
501 	args->retained = drm_gem_shmem_madvise_locked(&bo->base, args->madv);
502 
503 	if (args->retained) {
504 		if (args->madv == PANFROST_MADV_DONTNEED)
505 			list_move_tail(&bo->base.madv_list,
506 				       &pfdev->shrinker_list);
507 		else if (args->madv == PANFROST_MADV_WILLNEED)
508 			list_del_init(&bo->base.madv_list);
509 	}
510 
511 out_unlock_mappings:
512 	mutex_unlock(&bo->mappings.lock);
513 	mutex_unlock(&pfdev->shrinker_lock);
514 	dma_resv_unlock(bo->base.base.resv);
515 out_put_object:
516 	drm_gem_object_put(gem_obj);
517 	return ret;
518 }
519 
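/*
 * Copy a user-supplied label (up to PANFROST_BO_LABEL_MAXLEN bytes) and
 * attach it to the BO; an over-long label is rejected with -E2BIG. A NULL
 * label pointer is handled separately from an empty string (see below).
 */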
520 static int panfrost_ioctl_set_label_bo(struct drm_device *ddev, void *data,
521 				       struct drm_file *file)
522 {
523 	struct drm_panfrost_set_label_bo *args = data;
524 	struct drm_gem_object *obj;
525 	const char *label = NULL;
526 	int ret = 0;
527 
528 	if (args->pad)
529 		return -EINVAL;
530 
531 	obj = drm_gem_object_lookup(file, args->handle);
532 	if (!obj)
533 		return -ENOENT;
534 
535 	if (args->label) {
536 		label = strndup_user(u64_to_user_ptr(args->label),
537 				     PANFROST_BO_LABEL_MAXLEN);
538 		if (IS_ERR(label)) {
539 			ret = PTR_ERR(label);
540 			if (ret == -EINVAL)
541 				ret = -E2BIG;
542 			goto err_put_obj;
543 		}
544 	}
545 
546 	/*
547 	 * We treat passing a label of length 0 and passing a NULL label
548 	 * differently, because even though they might seem conceptually
549 	 * similar, future uses of the BO label might expect a different
550 	 * behaviour in each case.
551 	 */
552 	panfrost_gem_set_label(obj, label);
553 
554 err_put_obj:
555 	drm_gem_object_put(obj);
556 
557 	return ret;
558 }
559 
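/*
 * JM context ioctls: a context owns one scheduler entity per job slot, all
 * at the same priority. Handle 0 refers to the default context created at
 * open time, which cannot be destroyed through this interface.
 */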
560 static int panfrost_ioctl_jm_ctx_create(struct drm_device *dev, void *data,
561 					struct drm_file *file)
562 {
563 	return panfrost_jm_ctx_create(file, data);
564 }
565 
566 static int panfrost_ioctl_jm_ctx_destroy(struct drm_device *dev, void *data,
567 					 struct drm_file *file)
568 {
569 	const struct drm_panfrost_jm_ctx_destroy *args = data;
570 
571 	if (args->pad)
572 		return -EINVAL;
573 
574 	/* We can't destroy the default context created when the file is opened. */
575 	if (!args->handle)
576 		return -EINVAL;
577 
578 	return panfrost_jm_ctx_destroy(file, args->handle);
579 }
580 
581 int panfrost_unstable_ioctl_check(void)
582 {
583 	if (!unstable_ioctls)
584 		return -ENOSYS;
585 
586 	return 0;
587 }
588 
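/*
 * Per-file open: allocate the driver-private data, create the file's MMU
 * (GPU address space) context and let the JM code set up its per-file state,
 * including the default context, via panfrost_jm_open().
 */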
589 static int
590 panfrost_open(struct drm_device *dev, struct drm_file *file)
591 {
592 	int ret;
593 	struct panfrost_device *pfdev = to_panfrost_device(dev);
594 	struct panfrost_file_priv *panfrost_priv;
595 
596 	panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL);
597 	if (!panfrost_priv)
598 		return -ENOMEM;
599 
600 	panfrost_priv->pfdev = pfdev;
601 	file->driver_priv = panfrost_priv;
602 
603 	panfrost_priv->mmu = panfrost_mmu_ctx_create(pfdev);
604 	if (IS_ERR(panfrost_priv->mmu)) {
605 		ret = PTR_ERR(panfrost_priv->mmu);
606 		goto err_free;
607 	}
608 
609 	ret = panfrost_jm_open(file);
610 	if (ret)
611 		goto err_job;
612 
613 	return 0;
614 
615 err_job:
616 	panfrost_mmu_ctx_put(panfrost_priv->mmu);
617 err_free:
618 	kfree(panfrost_priv);
619 	return ret;
620 }
621 
622 static void
623 panfrost_postclose(struct drm_device *dev, struct drm_file *file)
624 {
625 	struct panfrost_file_priv *panfrost_priv = file->driver_priv;
626 
627 	panfrost_perfcnt_close(file);
628 	panfrost_jm_close(file);
629 
630 	panfrost_mmu_ctx_put(panfrost_priv->mmu);
631 	kfree(panfrost_priv);
632 }
633 
634 static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
635 #define PANFROST_IOCTL(n, func, flags) \
636 	DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)
637 
638 	PANFROST_IOCTL(SUBMIT,		submit,		DRM_RENDER_ALLOW),
639 	PANFROST_IOCTL(WAIT_BO,		wait_bo,	DRM_RENDER_ALLOW),
640 	PANFROST_IOCTL(CREATE_BO,	create_bo,	DRM_RENDER_ALLOW),
641 	PANFROST_IOCTL(MMAP_BO,		mmap_bo,	DRM_RENDER_ALLOW),
642 	PANFROST_IOCTL(GET_PARAM,	get_param,	DRM_RENDER_ALLOW),
643 	PANFROST_IOCTL(GET_BO_OFFSET,	get_bo_offset,	DRM_RENDER_ALLOW),
644 	PANFROST_IOCTL(PERFCNT_ENABLE,	perfcnt_enable,	DRM_RENDER_ALLOW),
645 	PANFROST_IOCTL(PERFCNT_DUMP,	perfcnt_dump,	DRM_RENDER_ALLOW),
646 	PANFROST_IOCTL(MADVISE,		madvise,	DRM_RENDER_ALLOW),
647 	PANFROST_IOCTL(SET_LABEL_BO,	set_label_bo,	DRM_RENDER_ALLOW),
648 	PANFROST_IOCTL(JM_CTX_CREATE,	jm_ctx_create,	DRM_RENDER_ALLOW),
649 	PANFROST_IOCTL(JM_CTX_DESTROY,	jm_ctx_destroy,	DRM_RENDER_ALLOW),
650 };
651 
652 static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev,
653 				     struct panfrost_file_priv *panfrost_priv,
654 				     struct drm_printer *p)
655 {
656 	int i;
657 
658 	/*
659 	 * IMPORTANT NOTE: drm-cycles and drm-engine measurements are not
660 	 * accurate, as they only provide a rough estimation of the number of
661 	 * GPU cycles and CPU time spent in a given context. This is due to two
662 	 * different factors:
663 	 * - Firstly, we must consider the time the CPU and then the kernel
664 	 *   takes to process the GPU interrupt, which means additional time and
665 	 *   GPU cycles will be added in excess to the real figure.
666 	 * - Secondly, the pipelining done by the Job Manager (2 job slots per
667 	 *   engine) implies there is no way to know exactly how much time each
668 	 *   job spent on the GPU.
669 	 */
670 
671 	for (i = 0; i < NUM_JOB_SLOTS - 1; i++) {
672 		if (pfdev->profile_mode) {
673 			drm_printf(p, "drm-engine-%s:\t%llu ns\n",
674 				   panfrost_engine_names[i],
675 				   panfrost_priv->engine_usage.elapsed_ns[i]);
676 			drm_printf(p, "drm-cycles-%s:\t%llu\n",
677 				   panfrost_engine_names[i],
678 				   panfrost_priv->engine_usage.cycles[i]);
679 		}
680 		drm_printf(p, "drm-maxfreq-%s:\t%lu Hz\n",
681 			   panfrost_engine_names[i], pfdev->pfdevfreq.fast_rate);
682 		drm_printf(p, "drm-curfreq-%s:\t%lu Hz\n",
683 			   panfrost_engine_names[i], pfdev->pfdevfreq.current_frequency);
684 	}
685 }
686 
687 static void panfrost_show_fdinfo(struct drm_printer *p, struct drm_file *file)
688 {
689 	struct panfrost_device *pfdev = to_panfrost_device(file->minor->dev);
690 
691 	panfrost_gpu_show_fdinfo(pfdev, file->driver_priv, p);
692 
693 	drm_show_memory_stats(p, file);
694 }
695 
696 static const struct file_operations panfrost_drm_driver_fops = {
697 	.owner = THIS_MODULE,
698 	DRM_GEM_FOPS,
699 	.show_fdinfo = drm_show_fdinfo,
700 };
701 
702 #ifdef CONFIG_DEBUG_FS
703 static int panfrost_gems_show(struct seq_file *m, void *data)
704 {
705 	struct drm_info_node *node = m->private;
706 	struct panfrost_device *pfdev = to_panfrost_device(node->minor->dev);
707 
708 	panfrost_gem_debugfs_print_bos(pfdev, m);
709 
710 	return 0;
711 }
712 
713 static void show_panfrost_jm_ctx(struct panfrost_jm_ctx *jm_ctx, u32 handle,
714 				 struct seq_file *m)
715 {
716 	struct drm_device *ddev = ((struct drm_info_node *)m->private)->minor->dev;
717 	const char *prio = "UNKNOWN";
718 
719 	static const char * const prios[] = {
720 		[DRM_SCHED_PRIORITY_HIGH] = "HIGH",
721 		[DRM_SCHED_PRIORITY_NORMAL] = "NORMAL",
722 		[DRM_SCHED_PRIORITY_LOW] = "LOW",
723 	};
724 
725 	if (jm_ctx->slot_entity[0].priority !=
726 	    jm_ctx->slot_entity[1].priority)
727 		drm_warn(ddev, "Slot priorities should be the same in a single context\n");
728 
729 	if (jm_ctx->slot_entity[0].priority < ARRAY_SIZE(prios))
730 		prio = prios[jm_ctx->slot_entity[0].priority];
731 
732 	seq_printf(m, " JM context %u: priority %s\n", handle, prio);
733 }
734 
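/*
 * Dump every JM context of a file. A reference is taken and the xarray lock
 * dropped around show_panfrost_jm_ctx(), so the (potentially sleeping)
 * seq_file output is not done under the xa spinlock.
 */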
735 static int show_file_jm_ctxs(struct panfrost_file_priv *pfile,
736 			     struct seq_file *m)
737 {
738 	struct panfrost_jm_ctx *jm_ctx;
739 	unsigned long i;
740 
741 	xa_lock(&pfile->jm_ctxs);
742 	xa_for_each(&pfile->jm_ctxs, i, jm_ctx) {
743 		jm_ctx = panfrost_jm_ctx_get(jm_ctx);
744 		xa_unlock(&pfile->jm_ctxs);
745 		show_panfrost_jm_ctx(jm_ctx, i, m);
746 		panfrost_jm_ctx_put(jm_ctx);
747 		xa_lock(&pfile->jm_ctxs);
748 	}
749 	xa_unlock(&pfile->jm_ctxs);
750 
751 	return 0;
752 }
753 
754 static struct drm_info_list panfrost_gems_debugfs_list[] = {
755 	{"gems",
756 	 panfrost_gems_show, 0, NULL},
757 };
758 
759 static int panfrost_gems_debugfs_init(struct drm_minor *minor)
760 {
761 	drm_debugfs_create_files(panfrost_gems_debugfs_list,
762 				 ARRAY_SIZE(panfrost_gems_debugfs_list),
763 				 minor->debugfs_root, minor);
764 
765 	return 0;
766 }
767 
768 static int show_each_file(struct seq_file *m, void *arg)
769 {
770 	struct drm_info_node *node = (struct drm_info_node *)m->private;
771 	struct drm_device *ddev = node->minor->dev;
772 	int (*show)(struct panfrost_file_priv *, struct seq_file *) =
773 		node->info_ent->data;
774 	struct drm_file *file;
775 	int ret;
776 
777 	ret = mutex_lock_interruptible(&ddev->filelist_mutex);
778 	if (ret)
779 		return ret;
780 
781 	list_for_each_entry(file, &ddev->filelist, lhead) {
782 		struct task_struct *task;
783 		struct panfrost_file_priv *pfile = file->driver_priv;
784 		struct pid *pid;
785 
786 		/*
787 		 * Although we have a valid reference on file->pid, that does
788 		 * not guarantee that the task_struct who called get_pid() is
789 		 * still alive (e.g. get_pid(current) => fork() => exit()).
790 		 * Therefore, we need to protect this ->comm access using RCU.
791 		 */
792 		rcu_read_lock();
793 		pid = rcu_dereference(file->pid);
794 		task = pid_task(pid, PIDTYPE_TGID);
795 		seq_printf(m, "client_id %8llu pid %8d command %s:\n",
796 			   file->client_id, pid_nr(pid),
797 			   task ? task->comm : "<unknown>");
798 		rcu_read_unlock();
799 
800 		ret = show(pfile, m);
801 		if (ret < 0)
802 			break;
803 
804 		seq_puts(m, "\n");
805 	}
806 
807 	mutex_unlock(&ddev->filelist_mutex);
808 	return ret;
809 }
810 
811 static struct drm_info_list panfrost_sched_debugfs_list[] = {
812 	{ "sched_ctxs", show_each_file, 0, show_file_jm_ctxs },
813 };
814 
815 static void panfrost_sched_debugfs_init(struct drm_minor *minor)
816 {
817 	drm_debugfs_create_files(panfrost_sched_debugfs_list,
818 				 ARRAY_SIZE(panfrost_sched_debugfs_list),
819 				 minor->debugfs_root, minor);
820 }
821 
822 static void panfrost_debugfs_init(struct drm_minor *minor)
823 {
824 	panfrost_gems_debugfs_init(minor);
825 	panfrost_sched_debugfs_init(minor);
826 }
827 #endif
828 
829 /*
830  * Panfrost driver version:
831  * - 1.0 - initial interface
832  * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
833  * - 1.2 - adds AFBC_FEATURES query
834  * - 1.3 - adds JD_REQ_CYCLE_COUNT job requirement for SUBMIT
835  *       - adds SYSTEM_TIMESTAMP and SYSTEM_TIMESTAMP_FREQUENCY queries
836  * - 1.4 - adds SET_LABEL_BO
837  * - 1.5 - adds JM_CTX_{CREATE,DESTROY} ioctls for creating contexts with
838  *	   configurable priorities/affinity, and extends SUBMIT to take a context handle
839  */
840 static const struct drm_driver panfrost_drm_driver = {
841 	.driver_features	= DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
842 	.open			= panfrost_open,
843 	.postclose		= panfrost_postclose,
844 	.show_fdinfo		= panfrost_show_fdinfo,
845 	.ioctls			= panfrost_drm_driver_ioctls,
846 	.num_ioctls		= ARRAY_SIZE(panfrost_drm_driver_ioctls),
847 	.fops			= &panfrost_drm_driver_fops,
848 	.name			= "panfrost",
849 	.desc			= "panfrost DRM",
850 	.major			= 1,
851 	.minor			= 5,
852 
853 	.gem_create_object	= panfrost_gem_create_object,
854 	.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
855 #ifdef CONFIG_DEBUG_FS
856 	.debugfs_init = panfrost_debugfs_init,
857 #endif
858 };
859 
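/*
 * Platform probe: allocate the panfrost/DRM device, initialize the GPU,
 * enable runtime PM with a 50 ms autosuspend delay, then register the DRM
 * device and the GEM shrinker. Errors unwind in reverse order.
 */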
860 static int panfrost_probe(struct platform_device *pdev)
861 {
862 	struct panfrost_device *pfdev;
863 	int err;
864 
865 	pfdev = devm_drm_dev_alloc(&pdev->dev, &panfrost_drm_driver,
866 				   struct panfrost_device, base);
867 	if (IS_ERR(pfdev))
868 		return PTR_ERR(pfdev);
869 
870 	platform_set_drvdata(pdev, pfdev);
871 
872 	pfdev->comp = of_device_get_match_data(&pdev->dev);
873 	if (!pfdev->comp)
874 		return -ENODEV;
875 
876 	pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;
877 
878 	mutex_init(&pfdev->shrinker_lock);
879 	INIT_LIST_HEAD(&pfdev->shrinker_list);
880 
881 	err = panfrost_device_init(pfdev);
882 	if (err) {
883 		if (err != -EPROBE_DEFER)
884 			dev_err(&pdev->dev, "Fatal error during GPU init\n");
885 		goto err_out0;
886 	}
887 
888 	pm_runtime_set_active(pfdev->base.dev);
889 	pm_runtime_mark_last_busy(pfdev->base.dev);
890 	pm_runtime_enable(pfdev->base.dev);
891 	pm_runtime_set_autosuspend_delay(pfdev->base.dev, 50); /* ~3 frames */
892 	pm_runtime_use_autosuspend(pfdev->base.dev);
893 
894 	/*
895 	 * Register the DRM device with the core; panfrost is a render-only
896 	 * driver, so there are no connectors to register with sysfs.
897 	 */
898 	err = drm_dev_register(&pfdev->base, 0);
899 	if (err < 0)
900 		goto err_out1;
901 
902 	err = panfrost_gem_shrinker_init(&pfdev->base);
903 	if (err)
904 		goto err_out2;
905 
906 	return 0;
907 
908 err_out2:
909 	drm_dev_unregister(&pfdev->base);
910 err_out1:
911 	pm_runtime_disable(pfdev->base.dev);
912 	panfrost_device_fini(pfdev);
913 	pm_runtime_set_suspended(pfdev->base.dev);
914 err_out0:
915 	return err;
916 }
917 
918 static void panfrost_remove(struct platform_device *pdev)
919 {
920 	struct panfrost_device *pfdev = platform_get_drvdata(pdev);
921 
922 	drm_dev_unregister(&pfdev->base);
923 	panfrost_gem_shrinker_cleanup(&pfdev->base);
924 
925 	pm_runtime_get_sync(pfdev->base.dev);
926 	pm_runtime_disable(pfdev->base.dev);
927 	panfrost_device_fini(pfdev);
928 	pm_runtime_set_suspended(pfdev->base.dev);
929 }
930 
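/*
 * "profiling" sysfs attribute: toggles pfdev->profile_mode, which gates the
 * drm-engine-* and drm-cycles-* lines emitted through fdinfo (see
 * panfrost_gpu_show_fdinfo()).
 */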
931 static ssize_t profiling_show(struct device *dev,
932 			      struct device_attribute *attr, char *buf)
933 {
934 	struct panfrost_device *pfdev = dev_get_drvdata(dev);
935 
936 	return sysfs_emit(buf, "%d\n", pfdev->profile_mode);
937 }
938 
939 static ssize_t profiling_store(struct device *dev,
940 			       struct device_attribute *attr,
941 			       const char *buf, size_t len)
942 {
943 	struct panfrost_device *pfdev = dev_get_drvdata(dev);
944 	bool value;
945 	int err;
946 
947 	err = kstrtobool(buf, &value);
948 	if (err)
949 		return err;
950 
951 	pfdev->profile_mode = value;
952 
953 	return len;
954 }
955 
956 static DEVICE_ATTR_RW(profiling);
957 
958 static struct attribute *panfrost_attrs[] = {
959 	&dev_attr_profiling.attr,
960 	NULL,
961 };
962 
963 ATTRIBUTE_GROUPS(panfrost);
964 
965 /*
966  * The OPP core wants the supply names to be NULL terminated, but we need the
967  * correct num_supplies value for regulator core. Hence, we NULL terminate here
968  * and then initialize num_supplies with ARRAY_SIZE - 1.
969  */
970 static const char * const default_supplies[] = { "mali", NULL };
971 static const struct panfrost_compatible default_data = {
972 	.num_supplies = ARRAY_SIZE(default_supplies) - 1,
973 	.supply_names = default_supplies,
974 	.num_pm_domains = 1, /* optional */
975 	.pm_domain_names = NULL,
976 };
977 
978 static const struct panfrost_compatible allwinner_h616_data = {
979 	.num_supplies = ARRAY_SIZE(default_supplies) - 1,
980 	.supply_names = default_supplies,
981 	.num_pm_domains = 1,
982 	.pm_features = BIT(GPU_PM_RT),
983 };
984 
985 static const struct panfrost_compatible amlogic_data = {
986 	.num_supplies = ARRAY_SIZE(default_supplies) - 1,
987 	.supply_names = default_supplies,
988 	.vendor_quirk = panfrost_gpu_amlogic_quirk,
989 };
990 
991 static const char * const mediatek_pm_domains[] = { "core0", "core1", "core2",
992 						    "core3", "core4" };
993 /*
994  * The old data with two power supplies for MT8183 is here only to
995  * keep retro-compatibility with older devicetrees, as DVFS will
996  * not work with this one.
997  *
998  * On new devicetrees please use the _b variant, which uses a single
999  * supply with coupled regulators instead.
1000  */
1001 static const char * const legacy_supplies[] = { "mali", "sram", NULL };
1002 static const struct panfrost_compatible mediatek_mt8183_data = {
1003 	.num_supplies = ARRAY_SIZE(legacy_supplies) - 1,
1004 	.supply_names = legacy_supplies,
1005 	.num_pm_domains = 3,
1006 	.pm_domain_names = mediatek_pm_domains,
1007 };
1008 
1009 static const struct panfrost_compatible mediatek_mt8183_b_data = {
1010 	.num_supplies = ARRAY_SIZE(default_supplies) - 1,
1011 	.supply_names = default_supplies,
1012 	.num_pm_domains = 3,
1013 	.pm_domain_names = mediatek_pm_domains,
1014 	.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
1015 };
1016 
1017 static const struct panfrost_compatible mediatek_mt8186_data = {
1018 	.num_supplies = ARRAY_SIZE(default_supplies) - 1,
1019 	.supply_names = default_supplies,
1020 	.num_pm_domains = 2,
1021 	.pm_domain_names = mediatek_pm_domains,
1022 	.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
1023 };
1024 
1025 static const struct panfrost_compatible mediatek_mt8188_data = {
1026 	.num_supplies = ARRAY_SIZE(default_supplies) - 1,
1027 	.supply_names = default_supplies,
1028 	.num_pm_domains = 3,
1029 	.pm_domain_names = mediatek_pm_domains,
1030 	.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
1031 	.gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
1032 };
1033 
1034 static const struct panfrost_compatible mediatek_mt8192_data = {
1035 	.num_supplies = ARRAY_SIZE(default_supplies) - 1,
1036 	.supply_names = default_supplies,
1037 	.num_pm_domains = 5,
1038 	.pm_domain_names = mediatek_pm_domains,
1039 	.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
1040 	.gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
1041 };
1042 
1043 static const struct panfrost_compatible mediatek_mt8370_data = {
1044 	.num_supplies = ARRAY_SIZE(default_supplies) - 1,
1045 	.supply_names = default_supplies,
1046 	.num_pm_domains = 2,
1047 	.pm_domain_names = mediatek_pm_domains,
1048 	.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
1049 	.gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
1050 };
1051 
1052 static const struct of_device_id dt_match[] = {
1053 	/* Set first to probe before the generic compatibles */
1054 	{ .compatible = "amlogic,meson-gxm-mali",
1055 	  .data = &amlogic_data, },
1056 	{ .compatible = "amlogic,meson-g12a-mali",
1057 	  .data = &amlogic_data, },
1058 	{ .compatible = "arm,mali-t604", .data = &default_data, },
1059 	{ .compatible = "arm,mali-t624", .data = &default_data, },
1060 	{ .compatible = "arm,mali-t628", .data = &default_data, },
1061 	{ .compatible = "arm,mali-t720", .data = &default_data, },
1062 	{ .compatible = "arm,mali-t760", .data = &default_data, },
1063 	{ .compatible = "arm,mali-t820", .data = &default_data, },
1064 	{ .compatible = "arm,mali-t830", .data = &default_data, },
1065 	{ .compatible = "arm,mali-t860", .data = &default_data, },
1066 	{ .compatible = "arm,mali-t880", .data = &default_data, },
1067 	{ .compatible = "arm,mali-bifrost", .data = &default_data, },
1068 	{ .compatible = "arm,mali-valhall-jm", .data = &default_data, },
1069 	{ .compatible = "mediatek,mt8183-mali", .data = &mediatek_mt8183_data },
1070 	{ .compatible = "mediatek,mt8183b-mali", .data = &mediatek_mt8183_b_data },
1071 	{ .compatible = "mediatek,mt8186-mali", .data = &mediatek_mt8186_data },
1072 	{ .compatible = "mediatek,mt8188-mali", .data = &mediatek_mt8188_data },
1073 	{ .compatible = "mediatek,mt8192-mali", .data = &mediatek_mt8192_data },
1074 	{ .compatible = "mediatek,mt8370-mali", .data = &mediatek_mt8370_data },
1075 	{ .compatible = "allwinner,sun50i-h616-mali", .data = &allwinner_h616_data },
1076 	{}
1077 };
1078 MODULE_DEVICE_TABLE(of, dt_match);
1079 
1080 static struct platform_driver panfrost_driver = {
1081 	.probe		= panfrost_probe,
1082 	.remove		= panfrost_remove,
1083 	.driver		= {
1084 		.name	= "panfrost",
1085 		.pm	= pm_ptr(&panfrost_pm_ops),
1086 		.of_match_table = dt_match,
1087 		.dev_groups = panfrost_groups,
1088 	},
1089 };
1090 module_platform_driver(panfrost_driver);
1091 
1092 MODULE_AUTHOR("Panfrost Project Developers");
1093 MODULE_DESCRIPTION("Panfrost DRM Driver");
1094 MODULE_LICENSE("GPL v2");
1095 MODULE_SOFTDEP("pre: governor_simpleondemand");
1096