// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/spinlock_types.h>
#include <linux/workqueue.h>

#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/gpu_scheduler.h>

#include "v3d_performance_counters.h"

#include "uapi/drm/v3d_drm.h"

struct clk;
struct platform_device;
struct reset_control;

#define V3D_MMU_PAGE_SHIFT 12
#define V3D_PAGE_FACTOR (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT)
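
/*
 * Usage sketch (illustrative only, not part of the driver): the V3D MMU
 * always uses 4 KiB pages, so V3D_PAGE_FACTOR is the number of MMU pages
 * covered by one CPU page. A BO's page-table entry count would be derived
 * roughly as follows; "bo" here is a hypothetical struct v3d_bo pointer:
 *
 *	u32 npages = bo->base.base.size >> V3D_MMU_PAGE_SHIFT;
 *	u32 cpu_pages = npages / V3D_PAGE_FACTOR;
 */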

#define V3D_MAX_QUEUES (V3D_CPU + 1)

static inline char *v3d_queue_to_string(enum v3d_queue queue)
{
	switch (queue) {
	case V3D_BIN: return "bin";
	case V3D_RENDER: return "render";
	case V3D_TFU: return "tfu";
	case V3D_CSD: return "csd";
	case V3D_CACHE_CLEAN: return "cache_clean";
	case V3D_CPU: return "cpu";
	}
	return "UNKNOWN";
}

struct v3d_stats {
	u64 start_ns;
	u64 enabled_ns;
	u64 jobs_completed;

	/*
	 * This seqcount protects access to the GPU stats variables. It
	 * must be used because, while the stats are being read, IRQs can
	 * fire and update them.
	 */
	seqcount_t lock;
};
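
/*
 * Read-side sketch for the seqcount above (illustrative only, not the
 * driver's accessor; see v3d_get_stats() declared under v3d_drv.c below).
 * A consistent snapshot is typically taken like this:
 *
 *	unsigned int seq;
 *	u64 enabled_ns, jobs;
 *
 *	do {
 *		seq = read_seqcount_begin(&stats->lock);
 *		enabled_ns = stats->enabled_ns;
 *		jobs = stats->jobs_completed;
 *	} while (read_seqcount_retry(&stats->lock, seq));
 */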

struct v3d_queue_state {
	struct drm_gpu_scheduler sched;

	u64 fence_context;
	u64 emit_seqno;

	/* Stores the GPU stats for this queue in the global context. */
	struct v3d_stats stats;

	/* Currently active job for this queue */
	struct v3d_job *active_job;
	spinlock_t queue_lock;
	/* Protects the dma fence used for signalling job completion */
	spinlock_t fence_lock;
};

/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon-related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
struct v3d_perfmon {
	/* Tracks the number of users of the perfmon; when this counter
	 * reaches zero, the perfmon is destroyed.
	 */
	refcount_t refcnt;

	/* Protects perfmon stop, as it can be invoked from multiple places. */
	struct mutex lock;

	/* Number of counters activated in this perfmon instance
	 * (at most DRM_V3D_MAX_PERF_COUNTERS).
	 */
	u8 ncounters;

	/* Events counted by the HW perf counters. */
	u8 counters[DRM_V3D_MAX_PERF_COUNTERS];

	/* Storage for counter values. Counters are incremented by the
	 * HW perf counter values every time the perfmon is attached
	 * to a GPU job.  This way, perfmon users don't have to
	 * retrieve the results after each job if they want to track
	 * events covering several submissions.  Note that counter
	 * values can't be reset, but you can fake a reset by
	 * destroying the perfmon and creating a new one.
	 */
	u64 values[] __counted_by(ncounters);
};
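
/*
 * Lifetime sketch (illustrative only): perfmon users take and drop
 * references through v3d_perfmon_get()/v3d_perfmon_put() declared below.
 * Conceptually the put path boils down to:
 *
 *	if (perfmon && refcount_dec_and_test(&perfmon->refcnt))
 *		kfree(perfmon);
 *
 * so the object is destroyed once the last user drops its reference.
 */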

enum v3d_gen {
	V3D_GEN_33 = 33,
	V3D_GEN_41 = 41,
	V3D_GEN_42 = 42,
	V3D_GEN_71 = 71,
};

enum v3d_irq {
	V3D_CORE_IRQ,
	V3D_HUB_IRQ,
	V3D_MAX_IRQS,
};

struct v3d_dev {
	struct drm_device drm;

	/* Short representation (e.g. 33, 41) of the V3D tech version */
	enum v3d_gen ver;

	/* Short representation (e.g. 5, 6) of the V3D tech revision */
	int rev;

	bool single_irq_line;

	int irq[V3D_MAX_IRQS];

	struct v3d_perfmon_info perfmon_info;

	void __iomem *hub_regs;
	void __iomem *core_regs[3];
	void __iomem *bridge_regs;
	void __iomem *gca_regs;
	void __iomem *sms_regs;
	struct clk *clk;
	struct reset_control *reset;

	/* Virtual and DMA addresses of the single shared page table. */
	volatile u32 *pt;
	dma_addr_t pt_paddr;

	/* Virtual and DMA addresses of the MMU's scratch page.  When
	 * a read or write is invalid in the MMU, it will be
	 * redirected here.
	 */
	void *mmu_scratch;
	dma_addr_t mmu_scratch_paddr;
	/* virtual address bits from V3D to the MMU. */
	int va_width;

	/* Number of V3D cores. */
	u32 cores;

	/* Allocator managing the address space.  All units are in
	 * number of pages.
	 */
	struct drm_mm mm;
	spinlock_t mm_lock;

	struct work_struct overflow_mem_work;

	struct v3d_queue_state queue[V3D_MAX_QUEUES];

	/* Used to track the active perfmon if any. */
	struct v3d_perfmon *active_perfmon;

	/* Protects bo_stats */
	struct mutex bo_lock;

	/* Lock taken when resetting the GPU, to keep multiple
	 * processes from trying to park the scheduler threads and
	 * reset at once.
	 */
	struct mutex reset_lock;

	/* Lock taken when creating and pushing the GPU scheduler
	 * jobs, to keep the sched-fence seqnos in order.
	 */
	struct mutex sched_lock;

	/* Lock taken during a cache clean and when initiating an L2
	 * flush, to keep L2 flushes from interfering with the
	 * synchronous L2 cleans.
	 */
	struct mutex cache_clean_lock;

	struct {
		u32 num_allocated;
		u32 pages_allocated;
	} bo_stats;

	/* To support a performance analysis tool in user space, we require
	 * a single, globally configured performance monitor (perfmon) for
	 * all jobs.
	 */
	struct v3d_perfmon *global_perfmon;

	/* Global reset counter. The counter must be incremented when
	 * a GPU reset happens. It must be protected by @reset_lock.
	 */
	unsigned int reset_counter;
};

static inline struct v3d_dev *
to_v3d_dev(struct drm_device *dev)
{
	return container_of(dev, struct v3d_dev, drm);
}

static inline bool
v3d_has_csd(struct v3d_dev *v3d)
{
	return v3d->ver >= V3D_GEN_41;
}

#define v3d_to_pdev(v3d) to_platform_device((v3d)->drm.dev)

/* The per-fd struct, which tracks the MMU mappings. */
struct v3d_file_priv {
	struct v3d_dev *v3d;

	struct xarray perfmons;

	struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];

	/* Stores the GPU stats for a specific queue for this fd. */
	struct v3d_stats stats[V3D_MAX_QUEUES];

	/* Per-fd reset counter, must be incremented when a job submitted
	 * by this fd causes a GPU reset. It must be protected by
	 * &struct v3d_dev->reset_lock.
	 */
	unsigned int reset_counter;
};

struct v3d_bo {
	struct drm_gem_shmem_object base;

	struct drm_mm_node node;

	/* List entry for the BO's position in
	 * v3d_render_job->unref_list
	 */
	struct list_head unref_head;

	void *vaddr;
};

static inline struct v3d_bo *
to_v3d_bo(struct drm_gem_object *bo)
{
	return (struct v3d_bo *)bo;
}

struct v3d_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* v3d seqno for signaled() test */
	u64 seqno;
	enum v3d_queue queue;
};

static inline struct v3d_fence *
to_v3d_fence(struct dma_fence *fence)
{
	return (struct v3d_fence *)fence;
}

#define V3D_READ(offset) readl(v3d->hub_regs + offset)
#define V3D_WRITE(offset, val) writel(val, v3d->hub_regs + offset)

#define V3D_BRIDGE_READ(offset) readl(v3d->bridge_regs + offset)
#define V3D_BRIDGE_WRITE(offset, val) writel(val, v3d->bridge_regs + offset)

#define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset)
#define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset)

#define V3D_SMS_IDLE				0x0
#define V3D_SMS_ISOLATING_FOR_RESET		0xa
#define V3D_SMS_RESETTING			0xb
#define V3D_SMS_ISOLATING_FOR_POWER_OFF	0xc
#define V3D_SMS_POWER_OFF_STATE		0xd

#define V3D_SMS_READ(offset) readl(v3d->sms_regs + (offset))
#define V3D_SMS_WRITE(offset, val) writel(val, v3d->sms_regs + (offset))

#define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset)
#define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset)
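
/*
 * Usage sketch for the register accessors above (illustrative only; the
 * register and field names are placeholders, real offsets live in
 * v3d_regs.h):
 *
 *	u32 status = V3D_READ(V3D_SOME_HUB_REG);
 *	V3D_CORE_WRITE(0, V3D_SOME_CORE_REG, value);
 *
 * All accessors implicitly use the "v3d" pointer in the calling scope, so
 * they are only usable inside functions that have a struct v3d_dev *v3d.
 */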

struct v3d_job {
	struct drm_sched_job base;

	struct kref refcount;

	struct v3d_dev *v3d;

	/* This is the array of BOs that were looked up at the start
	 * of submission.
	 */
	struct drm_gem_object **bo;
	u32 bo_count;

	/* v3d fence to be signaled by IRQ handler when the job is complete. */
	struct dma_fence *irq_fence;

	/* scheduler fence for when the job is considered complete and
	 * the BO reservations can be released.
	 */
	struct dma_fence *done_fence;

	/* Pointer to a performance monitor object if the user requested it,
	 * NULL otherwise.
	 */
	struct v3d_perfmon *perfmon;

	/* Per-fd private data of the process that submitted the job, which
	 * can be used to collect per-process information about the GPU.
	 */
	struct v3d_file_priv *file_priv;

	/* Callback that frees the job once its refcount drops to 0. */
	void (*free)(struct kref *ref);
};

struct v3d_bin_job {
	struct v3d_job base;

	/* GPU virtual addresses of the start/end of the CL job. */
	u32 start, end;

	u32 timedout_ctca, timedout_ctra;

	/* Corresponding render job, for attaching our overflow memory. */
	struct v3d_render_job *render;

	/* Submitted tile memory allocation start/size, tile state. */
	u32 qma, qms, qts;
};

struct v3d_render_job {
	struct v3d_job base;

	/* GPU virtual addresses of the start/end of the CL job. */
	u32 start, end;

	u32 timedout_ctca, timedout_ctra;

	/* List of overflow BOs used in the job that need to be
	 * released once the job is complete.
	 */
	struct list_head unref_list;
};

struct v3d_tfu_job {
	struct v3d_job base;

	struct drm_v3d_submit_tfu args;
};

struct v3d_csd_job {
	struct v3d_job base;

	u32 timedout_batches;

	struct drm_v3d_submit_csd args;
};

enum v3d_cpu_job_type {
	V3D_CPU_JOB_TYPE_INDIRECT_CSD = 1,
	V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY,
	V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY,
	V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY,
	V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY,
	V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY,
};

struct v3d_timestamp_query {
	/* Offset within the timestamp BO where this query's value is stored. */
	u32 offset;

	/* Syncobj that indicates the timestamp availability */
	struct drm_syncobj *syncobj;
};

struct v3d_performance_query {
	/* Performance monitor IDs for this query */
	u32 *kperfmon_ids;

	/* Syncobj that indicates the query availability */
	struct drm_syncobj *syncobj;
};

struct v3d_indirect_csd_info {
	/* Indirect CSD */
	struct v3d_csd_job *job;

	/* Clean cache job associated with the Indirect CSD job */
	struct v3d_job *clean_job;

	/* Offset within the BO where the workgroup counts are stored */
	u32 offset;

	/* Workgroup size */
	u32 wg_size;

	/* Indices of the uniforms with the workgroup dispatch counts
	 * in the uniform stream.
	 */
	u32 wg_uniform_offsets[3];

	/* Indirect BO */
	struct drm_gem_object *indirect;

	/* Context of the Indirect CSD job */
	struct ww_acquire_ctx acquire_ctx;
};

struct v3d_timestamp_query_info {
	struct v3d_timestamp_query *queries;

	u32 count;
};

struct v3d_performance_query_info {
	struct v3d_performance_query *queries;

	/* Number of performance queries */
	u32 count;

	/* Number of performance monitors related to that query pool */
	u32 nperfmons;

	/* Number of performance counters related to that query pool */
	u32 ncounters;
};

struct v3d_copy_query_results_info {
	/* Whether results are written to the buffer as 64-bit or 32-bit values */
	bool do_64bit;

	/* Whether results may be written even if the query is not yet available */
	bool do_partial;

	/* Whether the availability bit is written to the buffer */
	bool availability_bit;

	/* Offset of the copy buffer in the BO */
	u32 offset;

	/* Stride of the copy buffer in the BO */
	u32 stride;
};
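
/*
 * Addressing sketch (illustrative only): for the copy job, the destination
 * of the i-th query result inside the BO would be computed roughly as
 *
 *	void *dst = bo_vaddr + copy->offset + i * copy->stride;
 *
 * with each result written as a 32-bit or 64-bit value depending on
 * copy->do_64bit, and the availability bit appended when
 * copy->availability_bit is set. "bo_vaddr" is a hypothetical CPU mapping
 * of the destination BO.
 */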

struct v3d_cpu_job {
	struct v3d_job base;

	enum v3d_cpu_job_type job_type;

	struct v3d_indirect_csd_info indirect_csd;

	struct v3d_timestamp_query_info timestamp_query;

	struct v3d_copy_query_results_info copy;

	struct v3d_performance_query_info performance_query;
};

typedef void (*v3d_cpu_job_fn)(struct v3d_cpu_job *);
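
/*
 * Dispatch sketch (illustrative only): the CPU job types above map onto
 * handlers of type v3d_cpu_job_fn, e.g. via a table indexed by job_type.
 * The handler names below are hypothetical:
 *
 *	static const v3d_cpu_job_fn example_cpu_job_fn[] = {
 *		[V3D_CPU_JOB_TYPE_INDIRECT_CSD] = example_run_indirect_csd,
 *		[V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = example_run_timestamp_query,
 *	};
 *
 *	example_cpu_job_fn[job->job_type](job);
 */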

struct v3d_submit_outsync {
	struct drm_syncobj *syncobj;
};

struct v3d_submit_ext {
	u32 flags;
	u32 wait_stage;

	u32 in_sync_count;
	u64 in_syncs;

	u32 out_sync_count;
	struct v3d_submit_outsync *out_syncs;
};

/**
 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since the
 * timeout could be due to preemption or similar and we've never had a chance to
 * check the condition before the timeout.
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
	long wait__ = (Wmin); /* recommended min for usleep is 10 us */	\
	int ret__;							\
	might_sleep();							\
	for (;;) {							\
		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
		OP;							\
		/* Guarantee COND check prior to timeout */		\
		barrier();						\
		if (COND) {						\
			ret__ = 0;					\
			break;						\
		}							\
		if (expired__) {					\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		usleep_range(wait__, wait__ * 2);			\
		if (wait__ < (Wmax))					\
			wait__ <<= 1;					\
	}								\
	ret__;								\
})

#define _wait_for(COND, US, Wmin, Wmax)	__wait_for(, (COND), (US), (Wmin), \
						   (Wmax))
#define wait_for(COND, MS)		_wait_for((COND), (MS) * 1000, 10, 1000)
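
/*
 * Usage sketch (illustrative only; the register and bit names are
 * placeholders): poll a status register until a busy bit clears, giving up
 * after 100 ms:
 *
 *	int ret = wait_for(!(V3D_READ(V3D_SOME_STATUS_REG) &
 *			     V3D_SOME_BUSY_BIT), 100);
 *	if (ret == -ETIMEDOUT)
 *		DRM_ERROR("timed out waiting for idle\n");
 */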

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	/* nsecs_to_jiffies64() does not guard against overflow */
	if ((NSEC_PER_SEC % HZ) != 0 &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}
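
/*
 * Usage sketch (illustrative only): convert a user-supplied timeout in
 * nanoseconds into a jiffies value suitable for fence-wait helpers,
 * clamping at MAX_JIFFY_OFFSET. "args->timeout_ns" is a hypothetical
 * ioctl argument:
 *
 *	unsigned long timeout = nsecs_to_jiffies_timeout(args->timeout_ns);
 *	ret = dma_fence_wait_timeout(fence, true, timeout);
 */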

/* v3d_bo.c */
struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size);
void v3d_free_object(struct drm_gem_object *gem_obj);
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
			     size_t size);
void v3d_get_bo_vaddr(struct v3d_bo *bo);
void v3d_put_bo_vaddr(struct v3d_bo *bo);
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *sgt);

/* v3d_debugfs.c */
void v3d_debugfs_init(struct drm_minor *minor);

/* v3d_drv.c */
void v3d_get_stats(const struct v3d_stats *stats, u64 timestamp,
		   u64 *active_runtime, u64 *jobs_completed);

/* v3d_fence.c */
extern const struct dma_fence_ops v3d_fence_ops;
struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue q);

/* v3d_gem.c */
extern bool super_pages;
int v3d_gem_init(struct drm_device *dev);
void v3d_gem_destroy(struct drm_device *dev);
void v3d_reset_sms(struct v3d_dev *v3d);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
void v3d_clean_caches(struct v3d_dev *v3d);

/* v3d_submit.c */
void v3d_job_cleanup(struct v3d_job *job);
void v3d_job_put(struct v3d_job *job);
int v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int v3d_submit_cpu_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);

/* v3d_irq.c */
int v3d_irq_init(struct v3d_dev *v3d);
void v3d_irq_enable(struct v3d_dev *v3d);
void v3d_irq_disable(struct v3d_dev *v3d);
void v3d_irq_reset(struct v3d_dev *v3d);

/* v3d_mmu.c */
int v3d_mmu_flush_all(struct v3d_dev *v3d);
int v3d_mmu_set_page_table(struct v3d_dev *v3d);
void v3d_mmu_insert_ptes(struct v3d_bo *bo);
void v3d_mmu_remove_ptes(struct v3d_bo *bo);

/* v3d_sched.c */
void v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info,
				   unsigned int count);
void v3d_performance_query_info_free(struct v3d_performance_query_info *query_info,
				     unsigned int count);
void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue q);
int v3d_sched_init(struct v3d_dev *v3d);
void v3d_sched_fini(struct v3d_dev *v3d);

/* v3d_perfmon.c */
void v3d_perfmon_init(struct v3d_dev *v3d);
void v3d_perfmon_get(struct v3d_perfmon *perfmon);
void v3d_perfmon_put(struct v3d_perfmon *perfmon);
void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon);
void v3d_perfmon_stop(struct v3d_dev *v3d, struct v3d_perfmon *perfmon,
		      bool capture);
struct v3d_perfmon *v3d_perfmon_find(struct v3d_file_priv *v3d_priv, int id);
void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv);
void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv);
int v3d_perfmon_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);
int v3d_perfmon_get_counter_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
int v3d_perfmon_set_global_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);

/* v3d_sysfs.c */
int v3d_sysfs_init(struct device *dev);
void v3d_sysfs_destroy(struct device *dev);