xref: /linux/drivers/gpu/drm/msm/msm_ringbuffer.h (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2013 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  */
6 
7 #ifndef __MSM_RINGBUFFER_H__
8 #define __MSM_RINGBUFFER_H__
9 
10 #include "drm/gpu_scheduler.h"
11 #include "msm_drv.h"
12 
/*
 * GPU (iova) address of a member of this ring's shared msm_rbmemptrs
 * block, computed from the block's base iova plus the member offset.
 */
#define rbmemptr(ring, member)  \
	((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))

/*
 * GPU (iova) address of a member of the index'th msm_gpu_submit_stats
 * slot within the memptrs stats[] array.
 */
#define rbmemptr_stats(ring, index, member) \
	(rbmemptr((ring), stats) + \
	 ((index) * sizeof(struct msm_gpu_submit_stats)) + \
	 offsetof(struct msm_gpu_submit_stats, member))
20 
/*
 * Per-submit counter samples kept in the shared memptrs block (see
 * msm_rbmemptrs::stats).  Addressed from the GPU side via
 * rbmemptr_stats(), so the field layout must not change without
 * auditing those users.  Start/end pairs bracket a submit; the
 * difference gives the per-submit counter cost.
 */
struct msm_gpu_submit_stats {
	u64 cpcycles_start;	/* CP cycle counter sample at submit start */
	u64 cpcycles_end;	/* CP cycle counter sample at submit end */
	u64 alwayson_start;	/* always-on counter sample at submit start */
	u64 alwayson_end;	/* always-on counter sample at submit end */
};
27 
/* Number of msm_gpu_submit_stats slots in msm_rbmemptrs::stats[] */
#define MSM_GPU_SUBMIT_STATS_COUNT 64
29 
/*
 * Memory block shared between the CPU and the GPU, holding ring read
 * pointers, fence values and submit stats.  Fields are volatile because
 * the other side updates them asynchronously.  Members are located from
 * the GPU side by offsetof() via rbmemptr()/rbmemptr_stats(), so the
 * field order and layout must not be changed without auditing those
 * macros' users.
 */
struct msm_rbmemptrs {
	volatile uint32_t rptr;
	volatile uint32_t fence;
	/* Introduced on A7xx */
	volatile uint32_t bv_rptr;
	volatile uint32_t bv_fence;

	volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
	/* NOTE(review): ttbr0/context_idr presumably track the per-process
	 * pagetable base and context id for the last switch — confirm
	 * against the a6xx preemption/pgtable-switch code.
	 */
	volatile u64 ttbr0;
	volatile u32 context_idr;
};
41 
/*
 * Snapshot of the CP's indirect-buffer (IB1/IB2) base addresses and
 * remaining dword counts.  Compared between hangcheck samples (see
 * msm_ringbuffer::last_cp_state) to decide whether the GPU is still
 * making forward progress.
 */
struct msm_cp_state {
	uint64_t ib1_base, ib2_base;	/* current IB1/IB2 base addresses */
	uint32_t ib1_rem, ib2_rem;	/* dwords remaining in IB1/IB2 */
};
46 
/*
 * A single GPU command ringbuffer plus its per-ring bookkeeping:
 * scheduler entity, in-flight submit list, shared memptrs and
 * hangcheck/preemption state.
 */
struct msm_ringbuffer {
	struct msm_gpu *gpu;		/* the GPU this ring belongs to */
	int id;				/* ring index */
	struct drm_gem_object *bo;	/* GEM object backing the ring */
	/*
	 * CPU pointers into the ring: [start, end) bound the buffer,
	 * cur is the committed write position, and next is the
	 * in-progress write position (see OUT_RING()) which becomes
	 * cur on flush.
	 */
	uint32_t *start, *end, *cur, *next;

	/*
	 * The job scheduler for this ring.
	 */
	struct drm_gpu_scheduler sched;

	/*
	 * List of in-flight submits on this ring.  Protected by submit_lock.
	 *
	 * Currently just submits that are already written into the ring, not
	 * submits that are still in drm_gpu_scheduler's queues.  At a later
	 * step we could probably move to letting drm_gpu_scheduler manage
	 * hangcheck detection and keep track of submit jobs that are in-
	 * flight.
	 */
	struct list_head submits;
	spinlock_t submit_lock;

	uint64_t iova;			/* GPU address of the ring buffer */
	/* fence value last observed by the hangcheck timer; presumably
	 * compared against memptrs->fence to detect stalls — confirm in
	 * msm_gpu.c */
	uint32_t hangcheck_fence;
	struct msm_rbmemptrs *memptrs;	/* CPU view of the shared memptrs */
	uint64_t memptrs_iova;		/* GPU address of the shared memptrs */
	struct msm_fence_context *fctx;	/* fence context for this ring */

	/**
	 * hangcheck_progress_retries:
	 *
	 * The number of extra hangcheck duration cycles that we have given
	 * due to it appearing that the GPU is making forward progress.
	 *
	 * For GPU generations which support progress detection (see
	 * msm_gpu_funcs::progress()), if the GPU appears to be making progress
	 * (ie. the CP has advanced in the command stream), we'll allow up to
	 * DRM_MSM_HANGCHECK_PROGRESS_RETRIES expirations of the hangcheck timer
	 * before killing the job.  But to detect progress we need two sample
	 * points, so the duration of the hangcheck timer is halved.  In other
	 * words we'll let the submit run for up to:
	 *
	 * (DRM_MSM_HANGCHECK_DEFAULT_PERIOD / 2) * (DRM_MSM_HANGCHECK_PROGRESS_RETRIES + 1)
	 */
	int hangcheck_progress_retries;

	/**
	 * last_cp_state: The state of the CP at the last call to gpu->progress()
	 */
	struct msm_cp_state last_cp_state;

	/*
	 * preempt_lock protects preemption and serializes wptr updates against
	 * preemption.  Can be acquired from irq context.
	 */
	spinlock_t preempt_lock;

	/*
	 * Whether we skipped writing wptr and it needs to be updated in the
	 * future when the ring becomes current.
	 */
	bool restore_wptr;

	/**
	 * cur_ctx_seqno:
	 *
	 * The ctx->seqno value of the last context to submit to this ring
	 * Tracked by seqno rather than pointer value to avoid dangling
	 * pointers, and cases where a ctx can be freed and a new one created
	 * with the same address.
	 */
	int cur_ctx_seqno;
};
121 
/*
 * Create ring @id for @gpu.  @memptrs/@memptrs_iova are the CPU and GPU
 * views of this ring's msm_rbmemptrs slot.
 */
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
		void *memptrs, uint64_t memptrs_iova);
/* Tear down a ring created by msm_ringbuffer_new() */
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
125 
126 /* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
127 
128 static inline void
129 OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
130 {
131 	/*
132 	 * ring->next points to the current command being written - it won't be
133 	 * committed as ring->cur until the flush
134 	 */
135 	if (ring->next == ring->end)
136 		ring->next = ring->start;
137 	*(ring->next++) = data;
138 }
139 
140 #endif /* __MSM_RINGBUFFER_H__ */
141