/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. */

#ifndef __A6XX_GPU_H__
#define __A6XX_GPU_H__


#include "adreno_gpu.h"
#include "a6xx_enums.xml.h"
#include "a7xx_enums.xml.h"
#include "a6xx_perfcntrs.xml.h"
#include "a7xx_perfcntrs.xml.h"
#include "a6xx.xml.h"

#include "a6xx_gmu.h"

extern bool hang_debug;

struct cpu_gpu_lock {
	uint32_t gpu_req;
	uint32_t cpu_req;
	uint32_t turn;
	union {
		struct {
			uint16_t list_length;
			uint16_t list_offset;
		};
		struct {
			uint8_t ifpc_list_len;
			uint8_t preemption_list_len;
			uint16_t dynamic_list_len;
		};
	};
	uint64_t regs[62];
};

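/*
 * Illustrative sketch only (hypothetical helper, not used by the driver):
 * the gpu_req/cpu_req/turn triplet above has the shape of a two-party
 * Peterson-style lock, which would let the CPU and the CP/GMU serialize
 * updates to the shared register list without a hardware mutex. A CPU-side
 * acquire could then look roughly like this; the real acquire/release
 * sequence, if any, lives elsewhere in the driver.
 */
static inline void cpu_gpu_lock_acquire_example(struct cpu_gpu_lock *lock)
{
	/* Announce our intent and give the turn to the other party. */
	WRITE_ONCE(lock->cpu_req, 1);
	WRITE_ONCE(lock->turn, 0);
	smp_mb();

	/* Spin while the GPU both wants the lock and still holds the turn. */
	while (READ_ONCE(lock->gpu_req) && READ_ONCE(lock->turn) == 0)
		cpu_relax();
}
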
/**
 * struct a6xx_info - a6xx specific information from device table
 *
 * @hwcg: hw clock gating register sequence
 * @protect: CP_PROTECT settings
 * @pwrup_reglist: pwrup reglist for preemption
 */
struct a6xx_info {
	const struct adreno_reglist *hwcg;
	const struct adreno_protect *protect;
	const struct adreno_reglist_list *pwrup_reglist;
	const struct adreno_reglist_pipe_list *dyn_pwrup_reglist;
	const struct adreno_reglist_list *ifpc_reglist;
	const struct adreno_reglist *gbif_cx;
	const struct adreno_reglist_pipe *nonctxt_reglist;
	u32 max_slices;
	u32 gmu_chipid;
	u32 gmu_cgc_mode;
	u32 prim_fifo_threshold;
	const struct a6xx_bcm *bcms;
};

struct a6xx_gpu {
	struct adreno_gpu base;

	struct drm_gem_object *sqe_bo;
	uint64_t sqe_iova;
	struct drm_gem_object *aqe_bo;
	uint64_t aqe_iova;

	struct msm_ringbuffer *cur_ring;
	struct msm_ringbuffer *next_ring;

	struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
	void *preempt[MSM_GPU_MAX_RINGS];
	uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
	struct drm_gem_object *preempt_smmu_bo[MSM_GPU_MAX_RINGS];
	void *preempt_smmu[MSM_GPU_MAX_RINGS];
	uint64_t preempt_smmu_iova[MSM_GPU_MAX_RINGS];
	uint32_t last_seqno[MSM_GPU_MAX_RINGS];

	atomic_t preempt_state;
	spinlock_t eval_lock;
	struct timer_list preempt_timer;

	unsigned int preempt_level;
	bool uses_gmem;
	bool skip_save_restore;

	struct drm_gem_object *preempt_postamble_bo;
	void *preempt_postamble_ptr;
	uint64_t preempt_postamble_iova;
	uint64_t preempt_postamble_len;
	bool postamble_enabled;

	struct a6xx_gmu gmu;

	struct drm_gem_object *shadow_bo;
	uint64_t shadow_iova;
	uint32_t *shadow;

	struct drm_gem_object *pwrup_reglist_bo;
	void *pwrup_reglist_ptr;
	uint64_t pwrup_reglist_iova;
	bool pwrup_reglist_emitted;

	bool has_whereami;

	void __iomem *llc_mmio;
	void *llc_slice;
	void *htw_llc_slice;
	bool have_mmu500;
	bool hung;

	u32 cached_aperture;
	spinlock_t aperture_lock;

	u32 slice_mask;
};

#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)

/*
 * In order to do lockless preemption we use a simple state machine to progress
 * through the process.
 *
 * PREEMPT_NONE - No preemption in progress. Next state: START.
 * PREEMPT_START - The trigger is evaluating if preemption is possible. Next
 * states: TRIGGERED, NONE.
 * PREEMPT_FINISH - An intermediate state before moving back to NONE. Next
 * state: NONE.
 * PREEMPT_TRIGGERED - A preemption has been executed on the hardware. Next
 * states: FAULTED, PENDING.
 * PREEMPT_FAULTED - A preemption timed out (never completed). This will trigger
 * recovery. Next state: N/A.
 * PREEMPT_PENDING - The preemption complete interrupt fired; the callback is
 * checking the success of the operation. Next states: FAULTED, NONE.
 */

enum a6xx_preempt_state {
	PREEMPT_NONE = 0,
	PREEMPT_START,
	PREEMPT_FINISH,
	PREEMPT_TRIGGERED,
	PREEMPT_FAULTED,
	PREEMPT_PENDING,
};

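/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * because preempt_state is an atomic_t, the state machine above can be
 * advanced locklessly with a compare-and-exchange, e.g. moving from NONE to
 * START only if no other path raced in between.
 */
static inline bool a6xx_try_preempt_state_example(struct a6xx_gpu *a6xx_gpu,
		enum a6xx_preempt_state old, enum a6xx_preempt_state new)
{
	enum a6xx_preempt_state cur = atomic_cmpxchg(&a6xx_gpu->preempt_state,
			old, new);

	/* The transition only took effect if nobody changed the state first. */
	return cur == old;
}
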
/*
 * struct a6xx_preempt_record is a shared buffer between the microcode and the
 * CPU to store the state for preemption. The record itself is much larger
 * (2112k) but most of that is used by the CP for storage.
 *
 * There is a preemption record assigned per ringbuffer. When the CPU triggers a
 * preemption, it fills out the record with the useful information (wptr, ring
 * base, etc.) and the microcode uses that information to set up the CP following
 * the preemption. When a ring is switched out, the CP will save the ringbuffer
 * state back to the record. In this way, once the records are properly set up
 * the CPU can quickly switch back and forth between ringbuffers by only
 * updating a few registers (often only the wptr).
 *
 * These are the CPU-aware registers in the record:
 * @magic: Must always be 0xAE399D6EUL
 * @info: Type of the record - written 0 by the CPU, updated by the CP
 * @errno: Preemption error record
 * @data: Data field in YIELD and SET_MARKER packets, written and used by the CP
 * @cntl: Value of RB_CNTL written by CPU, save/restored by CP
 * @rptr: Value of RB_RPTR written by CPU, save/restored by CP
 * @wptr: Value of RB_WPTR written by CPU, save/restored by CP
 * @_pad: Reserved/padding
 * @rptr_addr: Value of RB_RPTR_ADDR_LO|HI written by CPU, save/restored by CP
 * @rbase: Value of RB_BASE written by CPU, save/restored by CP
 * @counter: GPU address of the storage area for the preemption counters
 * @bv_rptr_addr: Value of BV_RB_RPTR_ADDR_LO|HI written by CPU, save/restored by CP
 */
struct a6xx_preempt_record {
	u32 magic;
	u32 info;
	u32 errno;
	u32 data;
	u32 cntl;
	u32 rptr;
	u32 wptr;
	u32 _pad;
	u64 rptr_addr;
	u64 rbase;
	u64 counter;
	u64 bv_rptr_addr;
};

#define A6XX_PREEMPT_RECORD_MAGIC 0xAE399D6EUL

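/*
 * Illustrative sketch only (hypothetical helper and parameters, not part of
 * this header): the CPU-visible fields described above are seeded once per
 * ring before preemption is enabled, after which the CP keeps them up to date
 * across context switches. All addresses below are supplied by the caller.
 */
static inline void a6xx_preempt_record_init_example(struct a6xx_preempt_record *record,
		u64 ring_iova, u64 rptr_shadow_iova, u64 counters_iova)
{
	record->magic = A6XX_PREEMPT_RECORD_MAGIC;
	record->info = 0;
	record->errno = 0;
	record->data = 0;
	record->cntl = 0;		/* the actual RB_CNTL value is driver policy */
	record->rptr = 0;
	record->wptr = 0;
	record->rptr_addr = rptr_shadow_iova;
	record->rbase = ring_iova;
	record->counter = counters_iova;
	record->bv_rptr_addr = 0;	/* only meaningful where a BV pipe exists */
}
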
#define PREEMPT_SMMU_INFO_SIZE 4096

#define PREEMPT_RECORD_SIZE(adreno_gpu) \
	((adreno_gpu->info->preempt_record_size) == 0 ? \
	 4192 * SZ_1K : (adreno_gpu->info->preempt_record_size))

/*
 * The preemption counter block is a storage area for the value of the
 * preemption counters that are saved immediately before context switch. We
 * append it on to the end of the allocation for the preemption record.
 */
#define A6XX_PREEMPT_COUNTER_SIZE (16 * 4)

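/*
 * Illustrative arithmetic only: because the counter block is appended to the
 * record, a per-ring buffer works out to
 * PREEMPT_RECORD_SIZE(adreno_gpu) + A6XX_PREEMPT_COUNTER_SIZE bytes, with the
 * counters starting PREEMPT_RECORD_SIZE(adreno_gpu) bytes in. With the
 * 4192 * SZ_1K default that is 4292608 + 64 bytes.
 */
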
struct a7xx_cp_smmu_info {
	u32 magic;
	u32 _pad4;
	u64 ttbr0;
	u32 asid;
	u32 context_idr;
	u32 context_bank;
};

#define GEN7_CP_SMMU_INFO_MAGIC 0x241350d5UL

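/*
 * Illustrative sketch only (hypothetical helper and parameters, not part of
 * this header): the fields above mirror the SMMU context (pagetable base,
 * ASID, context bank) that the CP consumes, so a CPU-side setup could look
 * roughly like this, with all values supplied by the caller.
 */
static inline void a7xx_cp_smmu_info_init_example(struct a7xx_cp_smmu_info *info,
		u64 ttbr0, u32 asid)
{
	info->magic = GEN7_CP_SMMU_INFO_MAGIC;
	info->ttbr0 = ttbr0;
	info->asid = asid;
	info->context_idr = 0;
	info->context_bank = 0;
}
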
/*
 * Given a register and a count, return a value to program into
 * REG_CP_PROTECT_REG(n) - this will block both reads and writes for
 * _len + 1 registers starting at _reg.
 */
#define A6XX_PROTECT_NORDWR(_reg, _len) \
	((1 << 31) | \
	(((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))

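/*
 * Worked example: A6XX_PROTECT_NORDWR(0x100, 0x3) evaluates to
 * (1 << 31) | (0x3 << 18) | 0x100 = 0x800c0100, which blocks both reads and
 * writes of the four registers 0x100..0x103.
 */
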
/*
 * Same as above, but allow reads over the range. For areas of mixed use (such
 * as performance counters) this allows us to protect a much larger range with a
 * single register.
 */
#define A6XX_PROTECT_RDONLY(_reg, _len) \
	((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))

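/*
 * Worked example: A6XX_PROTECT_RDONLY(0x100, 0x3) evaluates to
 * (0x3 << 18) | 0x100 = 0x000c0100: registers 0x100..0x103 may still be read,
 * but writes to them are blocked.
 */
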
extern const struct adreno_gpu_funcs a6xx_gpu_funcs;
extern const struct adreno_gpu_funcs a6xx_gmuwrapper_funcs;
extern const struct adreno_gpu_funcs a7xx_gpu_funcs;
extern const struct adreno_gpu_funcs a8xx_gpu_funcs;

static inline bool a6xx_has_gbif(struct adreno_gpu *gpu)
{
	if (adreno_is_a630(gpu))
		return false;

	return true;
}

static inline void a6xx_llc_rmw(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 mask, u32 or)
{
	msm_rmw(a6xx_gpu->llc_mmio + (reg << 2), mask, or);
}

static inline u32 a6xx_llc_read(struct a6xx_gpu *a6xx_gpu, u32 reg)
{
	return readl(a6xx_gpu->llc_mmio + (reg << 2));
}

static inline void a6xx_llc_write(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 value)
{
	writel(value, a6xx_gpu->llc_mmio + (reg << 2));
}

#define shadowptr(_a6xx_gpu, _ring) ((_a6xx_gpu)->shadow_iova + \
		((_ring)->id * sizeof(uint32_t)))

int a6xx_gmu_resume(struct a6xx_gpu *gpu);
int a6xx_gmu_stop(struct a6xx_gpu *gpu);

int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu);

bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);

int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);

int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
void a6xx_gmu_sysprof_setup(struct msm_gpu *gpu);

void a6xx_preempt_init(struct msm_gpu *gpu);
void a6xx_preempt_hw_init(struct msm_gpu *gpu);
void a6xx_preempt_trigger(struct msm_gpu *gpu);
void a6xx_preempt_irq(struct msm_gpu *gpu);
void a6xx_preempt_fini(struct msm_gpu *gpu);
int a6xx_preempt_submitqueue_setup(struct msm_gpu *gpu,
		struct msm_gpu_submitqueue *queue);
void a6xx_preempt_submitqueue_close(struct msm_gpu *gpu,
		struct msm_gpu_submitqueue *queue);

/* Return true if we are in a preempt state */
static inline bool a6xx_in_preempt(struct a6xx_gpu *a6xx_gpu)
{
	/*
	 * Make sure the read of preempt_state is ordered with respect to reads
	 * of other variables before ...
	 */
	smp_rmb();

	int preempt_state = atomic_read(&a6xx_gpu->preempt_state);

	/* ... and after. */
	smp_rmb();

	return !(preempt_state == PREEMPT_NONE ||
			preempt_state == PREEMPT_FINISH);
}

void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
		       bool suspended);
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);

void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
		struct drm_printer *p);

struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu);
int a6xx_gpu_state_put(struct msm_gpu_state *state);

void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off);
void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert);
int a6xx_fenced_write(struct a6xx_gpu *gpu, u32 offset, u64 value, u32 mask, bool is_64b);
void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
int a6xx_zap_shader_init(struct msm_gpu *gpu);

void a8xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off);
int a8xx_fault_handler(void *arg, unsigned long iova, int flags, void *data);
void a8xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
int a8xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value);
u64 a8xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate);
int a8xx_gpu_feature_probe(struct msm_gpu *gpu);
void a8xx_gpu_get_slice_info(struct msm_gpu *gpu);
int a8xx_hw_init(struct msm_gpu *gpu);
irqreturn_t a8xx_irq(struct msm_gpu *gpu);
void a8xx_llc_activate(struct a6xx_gpu *a6xx_gpu);
bool a8xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
void a8xx_recover(struct msm_gpu *gpu);
#endif /* __A6XX_GPU_H__ */