/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#ifndef __ETNAVIV_GPU_H__
#define __ETNAVIV_GPU_H__

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_drv.h"
#include "common.xml.h"
#include "state.xml.h"

struct etnaviv_gem_submit;
struct etnaviv_vram_mapping;

struct etnaviv_chip_identity {
	u32 model;
	u32 revision;
	u32 product_id;
	u32 customer_id;
	u32 eco_id;

	/* Supported feature fields. */
	u32 features;

	/* Supported minor feature fields. */
	u32 minor_features0;
	u32 minor_features1;
	u32 minor_features2;
	u32 minor_features3;
	u32 minor_features4;
	u32 minor_features5;
	u32 minor_features6;
	u32 minor_features7;
	u32 minor_features8;
	u32 minor_features9;
	u32 minor_features10;
	u32 minor_features11;

	/* Number of streams supported. */
	u32 stream_count;

	/* Total number of temporary registers per thread. */
	u32 register_max;

	/* Maximum number of threads. */
	u32 thread_count;

	/* Number of shader cores. */
	u32 shader_core_count;

	/* Number of Neural Network cores. */
	u32 nn_core_count;

	/* Size of the vertex cache. */
	u32 vertex_cache_size;

	/* Number of entries in the vertex output buffer. */
	u32 vertex_output_buffer_size;

	/* Number of pixel pipes. */
	u32 pixel_pipes;

	/* Number of instructions. */
	u32 instruction_count;

	/* Number of constants. */
	u32 num_constants;

	/* Buffer size */
	u32 buffer_size;

	/* Number of varyings */
	u8 varyings_count;
};
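
/*
 * Illustrative sketch, not part of the driver API: callers test individual
 * feature bits from common.xml.h against the identity fields above. The
 * helper name below is made up for illustration only.
 */
static inline bool etnaviv_chip_has_mc20(const struct etnaviv_chip_identity *id)
{
	/* chipMinorFeatures0_MC20 is defined in common.xml.h */
	return id->minor_features0 & chipMinorFeatures0_MC20;
}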

enum etnaviv_sec_mode {
	ETNA_SEC_NONE = 0,
	ETNA_SEC_KERNEL,
	ETNA_SEC_TZ
};

struct etnaviv_event {
	struct dma_fence *fence;
	struct etnaviv_gem_submit *submit;

	void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
};

struct etnaviv_cmdbuf_suballoc;
struct regulator;
struct clk;

#define ETNA_NR_EVENTS 30

enum etnaviv_gpu_state {
	ETNA_GPU_STATE_UNKNOWN = 0,
	ETNA_GPU_STATE_IDENTIFIED,
	ETNA_GPU_STATE_RESET,
	ETNA_GPU_STATE_INITIALIZED,
	ETNA_GPU_STATE_RUNNING,
	ETNA_GPU_STATE_FAULT,
};

struct etnaviv_gpu {
	struct drm_device *drm;
	struct thermal_cooling_device *cooling;
	struct device *dev;
	struct mutex lock;
	struct etnaviv_chip_identity identity;
	enum etnaviv_sec_mode sec_mode;
	struct workqueue_struct *wq;
	struct mutex sched_lock;
	struct drm_gpu_scheduler sched;
	enum etnaviv_gpu_state state;

	/* 'ring'-buffer: */
	struct etnaviv_cmdbuf buffer;
	int exec_state;

	/* event management: */
	DECLARE_BITMAP(event_bitmap, ETNA_NR_EVENTS);
	struct etnaviv_event event[ETNA_NR_EVENTS];
	struct completion event_free;
	spinlock_t event_spinlock;

	u32 idle_mask;

	/* Fencing support */
	struct xarray user_fences;
	u32 next_user_fence;
	u32 next_fence;
	u32 completed_fence;
	wait_queue_head_t fence_event;
	u64 fence_context;
	spinlock_t fence_spinlock;

	/* worker for handling 'sync' points: */
	struct work_struct sync_point_work;
	int sync_point_event;

	/* hang detection */
	u32 hangcheck_dma_addr;
	u32 hangcheck_fence;

	void __iomem *mmio;
	int irq;

	struct etnaviv_iommu_context *mmu_context;
	unsigned int flush_seq;

	/* Power Control: */
	struct clk *clk_bus;
	struct clk *clk_reg;
	struct clk *clk_core;
	struct clk *clk_shader;

	unsigned int freq_scale;
	unsigned int fe_waitcycles;
	unsigned long base_rate_core;
	unsigned long base_rate_shader;
};

static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
{
	writel(data, gpu->mmio + reg);
}

static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
{
	/* On some variants, such as the GC7000r6009, some FE registers
	 * need two reads to be consistent. Do that extra read here and
	 * throw away the result.
	 */
	if (reg >= VIVS_FE_DMA_STATUS && reg <= VIVS_FE_AUTO_FLUSH)
		readl(gpu->mmio + reg);

	return readl(gpu->mmio + reg);
}
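
/*
 * Illustrative sketch, not part of the driver API: because gpu_read()
 * already applies the double-read workaround for the FE register range,
 * a hangcheck-style poll of the FE DMA address needs no special handling.
 * The helper name below is made up for illustration only.
 */
static inline u32 gpu_read_fe_dma_address(struct etnaviv_gpu *gpu)
{
	/* VIVS_FE_DMA_ADDRESS (state.xml.h) lies inside the range above */
	return gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
}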

static inline u32 gpu_fix_power_address(struct etnaviv_gpu *gpu, u32 reg)
{
	/* Power registers in GC300 < 2.0 are offset by 0x100 */
	if (gpu->identity.model == chipModel_GC300 &&
	    gpu->identity.revision < 0x2000)
		reg += 0x100;

	return reg;
}

static inline void gpu_write_power(struct etnaviv_gpu *gpu, u32 reg, u32 data)
{
	writel(data, gpu->mmio + gpu_fix_power_address(gpu, reg));
}

static inline u32 gpu_read_power(struct etnaviv_gpu *gpu, u32 reg)
{
	return readl(gpu->mmio + gpu_fix_power_address(gpu, reg));
}
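
/*
 * Illustrative sketch, not part of the driver API: a read-modify-write of a
 * power register should go through the two helpers above for both accesses,
 * so the GC300 < 2.0 address fixup is applied consistently. The helper name
 * below is made up for illustration only.
 */
static inline void gpu_rmw_power(struct etnaviv_gpu *gpu, u32 reg,
				 u32 clear, u32 set)
{
	u32 val = gpu_read_power(gpu, reg);

	val &= ~clear;
	val |= set;
	gpu_write_power(gpu, reg, val);
}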

int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);

int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu);

#ifdef CONFIG_DEBUG_FS
int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
#endif

void etnaviv_gpu_recover_hang(struct etnaviv_gem_submit *submit);
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct drm_etnaviv_timespec *timeout);
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj,
	struct drm_etnaviv_timespec *timeout);
struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch);

extern struct platform_driver etnaviv_gpu_driver;

#endif /* __ETNAVIV_GPU_H__ */
229