/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. */

#ifndef __A6XX_GPU_H__
#define __A6XX_GPU_H__


#include "adreno_gpu.h"
#include "a6xx_enums.xml.h"
#include "a7xx_enums.xml.h"
#include "a6xx_perfcntrs.xml.h"
#include "a7xx_perfcntrs.xml.h"
#include "a6xx.xml.h"

#include "a6xx_gmu.h"

extern bool hang_debug;

struct cpu_gpu_lock {
	uint32_t gpu_req;
	uint32_t cpu_req;
	uint32_t turn;
	union {
		struct {
			uint16_t list_length;
			uint16_t list_offset;
		};
		struct {
			uint8_t ifpc_list_len;
			uint8_t preemption_list_len;
			uint16_t dynamic_list_len;
		};
	};
	uint64_t regs[62];
};

/**
 * struct a6xx_info - a6xx specific information from device table
 *
 * @hwcg: hw clock gating register sequence
 * @protect: CP_PROTECT settings
 * @pwrup_reglist: pwrup reglist for preemption
 * @ifpc_reglist: register list for IFPC (inter-frame power collapse)
 * @gbif_cx: GBIF CX register sequence
 * @nonctxt_reglist: non-context registers, per pipe
 * @max_slices: maximum number of GPU slices
 * @gmu_chipid: chip id value passed to the GMU
 * @gmu_cgc_mode: clock gating control mode for the GMU
 * @prim_fifo_threshold: threshold value for the primitive FIFO
 * @bcms: BCM nodes used for bus bandwidth voting
 */
struct a6xx_info {
	const struct adreno_reglist *hwcg;
	const struct adreno_protect *protect;
	const struct adreno_reglist_list *pwrup_reglist;
	const struct adreno_reglist_list *ifpc_reglist;
	const struct adreno_reglist *gbif_cx;
	const struct adreno_reglist_pipe *nonctxt_reglist;
	u32 max_slices;
	u32 gmu_chipid;
	u32 gmu_cgc_mode;
	u32 prim_fifo_threshold;
	const struct a6xx_bcm *bcms;
};

struct a6xx_gpu {
	struct adreno_gpu base;

	struct drm_gem_object *sqe_bo;
	uint64_t sqe_iova;
	struct drm_gem_object *aqe_bo;
	uint64_t aqe_iova;

	struct msm_ringbuffer *cur_ring;
	struct msm_ringbuffer *next_ring;

	struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
	void *preempt[MSM_GPU_MAX_RINGS];
	uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
	struct drm_gem_object *preempt_smmu_bo[MSM_GPU_MAX_RINGS];
	void *preempt_smmu[MSM_GPU_MAX_RINGS];
	uint64_t preempt_smmu_iova[MSM_GPU_MAX_RINGS];
	uint32_t last_seqno[MSM_GPU_MAX_RINGS];

	atomic_t preempt_state;
	spinlock_t eval_lock;
	struct timer_list preempt_timer;

	unsigned int preempt_level;
	bool uses_gmem;
	bool skip_save_restore;

	struct drm_gem_object *preempt_postamble_bo;
	void *preempt_postamble_ptr;
	uint64_t preempt_postamble_iova;
	uint64_t preempt_postamble_len;
	bool postamble_enabled;

	struct a6xx_gmu gmu;

	struct drm_gem_object *shadow_bo;
	uint64_t shadow_iova;
	uint32_t *shadow;

	struct drm_gem_object *pwrup_reglist_bo;
	void *pwrup_reglist_ptr;
	uint64_t pwrup_reglist_iova;
	bool pwrup_reglist_emitted;

	bool has_whereami;

	void __iomem *llc_mmio;
	void *llc_slice;
	void *htw_llc_slice;
	bool have_mmu500;
	bool hung;

	u32 cached_aperture;
	spinlock_t aperture_lock;

	u32 slice_mask;
};

#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)

/*
 * In order to do lockless preemption we use a simple state machine to progress
 * through the process.
 *
 * PREEMPT_NONE - no preemption in progress. Next state: START.
 * PREEMPT_START - The trigger is evaluating if preemption is possible. Next
 * states: TRIGGERED, NONE
 * PREEMPT_FINISH - An intermediate state before moving back to NONE. Next
 * state: NONE.
 * PREEMPT_TRIGGERED - A preemption has been executed on the hardware. Next
 * states: FAULTED, PENDING
 * PREEMPT_FAULTED - A preemption timed out (never completed). This will trigger
 * recovery. Next state: N/A
 * PREEMPT_PENDING - Preemption complete interrupt fired - the callback is
 * checking the success of the operation. Next states: FAULTED, NONE.
 */

enum a6xx_preempt_state {
	PREEMPT_NONE = 0,
	PREEMPT_START,
	PREEMPT_FINISH,
	PREEMPT_TRIGGERED,
	PREEMPT_FAULTED,
	PREEMPT_PENDING,
};
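
/*
 * Illustrative sketch only (hypothetical helper name, not part of the driver
 * API): because preemption is lockless, transitions through the states above
 * are expected to be made atomically, e.g. with atomic_cmpxchg(), so that two
 * paths cannot both claim the same transition (such as NONE -> START).
 */
static inline bool a6xx_try_preempt_state(struct a6xx_gpu *a6xx_gpu,
		enum a6xx_preempt_state old, enum a6xx_preempt_state new)
{
	/* Succeeds only if nothing else has moved the state machine first */
	return atomic_cmpxchg(&a6xx_gpu->preempt_state, old, new) == old;
}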

/*
 * struct a6xx_preempt_record is a shared buffer between the microcode and the
 * CPU to store the state for preemption. The record itself is much larger
 * (2112k) but most of that is used by the CP for storage.
 *
 * There is a preemption record assigned per ringbuffer. When the CPU triggers a
 * preemption, it fills out the record with the useful information (wptr, ring
 * base, etc) and the microcode uses that information to set up the CP following
 * the preemption. When a ring is switched out, the CP will save the ringbuffer
 * state back to the record. In this way, once the records are properly set up
 * the CPU can quickly switch back and forth between ringbuffers by only
 * updating a few registers (often only the wptr).
 *
 * These are the CPU aware registers in the record:
 * @magic: Must always be 0xAE399D6EUL
 * @info: Type of the record - written 0 by the CPU, updated by the CP
 * @errno: preemption error record
 * @data: Data field in YIELD and SET_MARKER packets, written and used by the CP
 * @cntl: Value of RB_CNTL written by CPU, save/restored by CP
 * @rptr: Value of RB_RPTR written by CPU, save/restored by CP
 * @wptr: Value of RB_WPTR written by CPU, save/restored by CP
 * @_pad: Reserved/padding
 * @rptr_addr: Value of RB_RPTR_ADDR_LO|HI written by CPU, save/restored by CP
 * @rbase: Value of RB_BASE written by CPU, save/restored by CP
 * @counter: GPU address of the storage area for the preemption counters
 * @bv_rptr_addr: Value of BV_RB_RPTR_ADDR_LO|HI written by CPU, save/restored by CP
 */
struct a6xx_preempt_record {
	u32 magic;
	u32 info;
	u32 errno;
	u32 data;
	u32 cntl;
	u32 rptr;
	u32 wptr;
	u32 _pad;
	u64 rptr_addr;
	u64 rbase;
	u64 counter;
	u64 bv_rptr_addr;
};

#define A6XX_PREEMPT_RECORD_MAGIC 0xAE399D6EUL

#define PREEMPT_SMMU_INFO_SIZE 4096

#define PREEMPT_RECORD_SIZE(adreno_gpu) \
	(((adreno_gpu)->info->preempt_record_size) == 0 ? \
	 4192 * SZ_1K : ((adreno_gpu)->info->preempt_record_size))

/*
 * The preemption counter block is a storage area for the value of the
 * preemption counters that are saved immediately before context switch. We
 * append it on to the end of the allocation for the preemption record.
 */
#define A6XX_PREEMPT_COUNTER_SIZE (16 * 4)
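
/*
 * Minimal sketch of the CPU-side record setup described above (hypothetical
 * helper; the real initialization lives in the preemption code, and the
 * RB_CNTL and BV values here are illustrative). The CPU seeds the record with
 * the ring base, the rptr shadow address and the counter storage address; the
 * CP saves/restores the rest across ring switches.
 */
static inline void a6xx_preempt_record_init_example(struct a6xx_preempt_record *record,
		u64 ring_iova, u64 rptr_shadow_iova, u64 counters_iova)
{
	record->magic = A6XX_PREEMPT_RECORD_MAGIC;
	record->info = 0;			/* updated by the CP */
	record->errno = 0;
	record->data = 0;
	record->cntl = 0;			/* RB_CNTL value for the CP to restore */
	record->rptr = 0;
	record->wptr = 0;
	record->rptr_addr = rptr_shadow_iova;	/* RB_RPTR_ADDR_LO|HI */
	record->rbase = ring_iova;		/* RB_BASE */
	record->counter = counters_iova;	/* preemption counter storage */
	record->bv_rptr_addr = 0;		/* BV_RB_RPTR_ADDR_LO|HI */
}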

struct a7xx_cp_smmu_info {
	u32 magic;
	u32 _pad4;
	u64 ttbr0;
	u32 asid;
	u32 context_idr;
	u32 context_bank;
};

#define GEN7_CP_SMMU_INFO_MAGIC 0x241350d5UL

/*
 * Given a register and a count, return a value to program into
 * REG_CP_PROTECT_REG(n) - this will block both reads and writes for
 * _len + 1 registers starting at _reg.
 */
#define A6XX_PROTECT_NORDWR(_reg, _len) \
	((1U << 31) | \
	 (((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))

/*
 * Same as above, but allow reads over the range. For areas of mixed use (such
 * as performance counters) this allows us to protect a much larger range with a
 * single register.
 */
#define A6XX_PROTECT_RDONLY(_reg, _len) \
	((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
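
/*
 * Worked example (register and length values here are illustrative, not taken
 * from a real device table):
 *
 *   A6XX_PROTECT_NORDWR(0x00800, 0x0082)
 *     = (1U << 31) | (0x0082 << 18) | 0x00800 = 0x82080800
 *     -> blocks reads and writes for 0x83 registers, 0x00800..0x00882
 *
 *   A6XX_PROTECT_RDONLY(0x008d0, 0x0023)
 *     = (0x0023 << 18) | 0x008d0 = 0x008c08d0
 *     -> blocks writes (reads allowed) for 0x24 registers, 0x008d0..0x008f3
 */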

extern const struct adreno_gpu_funcs a6xx_gpu_funcs;
extern const struct adreno_gpu_funcs a6xx_gmuwrapper_funcs;
extern const struct adreno_gpu_funcs a7xx_gpu_funcs;
extern const struct adreno_gpu_funcs a8xx_gpu_funcs;

static inline bool a6xx_has_gbif(struct adreno_gpu *gpu)
{
	if (adreno_is_a630(gpu))
		return false;

	return true;
}

static inline void a6xx_llc_rmw(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 mask, u32 or)
{
	msm_rmw(a6xx_gpu->llc_mmio + (reg << 2), mask, or);
}

static inline u32 a6xx_llc_read(struct a6xx_gpu *a6xx_gpu, u32 reg)
{
	return readl(a6xx_gpu->llc_mmio + (reg << 2));
}

static inline void a6xx_llc_write(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 value)
{
	writel(value, a6xx_gpu->llc_mmio + (reg << 2));
}

#define shadowptr(_a6xx_gpu, _ring) ((_a6xx_gpu)->shadow_iova + \
		((_ring)->id * sizeof(uint32_t)))

int a6xx_gmu_resume(struct a6xx_gpu *gpu);
int a6xx_gmu_stop(struct a6xx_gpu *gpu);

int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu);

bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);

int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);

int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
void a6xx_gmu_sysprof_setup(struct msm_gpu *gpu);

void a6xx_preempt_init(struct msm_gpu *gpu);
void a6xx_preempt_hw_init(struct msm_gpu *gpu);
void a6xx_preempt_trigger(struct msm_gpu *gpu);
void a6xx_preempt_irq(struct msm_gpu *gpu);
void a6xx_preempt_fini(struct msm_gpu *gpu);
int a6xx_preempt_submitqueue_setup(struct msm_gpu *gpu,
		struct msm_gpu_submitqueue *queue);
void a6xx_preempt_submitqueue_close(struct msm_gpu *gpu,
		struct msm_gpu_submitqueue *queue);

/* Return true if we are in a preempt state */
static inline bool a6xx_in_preempt(struct a6xx_gpu *a6xx_gpu)
{
	int preempt_state;

	/*
	 * Make sure the read of preempt_state is ordered with respect to reads
	 * of other variables before ...
	 */
	smp_rmb();

	preempt_state = atomic_read(&a6xx_gpu->preempt_state);

	/* ... and after. */
	smp_rmb();

	return !(preempt_state == PREEMPT_NONE ||
		 preempt_state == PREEMPT_FINISH);
}

void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
		       bool suspended);
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);

void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
		struct drm_printer *p);

struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu);
int a6xx_gpu_state_put(struct msm_gpu_state *state);

void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off);
void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert);
int a6xx_fenced_write(struct a6xx_gpu *gpu, u32 offset, u64 value, u32 mask, bool is_64b);
void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
int a6xx_zap_shader_init(struct msm_gpu *gpu);

void a8xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off);
int a8xx_fault_handler(void *arg, unsigned long iova, int flags, void *data);
void a8xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
int a8xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value);
u64 a8xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate);
int a8xx_gpu_feature_probe(struct msm_gpu *gpu);
void a8xx_gpu_get_slice_info(struct msm_gpu *gpu);
int a8xx_hw_init(struct msm_gpu *gpu);
irqreturn_t a8xx_irq(struct msm_gpu *gpu);
void a8xx_llc_activate(struct a6xx_gpu *a6xx_gpu);
bool a8xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
void a8xx_recover(struct msm_gpu *gpu);

#endif /* __A6XX_GPU_H__ */