/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#ifndef __ETNAVIV_GPU_H__
#define __ETNAVIV_GPU_H__

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_drv.h"
#include "common.xml.h"
#include "state.xml.h"

struct etnaviv_gem_submit;
struct etnaviv_vram_mapping;

struct etnaviv_chip_identity {
	u32 model;
	u32 revision;
	u32 product_id;
	u32 customer_id;
	u32 eco_id;

	/* Supported feature fields. */
	u32 features;

	/* Supported minor feature fields. */
	u32 minor_features0;
	u32 minor_features1;
	u32 minor_features2;
	u32 minor_features3;
	u32 minor_features4;
	u32 minor_features5;
	u32 minor_features6;
	u32 minor_features7;
	u32 minor_features8;
	u32 minor_features9;
	u32 minor_features10;
	u32 minor_features11;

	/* Number of streams supported. */
	u32 stream_count;

	/* Total number of temporary registers per thread. */
	u32 register_max;

	/* Maximum number of threads. */
	u32 thread_count;

	/* Number of shader cores. */
	u32 shader_core_count;

	/* Number of Neural Network cores. */
	u32 nn_core_count;

	/* Size of the vertex cache. */
	u32 vertex_cache_size;

	/* Number of entries in the vertex output buffer. */
	u32 vertex_output_buffer_size;

	/* Number of pixel pipes. */
	u32 pixel_pipes;

	/* Number of instructions. */
	u32 instruction_count;

	/* Number of constants. */
	u32 num_constants;

	/* Buffer size */
	u32 buffer_size;

	/* Number of varyings */
	u8 varyings_count;
};

enum etnaviv_sec_mode {
	ETNA_SEC_NONE = 0,
	ETNA_SEC_KERNEL,
	ETNA_SEC_TZ
};

struct etnaviv_event {
	struct dma_fence *fence;
	struct etnaviv_gem_submit *submit;

	void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
};

struct etnaviv_cmdbuf_suballoc;
struct regulator;
struct clk;
struct reset_control;

#define ETNA_NR_EVENTS 30

enum etnaviv_gpu_state {
	ETNA_GPU_STATE_UNKNOWN = 0,
	ETNA_GPU_STATE_IDENTIFIED,
	ETNA_GPU_STATE_RESET,
	ETNA_GPU_STATE_INITIALIZED,
	ETNA_GPU_STATE_RUNNING,
	ETNA_GPU_STATE_FAULT,
};

struct etnaviv_gpu {
	struct drm_device *drm;
	struct thermal_cooling_device *cooling;
	struct device *dev;
	struct mutex lock;
	struct etnaviv_chip_identity identity;
	enum etnaviv_sec_mode sec_mode;
	struct workqueue_struct *wq;
	struct mutex sched_lock;
	struct drm_gpu_scheduler sched;
	enum etnaviv_gpu_state state;

	/* 'ring'-buffer: */
	struct etnaviv_cmdbuf buffer;
	int exec_state;

	/* event management: */
	DECLARE_BITMAP(event_bitmap, ETNA_NR_EVENTS);
	struct etnaviv_event event[ETNA_NR_EVENTS];
	struct completion event_free;
	spinlock_t event_spinlock;

	u32 idle_mask;

	/* Fencing support */
	struct xarray user_fences;
	u32 next_user_fence;
	u32 next_fence;
	u32 completed_fence;
	wait_queue_head_t fence_event;
	u64 fence_context;
	spinlock_t fence_spinlock;

	/* worker for handling 'sync' points: */
	struct work_struct sync_point_work;
	int sync_point_event;

	/* hang detection */
	u32 hangcheck_dma_addr;
	u32 hangcheck_primid;
	u32 hangcheck_fence;

	void __iomem *mmio;
	int irq;

	struct etnaviv_iommu_context *mmu_context;
	unsigned int flush_seq;

	/* Power Control: */
	struct clk *clk_bus;
	struct clk *clk_reg;
	struct clk *clk_core;
	struct clk *clk_shader;
	struct reset_control *rst;

	unsigned int freq_scale;
	unsigned int fe_waitcycles;
	unsigned long base_rate_core;
	unsigned long base_rate_shader;
};

static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
{
	writel(data, gpu->mmio + reg);
}

static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
{
	/* On some variants, such as the GC7000r6009, some FE registers
	 * need two reads to be consistent. Do that extra read here and
	 * throw away the result.
	 */
	if (reg >= VIVS_FE_DMA_STATUS && reg <= VIVS_FE_AUTO_FLUSH)
		readl(gpu->mmio + reg);

	return readl(gpu->mmio + reg);
}

static inline u32 gpu_fix_power_address(struct etnaviv_gpu *gpu, u32 reg)
{
	/* Power registers in GC300 < 2.0 are offset by 0x100 */
	if (gpu->identity.model == chipModel_GC300 &&
	    gpu->identity.revision < 0x2000)
		reg += 0x100;

	return reg;
}

static inline void gpu_write_power(struct etnaviv_gpu *gpu, u32 reg, u32 data)
{
	writel(data, gpu->mmio + gpu_fix_power_address(gpu, reg));
}

static inline u32 gpu_read_power(struct etnaviv_gpu *gpu, u32 reg)
{
	return readl(gpu->mmio + gpu_fix_power_address(gpu, reg));
}

int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);

int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu);

#ifdef CONFIG_DEBUG_FS
int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
#endif

void etnaviv_gpu_recover_hang(struct etnaviv_gem_submit *submit);
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct drm_etnaviv_timespec *timeout);
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj,
	struct drm_etnaviv_timespec *timeout);
struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch);

extern struct platform_driver etnaviv_gpu_driver;

#endif /* __ETNAVIV_GPU_H__ */
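
/*
 * Illustrative usage sketch, not part of the header above: the u32 fence
 * counters in struct etnaviv_gpu (next_fence/completed_fence) wrap around,
 * so "has fence X retired yet?" checks are expected to use a wrap-safe
 * signed-difference comparison along these lines. The helper names below
 * are hypothetical; the driver keeps its own equivalents in its .c files.
 */
#include "etnaviv_gpu.h"

/* Wrap-safe test for "fence a was emitted after fence b". */
static inline bool example_fence_after(u32 a, u32 b)
{
	return (s32)(a - b) > 0;
}

/* A fence is retired once completed_fence has caught up with or passed it. */
static inline bool example_fence_completed(struct etnaviv_gpu *gpu, u32 fence)
{
	return !example_fence_after(fence, gpu->completed_fence);
}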