/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#ifndef __PANFROST_DEVICE_H__
#define __PANFROST_DEVICE_H__

#include <linux/atomic.h>
#include <linux/io-pgtable.h>
#include <linux/pm.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <drm/drm_device.h>
#include <drm/drm_mm.h>
#include <drm/gpu_scheduler.h>

#include "panfrost_devfreq.h"

struct panfrost_device;
struct panfrost_mmu;
struct panfrost_job_slot;
struct panfrost_job;
struct panfrost_perfcnt;

/* Number of hardware job slots tracked per device. */
#define NUM_JOB_SLOTS 3
/* Maximum number of external power domains a device may attach to. */
#define MAX_PM_DOMAINS 5

/*
 * Bit indices into panfrost_device::is_suspended, one per driver
 * component whose suspended state is tracked independently.
 */
enum panfrost_drv_comp_bits {
	PANFROST_COMP_BIT_GPU,
	PANFROST_COMP_BIT_JOB,
	PANFROST_COMP_BIT_MMU,
	PANFROST_COMP_BIT_MAX
};

/**
 * enum panfrost_gpu_pm - Supported kernel power management features
 * @GPU_PM_CLK_DIS: Allow disabling clocks during system suspend
 * @GPU_PM_VREG_OFF: Allow turning off regulators during system suspend
 * @GPU_PM_RT: Allow disabling clocks and asserting the reset control during
 * system runtime suspend
 */
enum panfrost_gpu_pm {
	GPU_PM_CLK_DIS,
	GPU_PM_VREG_OFF,
	GPU_PM_RT
};

/**
 * enum panfrost_gpu_quirks - GPU optional quirks
 * @GPU_QUIRK_FORCE_AARCH64_PGTABLE: Use AARCH64_4K page table format
 */
enum panfrost_gpu_quirks {
	GPU_QUIRK_FORCE_AARCH64_PGTABLE,
};

/*
 * Identification and capability data describing one GPU instance.
 * Presumably populated from the hardware's ID/feature registers at
 * probe time — confirm against panfrost_gpu.c.
 */
struct panfrost_features {
	/* GPU product id; see panfrost_model_cmp() for how it is matched. */
	u16 id;
	u16 revision;

	/* Presence bitmasks for the respective hardware units. */
	u64 shader_present;
	u64 tiler_present;
	u64 l2_present;
	u64 stack_present;
	u32 as_present;
	u32 js_present;

	/* Raw feature words for the various hardware blocks. */
	u32 l2_features;
	u32 core_features;
	u32 tiler_features;
	u32 mem_features;
	u32 mmu_features;
	u32 thread_features;
	u32 max_threads;
	u32 thread_max_workgroup_sz;
	u32 thread_max_barrier_sz;
	u32 coherency_features;
	u32 afbc_features;
	u32 texture_features[4];
	/* One feature word per hardware job slot. */
	u32 js_features[16];

	u32 nr_core_groups;
	u32 thread_tls_alloc;

	/* 64-bit bitmaps of detected hardware features and errata. */
	unsigned long hw_features[64 / BITS_PER_LONG];
	unsigned long hw_issues[64 / BITS_PER_LONG];
};

/*
 * Features that cannot be automatically detected and need matching using the
 * compatible string, typically SoC-specific.
 */
struct panfrost_compatible {
	/* Supplies count and names. */
	int num_supplies;
	const char * const *supply_names;
	/*
	 * Number of power domains required, note that values 0 and 1 are
	 * handled identically, as only values > 1 need special handling.
	 */
	int num_pm_domains;
	/* Only required if num_pm_domains > 1. */
	const char * const *pm_domain_names;

	/* Vendor implementation quirks callback */
	void (*vendor_quirk)(struct panfrost_device *pfdev);

	/* Allowed PM features; bitmask of enum panfrost_gpu_pm values. */
	u8 pm_features;

	/* GPU configuration quirks; bitmask of enum panfrost_gpu_quirks values. */
	u8 gpu_quirks;
};

/**
 * struct panfrost_device_debugfs - Device-wide DebugFS tracking structures
 */
struct panfrost_device_debugfs {
	/** @gems_list: Device-wide list of GEM objects owned by at least one file. */
	struct list_head gems_list;

	/** @gems_lock: Serializes access to the device-wide list of GEM objects. */
	struct mutex gems_lock;
};

/*
 * Top-level per-GPU driver state, shared by all opens of the DRM device.
 */
struct panfrost_device {
	struct device *dev;
	struct drm_device *ddev;
	struct platform_device *pdev;
	int gpu_irq;
	int mmu_irq;

	/* Register mapping, clocks, regulators and reset line. */
	void __iomem *iomem;
	struct clk *clock;
	struct clk *bus_clock;
	struct regulator_bulk_data *regulators;
	struct reset_control *rstc;
	/* pm_domains for devices with more than one. */
	struct device *pm_domain_devs[MAX_PM_DOMAINS];
	struct device_link *pm_domain_links[MAX_PM_DOMAINS];
	bool coherent;

	struct panfrost_features features;
	struct panfrost_compatible const *comp;
	/* Per-component suspended flags, indexed by enum panfrost_drv_comp_bits. */
	DECLARE_BITMAP(is_suspended, PANFROST_COMP_BIT_MAX);

	/*
	 * MMU address-space (AS) bookkeeping. The as_* masks and the LRU
	 * list appear to be guarded by as_lock — confirm in panfrost_mmu.c.
	 */
	spinlock_t as_lock;
	unsigned long as_in_use_mask;
	unsigned long as_alloc_mask;
	unsigned long as_faulty_mask;
	struct list_head as_lru_list;

	struct panfrost_job_slot *js;

	/* Two in-flight jobs per hardware job slot. */
	struct panfrost_job *jobs[NUM_JOB_SLOTS][2];
	struct list_head scheduled_jobs;

	struct panfrost_perfcnt *perfcnt;
	bool profile_mode;

	struct mutex sched_lock;

	/* GPU reset machinery; see panfrost_device_schedule_reset(). */
	struct {
		struct workqueue_struct *wq;
		struct work_struct work;
		atomic_t pending;
	} reset;

	/* Memory shrinker state for reclaimable GEM objects. */
	struct mutex shrinker_lock;
	struct list_head shrinker_list;
	struct shrinker *shrinker;

	struct panfrost_devfreq pfdevfreq;

	/* Refcounted enable/disable state for the GPU cycle counter. */
	struct {
		atomic_t use_count;
		spinlock_t lock;
	} cycle_counter;

#ifdef CONFIG_DEBUG_FS
	struct panfrost_device_debugfs debugfs;
#endif
};

/*
 * Per-context MMU state: one page table and one GPU VA space, shared
 * via @refcount and bound to a hardware address space slot (@as) while
 * in use.
 */
struct panfrost_mmu {
	struct panfrost_device *pfdev;
	struct kref refcount;
	struct io_pgtable_cfg pgtbl_cfg;
	struct io_pgtable_ops *pgtbl_ops;
	/* GPU virtual address space allocator, guarded by mm_lock. */
	struct drm_mm mm;
	spinlock_t mm_lock;
	/* Hardware AS slot this MMU is bound to (meaning of negative values:
	 * presumably "unassigned" — confirm against panfrost_mmu.c). */
	int as;
	atomic_t as_count;
	/* Link in panfrost_device::as_lru_list. */
	struct list_head list;
	/* Cached register values programmed into the hardware AS. */
	struct {
		u64 transtab;
		u64 memattr;
		u64 transcfg;
	} cfg;
};

/* Accumulated busy time and GPU cycles, tracked per job slot. */
struct panfrost_engine_usage {
	unsigned long long elapsed_ns[NUM_JOB_SLOTS];
	unsigned long long cycles[NUM_JOB_SLOTS];
};

/* Per-open-file driver state: one scheduler entity per job slot plus
 * the file's own MMU context and usage accounting. */
struct panfrost_file_priv {
	struct panfrost_device *pfdev;

	struct drm_sched_entity sched_entity[NUM_JOB_SLOTS];

	struct panfrost_mmu *mmu;

	struct panfrost_engine_usage engine_usage;
};

/* Retrieve the panfrost device backing a DRM device (stored in dev_private). */
static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev)
{
	return ddev->dev_private;
}

/*
 * Compare the detected GPU model id against @id, strcmp-style:
 * returns <0, 0 or >0. Ids with any of bits 12-15 set are masked to
 * their top and bottom nibbles (0xf00f) before comparing.
 */
static inline int panfrost_model_cmp(struct panfrost_device *pfdev, s32 id)
{
	s32 match_id = pfdev->features.id;

	if (match_id & 0xf000)
		match_id &= 0xf00f;
	return match_id - id;
}

/* Bifrost GPUs are those with a (masked) model id of 0x1000 or above. */
static inline bool panfrost_model_is_bifrost(struct panfrost_device *pfdev)
{
	return panfrost_model_cmp(pfdev, 0x1000) >= 0;
}

/* True iff the GPU's (masked) model id equals @id exactly. */
static inline bool panfrost_model_eq(struct panfrost_device *pfdev, s32 id)
{
	return !panfrost_model_cmp(pfdev, id);
}

int panfrost_unstable_ioctl_check(void);

int panfrost_device_init(struct panfrost_device *pfdev);
void panfrost_device_fini(struct panfrost_device *pfdev);
void panfrost_device_reset(struct panfrost_device *pfdev);

extern const struct dev_pm_ops panfrost_pm_ops;

/*
 * Hardware exception codes as reported by the GPU. Values at or below
 * DRM_PANFROST_EXCEPTION_MAX_NON_FAULT are benign completion statuses;
 * everything above is a fault (see panfrost_exception_is_fault()).
 */
enum drm_panfrost_exception_type {
	DRM_PANFROST_EXCEPTION_OK = 0x00,
	DRM_PANFROST_EXCEPTION_DONE = 0x01,
	DRM_PANFROST_EXCEPTION_INTERRUPTED = 0x02,
	DRM_PANFROST_EXCEPTION_STOPPED = 0x03,
	DRM_PANFROST_EXCEPTION_TERMINATED = 0x04,
	DRM_PANFROST_EXCEPTION_KABOOM = 0x05,
	DRM_PANFROST_EXCEPTION_EUREKA = 0x06,
	DRM_PANFROST_EXCEPTION_ACTIVE = 0x08,
	DRM_PANFROST_EXCEPTION_MAX_NON_FAULT = 0x3f,
	DRM_PANFROST_EXCEPTION_JOB_CONFIG_FAULT = 0x40,
	DRM_PANFROST_EXCEPTION_JOB_POWER_FAULT = 0x41,
	DRM_PANFROST_EXCEPTION_JOB_READ_FAULT = 0x42,
	DRM_PANFROST_EXCEPTION_JOB_WRITE_FAULT = 0x43,
	DRM_PANFROST_EXCEPTION_JOB_AFFINITY_FAULT = 0x44,
	DRM_PANFROST_EXCEPTION_JOB_BUS_FAULT = 0x48,
	DRM_PANFROST_EXCEPTION_INSTR_INVALID_PC = 0x50,
	DRM_PANFROST_EXCEPTION_INSTR_INVALID_ENC = 0x51,
	DRM_PANFROST_EXCEPTION_INSTR_TYPE_MISMATCH = 0x52,
	DRM_PANFROST_EXCEPTION_INSTR_OPERAND_FAULT = 0x53,
	DRM_PANFROST_EXCEPTION_INSTR_TLS_FAULT = 0x54,
	DRM_PANFROST_EXCEPTION_INSTR_BARRIER_FAULT = 0x55,
	DRM_PANFROST_EXCEPTION_INSTR_ALIGN_FAULT = 0x56,
	DRM_PANFROST_EXCEPTION_DATA_INVALID_FAULT = 0x58,
	DRM_PANFROST_EXCEPTION_TILE_RANGE_FAULT = 0x59,
	DRM_PANFROST_EXCEPTION_ADDR_RANGE_FAULT = 0x5a,
	DRM_PANFROST_EXCEPTION_IMPRECISE_FAULT = 0x5b,
	DRM_PANFROST_EXCEPTION_OOM = 0x60,
	DRM_PANFROST_EXCEPTION_OOM_AFBC = 0x61,
	DRM_PANFROST_EXCEPTION_UNKNOWN = 0x7f,
	DRM_PANFROST_EXCEPTION_DELAYED_BUS_FAULT = 0x80,
	DRM_PANFROST_EXCEPTION_GPU_SHAREABILITY_FAULT = 0x88,
	DRM_PANFROST_EXCEPTION_SYS_SHAREABILITY_FAULT = 0x89,
	DRM_PANFROST_EXCEPTION_GPU_CACHEABILITY_FAULT = 0x8a,
	DRM_PANFROST_EXCEPTION_TRANSLATION_FAULT_0 = 0xc0,
	DRM_PANFROST_EXCEPTION_TRANSLATION_FAULT_1 = 0xc1,
	DRM_PANFROST_EXCEPTION_TRANSLATION_FAULT_2 = 0xc2,
	DRM_PANFROST_EXCEPTION_TRANSLATION_FAULT_3 = 0xc3,
	DRM_PANFROST_EXCEPTION_TRANSLATION_FAULT_4 = 0xc4,
	DRM_PANFROST_EXCEPTION_TRANSLATION_FAULT_IDENTITY = 0xc7,
	DRM_PANFROST_EXCEPTION_PERM_FAULT_0 = 0xc8,
	DRM_PANFROST_EXCEPTION_PERM_FAULT_1 = 0xc9,
	DRM_PANFROST_EXCEPTION_PERM_FAULT_2 = 0xca,
	DRM_PANFROST_EXCEPTION_PERM_FAULT_3 = 0xcb,
	DRM_PANFROST_EXCEPTION_TRANSTAB_BUS_FAULT_0 = 0xd0,
	DRM_PANFROST_EXCEPTION_TRANSTAB_BUS_FAULT_1 = 0xd1,
	DRM_PANFROST_EXCEPTION_TRANSTAB_BUS_FAULT_2 = 0xd2,
	DRM_PANFROST_EXCEPTION_TRANSTAB_BUS_FAULT_3 = 0xd3,
	DRM_PANFROST_EXCEPTION_ACCESS_FLAG_0 = 0xd8,
	DRM_PANFROST_EXCEPTION_ACCESS_FLAG_1 = 0xd9,
	DRM_PANFROST_EXCEPTION_ACCESS_FLAG_2 = 0xda,
	DRM_PANFROST_EXCEPTION_ACCESS_FLAG_3 = 0xdb,
	DRM_PANFROST_EXCEPTION_ADDR_SIZE_FAULT_IN0 = 0xe0,
	DRM_PANFROST_EXCEPTION_ADDR_SIZE_FAULT_IN1 = 0xe1,
	DRM_PANFROST_EXCEPTION_ADDR_SIZE_FAULT_IN2 = 0xe2,
	DRM_PANFROST_EXCEPTION_ADDR_SIZE_FAULT_IN3 = 0xe3,
	DRM_PANFROST_EXCEPTION_ADDR_SIZE_FAULT_OUT0 = 0xe4,
	DRM_PANFROST_EXCEPTION_ADDR_SIZE_FAULT_OUT1 = 0xe5,
	DRM_PANFROST_EXCEPTION_ADDR_SIZE_FAULT_OUT2 = 0xe6,
	DRM_PANFROST_EXCEPTION_ADDR_SIZE_FAULT_OUT3 = 0xe7,
	DRM_PANFROST_EXCEPTION_MEM_ATTR_FAULT_0 = 0xe8,
	DRM_PANFROST_EXCEPTION_MEM_ATTR_FAULT_1 = 0xe9,
	DRM_PANFROST_EXCEPTION_MEM_ATTR_FAULT_2 = 0xea,
	DRM_PANFROST_EXCEPTION_MEM_ATTR_FAULT_3 = 0xeb,
	DRM_PANFROST_EXCEPTION_MEM_ATTR_NONCACHE_0 = 0xec,
	DRM_PANFROST_EXCEPTION_MEM_ATTR_NONCACHE_1 = 0xed,
	DRM_PANFROST_EXCEPTION_MEM_ATTR_NONCACHE_2 = 0xee,
	DRM_PANFROST_EXCEPTION_MEM_ATTR_NONCACHE_3 = 0xef,
};

/* True iff @exception_code indicates a fault rather than a completion status. */
static inline bool
panfrost_exception_is_fault(u32 exception_code)
{
	return exception_code > DRM_PANFROST_EXCEPTION_MAX_NON_FAULT;
}

const char *panfrost_exception_name(u32 exception_code);
bool panfrost_exception_needs_reset(const struct panfrost_device *pfdev,
				    u32 exception_code);

/*
 * Mark a reset as pending and queue the reset work item on the
 * device's dedicated reset workqueue.
 */
static inline void
panfrost_device_schedule_reset(struct panfrost_device *pfdev)
{
	atomic_set(&pfdev->reset.pending, 1);
	queue_work(pfdev->reset.wq, &pfdev->reset.work);
}

#endif