/* SPDX-License-Identifier: GPL-2.0 or MIT */
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */

#ifndef __PANTHOR_DEVICE_H__
#define __PANTHOR_DEVICE_H__

#include <linux/atomic.h>
#include <linux/io-pgtable.h>
#include <linux/iopoll.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <drm/drm_device.h>
#include <drm/drm_mm.h>
#include <drm/gpu_scheduler.h>
#include <drm/panthor_drm.h>

struct panthor_csf;
struct panthor_csf_ctx;
struct panthor_device;
struct panthor_gpu;
struct panthor_group_pool;
struct panthor_heap_pool;
struct panthor_job;
struct panthor_mmu;
struct panthor_fw;
struct panthor_perfcnt;
struct panthor_vm;
struct panthor_vm_pool;

/**
 * enum panthor_device_pm_state - PM state
 */
enum panthor_device_pm_state {
	/** @PANTHOR_DEVICE_PM_STATE_SUSPENDED: Device is suspended. */
	PANTHOR_DEVICE_PM_STATE_SUSPENDED = 0,

	/** @PANTHOR_DEVICE_PM_STATE_RESUMING: Device is being resumed. */
	PANTHOR_DEVICE_PM_STATE_RESUMING,

	/** @PANTHOR_DEVICE_PM_STATE_ACTIVE: Device is active. */
	PANTHOR_DEVICE_PM_STATE_ACTIVE,

	/** @PANTHOR_DEVICE_PM_STATE_SUSPENDING: Device is being suspended. */
	PANTHOR_DEVICE_PM_STATE_SUSPENDING,
};

/**
 * struct panthor_irq - IRQ data
 *
 * Used to automate IRQ handling for the 3 different IRQs we have in this driver.
 */
struct panthor_irq {
	/** @ptdev: Panthor device. */
	struct panthor_device *ptdev;

	/** @irq: IRQ number. */
	int irq;

	/** @mask: Current mask being applied to xxx_INT_MASK. */
	u32 mask;

	/** @suspended: Set to true when the IRQ is suspended. */
	atomic_t suspended;
};

/**
 * enum panthor_device_profiling_flags - Profiling flags
 */
enum panthor_device_profiling_flags {
	/** @PANTHOR_DEVICE_PROFILING_DISABLED: Profiling is disabled. */
	PANTHOR_DEVICE_PROFILING_DISABLED = 0,

	/** @PANTHOR_DEVICE_PROFILING_CYCLES: Sampling job cycles. */
	PANTHOR_DEVICE_PROFILING_CYCLES = BIT(0),

	/** @PANTHOR_DEVICE_PROFILING_TIMESTAMP: Sampling job timestamp. */
	PANTHOR_DEVICE_PROFILING_TIMESTAMP = BIT(1),

	/** @PANTHOR_DEVICE_PROFILING_ALL: Sampling everything. */
	PANTHOR_DEVICE_PROFILING_ALL =
		PANTHOR_DEVICE_PROFILING_CYCLES |
		PANTHOR_DEVICE_PROFILING_TIMESTAMP,
};
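/*
 * Illustrative sketch (not lifted from the driver sources): job accounting
 * code is expected to test individual bits of the user-controlled
 * profile_mask (a field of struct panthor_device below), e.g.:
 *
 *	if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_CYCLES)
 *		... sample the job cycle counters ...
 *
 * PANTHOR_DEVICE_PROFILING_ALL is simply the OR of the two individual flags.
 */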
/**
 * struct panthor_device - Panthor device
 */
struct panthor_device {
	/** @base: Base drm_device. */
	struct drm_device base;

	/** @phys_addr: Physical address of the iomem region. */
	phys_addr_t phys_addr;

	/** @iomem: CPU mapping of the IOMEM region. */
	void __iomem *iomem;

	/** @clks: GPU clocks. */
	struct {
		/** @core: Core clock. */
		struct clk *core;

		/** @stacks: Stacks clock. This clock is optional. */
		struct clk *stacks;

		/** @coregroup: Core group clock. This clock is optional. */
		struct clk *coregroup;
	} clks;

	/** @coherent: True if the CPU/GPU are memory coherent. */
	bool coherent;

	/** @gpu_info: GPU information. */
	struct drm_panthor_gpu_info gpu_info;

	/** @csif_info: Command stream interface information. */
	struct drm_panthor_csif_info csif_info;

	/** @gpu: GPU management data. */
	struct panthor_gpu *gpu;

	/** @fw: FW management data. */
	struct panthor_fw *fw;

	/** @mmu: MMU management data. */
	struct panthor_mmu *mmu;

	/** @scheduler: Scheduler management data. */
	struct panthor_scheduler *scheduler;

	/** @devfreq: Device frequency scaling management data. */
	struct panthor_devfreq *devfreq;

	/** @unplug: Device unplug related fields. */
	struct {
		/** @lock: Lock used to serialize unplug operations. */
		struct mutex lock;

		/**
		 * @done: Completion object signaled when the unplug
		 * operation is done.
		 */
		struct completion done;
	} unplug;

	/** @reset: Reset related fields. */
	struct {
		/** @wq: Ordered workqueue used to schedule reset operations. */
		struct workqueue_struct *wq;

		/** @work: Reset work. */
		struct work_struct work;

		/** @pending: Set to true if a reset is pending. */
		atomic_t pending;

		/**
		 * @fast: True if the post_reset logic can proceed with a fast reset.
		 *
		 * A fast reset is just a reset where the driver doesn't reload the FW sections.
		 *
		 * Any time the firmware is properly suspended, a fast reset can take place.
		 * On the other hand, if the halt operation failed, the driver will reload
		 * all FW sections to make sure we start from a fresh state.
		 */
		bool fast;
	} reset;

	/** @pm: Power management related data. */
	struct {
		/** @state: Power state. */
		atomic_t state;

		/**
		 * @mmio_lock: Lock protecting MMIO userspace CPU mappings.
		 *
		 * This is needed to ensure we map the dummy IO pages when
		 * the device is being suspended, and the real IO pages when
		 * the device is being resumed. We can't rely on the PM state
		 * atomicity alone to deal with this race.
		 */
		struct mutex mmio_lock;

		/**
		 * @dummy_latest_flush: Dummy LATEST_FLUSH page.
		 *
		 * Used to replace the real LATEST_FLUSH page when the GPU
		 * is suspended.
		 */
		struct page *dummy_latest_flush;

		/** @recovery_needed: True when a resume attempt failed. */
		atomic_t recovery_needed;
	} pm;

	/** @profile_mask: User-set profiling flags for job accounting. */
	u32 profile_mask;

	/** @current_frequency: Current device clock frequency. Set by DVFS. */
	unsigned long current_frequency;

	/** @fast_rate: Maximum device clock frequency. Set by DVFS. */
	unsigned long fast_rate;

#ifdef CONFIG_DEBUG_FS
	/** @gems: Device-wide list of GEM objects owned by at least one file. */
	struct {
		/** @gems.lock: Protects the device-wide list of GEM objects. */
		struct mutex lock;

		/** @gems.node: Used to keep track of all the device's DRM objects. */
		struct list_head node;
	} gems;
#endif
};

/**
 * struct panthor_gpu_usage - Cycle and timestamp measures used for job accounting.
 */
struct panthor_gpu_usage {
	u64 time;
	u64 cycles;
};

/**
 * struct panthor_file - Panthor file
 */
struct panthor_file {
	/** @ptdev: Device attached to this file. */
	struct panthor_device *ptdev;

	/** @user_mmio: User MMIO related fields. */
	struct {
		/**
		 * @offset: Offset used for user MMIO mappings.
		 *
		 * This offset should not be used to check the type of mapping
		 * except in panthor_mmap(). After that point, MMIO mapping
		 * offsets have been adjusted to match
		 * DRM_PANTHOR_USER_MMIO_OFFSET and that macro should be used
		 * instead.
		 * Make sure this rule is followed at all times, because
		 * userspace is in control of the offset, and can change the
		 * value behind our back. Otherwise it can lead to erroneous
		 * branching happening in kernel space.
		 */
		u64 offset;
	} user_mmio;

	/** @vms: VM pool attached to this file. */
	struct panthor_vm_pool *vms;

	/** @groups: Scheduling group pool attached to this file. */
	struct panthor_group_pool *groups;

	/** @stats: Cycle and timestamp measures for job execution. */
	struct panthor_gpu_usage stats;
};
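/*
 * Illustrative sketch of the @user_mmio.offset rule above (hypothetical
 * code, not the actual panthor_mmap() implementation): once the initial
 * mmap fixup has been done, MMIO mappings are identified with the UAPI
 * macro, never with the file-provided offset, because userspace can change
 * pfile->user_mmio.offset behind our back:
 *
 *	if ((u64)vma->vm_pgoff << PAGE_SHIFT >= DRM_PANTHOR_USER_MMIO_OFFSET)
 *		return panthor_device_mmap_io(ptdev, vma);
 */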
int panthor_device_init(struct panthor_device *ptdev);
void panthor_device_unplug(struct panthor_device *ptdev);

/**
 * panthor_device_schedule_reset() - Schedules a reset operation
 */
static inline void panthor_device_schedule_reset(struct panthor_device *ptdev)
{
	if (!atomic_cmpxchg(&ptdev->reset.pending, 0, 1) &&
	    atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE)
		queue_work(ptdev->reset.wq, &ptdev->reset.work);
}

/**
 * panthor_device_reset_is_pending() - Checks if a reset is pending.
 *
 * Return: true if a reset is pending, false otherwise.
 */
static inline bool panthor_device_reset_is_pending(struct panthor_device *ptdev)
{
	return atomic_read(&ptdev->reset.pending) != 0;
}

int panthor_device_mmap_io(struct panthor_device *ptdev,
			   struct vm_area_struct *vma);

int panthor_device_resume(struct device *dev);
int panthor_device_suspend(struct device *dev);

static inline int panthor_device_resume_and_get(struct panthor_device *ptdev)
{
	int ret = pm_runtime_resume_and_get(ptdev->base.dev);

	/* If the resume failed, we need to clear the runtime_error, which
	 * can be done by forcing the RPM state to suspended. If multiple
	 * threads called panthor_device_resume_and_get(), we only want
	 * one of them to update the state, hence the cmpxchg. Note that a
	 * thread might enter panthor_device_resume_and_get() and call
	 * pm_runtime_resume_and_get() after another thread had attempted
	 * to resume and failed. This means we will end up with an error
	 * without even attempting a resume ourselves. The only risk here
	 * is to report an error when the second resume attempt might have
	 * succeeded. Given resume errors are not expected, this is probably
	 * something we can live with.
	 */
	if (ret && atomic_cmpxchg(&ptdev->pm.recovery_needed, 1, 0) == 1)
		pm_runtime_set_suspended(ptdev->base.dev);

	return ret;
}
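/*
 * Illustrative usage sketch (not a specific call site from the driver):
 * code that needs the GPU powered up brackets its HW accesses with a
 * resume/put pair, letting runtime PM suspend the device afterwards:
 *
 *	ret = panthor_device_resume_and_get(ptdev);
 *	if (ret)
 *		return ret;
 *
 *	... access GPU registers ...
 *
 *	pm_runtime_mark_last_busy(ptdev->base.dev);
 *	pm_runtime_put_autosuspend(ptdev->base.dev);
 */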
enum drm_panthor_exception_type {
	DRM_PANTHOR_EXCEPTION_OK = 0x00,
	DRM_PANTHOR_EXCEPTION_TERMINATED = 0x04,
	DRM_PANTHOR_EXCEPTION_KABOOM = 0x05,
	DRM_PANTHOR_EXCEPTION_EUREKA = 0x06,
	DRM_PANTHOR_EXCEPTION_ACTIVE = 0x08,
	DRM_PANTHOR_EXCEPTION_CS_RES_TERM = 0x0f,
	DRM_PANTHOR_EXCEPTION_MAX_NON_FAULT = 0x3f,
	DRM_PANTHOR_EXCEPTION_CS_CONFIG_FAULT = 0x40,
	DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE = 0x41,
	DRM_PANTHOR_EXCEPTION_CS_ENDPOINT_FAULT = 0x44,
	DRM_PANTHOR_EXCEPTION_CS_BUS_FAULT = 0x48,
	DRM_PANTHOR_EXCEPTION_CS_INSTR_INVALID = 0x49,
	DRM_PANTHOR_EXCEPTION_CS_CALL_STACK_OVERFLOW = 0x4a,
	DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT = 0x4b,
	DRM_PANTHOR_EXCEPTION_INSTR_INVALID_PC = 0x50,
	DRM_PANTHOR_EXCEPTION_INSTR_INVALID_ENC = 0x51,
	DRM_PANTHOR_EXCEPTION_INSTR_BARRIER_FAULT = 0x55,
	DRM_PANTHOR_EXCEPTION_DATA_INVALID_FAULT = 0x58,
	DRM_PANTHOR_EXCEPTION_TILE_RANGE_FAULT = 0x59,
	DRM_PANTHOR_EXCEPTION_ADDR_RANGE_FAULT = 0x5a,
	DRM_PANTHOR_EXCEPTION_IMPRECISE_FAULT = 0x5b,
	DRM_PANTHOR_EXCEPTION_OOM = 0x60,
	DRM_PANTHOR_EXCEPTION_CSF_FW_INTERNAL_ERROR = 0x68,
	DRM_PANTHOR_EXCEPTION_CSF_RES_EVICTION_TIMEOUT = 0x69,
	DRM_PANTHOR_EXCEPTION_GPU_BUS_FAULT = 0x80,
	DRM_PANTHOR_EXCEPTION_GPU_SHAREABILITY_FAULT = 0x88,
	DRM_PANTHOR_EXCEPTION_SYS_SHAREABILITY_FAULT = 0x89,
	DRM_PANTHOR_EXCEPTION_GPU_CACHEABILITY_FAULT = 0x8a,
	DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_0 = 0xc0,
	DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_1 = 0xc1,
	DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_2 = 0xc2,
	DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_3 = 0xc3,
	DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_4 = 0xc4,
	DRM_PANTHOR_EXCEPTION_PERM_FAULT_0 = 0xc8,
	DRM_PANTHOR_EXCEPTION_PERM_FAULT_1 = 0xc9,
	DRM_PANTHOR_EXCEPTION_PERM_FAULT_2 = 0xca,
	DRM_PANTHOR_EXCEPTION_PERM_FAULT_3 = 0xcb,
	DRM_PANTHOR_EXCEPTION_ACCESS_FLAG_1 = 0xd9,
	DRM_PANTHOR_EXCEPTION_ACCESS_FLAG_2 = 0xda,
	DRM_PANTHOR_EXCEPTION_ACCESS_FLAG_3 = 0xdb,
	DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_IN = 0xe0,
	DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT0 = 0xe4,
	DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT1 = 0xe5,
	DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT2 = 0xe6,
	DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT3 = 0xe7,
	DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_0 = 0xe8,
	DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_1 = 0xe9,
	DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_2 = 0xea,
	DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_3 = 0xeb,
};

/**
 * panthor_exception_is_fault() - Checks if an exception is a fault.
 *
 * Return: true if the exception is a fault, false otherwise.
 */
static inline bool
panthor_exception_is_fault(u32 exception_code)
{
	return exception_code > DRM_PANTHOR_EXCEPTION_MAX_NON_FAULT;
}

const char *panthor_exception_name(struct panthor_device *ptdev,
				   u32 exception_code);
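/*
 * Illustrative sketch (hypothetical call site): fault decoding typically
 * combines the two helpers above, e.g.:
 *
 *	if (panthor_exception_is_fault(exception_code))
 *		drm_warn(&ptdev->base, "GPU exception: %s\n",
 *			 panthor_exception_name(ptdev, exception_code));
 */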
/**
 * PANTHOR_IRQ_HANDLER() - Define interrupt handlers and the interrupt
 * registration function.
 *
 * The boiler-plate to gracefully deal with shared interrupts is
 * auto-generated. All you have to do is call PANTHOR_IRQ_HANDLER()
 * just after the actual handler. The handler prototype is:
 *
 * void (*handler)(struct panthor_device *, u32 status);
 */
#define PANTHOR_IRQ_HANDLER(__name, __reg_prefix, __handler) \
static irqreturn_t panthor_ ## __name ## _irq_raw_handler(int irq, void *data) \
{ \
	struct panthor_irq *pirq = data; \
	struct panthor_device *ptdev = pirq->ptdev; \
	\
	if (atomic_read(&pirq->suspended)) \
		return IRQ_NONE; \
	if (!gpu_read(ptdev, __reg_prefix ## _INT_STAT)) \
		return IRQ_NONE; \
	\
	gpu_write(ptdev, __reg_prefix ## _INT_MASK, 0); \
	return IRQ_WAKE_THREAD; \
} \
\
static irqreturn_t panthor_ ## __name ## _irq_threaded_handler(int irq, void *data) \
{ \
	struct panthor_irq *pirq = data; \
	struct panthor_device *ptdev = pirq->ptdev; \
	irqreturn_t ret = IRQ_NONE; \
	\
	while (true) { \
		u32 status = gpu_read(ptdev, __reg_prefix ## _INT_RAWSTAT) & pirq->mask; \
		\
		if (!status) \
			break; \
		\
		__handler(ptdev, status); \
		ret = IRQ_HANDLED; \
	} \
	\
	if (!atomic_read(&pirq->suspended)) \
		gpu_write(ptdev, __reg_prefix ## _INT_MASK, pirq->mask); \
	\
	return ret; \
} \
\
static inline void panthor_ ## __name ## _irq_suspend(struct panthor_irq *pirq) \
{ \
	pirq->mask = 0; \
	gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, 0); \
	synchronize_irq(pirq->irq); \
	atomic_set(&pirq->suspended, true); \
} \
\
static inline void panthor_ ## __name ## _irq_resume(struct panthor_irq *pirq, u32 mask) \
{ \
	atomic_set(&pirq->suspended, false); \
	pirq->mask = mask; \
	gpu_write(pirq->ptdev, __reg_prefix ## _INT_CLEAR, mask); \
	gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, mask); \
} \
\
static int panthor_request_ ## __name ## _irq(struct panthor_device *ptdev, \
					      struct panthor_irq *pirq, \
					      int irq, u32 mask) \
{ \
	pirq->ptdev = ptdev; \
	pirq->irq = irq; \
	panthor_ ## __name ## _irq_resume(pirq, mask); \
	\
	return devm_request_threaded_irq(ptdev->base.dev, irq, \
					 panthor_ ## __name ## _irq_raw_handler, \
					 panthor_ ## __name ## _irq_threaded_handler, \
					 IRQF_SHARED, KBUILD_MODNAME "-" # __name, \
					 pirq); \
}
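/*
 * Illustrative expansion of the pattern above (FOO and the handler name are
 * hypothetical, not actual driver symbols): for a block whose registers use
 * the FOO prefix (FOO_INT_RAWSTAT, FOO_INT_STAT, FOO_INT_CLEAR,
 * FOO_INT_MASK), a .c file defines the handler and instantiates the
 * boiler-plate with:
 *
 *	static void panthor_foo_irq_handler(struct panthor_device *ptdev, u32 status)
 *	{
 *		... process the interrupt bits set in status ...
 *	}
 *	PANTHOR_IRQ_HANDLER(foo, FOO, panthor_foo_irq_handler);
 *
 * and then registers it with the generated helper:
 *
 *	ret = panthor_request_foo_irq(ptdev, &foo_irq, irq, foo_int_mask);
 */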
extern struct workqueue_struct *panthor_cleanup_wq;

static inline void gpu_write(struct panthor_device *ptdev, u32 reg, u32 data)
{
	writel(data, ptdev->iomem + reg);
}

static inline u32 gpu_read(struct panthor_device *ptdev, u32 reg)
{
	return readl(ptdev->iomem + reg);
}

static inline u32 gpu_read_relaxed(struct panthor_device *ptdev, u32 reg)
{
	return readl_relaxed(ptdev->iomem + reg);
}

static inline void gpu_write64(struct panthor_device *ptdev, u32 reg, u64 data)
{
	gpu_write(ptdev, reg, lower_32_bits(data));
	gpu_write(ptdev, reg + 4, upper_32_bits(data));
}

static inline u64 gpu_read64(struct panthor_device *ptdev, u32 reg)
{
	return (gpu_read(ptdev, reg) | ((u64)gpu_read(ptdev, reg + 4) << 32));
}

static inline u64 gpu_read64_relaxed(struct panthor_device *ptdev, u32 reg)
{
	return (gpu_read_relaxed(ptdev, reg) |
		((u64)gpu_read_relaxed(ptdev, reg + 4) << 32));
}

/*
 * Read a 64-bit counter that keeps ticking while we access it: re-read the
 * high word until it is stable, so the low/high halves are consistent.
 */
static inline u64 gpu_read64_counter(struct panthor_device *ptdev, u32 reg)
{
	u32 lo, hi1, hi2;

	do {
		hi1 = gpu_read(ptdev, reg + 4);
		lo = gpu_read(ptdev, reg);
		hi2 = gpu_read(ptdev, reg + 4);
	} while (hi1 != hi2);

	return lo | ((u64)hi2 << 32);
}

#define gpu_read_poll_timeout(dev, reg, val, cond, delay_us, timeout_us) \
	read_poll_timeout(gpu_read, val, cond, delay_us, timeout_us, false, \
			  dev, reg)

#define gpu_read_poll_timeout_atomic(dev, reg, val, cond, delay_us, \
				     timeout_us) \
	read_poll_timeout_atomic(gpu_read, val, cond, delay_us, timeout_us, \
				 false, dev, reg)

#define gpu_read64_poll_timeout(dev, reg, val, cond, delay_us, timeout_us) \
	read_poll_timeout(gpu_read64, val, cond, delay_us, timeout_us, false, \
			  dev, reg)

#define gpu_read64_poll_timeout_atomic(dev, reg, val, cond, delay_us, \
				       timeout_us) \
	read_poll_timeout_atomic(gpu_read64, val, cond, delay_us, timeout_us, \
				 false, dev, reg)

#define gpu_read_relaxed_poll_timeout_atomic(dev, reg, val, cond, delay_us, \
					     timeout_us) \
	read_poll_timeout_atomic(gpu_read_relaxed, val, cond, delay_us, \
				 timeout_us, false, dev, reg)

#define gpu_read64_relaxed_poll_timeout(dev, reg, val, cond, delay_us, \
					timeout_us) \
	read_poll_timeout(gpu_read64_relaxed, val, cond, delay_us, timeout_us, \
			  false, dev, reg)

#endif