// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */

#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>

#include "panthor_device.h"
#include "panthor_gpu.h"
#include "panthor_regs.h"

/**
 * struct panthor_gpu - GPU block management data.
 */
struct panthor_gpu {
	/** @irq: GPU irq. */
	struct panthor_irq irq;

	/** @reqs_lock: Lock protecting access to pending_reqs. */
	spinlock_t reqs_lock;

	/** @pending_reqs: Pending GPU requests. */
	u32 pending_reqs;

	/** @reqs_acked: GPU request wait queue. */
	wait_queue_head_t reqs_acked;
};

/**
 * struct panthor_model - GPU model description
 */
struct panthor_model {
	/** @name: Model name. */
	const char *name;

	/** @arch_major: Major version number of architecture. */
	u8 arch_major;

	/** @product_major: Major version number of product. */
	u8 product_major;
};

/**
 * GPU_MODEL() - Define a GPU model. A GPU product can be uniquely identified
 * by a combination of the major architecture version and the major product
 * version.
 * @_name: Name for the GPU model.
 * @_arch_major: Architecture major.
 * @_product_major: Product major.
 */
#define GPU_MODEL(_name, _arch_major, _product_major) \
{\
	.name = __stringify(_name), \
	.arch_major = _arch_major, \
	.product_major = _product_major, \
}

static const struct panthor_model gpu_models[] = {
	GPU_MODEL(g610, 10, 7),
	{},
};

#define GPU_INTERRUPTS_MASK	\
	(GPU_IRQ_FAULT | \
	 GPU_IRQ_PROTM_FAULT | \
	 GPU_IRQ_RESET_COMPLETED | \
	 GPU_IRQ_CLEAN_CACHES_COMPLETED)

static void panthor_gpu_init_info(struct panthor_device *ptdev)
{
	const struct panthor_model *model;
	u32 arch_major, product_major;
	u32 major, minor, status;
	unsigned int i;

	ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID);
	ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID);
	ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID);
	ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES);
	ptdev->gpu_info.l2_features = gpu_read(ptdev, GPU_L2_FEATURES);
	ptdev->gpu_info.tiler_features = gpu_read(ptdev, GPU_TILER_FEATURES);
	ptdev->gpu_info.mem_features = gpu_read(ptdev, GPU_MEM_FEATURES);
	ptdev->gpu_info.mmu_features = gpu_read(ptdev, GPU_MMU_FEATURES);
	ptdev->gpu_info.thread_features = gpu_read(ptdev, GPU_THREAD_FEATURES);
	ptdev->gpu_info.max_threads = gpu_read(ptdev, GPU_THREAD_MAX_THREADS);
	ptdev->gpu_info.thread_max_workgroup_size = gpu_read(ptdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
	ptdev->gpu_info.thread_max_barrier_size = gpu_read(ptdev, GPU_THREAD_MAX_BARRIER_SIZE);
	ptdev->gpu_info.coherency_features = gpu_read(ptdev, GPU_COHERENCY_FEATURES);
	for (i = 0; i < 4; i++)
		ptdev->gpu_info.texture_features[i] = gpu_read(ptdev, GPU_TEXTURE_FEATURES(i));

	ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT);

	ptdev->gpu_info.shader_present = gpu_read(ptdev, GPU_SHADER_PRESENT_LO);
	ptdev->gpu_info.shader_present |= (u64)gpu_read(ptdev, GPU_SHADER_PRESENT_HI) << 32;

	ptdev->gpu_info.tiler_present = gpu_read(ptdev, GPU_TILER_PRESENT_LO);
	ptdev->gpu_info.tiler_present |= (u64)gpu_read(ptdev, GPU_TILER_PRESENT_HI) << 32;

	ptdev->gpu_info.l2_present = gpu_read(ptdev, GPU_L2_PRESENT_LO);
	ptdev->gpu_info.l2_present |= (u64)gpu_read(ptdev, GPU_L2_PRESENT_HI) << 32;

	arch_major = GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id);
	product_major = GPU_PROD_MAJOR(ptdev->gpu_info.gpu_id);
	major = GPU_VER_MAJOR(ptdev->gpu_info.gpu_id);
	minor = GPU_VER_MINOR(ptdev->gpu_info.gpu_id);
	status = GPU_VER_STATUS(ptdev->gpu_info.gpu_id);

	for (model = gpu_models; model->name; model++) {
		if (model->arch_major == arch_major &&
		    model->product_major == product_major)
			break;
	}

	drm_info(&ptdev->base,
		 "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
		 model->name ?: "unknown", ptdev->gpu_info.gpu_id >> 16,
		 major, minor, status);

	drm_info(&ptdev->base,
		 "Features: L2:%#x Tiler:%#x Mem:%#x MMU:%#x AS:%#x",
		 ptdev->gpu_info.l2_features,
		 ptdev->gpu_info.tiler_features,
		 ptdev->gpu_info.mem_features,
		 ptdev->gpu_info.mmu_features,
		 ptdev->gpu_info.as_present);

	drm_info(&ptdev->base,
		 "shader_present=0x%0llx l2_present=0x%0llx tiler_present=0x%0llx",
		 ptdev->gpu_info.shader_present, ptdev->gpu_info.l2_present,
		 ptdev->gpu_info.tiler_present);
}
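/*
 * Example (illustrative sketch, not part of the driver): the model lookup
 * in panthor_gpu_init_info() relies on the terminating {} entry of
 * gpu_models. If no entry matches, the loop stops on that sentinel, whose
 * NULL .name makes the "model->name ?: \"unknown\"" log fall back to
 * "unknown". Assuming the usual Mali GPU_ID layout (arch_major in the top
 * nibble, product_major at bits 19:16 -- an assumption here, the real
 * field positions live in GPU_ARCH_MAJOR()/GPU_PROD_MAJOR() in
 * panthor_regs.h), a Mali-G610 would report something like:
 *
 *	u32 gpu_id = 0xa8670000;	// hypothetical value
 *	// GPU_ARCH_MAJOR(gpu_id) == 10, GPU_PROD_MAJOR(gpu_id) == 7
 *	// => matches the GPU_MODEL(g610, 10, 7) entry above
 */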
static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
{
	if (status & GPU_IRQ_FAULT) {
		u32 fault_status = gpu_read(ptdev, GPU_FAULT_STATUS);
		u64 address = ((u64)gpu_read(ptdev, GPU_FAULT_ADDR_HI) << 32) |
			      gpu_read(ptdev, GPU_FAULT_ADDR_LO);

		drm_warn(&ptdev->base, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
			 fault_status, panthor_exception_name(ptdev, fault_status & 0xFF),
			 address);
	}
	if (status & GPU_IRQ_PROTM_FAULT)
		drm_warn(&ptdev->base, "GPU Fault in protected mode\n");

	spin_lock(&ptdev->gpu->reqs_lock);
	if (status & ptdev->gpu->pending_reqs) {
		ptdev->gpu->pending_reqs &= ~status;
		wake_up_all(&ptdev->gpu->reqs_acked);
	}
	spin_unlock(&ptdev->gpu->reqs_lock);
}
PANTHOR_IRQ_HANDLER(gpu, GPU, panthor_gpu_irq_handler);

/**
 * panthor_gpu_unplug() - Called when the GPU is unplugged.
 * @ptdev: Device to unplug.
 */
void panthor_gpu_unplug(struct panthor_device *ptdev)
{
	unsigned long flags;

	/* Make sure the IRQ handler is not running after that point. */
	panthor_gpu_irq_suspend(&ptdev->gpu->irq);

	/* Wake-up all waiters. */
	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
	ptdev->gpu->pending_reqs = 0;
	wake_up_all(&ptdev->gpu->reqs_acked);
	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
}

/**
 * panthor_gpu_init() - Initialize the GPU block
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_init(struct panthor_device *ptdev)
{
	struct panthor_gpu *gpu;
	u32 pa_bits;
	int ret, irq;

	gpu = drmm_kzalloc(&ptdev->base, sizeof(*gpu), GFP_KERNEL);
	if (!gpu)
		return -ENOMEM;

	spin_lock_init(&gpu->reqs_lock);
	init_waitqueue_head(&gpu->reqs_acked);
	ptdev->gpu = gpu;
	panthor_gpu_init_info(ptdev);

	dma_set_max_seg_size(ptdev->base.dev, UINT_MAX);
	pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features);
	ret = dma_set_mask_and_coherent(ptdev->base.dev, DMA_BIT_MASK(pa_bits));
	if (ret)
		return ret;

	irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "gpu");
	if (irq < 0)
		return irq;

	ret = panthor_request_gpu_irq(ptdev, &ptdev->gpu->irq, irq, GPU_INTERRUPTS_MASK);
	if (ret)
		return ret;

	return 0;
}
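/*
 * Example (sketch, not part of the driver): the pending_reqs/reqs_acked
 * pair set up in panthor_gpu_init() implements a simple request/ack
 * handshake with panthor_gpu_irq_handler() above. A waiter sets the IRQ
 * bit it expects in pending_reqs, pokes GPU_CMD, and sleeps on
 * reqs_acked; the handler clears acknowledged bits and wakes everyone:
 *
 *	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
 *	ptdev->gpu->pending_reqs |= GPU_IRQ_SOME_REQ;	// hypothetical bit
 *	gpu_write(ptdev, GPU_CMD, SOME_CMD);		// hypothetical command
 *	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
 *	wait_event_timeout(ptdev->gpu->reqs_acked,
 *			   !(ptdev->gpu->pending_reqs & GPU_IRQ_SOME_REQ),
 *			   msecs_to_jiffies(100));
 *
 * panthor_gpu_flush_caches() and panthor_gpu_soft_reset() below are the
 * two real users of this pattern.
 */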
/**
 * panthor_gpu_block_power_off() - Power-off a specific block of the GPU
 * @ptdev: Device.
 * @blk_name: Block name.
 * @pwroff_reg: Power-off register for this block.
 * @pwrtrans_reg: Power transition register for this block.
 * @mask: Sub-elements to power-off.
 * @timeout_us: Timeout in microseconds.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_block_power_off(struct panthor_device *ptdev,
				const char *blk_name,
				u32 pwroff_reg, u32 pwrtrans_reg,
				u64 mask, u32 timeout_us)
{
	u32 val, i;
	int ret;

	/*
	 * Wait for any pending power transition to finish before requesting
	 * the power-off. The 64-bit mask is split over two 32-bit registers.
	 */
	for (i = 0; i < 2; i++) {
		u32 mask32 = mask >> (i * 32);

		if (!mask32)
			continue;

		ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
						 val, !(mask32 & val),
						 100, timeout_us);
		if (ret) {
			drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
				blk_name, mask);
			return ret;
		}
	}

	if (mask & GENMASK(31, 0))
		gpu_write(ptdev, pwroff_reg, mask);

	if (mask >> 32)
		gpu_write(ptdev, pwroff_reg + 4, mask >> 32);

	/* Wait for the power-off transition we just requested to complete. */
	for (i = 0; i < 2; i++) {
		u32 mask32 = mask >> (i * 32);

		if (!mask32)
			continue;

		ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
						 val, !(mask32 & val),
						 100, timeout_us);
		if (ret) {
			drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
				blk_name, mask);
			return ret;
		}
	}

	return 0;
}
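/*
 * Example (an assumption, inferred from the panthor_gpu_power_on() helper
 * used by panthor_gpu_l2_power_on() below): panthor_gpu.h presumably wraps
 * this function in a macro that derives the register names from the block
 * type, along the lines of:
 *
 *	#define panthor_gpu_power_off(ptdev, type, mask, timeout_us) \
 *		panthor_gpu_block_power_off(ptdev, #type, \
 *					    type ## _PWROFF_LO, \
 *					    type ## _PWRTRANS_LO, \
 *					    mask, timeout_us)
 *
 * so a caller could write panthor_gpu_power_off(ptdev, L2, 1, 20000) to
 * power down the first L2 slice.
 */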
/**
 * panthor_gpu_block_power_on() - Power-on a specific block of the GPU
 * @ptdev: Device.
 * @blk_name: Block name.
 * @pwron_reg: Power-on register for this block.
 * @pwrtrans_reg: Power transition register for this block.
 * @rdy_reg: Power transition ready register.
 * @mask: Sub-elements to power-on.
 * @timeout_us: Timeout in microseconds.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_block_power_on(struct panthor_device *ptdev,
			       const char *blk_name,
			       u32 pwron_reg, u32 pwrtrans_reg,
			       u32 rdy_reg, u64 mask, u32 timeout_us)
{
	u32 val, i;
	int ret;

	/*
	 * Wait for any pending power transition to finish before requesting
	 * the power-on. The 64-bit mask is split over two 32-bit registers.
	 */
	for (i = 0; i < 2; i++) {
		u32 mask32 = mask >> (i * 32);

		if (!mask32)
			continue;

		ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
						 val, !(mask32 & val),
						 100, timeout_us);
		if (ret) {
			drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
				blk_name, mask);
			return ret;
		}
	}

	if (mask & GENMASK(31, 0))
		gpu_write(ptdev, pwron_reg, mask);

	if (mask >> 32)
		gpu_write(ptdev, pwron_reg + 4, mask >> 32);

	/* Wait until all requested sub-elements report ready. */
	for (i = 0; i < 2; i++) {
		u32 mask32 = mask >> (i * 32);

		if (!mask32)
			continue;

		ret = readl_relaxed_poll_timeout(ptdev->iomem + rdy_reg + (i * 4),
						 val, (mask32 & val) == mask32,
						 100, timeout_us);
		if (ret) {
			drm_err(&ptdev->base, "timeout waiting on %s:%llx readiness",
				blk_name, mask);
			return ret;
		}
	}

	return 0;
}

/**
 * panthor_gpu_l2_power_on() - Power-on the L2-cache
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_l2_power_on(struct panthor_device *ptdev)
{
	if (ptdev->gpu_info.l2_present != 1) {
		/*
		 * Only support one core group now.
		 * ~(l2_present - 1) unsets all bits in l2_present except
		 * the bottom bit. (l2_present - 2) has all the bits in
		 * the first core group set. AND them together to generate
		 * a mask of cores in the first core group.
		 * For instance, l2_present == 0x11 yields
		 * ~0x10 & 0x0f == 0x0f, i.e. the four cores sitting below
		 * the second L2.
		 */
		u64 core_mask = ~(ptdev->gpu_info.l2_present - 1) &
				(ptdev->gpu_info.l2_present - 2);
		drm_info_once(&ptdev->base, "using only 1st core group (%lu cores from %lu)\n",
			      hweight64(core_mask),
			      hweight64(ptdev->gpu_info.shader_present));
	}

	return panthor_gpu_power_on(ptdev, L2, 1, 20000);
}

/**
 * panthor_gpu_flush_caches() - Flush caches
 * @ptdev: Device.
 * @l2: L2 flush type.
 * @lsc: LSC flush type.
 * @other: Other flush type.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_flush_caches(struct panthor_device *ptdev,
			     u32 l2, u32 lsc, u32 other)
{
	bool timedout = false;
	unsigned long flags;

	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
	if (!drm_WARN_ON(&ptdev->base,
			 ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED)) {
		ptdev->gpu->pending_reqs |= GPU_IRQ_CLEAN_CACHES_COMPLETED;
		gpu_write(ptdev, GPU_CMD, GPU_FLUSH_CACHES(l2, lsc, other));
	}
	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);

	if (!wait_event_timeout(ptdev->gpu->reqs_acked,
				!(ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED),
				msecs_to_jiffies(100))) {
		spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
		if ((ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED) != 0 &&
		    !(gpu_read(ptdev, GPU_INT_RAWSTAT) & GPU_IRQ_CLEAN_CACHES_COMPLETED))
			timedout = true;
		else
			ptdev->gpu->pending_reqs &= ~GPU_IRQ_CLEAN_CACHES_COMPLETED;
		spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
	}

	if (timedout) {
		drm_err(&ptdev->base, "Flush caches timeout");
		return -ETIMEDOUT;
	}

	return 0;
}
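/*
 * Example (sketch, with assumed flag names): the l2/lsc/other arguments of
 * panthor_gpu_flush_caches() are the per-cache flush types fed to
 * GPU_FLUSH_CACHES(). Assuming panthor_regs.h defines CACHE_CLEAN and
 * CACHE_INV flags for those fields, a full clean+invalidate of the L2 and
 * load/store caches would look like:
 *
 *	ret = panthor_gpu_flush_caches(ptdev,
 *				       CACHE_CLEAN | CACHE_INV,
 *				       CACHE_CLEAN | CACHE_INV,
 *				       0);
 *	if (ret)
 *		return ret;
 */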
/**
 * panthor_gpu_soft_reset() - Issue a soft-reset
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_soft_reset(struct panthor_device *ptdev)
{
	bool timedout = false;
	unsigned long flags;

	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
	if (!drm_WARN_ON(&ptdev->base,
			 ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED)) {
		ptdev->gpu->pending_reqs |= GPU_IRQ_RESET_COMPLETED;
		gpu_write(ptdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
		gpu_write(ptdev, GPU_CMD, GPU_SOFT_RESET);
	}
	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);

	if (!wait_event_timeout(ptdev->gpu->reqs_acked,
				!(ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED),
				msecs_to_jiffies(100))) {
		spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
		if ((ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED) != 0 &&
		    !(gpu_read(ptdev, GPU_INT_RAWSTAT) & GPU_IRQ_RESET_COMPLETED))
			timedout = true;
		else
			ptdev->gpu->pending_reqs &= ~GPU_IRQ_RESET_COMPLETED;
		spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
	}

	if (timedout) {
		drm_err(&ptdev->base, "Soft reset timeout");
		return -ETIMEDOUT;
	}

	return 0;
}

/**
 * panthor_gpu_suspend() - Suspend the GPU block.
 * @ptdev: Device.
 *
 * Suspend the GPU irq. This should be called last in the suspend procedure,
 * after all other blocks have been suspended.
 */
void panthor_gpu_suspend(struct panthor_device *ptdev)
{
	/*
	 * It may be preferable to simply power down the L2, but for now just
	 * soft-reset which will leave the L2 powered down.
	 */
	panthor_gpu_soft_reset(ptdev);
	panthor_gpu_irq_suspend(&ptdev->gpu->irq);
}

/**
 * panthor_gpu_resume() - Resume the GPU block.
 * @ptdev: Device.
 *
 * Resume the IRQ handler and power-on the L2-cache.
 * The FW takes care of powering the other blocks.
 */
void panthor_gpu_resume(struct panthor_device *ptdev)
{
	panthor_gpu_irq_resume(&ptdev->gpu->irq, GPU_INTERRUPTS_MASK);
	panthor_gpu_l2_power_on(ptdev);
}
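/*
 * Example (sketch under assumptions): panthor_gpu_suspend() and
 * panthor_gpu_resume() are meant to be driven by the device-level
 * suspend/resume path, with the GPU block handled last on suspend and
 * first on resume. Hypothetical PM callbacks would be ordered like:
 *
 *	static int panthor_device_suspend(struct device *dev)	// hypothetical
 *	{
 *		struct panthor_device *ptdev = dev_get_drvdata(dev);
 *
 *		// ... suspend scheduler/FW/MMU blocks first ...
 *		panthor_gpu_suspend(ptdev);
 *		return 0;
 *	}
 *
 *	static int panthor_device_resume(struct device *dev)	// hypothetical
 *	{
 *		struct panthor_device *ptdev = dev_get_drvdata(dev);
 *
 *		panthor_gpu_resume(ptdev);
 *		// ... then resume MMU/FW/scheduler blocks ...
 *		return 0;
 *	}
 */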