// SPDX-License-Identifier: GPL-2.0
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "panfrost_device.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gpu.h"
#include "panfrost_perfcnt.h"
#include "panfrost_regs.h"

static irqreturn_t panfrost_gpu_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 fault_status, state;

	if (test_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended))
		return IRQ_NONE;

	fault_status = gpu_read(pfdev, GPU_FAULT_STATUS);
	state = gpu_read(pfdev, GPU_INT_STAT);
	if (!state)
		return IRQ_NONE;

	if (state & GPU_IRQ_MASK_ERROR) {
		u64 address = (u64) gpu_read(pfdev, GPU_FAULT_ADDRESS_HI) << 32;
		address |= gpu_read(pfdev, GPU_FAULT_ADDRESS_LO);

		dev_warn(pfdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
			 fault_status, panfrost_exception_name(fault_status & 0xFF),
			 address);

		if (state & GPU_IRQ_MULTIPLE_FAULT)
			dev_warn(pfdev->dev, "There were multiple GPU faults - some have not been reported\n");

		gpu_write(pfdev, GPU_INT_MASK, 0);
	}

	if (state & GPU_IRQ_PERFCNT_SAMPLE_COMPLETED)
		panfrost_perfcnt_sample_done(pfdev);

	if (state & GPU_IRQ_CLEAN_CACHES_COMPLETED)
		panfrost_perfcnt_clean_cache_done(pfdev);

	gpu_write(pfdev, GPU_INT_CLEAR, state);

	return IRQ_HANDLED;
}

int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
{
	int ret;
	u32 val;

	gpu_write(pfdev, GPU_INT_MASK, 0);
	gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);

	clear_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended);

	gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET);
	ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT,
		val, val & GPU_IRQ_RESET_COMPLETED, 10, 10000);

	if (ret) {
		dev_err(pfdev->dev, "gpu soft reset timed out, attempting hard reset\n");

		gpu_write(pfdev, GPU_CMD, GPU_CMD_HARD_RESET);
		ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT, val,
						 val & GPU_IRQ_RESET_COMPLETED, 100, 10000);
		if (ret) {
			dev_err(pfdev->dev, "gpu hard reset timed out\n");
			return ret;
		}
	}

	gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_MASK_ALL);

	/* Only enable the interrupts we care about */
	gpu_write(pfdev, GPU_INT_MASK,
		  GPU_IRQ_MASK_ERROR |
		  GPU_IRQ_PERFCNT_SAMPLE_COMPLETED |
		  GPU_IRQ_CLEAN_CACHES_COMPLETED);

	/*
	 * All in-flight jobs should have released their cycle
	 * counter references upon reset, but let us make sure
	 */
	if (drm_WARN_ON(pfdev->ddev, atomic_read(&pfdev->cycle_counter.use_count) != 0))
		atomic_set(&pfdev->cycle_counter.use_count, 0);

	return 0;
}
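
/*
 * A minimal usage sketch for the reset path above, assuming a caller in
 * the driver's reset/resume code (the actual callers live outside this
 * file, e.g. in panfrost_device.c, and also do MMU and job-IRQ setup):
 *
 *	err = panfrost_gpu_soft_reset(pfdev);
 *	if (err)
 *		return err;		// soft and hard reset both timed out
 *	panfrost_gpu_power_on(pfdev);	// re-power L2, shader and tiler cores
 *
 * After a successful reset only the error, perfcnt-sample and
 * clean-caches interrupts are unmasked here; job and MMU interrupts are
 * managed by their own blocks.
 */
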
void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev)
{
	/*
	 * The Amlogic integrated Mali-T820, Mali-G31 & Mali-G52 need
	 * these undocumented bits in GPU_PWR_OVERRIDE1 to be set in order
	 * to operate correctly.
	 */
	gpu_write(pfdev, GPU_PWR_KEY, GPU_PWR_KEY_UNLOCK);
	gpu_write(pfdev, GPU_PWR_OVERRIDE1, 0xfff | (0x20 << 16));
}

static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
{
	u32 quirks = 0;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8443) ||
	    panfrost_has_hw_issue(pfdev, HW_ISSUE_11035))
		quirks |= SC_LS_PAUSEBUFFER_DISABLE;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10327))
		quirks |= SC_SDC_DISABLE_OQ_DISCARD;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10797))
		quirks |= SC_ENABLE_TEXGRD_FLAGS;

	if (!panfrost_has_hw_issue(pfdev, GPUCORE_1619)) {
		if (panfrost_model_cmp(pfdev, 0x750) < 0) /* T60x, T62x, T72x */
			quirks |= SC_LS_ATTR_CHECK_DISABLE;
		else if (panfrost_model_cmp(pfdev, 0x880) <= 0) /* T76x, T8xx */
			quirks |= SC_LS_ALLOW_ATTR_TYPES;
	}

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_TTRX_2968_TTRX_3162))
		quirks |= SC_VAR_ALGORITHM;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_TLS_HASHING))
		quirks |= SC_TLS_HASH_ENABLE;

	if (quirks)
		gpu_write(pfdev, GPU_SHADER_CONFIG, quirks);

	quirks = gpu_read(pfdev, GPU_TILER_CONFIG);

	/* Set tiler clock gate override if required */
	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_T76X_3953))
		quirks |= TC_CLOCK_GATE_OVERRIDE;

	gpu_write(pfdev, GPU_TILER_CONFIG, quirks);

	quirks = 0;
	if ((panfrost_model_eq(pfdev, 0x860) || panfrost_model_eq(pfdev, 0x880)) &&
	    pfdev->features.revision >= 0x2000)
		quirks |= JM_MAX_JOB_THROTTLE_LIMIT << JM_JOB_THROTTLE_LIMIT_SHIFT;
	else if (panfrost_model_eq(pfdev, 0x6000) &&
		 pfdev->features.coherency_features == COHERENCY_ACE)
		quirks |= (COHERENCY_ACE_LITE | COHERENCY_ACE) <<
			  JM_FORCE_COHERENCY_FEATURES_SHIFT;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_IDVS_GROUP_SIZE))
		quirks |= JM_DEFAULT_IDVS_GROUP_SIZE << JM_IDVS_GROUP_SIZE_SHIFT;

	if (quirks)
		gpu_write(pfdev, GPU_JM_CONFIG, quirks);

	/* Here go platform-specific quirks */
	if (pfdev->comp->vendor_quirk)
		pfdev->comp->vendor_quirk(pfdev);
}

#define MAX_HW_REVS 6

struct panfrost_model {
	const char *name;
	u32 id;
	u64 features;
	u64 issues;
	struct {
		u32 revision;
		u64 issues;
	} revs[MAX_HW_REVS];
};
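
/*
 * Each gpu_models[] entry below binds a GPU ID to its feature and issue
 * masks. A revision is packed as (major << 12 | minor << 4 | status),
 * so, for example:
 *
 *	GPU_REV(t620, 0, 1)
 *
 * expands to { .revision = 0x0010, .issues = hw_issues_t620_r0p1 },
 * i.e. the r0p1 silicon of the T620. The matching loop in
 * panfrost_gpu_init_features() first tries an exact revision match and
 * otherwise falls back to a match with the status nibble ignored.
 */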
#define GPU_MODEL(_name, _id, ...) \
{\
	.name = __stringify(_name),				\
	.id = _id,						\
	.features = hw_features_##_name,			\
	.issues = hw_issues_##_name,				\
	.revs = { __VA_ARGS__ },				\
}

#define GPU_REV_EXT(name, _rev, _p, _s, stat) \
{\
	.revision = (_rev) << 12 | (_p) << 4 | (_s),		\
	.issues = hw_issues_##name##_r##_rev##p##_p##stat,	\
}
#define GPU_REV(name, r, p) GPU_REV_EXT(name, r, p, 0, )

static const struct panfrost_model gpu_models[] = {
	/* T60x has an oddball version */
	GPU_MODEL(t600, 0x600,
		GPU_REV_EXT(t600, 0, 0, 1, _15dev0)),
	GPU_MODEL(t620, 0x620,
		GPU_REV(t620, 0, 1), GPU_REV(t620, 1, 0)),
	GPU_MODEL(t720, 0x720),
	GPU_MODEL(t760, 0x750,
		GPU_REV(t760, 0, 0), GPU_REV(t760, 0, 1),
		GPU_REV_EXT(t760, 0, 1, 0, _50rel0),
		GPU_REV(t760, 0, 2), GPU_REV(t760, 0, 3)),
	GPU_MODEL(t820, 0x820),
	GPU_MODEL(t830, 0x830),
	GPU_MODEL(t860, 0x860),
	GPU_MODEL(t880, 0x880),

	GPU_MODEL(g71, 0x6000,
		GPU_REV_EXT(g71, 0, 0, 1, _05dev0)),
	GPU_MODEL(g72, 0x6001),
	GPU_MODEL(g51, 0x7000),
	GPU_MODEL(g76, 0x7001),
	GPU_MODEL(g52, 0x7002),
	GPU_MODEL(g31, 0x7003,
		GPU_REV(g31, 1, 0)),

	GPU_MODEL(g57, 0x9001,
		GPU_REV(g57, 0, 0)),

	/* MediaTek MT8192 has a Mali-G57 with a different GPU ID from the
	 * standard. Arm's driver does not appear to handle this model.
	 * ChromeOS has a hack downstream for it. Treat it as equivalent to
	 * standard Mali-G57 for now.
	 */
	GPU_MODEL(g57, 0x9003,
		GPU_REV(g57, 0, 0)),
};

static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
{
	u32 gpu_id, num_js, major, minor, status, rev;
	const char *name = "unknown";
	u64 hw_feat = 0;
	u64 hw_issues = hw_issues_all;
	const struct panfrost_model *model;
	int i;

	pfdev->features.l2_features = gpu_read(pfdev, GPU_L2_FEATURES);
	pfdev->features.core_features = gpu_read(pfdev, GPU_CORE_FEATURES);
	pfdev->features.tiler_features = gpu_read(pfdev, GPU_TILER_FEATURES);
	pfdev->features.mem_features = gpu_read(pfdev, GPU_MEM_FEATURES);
	pfdev->features.mmu_features = gpu_read(pfdev, GPU_MMU_FEATURES);
	pfdev->features.thread_features = gpu_read(pfdev, GPU_THREAD_FEATURES);
	pfdev->features.max_threads = gpu_read(pfdev, GPU_THREAD_MAX_THREADS);
	pfdev->features.thread_max_workgroup_sz = gpu_read(pfdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
	pfdev->features.thread_max_barrier_sz = gpu_read(pfdev, GPU_THREAD_MAX_BARRIER_SIZE);
	pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
	pfdev->features.afbc_features = gpu_read(pfdev, GPU_AFBC_FEATURES);
	for (i = 0; i < 4; i++)
		pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));

	pfdev->features.as_present = gpu_read(pfdev, GPU_AS_PRESENT);

	pfdev->features.js_present = gpu_read(pfdev, GPU_JS_PRESENT);
	num_js = hweight32(pfdev->features.js_present);
	for (i = 0; i < num_js; i++)
		pfdev->features.js_features[i] = gpu_read(pfdev, GPU_JS_FEATURES(i));

	pfdev->features.shader_present = gpu_read(pfdev, GPU_SHADER_PRESENT_LO);
	pfdev->features.shader_present |= (u64)gpu_read(pfdev, GPU_SHADER_PRESENT_HI) << 32;

	pfdev->features.tiler_present = gpu_read(pfdev, GPU_TILER_PRESENT_LO);
	pfdev->features.tiler_present |= (u64)gpu_read(pfdev, GPU_TILER_PRESENT_HI) << 32;
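
	/*
	 * Like the shader and tiler bitmaps above, the remaining
	 * *_PRESENT registers are 64 bits wide but accessed as 32-bit
	 * LO/HI pairs stitched together with a shift-and-OR. No retry
	 * loop is needed for these: they are static feature bitmaps,
	 * unlike the live counters read in panfrost_cycle_counter_read()
	 * further down.
	 */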
	pfdev->features.l2_present = gpu_read(pfdev, GPU_L2_PRESENT_LO);
	pfdev->features.l2_present |= (u64)gpu_read(pfdev, GPU_L2_PRESENT_HI) << 32;
	pfdev->features.nr_core_groups = hweight64(pfdev->features.l2_present);

	pfdev->features.stack_present = gpu_read(pfdev, GPU_STACK_PRESENT_LO);
	pfdev->features.stack_present |= (u64)gpu_read(pfdev, GPU_STACK_PRESENT_HI) << 32;

	pfdev->features.thread_tls_alloc = gpu_read(pfdev, GPU_THREAD_TLS_ALLOC);

	gpu_id = gpu_read(pfdev, GPU_ID);
	pfdev->features.revision = gpu_id & 0xffff;
	pfdev->features.id = gpu_id >> 16;

	/* The T60x has an oddball ID value. Fix it up to the standard Midgard
	 * format so we (and userspace) don't have to special case it.
	 */
	if (pfdev->features.id == 0x6956)
		pfdev->features.id = 0x0600;

	major = (pfdev->features.revision >> 12) & 0xf;
	minor = (pfdev->features.revision >> 4) & 0xff;
	status = pfdev->features.revision & 0xf;
	rev = pfdev->features.revision;

	gpu_id = pfdev->features.id;

	for (model = gpu_models; model->name; model++) {
		int best = -1;

		if (!panfrost_model_eq(pfdev, model->id))
			continue;

		name = model->name;
		hw_feat = model->features;
		hw_issues |= model->issues;
		for (i = 0; i < MAX_HW_REVS; i++) {
			if (model->revs[i].revision == rev) {
				best = i;
				break;
			} else if (model->revs[i].revision == (rev & ~0xf))
				best = i;
		}

		if (best >= 0)
			hw_issues |= model->revs[best].issues;

		break;
	}

	bitmap_from_u64(pfdev->features.hw_features, hw_feat);
	bitmap_from_u64(pfdev->features.hw_issues, hw_issues);

	dev_info(pfdev->dev, "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
		 name, gpu_id, major, minor, status);
	dev_info(pfdev->dev, "features: %64pb, issues: %64pb",
		 pfdev->features.hw_features,
		 pfdev->features.hw_issues);

	dev_info(pfdev->dev, "Features: L2:0x%08x Shader:0x%08x Tiler:0x%08x Mem:0x%0x MMU:0x%08x AS:0x%x JS:0x%x",
		 pfdev->features.l2_features,
		 pfdev->features.core_features,
		 pfdev->features.tiler_features,
		 pfdev->features.mem_features,
		 pfdev->features.mmu_features,
		 pfdev->features.as_present,
		 pfdev->features.js_present);

	dev_info(pfdev->dev, "shader_present=0x%0llx l2_present=0x%0llx",
		 pfdev->features.shader_present, pfdev->features.l2_present);
}

void panfrost_cycle_counter_get(struct panfrost_device *pfdev)
{
	if (atomic_inc_not_zero(&pfdev->cycle_counter.use_count))
		return;

	spin_lock(&pfdev->cycle_counter.lock);
	if (atomic_inc_return(&pfdev->cycle_counter.use_count) == 1)
		gpu_write(pfdev, GPU_CMD, GPU_CMD_CYCLE_COUNT_START);
	spin_unlock(&pfdev->cycle_counter.lock);
}

void panfrost_cycle_counter_put(struct panfrost_device *pfdev)
{
	if (atomic_add_unless(&pfdev->cycle_counter.use_count, -1, 1))
		return;

	spin_lock(&pfdev->cycle_counter.lock);
	if (atomic_dec_return(&pfdev->cycle_counter.use_count) == 0)
		gpu_write(pfdev, GPU_CMD, GPU_CMD_CYCLE_COUNT_STOP);
	spin_unlock(&pfdev->cycle_counter.lock);
}

unsigned long long panfrost_cycle_counter_read(struct panfrost_device *pfdev)
{
	u32 hi, lo;

	do {
		hi = gpu_read(pfdev, GPU_CYCLE_COUNT_HI);
		lo = gpu_read(pfdev, GPU_CYCLE_COUNT_LO);
	} while (hi != gpu_read(pfdev, GPU_CYCLE_COUNT_HI));

	return ((u64)hi << 32) | lo;
}
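
/*
 * The hi/lo/hi loop above (and in panfrost_timestamp_read() below)
 * guards against the 64-bit counter rolling over between the two 32-bit
 * reads: if the high word changed while the low word was being read,
 * the pair is inconsistent and must be re-read. A minimal usage sketch
 * for the cycle-counter API, assuming the caller already holds a
 * runtime-PM reference so the GPU stays powered:
 *
 *	panfrost_cycle_counter_get(pfdev);	// 0 -> 1 starts the counter
 *	cycles = panfrost_cycle_counter_read(pfdev);
 *	panfrost_cycle_counter_put(pfdev);	// 1 -> 0 stops the counter
 *
 * get/put are refcounted, so nested or concurrent users keep the
 * counter running until the last reference is dropped.
 */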
unsigned long long panfrost_timestamp_read(struct panfrost_device *pfdev)
{
	u32 hi, lo;

	do {
		hi = gpu_read(pfdev, GPU_TIMESTAMP_HI);
		lo = gpu_read(pfdev, GPU_TIMESTAMP_LO);
	} while (hi != gpu_read(pfdev, GPU_TIMESTAMP_HI));

	return ((u64)hi << 32) | lo;
}

static u64 panfrost_get_core_mask(struct panfrost_device *pfdev)
{
	u64 core_mask;

	if (pfdev->features.l2_present == 1)
		return U64_MAX;

	/*
	 * Only support one core group now.
	 * ~(l2_present - 1) unsets all bits in l2_present except
	 * the bottom bit. (l2_present - 2) has all the bits in
	 * the first core group set. AND them together to generate
	 * a mask of cores in the first core group.
	 */
	core_mask = ~(pfdev->features.l2_present - 1) &
		     (pfdev->features.l2_present - 2);
	dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n",
		      hweight64(core_mask),
		      hweight64(pfdev->features.shader_present));

	return core_mask;
}

void panfrost_gpu_power_on(struct panfrost_device *pfdev)
{
	int ret;
	u32 val;
	u64 core_mask;

	panfrost_gpu_init_quirks(pfdev);
	core_mask = panfrost_get_core_mask(pfdev);

	gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present & core_mask);
	ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
		val, val == (pfdev->features.l2_present & core_mask),
		10, 20000);
	if (ret)
		dev_err(pfdev->dev, "error powering up gpu L2");

	gpu_write(pfdev, SHADER_PWRON_LO,
		  pfdev->features.shader_present & core_mask);
	ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
		val, val == (pfdev->features.shader_present & core_mask),
		10, 20000);
	if (ret)
		dev_err(pfdev->dev, "error powering up gpu shader");

	gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present);
	ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
		val, val == pfdev->features.tiler_present, 10, 1000);
	if (ret)
		dev_err(pfdev->dev, "error powering up gpu tiler");
}

void panfrost_gpu_power_off(struct panfrost_device *pfdev)
{
	int ret;
	u32 val;

	gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present);
	ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO,
					 val, !val, 1, 2000);
	if (ret)
		dev_err(pfdev->dev, "shader power transition timeout");

	gpu_write(pfdev, TILER_PWROFF_LO, pfdev->features.tiler_present);
	ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_PWRTRANS_LO,
					 val, !val, 1, 2000);
	if (ret)
		dev_err(pfdev->dev, "tiler power transition timeout");

	gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present);
	ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO,
				 val, !val, 0, 2000);
	if (ret)
		dev_err(pfdev->dev, "l2 power transition timeout");
}

void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev)
{
	set_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended);

	gpu_write(pfdev, GPU_INT_MASK, 0);
	synchronize_irq(pfdev->gpu_irq);
}

int panfrost_gpu_init(struct panfrost_device *pfdev)
{
	int err;

	err = panfrost_gpu_soft_reset(pfdev);
	if (err)
		return err;

	panfrost_gpu_init_features(pfdev);

	err = dma_set_mask_and_coherent(pfdev->dev,
		DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features)));
	if (err)
		return err;

	dma_set_max_seg_size(pfdev->dev, UINT_MAX);

	pfdev->gpu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
	if (pfdev->gpu_irq < 0)
		return pfdev->gpu_irq;
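
	/*
	 * IRQF_SHARED below allows the "gpu" line to be shared with other
	 * users; the handler cooperates by returning IRQ_NONE when
	 * GPU_INT_STAT reads back zero or when the GPU block has been
	 * marked suspended via panfrost_gpu_suspend_irq().
	 */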
	err = devm_request_irq(pfdev->dev, pfdev->gpu_irq,
			       panfrost_gpu_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME "-gpu", pfdev);
	if (err) {
		dev_err(pfdev->dev, "failed to request gpu irq");
		return err;
	}

	panfrost_gpu_power_on(pfdev);

	return 0;
}

void panfrost_gpu_fini(struct panfrost_device *pfdev)
{
	panfrost_gpu_power_off(pfdev);
}

u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev)
{
	u32 flush_id;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION)) {
		/* Flush reduction only makes sense when the GPU is kept powered on between jobs */
		if (pm_runtime_get_if_in_use(pfdev->dev)) {
			flush_id = gpu_read(pfdev, GPU_LATEST_FLUSH_ID);
			pm_runtime_put(pfdev->dev);
			return flush_id;
		}
	}

	return 0;
}