/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014,2017, 2019 The Linux Foundation. All rights reserved.
 */

#ifndef __ADRENO_GPU_H__
#define __ADRENO_GPU_H__

#include <linux/firmware.h>
#include <linux/iopoll.h>

#include "msm_gpu.h"

#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"

extern bool snapshot_debugbus;
extern bool allow_vram_carveout;

enum {
	ADRENO_FW_PM4  = 0,
	ADRENO_FW_SQE  = 0, /* a6xx */
	ADRENO_FW_PFP  = 1,
	ADRENO_FW_GMU  = 1, /* a6xx */
	ADRENO_FW_GPMU = 2,
	ADRENO_FW_MAX,
};

/**
 * enum adreno_family - identify generation and possibly sub-generation
 *
 * In some cases there are distinct sub-generations within a major revision
 * so it helps to be able to group the GPU devices by generation and, if
 * necessary, sub-generation.
 */
enum adreno_family {
	ADRENO_2XX_GEN1,  /* a20x */
	ADRENO_2XX_GEN2,  /* a22x */
	ADRENO_3XX,
	ADRENO_4XX,
	ADRENO_5XX,
	ADRENO_6XX_GEN1,  /* a630 family */
	ADRENO_6XX_GEN2,  /* a640 family */
	ADRENO_6XX_GEN3,  /* a650 family */
	ADRENO_6XX_GEN4,  /* a660 family */
	ADRENO_7XX_GEN1,  /* a730 family */
	ADRENO_7XX_GEN2,  /* a740 family */
	ADRENO_7XX_GEN3,  /* a750 family */
};

#define ADRENO_QUIRK_TWO_PASS_USE_WFI		BIT(0)
#define ADRENO_QUIRK_FAULT_DETECT_MASK		BIT(1)
#define ADRENO_QUIRK_LMLOADKILL_DISABLE		BIT(2)
#define ADRENO_QUIRK_HAS_HW_APRIV		BIT(3)
#define ADRENO_QUIRK_HAS_CACHED_COHERENT	BIT(4)
#define ADRENO_QUIRK_PREEMPTION			BIT(5)

/* Helper for formatting the chip_id in the way that userspace tools like
 * crashdec expect.  (The leading '%' is left to the caller's format string.)
 */
#define ADRENO_CHIPID_FMT "u.%u.%u.%u"
#define ADRENO_CHIPID_ARGS(_c) \
	(((_c) >> 24) & 0xff), \
	(((_c) >> 16) & 0xff), \
	(((_c) >> 8) & 0xff), \
	((_c) & 0xff)

struct adreno_gpu_funcs {
	struct msm_gpu_funcs base;
	int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
};

struct adreno_reglist {
	u32 offset;
	u32 value;
};

struct adreno_speedbin {
	uint16_t fuse;
	uint16_t speedbin;
};

struct a6xx_info;

struct adreno_info {
	const char *machine;
	/**
	 * @chip_ids: Table of matching chip-ids
	 *
	 * Terminated with a 0 sentinel
	 */
	uint32_t *chip_ids;
	enum adreno_family family;
	uint32_t revn;
	const char *fw[ADRENO_FW_MAX];
	uint32_t gmem;
	u64 quirks;
	struct msm_gpu *(*init)(struct drm_device *dev);
	const char *zapfw;
	u32 inactive_period;
	union {
		const struct a6xx_info *a6xx;
	};
	u64 address_space_size;
	/**
	 * @speedbins: Optional table of fuse to speedbin mappings
	 *
	 * Consists of pairs of fuse, index mappings, terminated with
	 * {SHRT_MAX, 0} sentinel.
	 */
	struct adreno_speedbin *speedbins;
	u64 preempt_record_size;
};

#define ADRENO_CHIP_IDS(tbl...) (uint32_t[]) { tbl, 0 }

struct adreno_gpulist {
	const struct adreno_info *gpus;
	unsigned gpus_count;
};

#define DECLARE_ADRENO_GPULIST(name)				\
const struct adreno_gpulist name ## _gpulist = {		\
		name ## _gpus, ARRAY_SIZE(name ## _gpus)	\
	}
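
/*
 * Illustrative sketch of how the pieces above fit together in a per-generation
 * catalog file.  The entry below is made up (field values and firmware names
 * are only examples); see the real aNxx catalog tables for actual contents:
 *
 *   static const struct adreno_info a6xx_gpus[] = {
 *       {
 *           .chip_ids = ADRENO_CHIP_IDS(0x06030001),
 *           .family   = ADRENO_6XX_GEN1,
 *           .fw       = {
 *               [ADRENO_FW_SQE] = "a630_sqe.fw",
 *               [ADRENO_FW_GMU] = "a630_gmu.bin",
 *           },
 *           .gmem     = SZ_1M,
 *           .init     = a6xx_gpu_init,
 *       },
 *   };
 *   DECLARE_ADRENO_GPULIST(a6xx);
 *
 * which emits "a6xx_gpulist" wrapping the table and its ARRAY_SIZE().
 */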

/*
 * Helper to build a speedbin table, ie. the table:
 *
 *      fuse | speedbin
 *      -----+---------
 *        0  |    0
 *       169 |    1
 *       174 |    2
 *
 * would be declared as:
 *
 * .speedbins = ADRENO_SPEEDBINS(
 *      { 0,   0 },
 *      { 169, 1 },
 *      { 174, 2 },
 * ),
 */
#define ADRENO_SPEEDBINS(tbl...) (struct adreno_speedbin[]) { tbl {SHRT_MAX, 0} }

struct adreno_protect {
	const uint32_t *regs;
	uint32_t count;
	uint32_t count_max;
};

#define DECLARE_ADRENO_PROTECT(name, __count_max)	\
static const struct adreno_protect name = {		\
	.regs = name ## _regs,				\
	.count = ARRAY_SIZE(name ## _regs),		\
	.count_max = __count_max,			\
};

struct adreno_reglist_list {
	/** @regs: List of registers */
	const u32 *regs;
	/** @count: Number of registers in the list */
	u32 count;
};

#define DECLARE_ADRENO_REGLIST_LIST(name)		\
static const struct adreno_reglist_list name = {	\
	.regs = name ## _regs,				\
	.count = ARRAY_SIZE(name ## _regs),		\
};
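
/*
 * Illustrative use of the declare helpers above (the name and register
 * values are placeholders, shown only to make the "name ## _regs"
 * convention explicit): given an array whose identifier ends in _regs,
 *
 *   static const u32 a6xx_protect_regs[] = {
 *       0x00000, 0x00080, 0x004d0,
 *   };
 *   DECLARE_ADRENO_PROTECT(a6xx_protect, 32);
 *
 * the macro emits "static const struct adreno_protect a6xx_protect"
 * wrapping the array, its length, and the count_max limit.
 * DECLARE_ADRENO_REGLIST_LIST() follows the same pattern.
 */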

struct adreno_gpu {
	struct msm_gpu base;
	const struct adreno_info *info;
	uint32_t chip_id;
	uint16_t speedbin;
	const struct adreno_gpu_funcs *funcs;

	/* interesting register offsets to dump: */
	const unsigned int *registers;

	/*
	 * Are we loading fw from legacy path?  Prior to addition
	 * of gpu firmware to linux-firmware, the fw files were
	 * placed in toplevel firmware directory, following qcom's
	 * android kernel.  But linux-firmware preferred they be
	 * placed in a 'qcom' subdirectory.
	 *
	 * For backwards compatibility, we try first to load from
	 * the new path, using request_firmware_direct() to avoid
	 * any potential timeout waiting for usermode helper, then
	 * fall back to the old path (with direct load).  And
	 * finally fall back to request_firmware() with the new
	 * path to allow the usermode helper.
	 */
	enum {
		FW_LOCATION_UNKNOWN = 0,
		FW_LOCATION_NEW,     /* /lib/firmware/qcom/$fwfile */
		FW_LOCATION_LEGACY,  /* /lib/firmware/$fwfile */
		FW_LOCATION_HELPER,
	} fwloc;

	/* firmware: */
	const struct firmware *fw[ADRENO_FW_MAX];

	struct {
		/**
		 * @rgb565_predicator: Unknown, introduced with A650 family,
		 * related to UBWC mode/ver 4
		 */
		u32 rgb565_predicator;
		/** @uavflagprd_inv: Unknown, introduced with A650 family */
		u32 uavflagprd_inv;
		/** @min_acc_len: Whether the minimum access length is 64 bits */
		u32 min_acc_len;
		/**
		 * @ubwc_swizzle: Whether to enable level 1, 2 & 3 bank swizzling.
		 *
		 * UBWC 1.0 always enables all three levels.
		 * UBWC 2.0 removes level 1 bank swizzling, leaving levels 2 & 3.
		 * UBWC 4.0 adds the optional ability to disable levels 2 & 3.
		 *
		 * This is a bitmask where BIT(0) enables level 1, BIT(1)
		 * controls level 2, and BIT(2) enables level 3.
		 */
		u32 ubwc_swizzle;
		/**
		 * @highest_bank_bit: Highest Bank Bit
		 *
		 * The Highest Bank Bit value represents the bit of the highest
		 * DDR bank.  This should ideally use DRAM type detection.
		 */
		u32 highest_bank_bit;
		u32 amsbc;
		/**
		 * @macrotile_mode: Macrotile Mode
		 *
		 * Whether to use 4-channel macrotiling mode or the newer
		 * 8-channel macrotiling mode introduced in UBWC 3.1.  0 is
		 * 4-channel and 1 is 8-channel.
		 */
		u32 macrotile_mode;
	} ubwc_config;

	/*
	 * Register offsets are different between some GPUs.
	 * GPU specific offsets will be exported by GPU specific
	 * code (a3xx_gpu.c) and stored in this common location.
	 */
	const unsigned int *reg_offsets;
	bool gmu_is_wrapper;

	bool has_ray_tracing;

	u64 uche_trap_base;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)

struct adreno_ocmem {
	struct ocmem *ocmem;
	unsigned long base;
	void *hdl;
};

/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
	uint32_t chip_id;
	const struct adreno_info *info;
};

#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)

#define spin_until(X) ({                                   \
	int __ret = -ETIMEDOUT;                            \
	unsigned long __t = jiffies + ADRENO_IDLE_TIMEOUT; \
	do {                                               \
		if (X) {                                   \
			__ret = 0;                         \
			break;                             \
		}                                          \
	} while (time_before(jiffies, __t));               \
	__ret;                                             \
})
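
/*
 * Example of the spin_until() calling convention (the register and bit
 * below are placeholders, not real definitions): it busy-waits on an
 * arbitrary condition and evaluates to 0 once the condition holds, or to
 * -ETIMEDOUT after ADRENO_IDLE_TIMEOUT has elapsed:
 *
 *   if (spin_until(gpu_read(gpu, REG_SOME_STATUS) & SOME_IDLE_BIT))
 *       DRM_DEV_ERROR(dev, "timed out waiting for idle\n");
 */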

static inline uint8_t adreno_patchid(const struct adreno_gpu *gpu)
{
	/* It is probably ok to assume legacy "adreno_rev" format
	 * for all a6xx devices, but probably best to limit this
	 * to older things.
	 */
	WARN_ON_ONCE(gpu->info->family >= ADRENO_6XX_GEN1);
	return gpu->chip_id & 0xff;
}

static inline bool adreno_is_revn(const struct adreno_gpu *gpu, uint32_t revn)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;
	return gpu->info->revn == revn;
}

static inline bool adreno_has_gmu_wrapper(const struct adreno_gpu *gpu)
{
	return gpu->gmu_is_wrapper;
}

static inline bool adreno_is_a2xx(const struct adreno_gpu *gpu)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;
	return gpu->info->family <= ADRENO_2XX_GEN2;
}

static inline bool adreno_is_a20x(const struct adreno_gpu *gpu)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;
	return gpu->info->family == ADRENO_2XX_GEN1;
}

static inline bool adreno_is_a225(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 225);
}

static inline bool adreno_is_a305(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 305);
}

static inline bool adreno_is_a305b(const struct adreno_gpu *gpu)
{
	return gpu->info->chip_ids[0] == 0x03000512;
}

static inline bool adreno_is_a306(const struct adreno_gpu *gpu)
{
	/* yes, 307, because a305c is 306 */
	return adreno_is_revn(gpu, 307);
}

static inline bool adreno_is_a306a(const struct adreno_gpu *gpu)
{
	/* a306a (marketing name is a308) */
	return adreno_is_revn(gpu, 308);
}

static inline bool adreno_is_a320(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 320);
}

static inline bool adreno_is_a330(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 330);
}

static inline bool adreno_is_a330v2(const struct adreno_gpu *gpu)
{
	return adreno_is_a330(gpu) && (adreno_patchid(gpu) > 0);
}

static inline int adreno_is_a405(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 405);
}

static inline int adreno_is_a420(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 420);
}

static inline int adreno_is_a430(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 430);
}

static inline int adreno_is_a505(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 505);
}

static inline int adreno_is_a506(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 506);
}

static inline int adreno_is_a508(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 508);
}

static inline int adreno_is_a509(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 509);
}

static inline int adreno_is_a510(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 510);
}

static inline int adreno_is_a512(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 512);
}

static inline int adreno_is_a530(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 530);
}

static inline int adreno_is_a540(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 540);
}

static inline int adreno_is_a610(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 610);
}

static inline int adreno_is_a618(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 618);
}

static inline int adreno_is_a619(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 619);
}

static inline int adreno_is_a619_holi(const struct adreno_gpu *gpu)
{
	return adreno_is_a619(gpu) && adreno_has_gmu_wrapper(gpu);
}

static inline int adreno_is_a621(const struct adreno_gpu *gpu)
{
	return gpu->info->chip_ids[0] == 0x06020100;
}

static inline int adreno_is_a623(const struct adreno_gpu *gpu)
{
	return gpu->info->chip_ids[0] == 0x06020300;
}

static inline int adreno_is_a630(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 630);
}

static inline int adreno_is_a640(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 640);
}

static inline int adreno_is_a650(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 650);
}

static inline int adreno_is_7c3(const struct adreno_gpu *gpu)
{
	return gpu->info->chip_ids[0] == 0x06030500;
}

static inline int adreno_is_a660(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 660);
}

static inline int adreno_is_a680(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 680);
}

static inline int adreno_is_a663(const struct adreno_gpu *gpu)
{
	return gpu->info->chip_ids[0] == 0x06060300;
}

static inline int adreno_is_a690(const struct adreno_gpu *gpu)
{
	return gpu->info->chip_ids[0] == 0x06090000;
}

static inline int adreno_is_a702(const struct adreno_gpu *gpu)
{
	return gpu->info->chip_ids[0] == 0x07000200;
}

static inline int adreno_is_a610_family(const struct adreno_gpu *gpu)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;

	/* TODO: A612 */
	return adreno_is_a610(gpu) || adreno_is_a702(gpu);
}

/* TODO: 615/616 */
static inline int adreno_is_a615_family(const struct adreno_gpu *gpu)
{
	return adreno_is_a618(gpu) ||
	       adreno_is_a619(gpu);
}

static inline int adreno_is_a630_family(const struct adreno_gpu *gpu)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;
	return gpu->info->family == ADRENO_6XX_GEN1;
}

static inline int adreno_is_a660_family(const struct adreno_gpu *gpu)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;
	return gpu->info->family == ADRENO_6XX_GEN4;
}

/* check for a650, a660, or any derivatives */
static inline int adreno_is_a650_family(const struct adreno_gpu *gpu)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;
	return gpu->info->family == ADRENO_6XX_GEN3 ||
	       gpu->info->family == ADRENO_6XX_GEN4;
}

static inline int adreno_is_a640_family(const struct adreno_gpu *gpu)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;
	return gpu->info->family == ADRENO_6XX_GEN2;
}

static inline int adreno_is_a730(struct adreno_gpu *gpu)
{
	return gpu->info->chip_ids[0] == 0x07030001;
}

static inline int adreno_is_a740(struct adreno_gpu *gpu)
{
	return gpu->info->chip_ids[0] == 0x43050a01;
}

static inline int adreno_is_a750(struct adreno_gpu *gpu)
{
	return gpu->info->chip_ids[0] == 0x43051401;
}

static inline int adreno_is_x185(struct adreno_gpu *gpu)
{
	return gpu->info->chip_ids[0] == 0x43050c01;
}

static inline int adreno_is_a740_family(struct adreno_gpu *gpu)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;
	return gpu->info->family == ADRENO_7XX_GEN2 ||
	       gpu->info->family == ADRENO_7XX_GEN3;
}

static inline int adreno_is_a750_family(struct adreno_gpu *gpu)
{
	return gpu->info->family == ADRENO_7XX_GEN3;
}

static inline int adreno_is_a7xx(struct adreno_gpu *gpu)
{
	/* Update with non-fake (i.e. non-A702) Gen 7 GPUs */
	return gpu->info->family == ADRENO_7XX_GEN1 ||
	       adreno_is_a740_family(gpu);
}

u64 adreno_private_address_space_size(struct msm_gpu *gpu);
int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
		     uint32_t param, uint64_t *value, uint32_t *len);
int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
		     uint32_t param, uint64_t value, uint32_t len);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
		const char *fwname);
struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
		const struct firmware *fw, u64 *iova);
int adreno_hw_init(struct msm_gpu *gpu);
void adreno_recover(struct msm_gpu *gpu);
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg);
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
		struct drm_printer *p);
#endif
void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu);
void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);

int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
			  struct adreno_ocmem *ocmem);
void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *ocmem);

int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
		int nr_rings);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
int adreno_load_fw(struct adreno_gpu *adreno_gpu);
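
/*
 * A minimal sketch of how a per-generation backend is expected to wire itself
 * up with adreno_gpu_init().  The hook names below (a3xx_hw_init,
 * a3xx_get_timestamp) are illustrative only; see the real aNxx_gpu.c files for
 * the actual set of msm_gpu_funcs hooks each generation fills in:
 *
 *   static const struct adreno_gpu_funcs funcs = {
 *       .base = {
 *           .hw_init = a3xx_hw_init,
 *           ... remaining msm_gpu_funcs hooks ...
 *       },
 *       .get_timestamp = a3xx_get_timestamp,
 *   };
 *
 *   ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
 *
 * adreno_gpu_init() fills in the common struct adreno_gpu state and hands the
 * device off to the core msm_gpu layer.
 */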

void adreno_gpu_state_destroy(struct msm_gpu_state *state);

int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
int adreno_gpu_state_put(struct msm_gpu_state *state);
void adreno_show_object(struct drm_printer *p, void **ptr, int len,
		bool *encoded);

/*
 * Common helper function to initialize the default address space for arm-smmu
 * attached targets
 */
struct msm_gem_address_space *
adreno_create_address_space(struct msm_gpu *gpu,
			    struct platform_device *pdev);

struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
				  struct platform_device *pdev,
				  unsigned long quirks);

int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
			 struct adreno_smmu_fault_info *info, const char *block,
			 u32 scratch[4]);

int adreno_read_speedbin(struct device *dev, u32 *speedbin);

/*
 * For a5xx and a6xx targets load the zap shader that is used to pull the GPU
 * out of secure mode
 */
int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid);

/* ringbuffer helpers (the parts that are adreno specific) */

static inline void
OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt+1);
	OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
}

/* no-op packet: */
static inline void
OUT_PKT2(struct msm_ringbuffer *ring)
{
	adreno_wait_ring(ring, 1);
	OUT_RING(ring, CP_TYPE2_PKT);
}

static inline void
OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt+1);
	OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}

/*
 * Parity bit used in type-4 and type-7 packet headers: the value is XOR-folded
 * down to a nibble, which indexes the 16-bit lookup table 0x9669.
 */
static inline u32 PM4_PARITY(u32 val)
{
	return (0x9669 >> (0xF & (val ^
		(val >> 4) ^ (val >> 8) ^ (val >> 12) ^
		(val >> 16) ^ ((val) >> 20) ^ (val >> 24) ^
		(val >> 28)))) & 1;
}

/* Maximum number of values that can be executed for one opcode */
#define TYPE4_MAX_PAYLOAD	127

#define PKT4(_reg, _cnt) \
	(CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
	 (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))

static inline void
OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, PKT4(regindx, cnt));
}

#define PKT7(opcode, cnt) \
	(CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) | \
		((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23))

static inline void
OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, PKT7(opcode, cnt));
}
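
/*
 * Example of emitting packets with the helpers above.  The register and
 * opcode names are placeholders (real callers use the generated REG_A6XX_...
 * and CP_... definitions), but the pattern of a header followed by payload
 * dwords written with OUT_RING() is the one used throughout the driver:
 *
 *   OUT_PKT4(ring, REG_SOME_REGISTER, 1);   // type-4: register write
 *   OUT_RING(ring, value);
 *
 *   OUT_PKT7(ring, CP_SOME_OPCODE, 2);      // type-7: CP command + payload
 *   OUT_RING(ring, lower_32_bits(iova));
 *   OUT_RING(ring, upper_32_bits(iova));
 */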

struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);

static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
{
	return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
}

/*
 * Given a register and a count, return a value to program into
 * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
 * registers starting at _reg.
 *
 * The register base needs to be a multiple of the length.  If it is not, the
 * hardware will quietly mask off the bits for you and shift the size.  For
 * example, if you intend the protection to start at 0x07 for a length of 4
 * (0x07-0x0A) the hardware will actually protect (0x04-0x07) which might
 * expose registers you intended to protect!
 */
#define ADRENO_PROTECT_RW(_reg, _len) \
	((1 << 30) | (1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))

/*
 * Same as above, but allow reads over the range.  For areas of mixed use (such
 * as performance counters) this allows us to protect a much larger range with
 * a single register.
 */
#define ADRENO_PROTECT_RDONLY(_reg, _len) \
	((1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))


#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
	readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
		interval, timeout)

#endif /* __ADRENO_GPU_H__ */