| /linux/drivers/gpu/drm/msm/ |
| H A D | msm_gpu.c |
    25  static int enable_pwrrail(struct msm_gpu *gpu)  in enable_pwrrail() argument
    27      struct drm_device *dev = gpu->dev;  in enable_pwrrail()
    30      if (gpu->gpu_reg) {  in enable_pwrrail()
    31          ret = regulator_enable(gpu->gpu_reg);  in enable_pwrrail()
    38      if (gpu->gpu_cx) {  in enable_pwrrail()
    39          ret = regulator_enable(gpu->gpu_cx);  in enable_pwrrail()
    49  static int disable_pwrrail(struct msm_gpu *gpu)  in disable_pwrrail() argument
    51      if (gpu->gpu_cx)  in disable_pwrrail()
    52          regulator_disable(gpu->gpu_cx);  in disable_pwrrail()
    53      if (gpu->gpu_reg)  in disable_pwrrail()
    [all …]
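
The enable_pwrrail()/disable_pwrrail() excerpt above illustrates a common bracket pattern: enable the two rails in order and disable them in the reverse order. Below is a minimal standalone sketch of that pattern; the rail_on()/rail_off() helpers are hypothetical stand-ins for regulator_enable()/regulator_disable(), not driver code.

```c
#include <stdio.h>

/* Hypothetical stand-ins for regulator_enable()/regulator_disable(). */
static int rail_on(const char *name)   { printf("enable %s\n", name); return 0; }
static void rail_off(const char *name) { printf("disable %s\n", name); }

/* Enable rails in order; bail out on the first failure, as in the excerpt. */
static int enable_pwrrail(void)
{
	int ret;

	ret = rail_on("gpu_reg");	/* gpu->gpu_reg in the driver */
	if (ret)
		return ret;

	ret = rail_on("gpu_cx");	/* gpu->gpu_cx in the driver */
	if (ret)
		return ret;

	return 0;
}

/* Disable in the reverse order: gpu_cx first, then gpu_reg. */
static void disable_pwrrail(void)
{
	rail_off("gpu_cx");
	rail_off("gpu_reg");
}

int main(void)
{
	if (!enable_pwrrail())
		disable_pwrrail();
	return 0;
}
```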
|
| H A D | msm_gpu.h |
    49  int (*get_param)(struct msm_gpu *gpu, struct msm_context *ctx,
    51  int (*set_param)(struct msm_gpu *gpu, struct msm_context *ctx,
    53  int (*hw_init)(struct msm_gpu *gpu);
    58  int (*ucode_load)(struct msm_gpu *gpu);
    60  int (*pm_suspend)(struct msm_gpu *gpu);
    61  int (*pm_resume)(struct msm_gpu *gpu);
    62  void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
    63  void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
    65  struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
    66  void (*recover)(struct msm_gpu *gpu);
    [all …]
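
msm_gpu.h, shown above, declares the per-generation hooks (hw_init, submit, flush, recover, ...) as a table of function pointers that each Adreno backend fills in and the core calls through. A minimal, self-contained model of that ops-table pattern follows; the toy_gpu/toy_gpu_funcs names are illustrative only and do not exist in the driver.

```c
#include <stdio.h>

struct toy_gpu;

/* Per-generation hook table, in the spirit of struct msm_gpu_funcs. */
struct toy_gpu_funcs {
	int  (*hw_init)(struct toy_gpu *gpu);
	void (*submit)(struct toy_gpu *gpu, int job_id);
	void (*recover)(struct toy_gpu *gpu);
};

struct toy_gpu {
	const char *name;
	const struct toy_gpu_funcs *funcs;
};

/* One backend's implementations of the hooks. */
static int a_hw_init(struct toy_gpu *gpu) { printf("%s: hw_init\n", gpu->name); return 0; }
static void a_submit(struct toy_gpu *gpu, int job_id) { printf("%s: submit %d\n", gpu->name, job_id); }
static void a_recover(struct toy_gpu *gpu) { printf("%s: recover\n", gpu->name); }

static const struct toy_gpu_funcs a_funcs = {
	.hw_init = a_hw_init,
	.submit  = a_submit,
	.recover = a_recover,
};

int main(void)
{
	struct toy_gpu gpu = { .name = "toy-gpu", .funcs = &a_funcs };

	/* Core code only ever calls through the table. */
	if (!gpu.funcs->hw_init(&gpu))
		gpu.funcs->submit(&gpu, 1);
	return 0;
}
```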
|
| /linux/drivers/gpu/drm/msm/adreno/ |
| H A D | a8xx_gpu.c |
    19  static void a8xx_aperture_slice_set(struct msm_gpu *gpu, enum adreno_pipe pipe, u32 slice)  in a8xx_aperture_slice_set() argument
    21      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);  in a8xx_aperture_slice_set()
    30      gpu_write(gpu, REG_A8XX_CP_APERTURE_CNTL_HOST, val);  in a8xx_aperture_slice_set()
    35  static void a8xx_aperture_acquire(struct msm_gpu *gpu, enum adreno_pipe pipe, unsigned long *flags)  in a8xx_aperture_acquire() argument
    37      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);  in a8xx_aperture_acquire()
    42      a8xx_aperture_slice_set(gpu, pipe, 0);  in a8xx_aperture_acquire()
    45  static void a8xx_aperture_release(struct msm_gpu *gpu, unsigned long flags)  in a8xx_aperture_release() argument
    47      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);  in a8xx_aperture_release()
    53  static void a8xx_aperture_clear(struct msm_gpu *gpu)  in a8xx_aperture_clear() argument
    57      a8xx_aperture_acquire(gpu, PIPE_NONE, &flags);  in a8xx_aperture_clear()
    [all …]
|
| H A D | adreno_gpu.h |
    38   * so it helps to be able to group the GPU devices by generation and if
    82  int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
    218  * of gpu firmware to linux-firmware, the fw files were
    249  * GPU specific offsets will be exported by GPU specific
    287 static inline uint8_t adreno_patchid(const struct adreno_gpu *gpu)  in adreno_patchid() argument
    293     WARN_ON_ONCE(gpu->info->family >= ADRENO_6XX_GEN1);  in adreno_patchid()
    294     return gpu->chip_id & 0xff;  in adreno_patchid()
    297 static inline bool adreno_is_revn(const struct adreno_gpu *gpu, uint32_t revn)  in adreno_is_revn() argument
    299     if (WARN_ON_ONCE(!gpu->info))  in adreno_is_revn()
    301     return gpu->info->revn == revn;  in adreno_is_revn()
    [all …]
|
| H A D | a3xx_gpu.c |
    28  static void a3xx_dump(struct msm_gpu *gpu);
    29  static bool a3xx_idle(struct msm_gpu *gpu);
    31  static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)  in a3xx_submit() argument
    69      /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */  in a3xx_submit()
    82      adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);  in a3xx_submit()
    85  static bool a3xx_me_init(struct msm_gpu *gpu)  in a3xx_me_init() argument
    87      struct msm_ringbuffer *ring = gpu->rb[0];  in a3xx_me_init()
    108     adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);  in a3xx_me_init()
    109     return a3xx_idle(gpu);  in a3xx_me_init()
    112 static int a3xx_hw_init(struct msm_gpu *gpu)  in a3xx_hw_init() argument
    [all …]
|
| H A D | a6xx_gpu.c |
    32  static bool fence_status_check(struct msm_gpu *gpu, u32 offset, u32 value, u32 status, u32 mask)  in fence_status_check() argument
    41      gpu_write(gpu, offset, value);  in fence_status_check()
    53      struct msm_gpu *gpu = &adreno_gpu->base;  in fenced_write() local
    57      gpu_write(gpu, offset, value);  in fenced_write()
    69          fence_status_check(gpu, offset, value, status, mask), 0, 1000))  in fenced_write()
    73      gpu_write(gpu, offset, value);  in fenced_write()
    77          fence_status_check(gpu, offset, value, status, mask), 0, 1000)) {  in fenced_write()
    80       * warning will allow gpu to move to power collapse which  in fenced_write()
    110 static inline bool _a6xx_check_idle(struct msm_gpu *gpu)  in _a6xx_check_idle() argument
    112     struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);  in _a6xx_check_idle()
    [all …]
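
The fenced_write() excerpt above follows a write / poll / retry shape: write the register, poll a status check with a bounded timeout, and repeat the write once before warning. A rough standalone sketch of that shape with a simulated status bit instead of real MMIO (all names here are hypothetical, not the A6xx code):

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical register file standing in for GPU MMIO space. */
static unsigned int regs[16];
static unsigned int status_reads;

static void reg_write(unsigned int reg, unsigned int val) { regs[reg] = val; }

/* Pretend the status bit only reports "done" after a few polls. */
static bool fence_done(void) { return ++status_reads > 3; }

/* Write a register, then poll for the hardware to acknowledge it;
 * retry the write once before giving up with a warning. */
static int fenced_write(unsigned int reg, unsigned int val)
{
	for (int attempt = 0; attempt < 2; attempt++) {
		reg_write(reg, val);

		/* Stand-in for a bounded poll loop with a timeout. */
		for (int us = 0; us < 1000; us++) {
			if (fence_done())
				return 0;
		}
	}

	fprintf(stderr, "fenced write to reg %u not acknowledged\n", reg);
	return -1;
}

int main(void)
{
	return fenced_write(2, 0x1) ? 1 : 0;
}
```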
|
| H A D | a5xx_gpu.c |
    17  static void a5xx_dump(struct msm_gpu *gpu);
    21  static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)  in update_shadow_rptr() argument
    23      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);  in update_shadow_rptr()
    33  void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,  in a5xx_flush() argument
    36      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);  in a5xx_flush()
    46          update_shadow_rptr(gpu, ring);  in a5xx_flush()
    63      gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);  in a5xx_flush()
    66  static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)  in a5xx_submit_in_rb() argument
    68      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);  in a5xx_submit_in_rb()
    115     a5xx_flush(gpu, ring, true);  in a5xx_submit_in_rb()
    [all …]
|
| H A D | a4xx_gpu.c |
    22  static void a4xx_dump(struct msm_gpu *gpu);
    23  static bool a4xx_idle(struct msm_gpu *gpu);
    25  static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)  in a4xx_submit() argument
    63      /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */  in a4xx_submit()
    69      adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);  in a4xx_submit()
    76  static void a4xx_enable_hwcg(struct msm_gpu *gpu)  in a4xx_enable_hwcg() argument
    78      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);  in a4xx_enable_hwcg()
    81          gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202);  in a4xx_enable_hwcg()
    83          gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222);  in a4xx_enable_hwcg()
    85          gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TP(i), 0x0E739CE7);  in a4xx_enable_hwcg()
    [all …]
|
| H A D | adreno_device.c |
    16  MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off…
    24  MODULE_PARM_DESC(disable_acd, "Forcefully disable GPU ACD");
    28  MODULE_PARM_DESC(no_gpu, "Disable GPU driver register (0=enable GPU driver register (default), 1=sk…
    51      /* identify gpu: */  in adreno_info()
    72      struct msm_gpu *gpu = NULL;  in adreno_load_gpu() local
    77      gpu = dev_to_gpu(&pdev->dev);  in adreno_load_gpu()
    79      if (!gpu) {  in adreno_load_gpu()
    80          dev_err_once(dev->dev, "no GPU device was found\n");  in adreno_load_gpu()
    84      adreno_gpu = to_adreno_gpu(gpu);  in adreno_load_gpu()
    96      if (gpu->funcs->ucode_load) {  in adreno_load_gpu()
    [all …]
|
| H A D | a2xx_gpu.c |
    10  static void a2xx_dump(struct msm_gpu *gpu);
    11  static bool a2xx_idle(struct msm_gpu *gpu);
    13  static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)  in a2xx_submit() argument
    51      adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);  in a2xx_submit()
    54  static bool a2xx_me_init(struct msm_gpu *gpu)  in a2xx_me_init() argument
    56      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);  in a2xx_me_init()
    58      struct msm_ringbuffer *ring = gpu->rb[0];  in a2xx_me_init()
    104     adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);  in a2xx_me_init()
    105     return a2xx_idle(gpu);  in a2xx_me_init()
    108 static int a2xx_hw_init(struct msm_gpu *gpu)  in a2xx_hw_init() argument
    [all …]
|
| H A D | a5xx_preempt.c |
    25  static inline void set_preempt_state(struct a5xx_gpu *gpu,  in set_preempt_state() argument
    34      atomic_set(&gpu->preempt_state, new);  in set_preempt_state()
    40  static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)  in update_wptr() argument
    52      gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);  in update_wptr()
    56  static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)  in get_next_ring() argument
    58      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);  in get_next_ring()
    63      for (i = 0; i < gpu->nr_rings; i++) {  in get_next_ring()
    65          struct msm_ringbuffer *ring = gpu->rb[i];  in get_next_ring()
    68          empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));  in get_next_ring()
    84      struct msm_gpu *gpu = &a5xx_gpu->base.base;  in a5xx_preempt_timer() local
    [all …]
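
In both a5xx_preempt.c above and a6xx_preempt.c below, get_next_ring() walks the rings in priority order and skips any ring whose write pointer equals the GPU's read pointer (nothing pending). A small standalone model of that selection loop (toy_ring and the sample values are made up for illustration):

```c
#include <stdio.h>

#define NR_RINGS 4

/* Minimal ring model: rings are ordered from highest to lowest priority. */
struct toy_ring {
	int id;
	unsigned int wptr;	/* where the CPU last wrote commands */
	unsigned int rptr;	/* where the GPU has read up to */
};

/* Return the highest-priority ring with work pending, or NULL if all are idle. */
static struct toy_ring *get_next_ring(struct toy_ring rings[NR_RINGS])
{
	for (int i = 0; i < NR_RINGS; i++) {
		struct toy_ring *ring = &rings[i];
		int empty = (ring->wptr == ring->rptr);

		if (!empty)
			return ring;
	}
	return NULL;
}

int main(void)
{
	struct toy_ring rings[NR_RINGS] = {
		{ .id = 0, .wptr = 8,  .rptr = 8 },	/* idle */
		{ .id = 1, .wptr = 12, .rptr = 4 },	/* has pending commands */
		{ .id = 2, .wptr = 0,  .rptr = 0 },
		{ .id = 3, .wptr = 0,  .rptr = 0 },
	};
	struct toy_ring *next = get_next_ring(rings);

	printf("next ring: %d\n", next ? next->id : -1);
	return 0;
}
```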
|
| H A D | a6xx_preempt.c |
    29  static inline void set_preempt_state(struct a6xx_gpu *gpu,  in set_preempt_state() argument
    38      atomic_set(&gpu->preempt_state, new);  in set_preempt_state()
    63  static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)  in get_next_ring() argument
    65      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);  in get_next_ring()
    71      for (i = 0; i < gpu->nr_rings; i++) {  in get_next_ring()
    73          struct msm_ringbuffer *ring = gpu->rb[i];  in get_next_ring()
    76          empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));  in get_next_ring()
    92      struct msm_gpu *gpu = &a6xx_gpu->base.base;  in a6xx_preempt_timer() local
    93      struct drm_device *dev = gpu->dev;  in a6xx_preempt_timer()
    98      dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);  in a6xx_preempt_timer()
    [all …]
|
| H A D | a6xx_gpu_state.c |
    131 static int a6xx_crashdumper_init(struct msm_gpu *gpu,  in a6xx_crashdumper_init() argument
    134     dumper->ptr = msm_gem_kernel_new(gpu->dev,  in a6xx_crashdumper_init()
    135         SZ_1M, MSM_BO_WC, gpu->vm,  in a6xx_crashdumper_init()
    144 static int a6xx_crashdumper_run(struct msm_gpu *gpu,  in a6xx_crashdumper_run() argument
    147     struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);  in a6xx_crashdumper_run()
    161     gpu_write64(gpu, REG_A6XX_CP_CRASH_DUMP_SCRIPT_BASE, dumper->iova);  in a6xx_crashdumper_run()
    163     gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);  in a6xx_crashdumper_run()
    165     ret = gpu_poll_timeout(gpu, REG_A6XX_CP_CRASH_DUMP_STATUS, val,  in a6xx_crashdumper_run()
    168     gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 0);  in a6xx_crashdumper_run()
    174 static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,  in debugbus_read() argument
    [all …]
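
a6xx_crashdumper_run() above shows a common kick-and-poll sequence: program the script base address, set the control register to start the engine, poll the status register with a bounded timeout, and always clear the control register afterwards. A self-contained sketch of that sequence against fake registers (the register names and the simulated hardware behaviour are assumptions, not the real A6xx programming model):

```c
#include <stdio.h>

/* Hypothetical register indices standing in for the crash-dump block. */
enum { DUMP_SCRIPT_BASE, DUMP_CNTL, DUMP_STATUS, NR_REGS };

static unsigned int regs[NR_REGS];
static int poll_count;

static void reg_write(unsigned int reg, unsigned int val) { regs[reg] = val; }

/* Pretend hardware: status flips to "done" a few polls after being kicked. */
static unsigned int reg_read(unsigned int reg)
{
	if (reg == DUMP_STATUS && regs[DUMP_CNTL] && ++poll_count > 3)
		return 1;
	return regs[reg];
}

/* Program the dump script address, kick the engine, poll for completion
 * with a bounded number of attempts, then always stop the engine. */
static int crashdumper_run(unsigned int script_base)
{
	int ret = -1;

	reg_write(DUMP_SCRIPT_BASE, script_base);
	reg_write(DUMP_CNTL, 1);

	for (int i = 0; i < 1000; i++) {
		if (reg_read(DUMP_STATUS) & 1) {
			ret = 0;
			break;
		}
	}

	reg_write(DUMP_CNTL, 0);
	return ret;
}

int main(void)
{
	printf("crashdumper_run: %d\n", crashdumper_run(0x1000));
	return 0;
}
```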
|
| H A D | adreno_gpu.c |
    25  MODULE_PARM_DESC(address_space_size, "Override for size of processes private GPU address space");
    30  static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,  in zap_shader_load_mdt() argument
    33      struct device *dev = &gpu->pdev->dev;  in zap_shader_load_mdt()
    78      ret = request_firmware_direct(&fw, fwname, gpu->dev->dev);  in zap_shader_load_mdt()
    83      fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);  in zap_shader_load_mdt()
    133     if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) {  in zap_shader_load_mdt()
    169 int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)  in adreno_zap_shader_load() argument
    171     struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);  in adreno_zap_shader_load()
    172     struct platform_device *pdev = gpu->pdev;  in adreno_zap_shader_load()
    184     return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);  in adreno_zap_shader_load()
    [all …]
|
| /linux/sound/hda/codecs/hdmi/ |
| H A D | nvhdmi.c |
    145 HDA_CODEC_ID_MODEL(0x10de0008, "GPU 08 HDMI/DP", MODEL_LEGACY),
    146 HDA_CODEC_ID_MODEL(0x10de0009, "GPU 09 HDMI/DP", MODEL_LEGACY),
    147 HDA_CODEC_ID_MODEL(0x10de000a, "GPU 0a HDMI/DP", MODEL_LEGACY),
    148 HDA_CODEC_ID_MODEL(0x10de000b, "GPU 0b HDMI/DP", MODEL_LEGACY),
    150 HDA_CODEC_ID_MODEL(0x10de000d, "GPU 0d HDMI/DP", MODEL_LEGACY),
    151 HDA_CODEC_ID_MODEL(0x10de0010, "GPU 10 HDMI/DP", MODEL_LEGACY),
    152 HDA_CODEC_ID_MODEL(0x10de0011, "GPU 11 HDMI/DP", MODEL_LEGACY),
    153 HDA_CODEC_ID_MODEL(0x10de0012, "GPU 12 HDMI/DP", MODEL_LEGACY),
    154 HDA_CODEC_ID_MODEL(0x10de0013, "GPU 13 HDMI/DP", MODEL_LEGACY),
    155 HDA_CODEC_ID_MODEL(0x10de0014, "GPU 14 HDMI/DP", MODEL_LEGACY),
    [all …]
|
| /linux/drivers/gpu/drm/ |
| H A D | Kconfig |
    31  source "drivers/gpu/drm/Kconfig.debug"
    172 source "drivers/gpu/drm/clients/Kconfig"
    186 source "drivers/gpu/drm/display/Kconfig"
    193     GPU memory management subsystem for devices with multiple
    194     GPU memory types. Will be enabled automatically if a device driver
    208     GPU-VM representation providing helpers to manage a GPUs virtual
    217     GPU-SVM representation providing helpers to manage a GPUs shared
    268 source "drivers/gpu/drm/sysfb/Kconfig"
    270 source "drivers/gpu/drm/arm/Kconfig"
    272 source "drivers/gpu/drm/radeon/Kconfig"
    [all …]
|
| /linux/drivers/gpu/drm/etnaviv/ |
| H A D | etnaviv_sched.c |
    29      dev_dbg(submit->gpu->dev, "skipping bad job\n");  in etnaviv_sched_run_job()
    38      struct etnaviv_gpu *gpu = submit->gpu;  in etnaviv_sched_timedout_job() local
    43       * If the GPU managed to complete this jobs fence, the timeout has  in etnaviv_sched_timedout_job()
    50       * If the GPU is still making forward progress on the front-end (which  in etnaviv_sched_timedout_job()
    54      dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);  in etnaviv_sched_timedout_job()
    55      change = dma_addr - gpu->hangcheck_dma_addr;  in etnaviv_sched_timedout_job()
    58      mutex_lock(&gpu->lock);  in etnaviv_sched_timedout_job()
    59      gpu_write(gpu, VIVS_MC_PROFILE_CONFIG0,  in etnaviv_sched_timedout_job()
    62      primid = gpu_read(gpu, VIVS_MC_PROFILE_FE_READ);  in etnaviv_sched_timedout_job()
    63      mutex_unlock(&gpu->lock);  in etnaviv_sched_timedout_job()
    [all …]
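
etnaviv_sched_timedout_job() above decides whether a timeout is a real hang by comparing the front-end DMA address against the value cached at the previous timeout: if it moved, the GPU is still making forward progress. A minimal standalone model of that check (the register read is simulated and all names are illustrative):

```c
#include <stdbool.h>
#include <stdio.h>

/* Cached front-end position from the previous timeout check. */
static unsigned int hangcheck_dma_addr;

/* Hypothetical stand-in for reading the front-end DMA address register. */
static unsigned int read_fe_dma_address(void)
{
	static unsigned int addr = 0x100;
	return addr += 0x40;	/* pretend the front end keeps advancing */
}

/* Returns true if the job should be treated as hung: the front end has
 * not moved since the last time the timeout handler ran. */
static bool job_is_hung(void)
{
	unsigned int dma_addr = read_fe_dma_address();
	bool progressing = (dma_addr != hangcheck_dma_addr);

	hangcheck_dma_addr = dma_addr;
	return !progressing;
}

int main(void)
{
	printf("hung after 1st timeout: %s\n", job_is_hung() ? "yes" : "no");
	printf("hung after 2nd timeout: %s\n", job_is_hung() ? "yes" : "no");
	return 0;
}
```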
|
| H A D | etnaviv_buffer.c |
    91  static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,  in etnaviv_cmd_select_pipe() argument
    96      lockdep_assert_held(&gpu->lock);  in etnaviv_cmd_select_pipe()
    104     if (gpu->exec_state == ETNA_PIPE_2D)  in etnaviv_cmd_select_pipe()
    106     else if (gpu->exec_state == ETNA_PIPE_3D)  in etnaviv_cmd_select_pipe()
    117 static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,  in etnaviv_buffer_dump() argument
    123     dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",  in etnaviv_buffer_dump()
    125         &gpu->mmu_context->cmdbuf_mapping) +  in etnaviv_buffer_dump()
    134  * The GPU may be executing this WAIT while we're modifying it, so we have
    135  * to write it in a specific order to avoid the GPU branching to somewhere
    153 static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,  in etnaviv_buffer_reserve() argument
    [all …]
|
| H A D | etnaviv_gpu.h |
    90  void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
    170 static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)  in gpu_write() argument
    172     writel(data, gpu->mmio + reg);  in gpu_write()
    175 static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)  in gpu_read() argument
    182     readl(gpu->mmio + reg);  in gpu_read()
    184     return readl(gpu->mmio + reg);  in gpu_read()
    187 static inline u32 gpu_fix_power_address(struct etnaviv_gpu *gpu, u32 reg)  in gpu_fix_power_address() argument
    190     if (gpu->identity.model == chipModel_GC300 &&  in gpu_fix_power_address()
    191         gpu->identity.revision < 0x2000)  in gpu_fix_power_address()
    197 static inline void gpu_write_power(struct etnaviv_gpu *gpu, u32 reg, u32 data)  in gpu_write_power() argument
    [all …]
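
The gpu_write()/gpu_read() helpers above are thin wrappers that funnel every register access through one place (writel()/readl() on gpu->mmio plus a register offset), which is also where quirks such as gpu_fix_power_address() can hook in. A standalone sketch of the same accessor pattern against a fake, memory-backed register window (no real MMIO or kernel headers involved; the byte offset is simply divided by 4 here):

```c
#include <stdint.h>
#include <stdio.h>

/* A fake 4 KiB MMIO window backed by ordinary memory, so the wrapper
 * pattern can be shown without real hardware or readl()/writel(). */
static uint32_t fake_mmio[1024];

struct toy_gpu {
	volatile uint32_t *mmio;	/* base of the register window */
};

/* Thin accessors in the spirit of gpu_write()/gpu_read(): all register
 * traffic goes through these two functions, so quirks stay in one place. */
static inline void gpu_write(struct toy_gpu *gpu, uint32_t reg, uint32_t data)
{
	gpu->mmio[reg / 4] = data;	/* reg is a byte offset */
}

static inline uint32_t gpu_read(struct toy_gpu *gpu, uint32_t reg)
{
	return gpu->mmio[reg / 4];
}

int main(void)
{
	struct toy_gpu gpu = { .mmio = fake_mmio };

	gpu_write(&gpu, 0x10, 0xdeadbeef);
	printf("reg 0x10 = 0x%08x\n", gpu_read(&gpu, 0x10));
	return 0;
}
```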
|
| H A D | etnaviv_iommu_v2.c |
    165 static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,  in etnaviv_iommuv2_restore_nonsec() argument
    172     if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)  in etnaviv_iommuv2_restore_nonsec()
    175     if (gpu->mmu_context)  in etnaviv_iommuv2_restore_nonsec()
    176         etnaviv_iommu_context_put(gpu->mmu_context);  in etnaviv_iommuv2_restore_nonsec()
    177     gpu->mmu_context = etnaviv_iommu_context_get(context);  in etnaviv_iommuv2_restore_nonsec()
    179     prefetch = etnaviv_buffer_config_mmuv2(gpu,  in etnaviv_iommuv2_restore_nonsec()
    182     etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),  in etnaviv_iommuv2_restore_nonsec()
    184     etnaviv_gpu_wait_idle(gpu, 100);  in etnaviv_iommuv2_restore_nonsec()
    186     gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);  in etnaviv_iommuv2_restore_nonsec()
    189 static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,  in etnaviv_iommuv2_restore_sec() argument
    [all …]
|
| /linux/drivers/gpu/drm/panthor/ |
| H A D | panthor_gpu.c |
    26   * struct panthor_gpu - GPU block management data.
    29      /** @irq: GPU irq. */
    35      /** @pending_reqs: Pending GPU requests. */
    38      /** @reqs_acked: GPU request wait queue. */
    87          drm_warn(&ptdev->base, "GPU Fault 0x%08x (%s) at 0x%016llx\n",  in panthor_gpu_irq_handler()
    92          drm_warn(&ptdev->base, "GPU Fault in protected mode\n");  in panthor_gpu_irq_handler()
    94      spin_lock(&ptdev->gpu->reqs_lock);  in panthor_gpu_irq_handler()
    95      if (status & ptdev->gpu->pending_reqs) {  in panthor_gpu_irq_handler()
    96          ptdev->gpu->pending_reqs &= ~status;  in panthor_gpu_irq_handler()
    97          wake_up_all(&ptdev->gpu->reqs_acked);  in panthor_gpu_irq_handler()
    [all …]
|
| /linux/Documentation/gpu/rfc/ |
| H A D | gpusvm.rst |
    4   GPU SVM Section
    25  * Eviction is defined as migrating data from the GPU back to the
    26    CPU without a virtual address to free up GPU memory.
    32  * GPU page table invalidation, which requires a GPU virtual address, is
    33    handled via the notifier that has access to the GPU virtual address.
    34  * GPU fault side
    36    and should strive to take mmap_read lock only in GPU SVM layer.
    37  * Big retry loop to handle all races with the mmu notifier under the gpu
    47    migration policy requiring GPU access to occur in GPU memory.
    49  While no current user (Xe) of GPU SVM has such a policy, it is likely
    [all …]
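
The gpusvm.rst excerpt above mentions a "big retry loop" that handles races with the mmu notifier: sample a notifier sequence number, fault in the pages, and only commit the GPU binding if no invalidation happened in between, otherwise retry. A toy, userspace-only model of that idea follows; the helper names and the bounded retry count are assumptions for illustration, not the GPU SVM API.

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy notifier: a sequence counter bumped whenever an invalidation races in. */
static unsigned long notifier_seq;

static unsigned long read_begin(void)     { return notifier_seq; }
static bool read_retry(unsigned long seq) { return seq != notifier_seq; }

/* Pretend an invalidation fires during the first attempt only. */
static void fault_in_pages(int attempt)
{
	if (attempt == 0)
		notifier_seq++;	/* concurrent CPU-side invalidation */
}

/* The "big retry loop": sample the notifier sequence, do the work, and
 * only commit the GPU page-table bind if no invalidation raced with us. */
static int handle_gpu_fault(void)
{
	for (int attempt = 0; attempt < 16; attempt++) {
		unsigned long seq = read_begin();

		fault_in_pages(attempt);

		/* In the driver this check happens under the GPU page-table lock. */
		if (!read_retry(seq)) {
			printf("bound on attempt %d\n", attempt);
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	return handle_gpu_fault() ? 1 : 0;
}
```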
|
| /linux/Documentation/devicetree/bindings/gpu/ |
| H A D | img,powervr-sgx.yaml |
    6   $id: http://devicetree.org/schemas/gpu/img,powervr-sgx.yaml#
    19        - ti,omap3430-gpu         # Rev 121
    20        - ti,omap3630-gpu         # Rev 125
    24        - ingenic,jz4780-gpu      # Rev 130
    25        - ti,omap4430-gpu         # Rev 120
    29        - allwinner,sun6i-a31-gpu # MP2 Rev 115
    30        - ti,omap4470-gpu         # MP1 Rev 112
    31        - ti,omap5432-gpu         # MP2 Rev 105
    32        - ti,am5728-gpu           # MP2 Rev 116
    33        - ti,am6548-gpu           # MP1 Rev 117
    [all …]
|
| /linux/drivers/gpu/drm/amd/amdkfd/ |
| H A D | kfd_topology.c |
    108     return top_dev->gpu;  in kfd_device_by_id()
    261     if (iolink->gpu && kfd_devcgroup_check_permission(iolink->gpu))  in iolink_show()
    303     if (mem->gpu && kfd_devcgroup_check_permission(mem->gpu))  in mem_show()
    335     if (cache->gpu && kfd_devcgroup_check_permission(cache->gpu))  in kfd_cache_show()
    417     if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))  in node_show()
    426     if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))  in node_show()
    433     if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))  in node_show()
    438         dev->gpu ? dev->node_props.simd_count : 0);  in node_show()
    462         dev->gpu ? (dev->node_props.array_count *  in node_show()
    463             NUM_XCC(dev->gpu->xcc_mask)) : 0);  in node_show()
    [all …]
|
| /linux/Documentation/devicetree/bindings/display/msm/ |
| H A D | gmu.yaml |
    16    to members of the Adreno A6xx GPU family. The GMU provides on-device power
    111       - description: GPU CX clock
    112       - description: GPU AXI clock
    113       - description: GPU MEMNOC clock
    142       - description: GPU CX clock
    143       - description: GPU AXI clock
    144       - description: GPU MEMNOC clock
    145       - description: GPU AHB clock
    146       - description: GPU HUB CX clock
    179       - description: GPU CX clock
    [all …]
|