/linux/drivers/gpu/drm/etnaviv/

etnaviv_gpu.c:
     32  { .name = "etnaviv-gpu,2d" },
     40  int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
     42  struct etnaviv_drm_private *priv = gpu->drm->dev_private;
     46  *value = gpu->identity.model;
     50  *value = gpu->identity.revision;
     54  *value = gpu->identity.features;
     58  *value = gpu->identity.minor_features0;
     62  *value = gpu->identity.minor_features1;
     66  *value = gpu->identity.minor_features2;
     70  *value = gpu->identity.minor_features3;
      …

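The identity fields above are handed to userspace one at a time through etnaviv's GET_PARAM ioctl. A minimal hedged sketch of the userspace side, assuming libdrm headers and that the etnaviv device is the first render node (both assumptions; error handling trimmed):

    /*
     * Hedged sketch: query the model value served by etnaviv_gpu_get_param().
     * Build against libdrm; the render-node path is an assumption.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <xf86drm.h>
    #include <etnaviv_drm.h>

    int main(void)
    {
        struct drm_etnaviv_param req = {
            .pipe = 0,                        /* first GPU core (assumed) */
            .param = ETNAVIV_PARAM_GPU_MODEL,
        };
        int fd = open("/dev/dri/renderD128", O_RDWR); /* assumed node */

        if (fd < 0)
            return 1;
        if (drmCommandWriteRead(fd, DRM_ETNAVIV_GET_PARAM, &req, sizeof(req)) == 0)
            printf("GPU model: 0x%llx\n", (unsigned long long)req.value);
        close(fd);
        return 0;
    }

The kernel side copies exactly one identity field per call, which is why the body of etnaviv_gpu_get_param() is a flat list of *value assignments.
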
etnaviv_sched.c:
     29  dev_dbg(submit->gpu->dev, "skipping bad job\n");    (in etnaviv_sched_run_job())
     38  struct etnaviv_gpu *gpu = submit->gpu;    (in etnaviv_sched_timedout_job())
     43  * If the GPU managed to complete this job's fence, the timeout is
     50  * If the GPU is still making forward progress on the front-end (which
     54  dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
     55  change = dma_addr - gpu->hangcheck_dma_addr;
     58  mutex_lock(&gpu->lock);
     59  gpu_write(gpu, VIVS_MC_PROFILE_CONFIG0,
     62  primid = gpu_read(gpu, VIVS_MC_PROFILE_FE_READ);
     63  mutex_unlock(&gpu->lock);
      …

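The comments above describe the hang heuristic: a timeout is only acted on if the front-end DMA address has stopped moving since the last timeout. A minimal sketch of that progress test, using the accessors from etnaviv_gpu.h below; the helper name is hypothetical, not the driver's:

    /* Hypothetical helper mirroring the check in etnaviv_sched_timedout_job():
     * sample the FE DMA address and compare it with the value captured at the
     * previous timeout; any movement means the front-end is still alive. */
    static bool fe_made_progress(struct etnaviv_gpu *gpu)
    {
        u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
        bool moved = (dma_addr != gpu->hangcheck_dma_addr);

        gpu->hangcheck_dma_addr = dma_addr; /* remember for the next timeout */
        return moved;
    }
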
etnaviv_gpu.h:
     90  void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
    170  static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
    172  writel(data, gpu->mmio + reg);
    175  static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
    182  readl(gpu->mmio + reg);
    184  return readl(gpu->mmio + reg);
    187  static inline u32 gpu_fix_power_address(struct etnaviv_gpu *gpu, u32 reg)
    190  if (gpu->identity.model == chipModel_GC300 &&
    191  gpu->identity.revision < 0x2000)
    197  static inline void gpu_write_power(struct etnaviv_gpu *gpu, u32 reg, u32 data)
      …

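The excerpt breaks off at the gpu_write_power() signature. Given gpu_fix_power_address() directly above it, a plausible body (a sketch, not verified against the full source) simply routes the write through the address fixup:

    static inline void gpu_write_power(struct etnaviv_gpu *gpu, u32 reg, u32 data)
    {
        /* Sketch: remap GC300 < rev 0x2000 power registers, then do the
         * plain MMIO write via gpu_write() above. */
        gpu_write(gpu, gpu_fix_power_address(gpu, reg), data);
    }
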
etnaviv_drv.c:
     50  struct etnaviv_gpu *g = priv->gpu[i];    (in load_gpu())
     57  priv->gpu[i] = NULL;
     85  struct etnaviv_gpu *gpu = priv->gpu[i];    (in etnaviv_open())
     88  if (gpu) {
     89  sched = &gpu->sched;
    112  struct etnaviv_gpu *gpu = priv->gpu[i];    (in etnaviv_postclose())
    114  if (gpu)
    150  static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
    155  seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));
    158  * Lock the GPU to avoid an MMU context switch just now and elevate
      …

etnaviv_iommu_v2.c:
    165  static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
    172  if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
    175  if (gpu->mmu_context)
    176  etnaviv_iommu_context_put(gpu->mmu_context);
    177  gpu->mmu_context = etnaviv_iommu_context_get(context);
    179  prefetch = etnaviv_buffer_config_mmuv2(gpu,
    182  etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
    184  etnaviv_gpu_wait_idle(gpu, 100);
    186  gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
    189  static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
      …

/linux/drivers/gpu/drm/msm/adreno/

a3xx_gpu.c:
     28  static void a3xx_dump(struct msm_gpu *gpu);
     29  static bool a3xx_idle(struct msm_gpu *gpu);
     31  static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
     69  /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
     82  adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
     85  static bool a3xx_me_init(struct msm_gpu *gpu)
     87  struct msm_ringbuffer *ring = gpu->rb[0];
    108  adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
    109  return a3xx_idle(gpu);
    112  static int a3xx_hw_init(struct msm_gpu *gpu)
      …

a6xx_gpu.c:
     19  static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
     21  struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
     29  if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
     33  return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
     37  static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
     40  if (!adreno_idle(gpu, ring))
     43  if (spin_until(_a6xx_check_idle(gpu))) {
     44  DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
     45  gpu->name, __builtin_return_address(0),
     46  gpu_read(gpu, REG_A6XX_RBBM_STATUS),
      …

adreno_gpu.h:
     36  * so it helps to be able to group the GPU devices by generation and if
     73  int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
    186  * of gpu firmware to linux-firmware, the fw files were
    249  * GPU specific offsets will be exported by GPU specific
    287  static inline uint8_t adreno_patchid(const struct adreno_gpu *gpu)
    293  WARN_ON_ONCE(gpu->info->family >= ADRENO_6XX_GEN1);
    294  return gpu->chip_id & 0xff;
    297  static inline bool adreno_is_revn(const struct adreno_gpu *gpu, uint32_t revn)
    299  if (WARN_ON_ONCE(!gpu->info))
    301  return gpu->info->revn == revn;
      …

a5xx_gpu.c:
     17  static void a5xx_dump(struct msm_gpu *gpu);
     21  static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
     23  struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
     33  void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
     36  struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
     46  update_shadow_rptr(gpu, ring);
     63  gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
     66  static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
     68  struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    115  a5xx_flush(gpu, ring, true);
      …

a4xx_gpu.c:
     22  static void a4xx_dump(struct msm_gpu *gpu);
     23  static bool a4xx_idle(struct msm_gpu *gpu);
     25  static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
     63  /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
     69  adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);
     76  static void a4xx_enable_hwcg(struct msm_gpu *gpu)
     78  struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
     81  gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202);
     83  gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222);
     85  gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TP(i), 0x0E739CE7);
      …

a5xx_preempt.c:
     25  static inline void set_preempt_state(struct a5xx_gpu *gpu,
     34  atomic_set(&gpu->preempt_state, new);
     40  static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
     52  gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
     56  static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
     58  struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
     63  for (i = 0; i < gpu->nr_rings; i++) {
     65  struct msm_ringbuffer *ring = gpu->rb[i];
     68  empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
     83  struct msm_gpu *gpu = &a5xx_gpu->base.base;    (in a5xx_preempt_timer())
      …

adreno_device.c:
     16  MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off…
     45  /* identify gpu: */    (in adreno_info())
     66  struct msm_gpu *gpu = NULL;    (in adreno_load_gpu())
     71  gpu = dev_to_gpu(&pdev->dev);
     73  if (!gpu) {
     74  dev_err_once(dev->dev, "no GPU device was found\n");
     78  adreno_gpu = to_adreno_gpu(gpu);
     90  if (gpu->funcs->ucode_load) {
     91  ret = gpu->funcs->ucode_load(gpu);
     98  * booting the gpu, go ahead and enable runpm:
      …

a6xx_gpu_state.c:
    131  static int a6xx_crashdumper_init(struct msm_gpu *gpu,
    134  dumper->ptr = msm_gem_kernel_new(gpu->dev,
    135  SZ_1M, MSM_BO_WC, gpu->aspace,
    144  static int a6xx_crashdumper_run(struct msm_gpu *gpu,
    147  struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    161  gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE, dumper->iova);
    163  gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);
    165  ret = gpu_poll_timeout(gpu, REG_A6XX_CP_CRASH_DUMP_STATUS, val,
    168  gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 0);
    174  static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
      …

a5xx_debugfs.c:
     14  static void pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
     21  gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, i);
     23  gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA));
     27  static void me_print(struct msm_gpu *gpu, struct drm_printer *p)
     34  gpu_write(gpu, REG_A5XX_CP_ME_STAT_ADDR, i);
     36  gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA));
     40  static void meq_print(struct msm_gpu *gpu, struct drm_printer *p)
     45  gpu_write(gpu, REG_A5XX_CP_MEQ_DBG_ADDR, 0);
     49  gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));
     53  static void roq_print(struct msm_gpu *gpu, struct drm_printer *p)
      …

adreno_gpu.c:
     25  MODULE_PARM_DESC(address_space_size, "Override for size of processes private GPU address space");
     30  static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
     33  struct device *dev = &gpu->pdev->dev;
     85  ret = request_firmware_direct(&fw, fwname, gpu->dev->dev);
     90  fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
    140  if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) {
    176  int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
    178  struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    179  struct platform_device *pdev = gpu->pdev;
    191  return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
      …

/linux/drivers/gpu/drm/

Kconfig:
    226  source "drivers/gpu/drm/clients/Kconfig"
    240  source "drivers/gpu/drm/display/Kconfig"
    246  GPU memory management subsystem for devices with multiple
    247  GPU memory types. Will be enabled automatically if a device driver
    260  Enables unit tests for TTM, a GPU memory manager subsystem used
    278  GPU-VM representation providing helpers to manage a GPU's virtual
    329  source "drivers/gpu/drm/i2c/Kconfig"
    331  source "drivers/gpu/drm/arm/Kconfig"
    333  source "drivers/gpu/drm/radeon/Kconfig"
    335  source "drivers/gpu/drm/amd/amdgpu/Kconfig"
      …

/linux/Documentation/gpu/

drm-kms-helpers.rst:
     53  .. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
     59  .. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
     68  .. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
     74  .. kernel-doc:: drivers/gpu/drm/drm_atomic_state_helper.c
     80  .. kernel-doc:: drivers/gpu/drm/drm_atomic_state_helper.c
     86  .. kernel-doc:: drivers/gpu/drm/drm_gem_atomic_helper.c
     92  .. kernel-doc:: drivers/gpu/drm/drm_gem_atomic_helper.c
     98  .. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
    104  .. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
    110  .. kernel-doc:: drivers/gpu/drm/drm_fb_helper.c
      …

/linux/drivers/gpu/drm/panthor/

panthor_gpu.c:
     24  * struct panthor_gpu - GPU block management data.
     27  /** @irq: GPU irq. */
     33  /** @pending_reqs: Pending GPU requests. */
     36  /** @reqs_acked: GPU request wait queue. */
     41  * struct panthor_model - GPU model description
     55  * GPU_MODEL() - Define a GPU model. A GPU product can be uniquely identified
     58  * @_name: Name for the GPU model.
    158  drm_warn(&ptdev->base, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
    163  drm_warn(&ptdev->base, "GPU Fault in protected mode\n");
    165  spin_lock(&ptdev->gpu->reqs_lock);
      …

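Per the kernel-doc above, GPU_MODEL() binds a model name to the identifying revision fields. A hedged sketch of what such a table macro typically looks like; every field except @name is an assumption about the layout, not panthor's actual definition:

    #include <linux/types.h>

    /* Sketch only: the identifying fields are assumed, not panthor's real ones. */
    struct panthor_model_sketch {
        const char *name;       /* @name from the kernel-doc above */
        u32 arch_major;         /* assumed id field */
        u32 product_major;      /* assumed id field */
    };

    #define GPU_MODEL_SKETCH(_name, _arch_major, _product_major)  \
        {                                                         \
            .name = #_name,                                       \
            .arch_major = (_arch_major),                          \
            .product_major = (_product_major),                    \
        }

    static const struct panthor_model_sketch gpu_models_sketch[] = {
        GPU_MODEL_SKETCH(g610, 10, 7),  /* placeholder values */
    };
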
/linux/drivers/gpu/drm/amd/amdkfd/

kfd_topology.c:
    108  return top_dev->gpu;    (in kfd_device_by_id())
    119  if (top_dev->gpu && top_dev->gpu->adev->pdev == pdev) {    (in kfd_device_by_pci_dev())
    120  device = top_dev->gpu;
    279  if (iolink->gpu && kfd_devcgroup_check_permission(iolink->gpu))    (in iolink_show())
    321  if (mem->gpu && kfd_devcgroup_check_permission(mem->gpu))    (in mem_show())
    353  if (cache->gpu && kfd_devcgroup_check_permission(cache->gpu))    (in kfd_cache_show())
    435  if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))    (in node_show())
    444  if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
    451  if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
    456  dev->gpu ? dev->node_props.simd_count : 0);
      …

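Every sysfs read above is gated on a device-cgroup check before any topology data is emitted. A condensed, illustrative sketch of that show-function shape (kernel context; the weight field and the raw-attribute signature follow kfd_topology, but treat the details as assumptions):

    /* Illustrative: refuse to leak topology data to tasks whose device
     * cgroup denies access to the GPU, as the checks above do. */
    static ssize_t iolink_show_sketch(struct kobject *kobj,
                                      struct attribute *attr, char *buffer)
    {
        struct kfd_iolink_properties *iolink =
            container_of(attr, struct kfd_iolink_properties, attr);

        if (iolink->gpu && kfd_devcgroup_check_permission(iolink->gpu))
            return -EPERM;

        return sysfs_emit(buffer, "%u\n", iolink->weight);
    }
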
/linux/Documentation/devicetree/bindings/gpu/

img,powervr-sgx.yaml:
      6  $id: http://devicetree.org/schemas/gpu/img,powervr-sgx.yaml#
     19  - ti,omap3430-gpu         # Rev 121
     20  - ti,omap3630-gpu         # Rev 125
     24  - ingenic,jz4780-gpu      # Rev 130
     25  - ti,omap4430-gpu         # Rev 120
     29  - allwinner,sun6i-a31-gpu # MP2 Rev 115
     30  - ti,omap4470-gpu         # MP1 Rev 112
     31  - ti,omap5432-gpu         # MP2 Rev 105
     32  - ti,am5728-gpu           # MP2 Rev 116
     33  - ti,am6548-gpu           # MP1 Rev 117
      …

nvidia,gk20a.txt:
      4  - compatible: "nvidia,<gpu>"
     24  - gpu
     35  - gpu
     44  gpu@57000000 {
     54  clock-names = "gpu", "pwr";
     56  reset-names = "gpu";
     62  gpu@57000000 {
     72  clock-names = "gpu", "pwr", "ref";
     74  reset-names = "gpu";
     80  gpu@17000000 {
      …

/linux/Documentation/gpu/amdgpu/

driver-misc.rst:
      5  GPU Product Information
      8  Information about the GPU can be obtained on certain cards
     14  .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
     20  .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
     26  .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
     32  .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
     38  .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
     44  .. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
     50  .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
     61  Discrete GPU Info
      …

amdgpu-glossary.rst:
      7  'Documentation/gpu/amdgpu/display/dc-glossary.rst'.
     35  page table used by the GPU kernel driver. It remaps system resources
     36  (memory or MMIO space) into the GPU's address space so the GPU can access
     38  provided an MMU that the GPU could use to get a contiguous view of
     39  scattered pages for DMA. The MMU has since moved on to the GPU, but the
     49  GPU Virtual Memory. This is the GPU's MMU. The GPU supports multiple
     51  allow the GPU to remap VRAM and system resources into GPU virtual address
     52  spaces for use by the GPU kernel driver and applications using the GPU.
     53  These provide memory protection for different applications using the GPU.
     58  use by the GPU. These addresses can be mapped into the "GART" GPUVM page

driver-core.rst:
      5  GPU Hardware Structure
     17  the SoC itself rather than specific IPs. E.g., things like GPU resets
     20  An APU contains more than just CPU and GPU; it also contains all of
     22  components are shared between the CPU, platform, and the GPU (e.g.,
     23  SMU, PSP, etc.). Specific components (CPU, GPU, etc.) usually have
     29  With respect to the GPU, we have the following major IPs:
     37  different IPs on the GPU get the memory (VRAM or system memory).
     38  It also provides the support for per process GPU virtual address
     42  This is the interrupt controller on the GPU. All of the IPs feed
     62  various things including paging and GPU page table updates. It's also
      …

/linux/drivers/gpu/drm/msm/

msm_debugfs.c:
     24  * GPU Snapshot:
     37  struct msm_gpu *gpu = priv->gpu;    (in msm_gpu_show())
     40  ret = mutex_lock_interruptible(&gpu->lock);
     44  drm_printf(&p, "%s Status:\n", gpu->name);
     45  gpu->funcs->show(gpu, show_priv->state, &p);
     47  mutex_unlock(&gpu->lock);
     57  struct msm_gpu *gpu = priv->gpu;    (in msm_gpu_release())
     59  mutex_lock(&gpu->lock);
     60  gpu->funcs->gpu_state_put(show_priv->state);
     61  mutex_unlock(&gpu->lock);
      …

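For context on the show/release pair above: the captured GPU state outlives the open file, so both printing and the final gpu_state_put() are serialized on gpu->lock. A minimal sketch of hooking such a show function up with the stock seq_file helpers, illustrative only, since msm actually registers through the DRM debugfs machinery:

    #include <linux/debugfs.h>
    #include <linux/seq_file.h>
    #include <drm/drm_print.h>
    #include "msm_gpu.h"    /* driver-internal header; assumed visible here */

    static int gpu_status_show(struct seq_file *s, void *unused)
    {
        struct msm_gpu *gpu = s->private;  /* set via debugfs_create_file() */
        struct drm_printer p = drm_seq_file_printer(s);
        int ret;

        ret = mutex_lock_interruptible(&gpu->lock);
        if (ret)
            return ret;
        drm_printf(&p, "%s Status:\n", gpu->name);
        mutex_unlock(&gpu->lock);
        return 0;
    }
    DEFINE_SHOW_ATTRIBUTE(gpu_status);

    /* usage: debugfs_create_file("status", 0444, parent, gpu, &gpu_status_fops); */
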