
Searched full:gpu (Results 1 – 25 of 1239) sorted by relevance


/linux/drivers/gpu/drm/etnaviv/
etnaviv_gpu.c
    32:  { .name = "etnaviv-gpu,2d" },
    40:  int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
    42:  struct etnaviv_drm_private *priv = gpu->drm->dev_private;
    46:  *value = gpu->identity.model;
    50:  *value = gpu->identity.revision;
    54:  *value = gpu->identity.features;
    58:  *value = gpu->identity.minor_features0;
    62:  *value = gpu->identity.minor_features1;
    66:  *value = gpu->identity.minor_features2;
    70:  *value = gpu->identity.minor_features3;
    [all …]
etnaviv_sched.c
    29:  dev_dbg(submit->gpu->dev, "skipping bad job\n");
    38:  struct etnaviv_gpu *gpu = submit->gpu;
    43:  * If the GPU managed to complete this job's fence, the timeout has
    50:  * If the GPU is still making forward progress on the front-end (which
    54:  dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
    55:  change = dma_addr - gpu->hangcheck_dma_addr;
    58:  mutex_lock(&gpu->lock);
    59:  gpu_write(gpu, VIVS_MC_PROFILE_CONFIG0,
    62:  primid = gpu_read(gpu, VIVS_MC_PROFILE_FE_READ);
    63:  mutex_unlock(&gpu->lock);
    [all …]
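
The etnaviv_sched.c hits above show how the scheduler's timeout handler tells a genuine hang from a slow job: it samples the front-end DMA address (VIVS_FE_DMA_ADDRESS) and compares it with the value saved at the previous hangcheck; if the address moved, the front-end is still fetching commands. A minimal standalone sketch of that progress test, with a hypothetical hangcheck struct standing in for the driver's state:

    #include <stdbool.h>
    #include <stdint.h>

    struct hangcheck {
        uint32_t last_dma_addr;  /* front-end DMA address at the previous check */
    };

    /* True if the front-end moved since the last check, i.e. the timeout
     * should be treated as a false positive and the job given more time. */
    static bool fe_made_progress(struct hangcheck *hc, uint32_t dma_addr)
    {
        bool progress = (dma_addr != hc->last_dma_addr);

        hc->last_dma_addr = dma_addr;  /* remember for the next check */
        return progress;
    }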
etnaviv_gpu.h
    90:  void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
    170: static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
    172: writel(data, gpu->mmio + reg);
    175: static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
    182: readl(gpu->mmio + reg);
    184: return readl(gpu->mmio + reg);
    187: static inline u32 gpu_fix_power_address(struct etnaviv_gpu *gpu, u32 reg)
    190: if (gpu->identity.model == chipModel_GC300 &&
    191: gpu->identity.revision < 0x2000)
    197: static inline void gpu_write_power(struct etnaviv_gpu *gpu, u32 reg, u32 data)
    [all …]
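
gpu_read()/gpu_write() above are thin MMIO accessors: readl()/writel() against the GPU's mapped register window at a byte offset. A userspace-flavoured sketch of the same accessor pattern (hypothetical gpu_dev type; volatile loads and stores standing in for readl()/writel()):

    #include <stdint.h>

    struct gpu_dev {
        volatile uint32_t *mmio;  /* mapped register window */
    };

    /* Write a 32-bit register at byte offset `reg`. */
    static inline void gpu_write(struct gpu_dev *gpu, uint32_t reg, uint32_t data)
    {
        gpu->mmio[reg / 4] = data;
    }

    /* Read a 32-bit register at byte offset `reg`. */
    static inline uint32_t gpu_read(struct gpu_dev *gpu, uint32_t reg)
    {
        return gpu->mmio[reg / 4];
    }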
etnaviv_drv.c
    50:  struct etnaviv_gpu *g = priv->gpu[i];
    57:  priv->gpu[i] = NULL;
    85:  struct etnaviv_gpu *gpu = priv->gpu[i];
    88:  if (gpu) {
    89:  sched = &gpu->sched;
    112: struct etnaviv_gpu *gpu = priv->gpu[i];
    114: if (gpu)
    150: static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
    155: seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));
    158: * Lock the GPU to avoid an MMU context switch just now and elevate
    [all …]
etnaviv_iommu_v2.c
    165: static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
    172: if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
    175: if (gpu->mmu_context)
    176: etnaviv_iommu_context_put(gpu->mmu_context);
    177: gpu->mmu_context = etnaviv_iommu_context_get(context);
    179: prefetch = etnaviv_buffer_config_mmuv2(gpu,
    182: etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
    184: etnaviv_gpu_wait_idle(gpu, 100);
    186: gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
    189: static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
    [all …]
/linux/drivers/gpu/drm/msm/
msm_gpu.c
    25:  static int enable_pwrrail(struct msm_gpu *gpu)
    27:  struct drm_device *dev = gpu->dev;
    30:  if (gpu->gpu_reg) {
    31:  ret = regulator_enable(gpu->gpu_reg);
    38:  if (gpu->gpu_cx) {
    39:  ret = regulator_enable(gpu->gpu_cx);
    49:  static int disable_pwrrail(struct msm_gpu *gpu)
    51:  if (gpu->gpu_cx)
    52:  regulator_disable(gpu->gpu_cx);
    53:  if (gpu->gpu_reg)
    [all …]
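
enable_pwrrail()/disable_pwrrail() above illustrate the usual pairing rule for power resources: the rails come up in order (gpu_reg, then gpu_cx) and go down in reverse. A tiny sketch of the idiom with hypothetical rail handles in place of struct regulator (the unwind-on-failure branch is this sketch's addition, not a claim about the msm code):

    #include <stdio.h>

    /* Hypothetical rail handles standing in for struct regulator *. */
    static int rail_enable(const char *name)   { printf("enable %s\n", name);  return 0; }
    static void rail_disable(const char *name) { printf("disable %s\n", name); }

    static int enable_pwrrail(void)
    {
        int ret = rail_enable("gpu_reg");  /* core rail first */
        if (ret)
            return ret;
        ret = rail_enable("gpu_cx");       /* then the CX rail */
        if (ret) {
            rail_disable("gpu_reg");       /* unwind on failure */
            return ret;
        }
        return 0;
    }

    static void disable_pwrrail(void)
    {
        rail_disable("gpu_cx");            /* reverse order on teardown */
        rail_disable("gpu_reg");
    }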
msm_gpu.h
    48:  int (*get_param)(struct msm_gpu *gpu, struct msm_context *ctx,
    50:  int (*set_param)(struct msm_gpu *gpu, struct msm_context *ctx,
    52:  int (*hw_init)(struct msm_gpu *gpu);
    57:  int (*ucode_load)(struct msm_gpu *gpu);
    59:  int (*pm_suspend)(struct msm_gpu *gpu);
    60:  int (*pm_resume)(struct msm_gpu *gpu);
    61:  void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
    62:  void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
    64:  struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
    65:  void (*recover)(struct msm_gpu *gpu);
    [all …]
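
The msm_gpu.h hits are the driver's backend vtable: each GPU generation fills a table of function pointers and the core dispatches through it. A minimal, self-contained sketch of the pattern (hypothetical gpu/gpu_funcs types and a made-up a6xx backend, not the real msm definitions):

    #include <stdio.h>

    struct gpu;  /* opaque device handle */

    /* Per-generation backend operations (hypothetical subset). */
    struct gpu_funcs {
        int  (*hw_init)(struct gpu *gpu);
        void (*recover)(struct gpu *gpu);
    };

    struct gpu {
        const struct gpu_funcs *funcs;
    };

    static int a6xx_hw_init(struct gpu *gpu)  { (void)gpu; puts("a6xx init");    return 0; }
    static void a6xx_recover(struct gpu *gpu) { (void)gpu; puts("a6xx recover"); }

    static const struct gpu_funcs a6xx_funcs = {
        .hw_init = a6xx_hw_init,
        .recover = a6xx_recover,
    };

    int main(void)
    {
        struct gpu gpu = { .funcs = &a6xx_funcs };
        return gpu.funcs->hw_init(&gpu);  /* dispatch through the table */
    }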
/linux/drivers/gpu/drm/msm/adreno/
adreno_gpu.h
    37:  * so it helps to be able to group the GPU devices by generation and if
    75:  int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
    187: * of gpu firmware to linux-firmware, the fw files were
    218: * GPU specific offsets will be exported by GPU specific
    256: static inline uint8_t adreno_patchid(const struct adreno_gpu *gpu)
    262: WARN_ON_ONCE(gpu->info->family >= ADRENO_6XX_GEN1);
    263: return gpu->chip_id & 0xff;
    266: static inline bool adreno_is_revn(const struct adreno_gpu *gpu, uint32_t revn)
    268: if (WARN_ON_ONCE(!gpu->info))
    270: return gpu->info->revn == revn;
    [all …]
a5xx_gpu.c
    17:  static void a5xx_dump(struct msm_gpu *gpu);
    21:  static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
    23:  struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    33:  void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
    36:  struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    46:  update_shadow_rptr(gpu, ring);
    63:  gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
    66:  static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
    68:  struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    115: a5xx_flush(gpu, ring, true);
    [all …]
a6xx_gpu.c
    19:  static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
    21:  struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    29:  if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
    33:  return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
    37:  static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
    40:  if (!adreno_idle(gpu, ring))
    43:  if (spin_until(_a6xx_check_idle(gpu))) {
    44:  DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
    45:  gpu->name, __builtin_return_address(0),
    46:  gpu_read(gpu, REG_A6XX_RBBM_STATUS),
    [all …]
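
a6xx_idle() above spins until _a6xx_check_idle() passes or a timeout expires, then dumps status registers. A self-contained sketch of that spin-until-timeout pattern (wall-clock based and with a hypothetical idle check, rather than the kernel's spin_until() macro):

    #include <stdbool.h>
    #include <time.h>

    static bool gpu_is_idle(void) { return true; }  /* hypothetical idle check */

    /* Poll gpu_is_idle() until it passes or timeout_ms elapses.
     * Returns true on success, false on timeout. */
    static bool wait_for_idle(long timeout_ms)
    {
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
            if (gpu_is_idle())
                return true;
            clock_gettime(CLOCK_MONOTONIC, &now);
            long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000
                            + (now.tv_nsec - start.tv_nsec) / 1000000;
            if (elapsed_ms > timeout_ms)
                return false;
        }
    }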
adreno_device.c
    16:  MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off…
    24:  MODULE_PARM_DESC(disable_acd, "Forcefully disable GPU ACD");
    45:  /* identify gpu: */
    66:  struct msm_gpu *gpu = NULL;
    71:  gpu = dev_to_gpu(&pdev->dev);
    73:  if (!gpu) {
    74:  dev_err_once(dev->dev, "no GPU device was found\n");
    78:  adreno_gpu = to_adreno_gpu(gpu);
    90:  if (gpu->funcs->ucode_load) {
    91:  ret = gpu->funcs->ucode_load(gpu);
    [all …]
a5xx_preempt.c
    25:  static inline void set_preempt_state(struct a5xx_gpu *gpu,
    34:  atomic_set(&gpu->preempt_state, new);
    40:  static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
    52:  gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
    56:  static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
    58:  struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    63:  for (i = 0; i < gpu->nr_rings; i++) {
    65:  struct msm_ringbuffer *ring = gpu->rb[i];
    68:  empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
    84:  struct msm_gpu *gpu = &a5xx_gpu->base.base;
    [all …]
a6xx_preempt.c
    29:  static inline void set_preempt_state(struct a6xx_gpu *gpu,
    38:  atomic_set(&gpu->preempt_state, new);
    44:  static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
    54:  gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
    63:  static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
    65:  struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    71:  for (i = 0; i < gpu->nr_rings; i++) {
    73:  struct msm_ringbuffer *ring = gpu->rb[i];
    76:  empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
    92:  struct msm_gpu *gpu = &a6xx_gpu->base.base;
    [all …]
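
Both preempt files pick the next ring the same way: walk the rings from highest to lowest priority and return the first whose write pointer is ahead of the GPU's read pointer, i.e. the first with commands still queued. A standalone sketch of that selection loop (hypothetical ring type with plain rptr/wptr fields):

    #include <stddef.h>
    #include <stdint.h>

    struct ring {
        uint32_t rptr;  /* GPU read pointer */
        uint32_t wptr;  /* CPU write pointer */
    };

    /* Return the highest-priority non-empty ring, or NULL if all are idle.
     * Rings are assumed ordered from highest to lowest priority. */
    static struct ring *get_next_ring(struct ring *rings, size_t nr_rings)
    {
        for (size_t i = 0; i < nr_rings; i++) {
            if (rings[i].wptr != rings[i].rptr)  /* commands pending */
                return &rings[i];
        }
        return NULL;
    }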
a6xx_gpu_state.c
    131: static int a6xx_crashdumper_init(struct msm_gpu *gpu,
    134: dumper->ptr = msm_gem_kernel_new(gpu->dev,
    135: SZ_1M, MSM_BO_WC, gpu->vm,
    144: static int a6xx_crashdumper_run(struct msm_gpu *gpu,
    147: struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    161: gpu_write64(gpu, REG_A6XX_CP_CRASH_DUMP_SCRIPT_BASE, dumper->iova);
    163: gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);
    165: ret = gpu_poll_timeout(gpu, REG_A6XX_CP_CRASH_DUMP_STATUS, val,
    168: gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 0);
    174: static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
    [all …]
adreno_gpu.c
    25:  MODULE_PARM_DESC(address_space_size, "Override for size of processes private GPU address space");
    30:  static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
    33:  struct device *dev = &gpu->pdev->dev;
    85:  ret = request_firmware_direct(&fw, fwname, gpu->dev->dev);
    90:  fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
    140: if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) {
    176: int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
    178: struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    179: struct platform_device *pdev = gpu->pdev;
    191: return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
    [all …]
/linux/sound/hda/codecs/hdmi/
nvhdmi.c
    145: HDA_CODEC_ID_MODEL(0x10de0008, "GPU 08 HDMI/DP", MODEL_LEGACY),
    146: HDA_CODEC_ID_MODEL(0x10de0009, "GPU 09 HDMI/DP", MODEL_LEGACY),
    147: HDA_CODEC_ID_MODEL(0x10de000a, "GPU 0a HDMI/DP", MODEL_LEGACY),
    148: HDA_CODEC_ID_MODEL(0x10de000b, "GPU 0b HDMI/DP", MODEL_LEGACY),
    150: HDA_CODEC_ID_MODEL(0x10de000d, "GPU 0d HDMI/DP", MODEL_LEGACY),
    151: HDA_CODEC_ID_MODEL(0x10de0010, "GPU 10 HDMI/DP", MODEL_LEGACY),
    152: HDA_CODEC_ID_MODEL(0x10de0011, "GPU 11 HDMI/DP", MODEL_LEGACY),
    153: HDA_CODEC_ID_MODEL(0x10de0012, "GPU 12 HDMI/DP", MODEL_LEGACY),
    154: HDA_CODEC_ID_MODEL(0x10de0013, "GPU 13 HDMI/DP", MODEL_LEGACY),
    155: HDA_CODEC_ID_MODEL(0x10de0014, "GPU 14 HDMI/DP", MODEL_LEGACY),
    [all …]
/linux/drivers/gpu/drm/
Kconfig
    31:  source "drivers/gpu/drm/Kconfig.debug"
    172: source "drivers/gpu/drm/clients/Kconfig"
    186: source "drivers/gpu/drm/display/Kconfig"
    193: GPU memory management subsystem for devices with multiple
    194: GPU memory types. Will be enabled automatically if a device driver
    208: GPU-VM representation providing helpers to manage a GPU's virtual
    217: GPU-SVM representation providing helpers to manage a GPU's shared
    268: source "drivers/gpu/drm/sysfb/Kconfig"
    270: source "drivers/gpu/drm/arm/Kconfig"
    272: source "drivers/gpu/drm/radeon/Kconfig"
    [all …]
drm_gpusvm.c
    25:  * GPU Shared Virtual Memory (GPU SVM) layer for the Direct Rendering Manager (DRM)
    27:  * between the CPU and GPU. It enables efficient data exchange and processing
    28:  * for GPU-accelerated applications by allowing memory sharing and
    29:  * synchronization between the CPU's and GPU's virtual address spaces.
    31:  * Key GPU SVM Components:
    34:  * Used for tracking memory intervals and notifying the GPU of changes,
    35:  * notifiers are sized based on a GPU SVM initialization parameter, with a
    38:  * tracked within a GPU SVM red-black tree and list and are dynamically
    42:  * Represent memory ranges mapped in a DRM device and managed by GPU SVM.
    43:  * They are sized based on an array of chunk sizes, which is a GPU SVM
    [all …]
/linux/include/drm/
drm_gpusvm.h
    23:  * struct drm_gpusvm_ops - Operations structure for GPU SVM
    25:  * This structure defines the operations for GPU Shared Virtual Memory (SVM).
    26:  * These operations are provided by the GPU driver to manage SVM ranges and
    31:  * @notifier_alloc: Allocate a GPU SVM notifier (optional)
    33:  * Allocate a GPU SVM notifier.
    35:  * Return: Pointer to the allocated GPU SVM notifier on success, NULL on failure.
    40:  * @notifier_free: Free a GPU SVM notifier (optional)
    41:  * @notifier: Pointer to the GPU SVM notifier to be freed
    43:  * Free a GPU SVM notifier.
    48:  * @range_alloc: Allocate a GPU SVM range (optional)
    [all …]
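
The kerneldoc above describes a driver-supplied operations table in which every hook is optional. A minimal sketch of that shape (hypothetical types and signatures; the real prototypes live in drm_gpusvm.h):

    #include <stdlib.h>

    struct svm_notifier { int placeholder; };  /* stand-in for the real notifier */

    /* Driver-supplied operations; a NULL hook selects the default behaviour. */
    struct svm_ops {
        struct svm_notifier *(*notifier_alloc)(void);
        void (*notifier_free)(struct svm_notifier *notifier);
    };

    static struct svm_notifier *my_notifier_alloc(void)
    {
        return calloc(1, sizeof(struct svm_notifier));  /* NULL on failure */
    }

    static void my_notifier_free(struct svm_notifier *notifier)
    {
        free(notifier);
    }

    static const struct svm_ops my_ops = {
        .notifier_alloc = my_notifier_alloc,
        .notifier_free  = my_notifier_free,
    };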
/linux/Documentation/gpu/rfc/
gpusvm.rst
    4:   GPU SVM Section
    25:  * Eviction is defined as migrating data from the GPU back to the
    26:  CPU without a virtual address to free up GPU memory.
    32:  * GPU page table invalidation, which requires a GPU virtual address, is
    33:  handled via the notifier that has access to the GPU virtual address.
    34:  * GPU fault side
    36:  and should strive to take mmap_read lock only in GPU SVM layer.
    37:  * Big retry loop to handle all races with the mmu notifier under the gpu
    47:  migration policy requiring GPU access to occur in GPU memory.
    49:  While no current user (Xe) of GPU SVM has such a policy, it is likely
    [all …]
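
The "big retry loop" the RFC mentions is the usual mmu-notifier pattern: record a notifier sequence number, do the fault work, and start over if an invalidation raced with it. A freestanding sketch of the pattern (simulated sequence counter and hypothetical helpers, not the Xe/GPU SVM code):

    #include <stdbool.h>

    static unsigned long seq_counter;  /* bumped by the (simulated) notifier */

    static unsigned long notifier_seq_begin(void)   { return seq_counter; }
    static bool notifier_seq_retry(unsigned long s) { return s != seq_counter; }
    static void fault_and_map_range(void)           { /* fault pages, program GPU PTEs */ }

    static void handle_gpu_fault(void)
    {
        for (;;) {
            unsigned long seq = notifier_seq_begin();

            fault_and_map_range();
            if (!notifier_seq_retry(seq))  /* no invalidation raced us */
                break;
            /* An invalidation hit our range mid-flight: the mappings are
             * gone, so simply loop and redo the fault work. */
        }
    }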
/linux/drivers/gpu/drm/amd/amdkfd/
kfd_topology.c
    108: return top_dev->gpu;
    261: if (iolink->gpu && kfd_devcgroup_check_permission(iolink->gpu))
    303: if (mem->gpu && kfd_devcgroup_check_permission(mem->gpu))
    335: if (cache->gpu && kfd_devcgroup_check_permission(cache->gpu))
    417: if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
    426: if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
    433: if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
    438: dev->gpu ? dev->node_props.simd_count : 0);
    462: dev->gpu ? (dev->node_props.array_count *
    463: NUM_XCC(dev->gpu->xcc_mask)) : 0);
    [all …]
/linux/Documentation/devicetree/bindings/gpu/
img,powervr-sgx.yaml
    6:   $id: http://devicetree.org/schemas/gpu/img,powervr-sgx.yaml#
    19:  - ti,omap3430-gpu # Rev 121
    20:  - ti,omap3630-gpu # Rev 125
    24:  - ingenic,jz4780-gpu # Rev 130
    25:  - ti,omap4430-gpu # Rev 120
    29:  - allwinner,sun6i-a31-gpu # MP2 Rev 115
    30:  - ti,omap4470-gpu # MP1 Rev 112
    31:  - ti,omap5432-gpu # MP2 Rev 105
    32:  - ti,am5728-gpu # MP2 Rev 116
    33:  - ti,am6548-gpu # MP1 Rev 117
    [all …]
nvidia,gk20a.txt
    4:   - compatible: "nvidia,<gpu>"
    24:  - gpu
    35:  - gpu
    44:  gpu@57000000 {
    54:  clock-names = "gpu", "pwr";
    56:  reset-names = "gpu";
    62:  gpu@57000000 {
    72:  clock-names = "gpu", "pwr", "ref";
    74:  reset-names = "gpu";
    80:  gpu@17000000 {
    [all …]
/linux/Documentation/driver-api/
edac.rst
    116: Several stacks of HBM chips connect to the CPU or GPU through an ultra-fast
    196: GPU nodes can be accessed the same way as the data fabric on CPU nodes.
    199: and each GPU data fabric contains four Unified Memory Controllers (UMC).
    207: Memory controllers on AMD GPU nodes can be represented in EDAC as follows:
    209: GPU DF / GPU Node -> EDAC MC
    210: GPU UMC -> EDAC CSROW
    211: GPU UMC channel -> EDAC CHANNEL
    218: - The CPU UMC (Unified Memory Controller) is mostly the same as the GPU UMC.
    224: - GPU UMCs use 1 chip select, so UMC = EDAC CSROW.
    225: - GPU UMCs use 8 channels, so UMC channel = EDAC channel.
    [all …]
/linux/Documentation/gpu/
drm-kms.rst
    159: .. kernel-doc:: drivers/gpu/drm/drm_mode_config.c
    191: .. kernel-doc:: drivers/gpu/drm/drm_mode_object.c
    288: .. kernel-doc:: drivers/gpu/drm/drm_atomic.c
    297: .. kernel-doc:: drivers/gpu/drm/drm_atomic.c
    303: .. kernel-doc:: drivers/gpu/drm/drm_atomic_uapi.c
    306: .. kernel-doc:: drivers/gpu/drm/drm_atomic_uapi.c
    312: .. kernel-doc:: drivers/gpu/drm/drm_crtc.c
    321: .. kernel-doc:: drivers/gpu/drm/drm_crtc.c
    327: .. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
    336: .. kernel-doc:: drivers/gpu/drm/drm_framebuffer.c
    [all …]
