
Searched refs:gpu (Results 1 – 25 of 457) sorted by relevance


/linux/drivers/gpu/drm/etnaviv/
etnaviv_gpu.c
43 int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value) in etnaviv_gpu_get_param() argument
45 struct etnaviv_drm_private *priv = gpu->drm->dev_private; in etnaviv_gpu_get_param()
49 *value = gpu->identity.model; in etnaviv_gpu_get_param()
53 *value = gpu->identity.revision; in etnaviv_gpu_get_param()
57 *value = gpu->identity.features; in etnaviv_gpu_get_param()
61 *value = gpu->identity.minor_features0; in etnaviv_gpu_get_param()
65 *value = gpu->identity.minor_features1; in etnaviv_gpu_get_param()
69 *value = gpu->identity.minor_features2; in etnaviv_gpu_get_param()
73 *value = gpu->identity.minor_features3; in etnaviv_gpu_get_param()
77 *value = gpu->identity.minor_features4; in etnaviv_gpu_get_param()
[all …]
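
Aside: the matches above are the body of etnaviv's parameter query — a switch that copies one identity field per requested ID into a caller-provided u64, returning an error for unknown IDs. A minimal user-space sketch of that pattern (the struct, parameter IDs, and values below are hypothetical stand-ins, not the etnaviv UAPI):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's identity fields and param IDs. */
struct gpu_identity {
	uint32_t model, revision, features;
};

enum { PARAM_MODEL, PARAM_REVISION, PARAM_FEATURES };

static int gpu_get_param(const struct gpu_identity *id, uint32_t param,
			 uint64_t *value)
{
	switch (param) {
	case PARAM_MODEL:
		*value = id->model;
		break;
	case PARAM_REVISION:
		*value = id->revision;
		break;
	case PARAM_FEATURES:
		*value = id->features;
		break;
	default:
		return -1; /* the real hook rejects unknown IDs */
	}
	return 0;
}

int main(void)
{
	struct gpu_identity id = { 0x2000, 0x5108, 0xe0287c8d };
	uint64_t v;

	if (gpu_get_param(&id, PARAM_MODEL, &v) == 0)
		printf("model: 0x%llx\n", (unsigned long long)v);
	return 0;
}
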
etnaviv_sched.c
29 dev_dbg(submit->gpu->dev, "skipping bad job\n"); in etnaviv_sched_run_job()
38 struct etnaviv_gpu *gpu = submit->gpu; in etnaviv_sched_timedout_job() local
54 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); in etnaviv_sched_timedout_job()
55 change = dma_addr - gpu->hangcheck_dma_addr; in etnaviv_sched_timedout_job()
58 mutex_lock(&gpu->lock); in etnaviv_sched_timedout_job()
59 gpu_write(gpu, VIVS_MC_PROFILE_CONFIG0, in etnaviv_sched_timedout_job()
62 primid = gpu_read(gpu, VIVS_MC_PROFILE_FE_READ); in etnaviv_sched_timedout_job()
63 mutex_unlock(&gpu->lock); in etnaviv_sched_timedout_job()
65 if (gpu->state == ETNA_GPU_STATE_RUNNING && in etnaviv_sched_timedout_job()
66 (gpu->completed_fence != gpu->hangcheck_fence || in etnaviv_sched_timedout_job()
[all …]
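
Aside: the timeout handler reads the front-end DMA address and compares it, along with the completed fence, to values saved at the previous check; if either has advanced, the GPU is still making progress and the job is not declared hung. A standalone sketch of that progress test (the struct and field names here are invented; the real driver samples VIVS_FE_DMA_ADDRESS over MMIO):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct hangcheck {
	uint32_t last_dma_addr;
	uint32_t last_fence;
};

/* Return true only if neither the FE DMA address nor the fence counter
 * moved since the previous check. */
static bool hangcheck_is_hung(struct hangcheck *hc, uint32_t dma_addr,
			      uint32_t completed_fence)
{
	if (dma_addr != hc->last_dma_addr || completed_fence != hc->last_fence) {
		hc->last_dma_addr = dma_addr;
		hc->last_fence = completed_fence;
		return false; /* still making progress, extend the timeout */
	}
	return true;
}

int main(void)
{
	struct hangcheck hc = { 0, 0 };

	printf("%d\n", hangcheck_is_hung(&hc, 0x1000, 1)); /* 0: progressed */
	printf("%d\n", hangcheck_is_hung(&hc, 0x1000, 1)); /* 1: stalled */
	return 0;
}
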
etnaviv_gpu.h
90 void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
170 static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data) in gpu_write() argument
172 writel(data, gpu->mmio + reg); in gpu_write()
175 static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg) in gpu_read() argument
182 readl(gpu->mmio + reg); in gpu_read()
184 return readl(gpu->mmio + reg); in gpu_read()
187 static inline u32 gpu_fix_power_address(struct etnaviv_gpu *gpu, u32 reg) in gpu_fix_power_address() argument
190 if (gpu->identity.model == chipModel_GC300 && in gpu_fix_power_address()
191 gpu->identity.revision < 0x2000) in gpu_fix_power_address()
197 static inline void gpu_write_power(struct etnaviv_gpu *gpu, u32 reg, u32 data) in gpu_write_power() argument
[all …]
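
Aside: this header hides MMIO behind tiny inline accessors — gpu_write()/gpu_read() wrap writel()/readl() on gpu->mmio plus a register offset (the doubled readl at lines 182–184 suggests a throwaway read issued as a hardware workaround). A user-space model with an array standing in for the register file, no real MMIO involved:

#include <stdint.h>
#include <stdio.h>

/* An array stands in for gpu->mmio; the real accessors operate on an
 * ioremapped region. */
struct fake_gpu {
	uint32_t regs[256]; /* word-indexed register file */
};

static inline void gpu_write(struct fake_gpu *gpu, uint32_t reg, uint32_t data)
{
	gpu->regs[reg / 4] = data;
}

static inline uint32_t gpu_read(struct fake_gpu *gpu, uint32_t reg)
{
	return gpu->regs[reg / 4];
}

int main(void)
{
	struct fake_gpu gpu = { { 0 } };

	gpu_write(&gpu, 0x10, 0xdeadbeef);
	printf("0x%x\n", gpu_read(&gpu, 0x10));
	return 0;
}
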
etnaviv_drv.c
51 struct etnaviv_gpu *g = priv->gpu[i]; in load_gpu()
58 priv->gpu[i] = NULL; in load_gpu()
86 struct etnaviv_gpu *gpu = priv->gpu[i]; in etnaviv_open() local
89 if (gpu) { in etnaviv_open()
90 sched = &gpu->sched; in etnaviv_open()
113 struct etnaviv_gpu *gpu = priv->gpu[i]; in etnaviv_postclose() local
115 if (gpu) in etnaviv_postclose()
151 static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m) in etnaviv_mmu_show() argument
156 seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev)); in etnaviv_mmu_show()
163 mutex_lock(&gpu->lock); in etnaviv_mmu_show()
[all …]
etnaviv_iommu_v2.c
165 static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu, in etnaviv_iommuv2_restore_nonsec() argument
172 if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE) in etnaviv_iommuv2_restore_nonsec()
175 if (gpu->mmu_context) in etnaviv_iommuv2_restore_nonsec()
176 etnaviv_iommu_context_put(gpu->mmu_context); in etnaviv_iommuv2_restore_nonsec()
177 gpu->mmu_context = etnaviv_iommu_context_get(context); in etnaviv_iommuv2_restore_nonsec()
179 prefetch = etnaviv_buffer_config_mmuv2(gpu, in etnaviv_iommuv2_restore_nonsec()
182 etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer), in etnaviv_iommuv2_restore_nonsec()
184 etnaviv_gpu_wait_idle(gpu, 100); in etnaviv_iommuv2_restore_nonsec()
186 gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE); in etnaviv_iommuv2_restore_nonsec()
189 static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu, in etnaviv_iommuv2_restore_sec() argument
[all …]
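
Aside: both restore paths swap the active MMU context with get/put reference counting — drop the reference on the outgoing context, then take one on the incoming context. A self-contained sketch with a plain counter standing in for the kref behind etnaviv_iommu_context_get()/_put() (all names hypothetical):

#include <stdio.h>

struct mmu_context {
	int refcount;
};

static struct mmu_context *context_get(struct mmu_context *ctx)
{
	ctx->refcount++;
	return ctx;
}

static void context_put(struct mmu_context *ctx)
{
	ctx->refcount--;
}

/* Swap the active context the way both restore paths do. */
static void gpu_set_context(struct mmu_context **cur, struct mmu_context *next)
{
	if (*cur)
		context_put(*cur);
	*cur = context_get(next);
}

int main(void)
{
	struct mmu_context a = { 1 }, b = { 1 };
	struct mmu_context *cur = &a;

	gpu_set_context(&cur, &b);
	printf("a=%d b=%d\n", a.refcount, b.refcount); /* a=0 b=2 */
	return 0;
}
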
etnaviv_iommu.c
89 static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu, in etnaviv_iommuv1_restore() argument
95 if (gpu->mmu_context) in etnaviv_iommuv1_restore()
96 etnaviv_iommu_context_put(gpu->mmu_context); in etnaviv_iommuv1_restore()
97 gpu->mmu_context = etnaviv_iommu_context_get(context); in etnaviv_iommuv1_restore()
100 gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base); in etnaviv_iommuv1_restore()
101 gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base); in etnaviv_iommuv1_restore()
102 gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, context->global->memory_base); in etnaviv_iommuv1_restore()
103 gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, context->global->memory_base); in etnaviv_iommuv1_restore()
104 gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, context->global->memory_base); in etnaviv_iommuv1_restore()
109 gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable); in etnaviv_iommuv1_restore()
[all …]
/linux/drivers/gpu/drm/msm/
msm_gpu.c
25 static int enable_pwrrail(struct msm_gpu *gpu) in enable_pwrrail() argument
27 struct drm_device *dev = gpu->dev; in enable_pwrrail()
30 if (gpu->gpu_reg) { in enable_pwrrail()
31 ret = regulator_enable(gpu->gpu_reg); in enable_pwrrail()
38 if (gpu->gpu_cx) { in enable_pwrrail()
39 ret = regulator_enable(gpu->gpu_cx); in enable_pwrrail()
49 static int disable_pwrrail(struct msm_gpu *gpu) in disable_pwrrail() argument
51 if (gpu->gpu_cx) in disable_pwrrail()
52 regulator_disable(gpu->gpu_cx); in disable_pwrrail()
53 if (gpu->gpu_reg) in disable_pwrrail()
[all …]
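
Aside: enable_pwrrail()/disable_pwrrail() treat both regulators as optional (NULL-checked) and release them in the reverse of the acquisition order. A toy model of that shape, with flags standing in for the real struct regulator handles (gpu->gpu_reg, gpu->gpu_cx):

#include <stdio.h>

struct pwr {
	int have_reg, have_cx; /* either rail may be absent on a given SoC */
	int reg_on, cx_on;
};

static int enable_pwrrail(struct pwr *p)
{
	if (p->have_reg)
		p->reg_on = 1;
	if (p->have_cx)
		p->cx_on = 1;
	return 0;
}

static int disable_pwrrail(struct pwr *p)
{
	/* release in the reverse of the enable order */
	if (p->have_cx)
		p->cx_on = 0;
	if (p->have_reg)
		p->reg_on = 0;
	return 0;
}

int main(void)
{
	struct pwr p = { 1, 1, 0, 0 };

	enable_pwrrail(&p);
	printf("on: %d %d\n", p.reg_on, p.cx_on);
	disable_pwrrail(&p);
	printf("off: %d %d\n", p.reg_on, p.cx_on);
	return 0;
}
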
msm_gpu.h
49 int (*get_param)(struct msm_gpu *gpu, struct msm_context *ctx,
51 int (*set_param)(struct msm_gpu *gpu, struct msm_context *ctx,
53 int (*hw_init)(struct msm_gpu *gpu);
58 int (*ucode_load)(struct msm_gpu *gpu);
60 int (*pm_suspend)(struct msm_gpu *gpu);
61 int (*pm_resume)(struct msm_gpu *gpu);
62 void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
63 void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
65 struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
66 void (*recover)(struct msm_gpu *gpu);
[all …]
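
Aside: msm_gpu.h declares per-generation behavior as a table of function pointers that each backend (the a2xx–a6xx files below) fills in. A minimal, self-contained version of that ops-table dispatch (all names invented):

#include <stdio.h>

struct gpu;

struct gpu_funcs {
	int (*hw_init)(struct gpu *gpu);
	void (*recover)(struct gpu *gpu);
};

struct gpu {
	const struct gpu_funcs *funcs;
	const char *name;
};

static int toy_hw_init(struct gpu *gpu)
{
	printf("%s: hw_init\n", gpu->name);
	return 0;
}

static void toy_recover(struct gpu *gpu)
{
	printf("%s: recover\n", gpu->name);
}

/* One ops table per GPU generation; the core only sees the pointers. */
static const struct gpu_funcs toy_funcs = {
	.hw_init = toy_hw_init,
	.recover = toy_recover,
};

int main(void)
{
	struct gpu gpu = { &toy_funcs, "toy" };

	gpu.funcs->hw_init(&gpu);
	gpu.funcs->recover(&gpu);
	return 0;
}
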
msm_ringbuffer.c
18 struct msm_gpu *gpu = submit->gpu; in msm_job_run() local
19 struct msm_drm_private *priv = gpu->dev->dev_private; in msm_job_run()
38 mutex_lock(&gpu->lock); in msm_job_run()
43 msm_gpu_submit(gpu, submit); in msm_job_run()
47 mutex_unlock(&gpu->lock); in msm_job_run()
65 struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, in msm_ringbuffer_new() argument
73 .dev = gpu->dev->dev, in msm_ringbuffer_new()
88 ring->gpu = gpu; in msm_ringbuffer_new()
91 ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ, in msm_ringbuffer_new()
92 check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY), in msm_ringbuffer_new()
[all …]
msm_debugfs.c
37 struct msm_gpu *gpu = priv->gpu; in msm_gpu_show() local
40 ret = mutex_lock_interruptible(&gpu->lock); in msm_gpu_show()
44 drm_printf(&p, "%s Status:\n", gpu->name); in msm_gpu_show()
45 gpu->funcs->show(gpu, show_priv->state, &p); in msm_gpu_show()
47 mutex_unlock(&gpu->lock); in msm_gpu_show()
57 struct msm_gpu *gpu = priv->gpu; in msm_gpu_release() local
59 mutex_lock(&gpu->lock); in msm_gpu_release()
60 gpu->funcs->gpu_state_put(show_priv->state); in msm_gpu_release()
61 mutex_unlock(&gpu->lock); in msm_gpu_release()
72 struct msm_gpu *gpu = priv->gpu; in msm_gpu_open() local
[all …]
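
Aside: the debugfs show path takes the GPU lock with mutex_lock_interruptible() and returns early if interrupted, rather than blocking the reader uninterruptibly. A sketch of that early-return shape, with a stub standing in for the kernel primitive:

#include <stdio.h>

/* Stub for mutex_lock_interruptible(): 0 on success, negative if the
 * caller would have been interrupted while sleeping. */
static int lock_interruptible(int *locked)
{
	if (*locked)
		return -4; /* -EINTR in spirit */
	*locked = 1;
	return 0;
}

static void unlock(int *locked)
{
	*locked = 0;
}

static int gpu_show(int *gpu_lock, const char *name)
{
	int ret = lock_interruptible(gpu_lock);

	if (ret)
		return ret; /* bail out instead of blocking the reader */
	printf("%s Status:\n", name);
	unlock(gpu_lock);
	return 0;
}

int main(void)
{
	int lock = 0;

	return gpu_show(&lock, "toy");
}
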
/linux/drivers/gpu/drm/msm/adreno/
a4xx_gpu.c
22 static void a4xx_dump(struct msm_gpu *gpu);
23 static bool a4xx_idle(struct msm_gpu *gpu);
25 static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a4xx_submit() argument
69 adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR); in a4xx_submit()
76 static void a4xx_enable_hwcg(struct msm_gpu *gpu) in a4xx_enable_hwcg() argument
78 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a4xx_enable_hwcg()
81 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202); in a4xx_enable_hwcg()
83 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222); in a4xx_enable_hwcg()
85 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TP(i), 0x0E739CE7); in a4xx_enable_hwcg()
87 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TP(i), 0x00111111); in a4xx_enable_hwcg()
[all …]
adreno_gpu.h
82 int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
287 static inline uint8_t adreno_patchid(const struct adreno_gpu *gpu) in adreno_patchid() argument
293 WARN_ON_ONCE(gpu->info->family >= ADRENO_6XX_GEN1); in adreno_patchid()
294 return gpu->chip_id & 0xff; in adreno_patchid()
297 static inline bool adreno_is_revn(const struct adreno_gpu *gpu, uint32_t revn) in adreno_is_revn() argument
299 if (WARN_ON_ONCE(!gpu->info)) in adreno_is_revn()
301 return gpu->info->revn == revn; in adreno_is_revn()
304 static inline bool adreno_has_gmu_wrapper(const struct adreno_gpu *gpu) in adreno_has_gmu_wrapper() argument
306 return gpu->gmu_is_wrapper; in adreno_has_gmu_wrapper()
309 static inline bool adreno_is_a2xx(const struct adreno_gpu *gpu) in adreno_is_a2xx() argument
[all …]
a3xx_gpu.c
28 static void a3xx_dump(struct msm_gpu *gpu);
29 static bool a3xx_idle(struct msm_gpu *gpu);
31 static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a3xx_submit() argument
82 adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR); in a3xx_submit()
85 static bool a3xx_me_init(struct msm_gpu *gpu) in a3xx_me_init() argument
87 struct msm_ringbuffer *ring = gpu->rb[0]; in a3xx_me_init()
108 adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR); in a3xx_me_init()
109 return a3xx_idle(gpu); in a3xx_me_init()
112 static int a3xx_hw_init(struct msm_gpu *gpu) in a3xx_hw_init() argument
114 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a3xx_hw_init()
[all …]
a5xx_gpu.c
17 static void a5xx_dump(struct msm_gpu *gpu);
21 static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in update_shadow_rptr() argument
23 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in update_shadow_rptr()
33 void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, in a5xx_flush() argument
36 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a5xx_flush()
46 update_shadow_rptr(gpu, ring); in a5xx_flush()
63 gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr); in a5xx_flush()
66 static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a5xx_submit_in_rb() argument
68 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a5xx_submit_in_rb()
115 a5xx_flush(gpu, ring, true); in a5xx_submit_in_rb()
[all …]
a6xx_gpu.c
32 static bool fence_status_check(struct msm_gpu *gpu, u32 offset, u32 value, u32 status, u32 mask) in fence_status_check() argument
41 gpu_write(gpu, offset, value); in fence_status_check()
53 struct msm_gpu *gpu = &adreno_gpu->base; in fenced_write() local
57 gpu_write(gpu, offset, value); in fenced_write()
69 fence_status_check(gpu, offset, value, status, mask), 0, 1000)) in fenced_write()
73 gpu_write(gpu, offset, value); in fenced_write()
77 fence_status_check(gpu, offset, value, status, mask), 0, 1000)) { in fenced_write()
110 static inline bool _a6xx_check_idle(struct msm_gpu *gpu) in _a6xx_check_idle() argument
112 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in _a6xx_check_idle()
120 if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) & in _a6xx_check_idle()
[all …]
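
Aside: fenced_write() writes a register, polls a status bit for an ack with a timeout, and retries the write a bounded number of times before giving up. A toy reconstruction of that retry-and-poll loop (registers and ack semantics are invented for the sketch):

#include <stdint.h>
#include <stdio.h>

/* Toy register file; the ack is modeled by the status bit dropping to 0
 * right after the write. */
static uint32_t regs[16];

static int status_clear(uint32_t status_reg, uint32_t mask)
{
	return (regs[status_reg] & mask) == 0;
}

/* Write, poll for the ack with a bounded spin, retry once on timeout. */
static int fenced_write(uint32_t reg, uint32_t value,
			uint32_t status_reg, uint32_t mask)
{
	int attempt, i;

	for (attempt = 0; attempt < 2; attempt++) {
		regs[reg] = value;
		regs[status_reg] &= ~mask; /* toy hardware acks instantly */
		for (i = 0; i < 1000; i++) {
			if (status_clear(status_reg, mask))
				return 0;
		}
	}
	return -1; /* both attempts timed out */
}

int main(void)
{
	regs[1] = 0xff;
	printf("%d\n", fenced_write(0, 0x1234, 1, 0x1)); /* 0 */
	return 0;
}
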
a2xx_gpu.c
10 static void a2xx_dump(struct msm_gpu *gpu);
11 static bool a2xx_idle(struct msm_gpu *gpu);
13 static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a2xx_submit() argument
51 adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR); in a2xx_submit()
54 static bool a2xx_me_init(struct msm_gpu *gpu) in a2xx_me_init() argument
56 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a2xx_me_init()
58 struct msm_ringbuffer *ring = gpu->rb[0]; in a2xx_me_init()
107 adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR); in a2xx_me_init()
108 return a2xx_idle(gpu); in a2xx_me_init()
111 static int a2xx_hw_init(struct msm_gpu *gpu) in a2xx_hw_init() argument
[all …]
a5xx_preempt.c
25 static inline void set_preempt_state(struct a5xx_gpu *gpu, in set_preempt_state() argument
34 atomic_set(&gpu->preempt_state, new); in set_preempt_state()
40 static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in update_wptr() argument
52 gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr); in update_wptr()
56 static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu) in get_next_ring() argument
58 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in get_next_ring()
63 for (i = 0; i < gpu->nr_rings; i++) { in get_next_ring()
65 struct msm_ringbuffer *ring = gpu->rb[i]; in get_next_ring()
68 empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring)); in get_next_ring()
84 struct msm_gpu *gpu = &a5xx_gpu->base.base; in a5xx_preempt_timer() local
[all …]
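
Aside: get_next_ring() scans the rings in priority order and picks the first one whose write pointer is ahead of its read pointer, i.e. the highest-priority ring with pending work. The same selection in isolation:

#include <stdint.h>
#include <stdio.h>

struct ring {
	uint32_t wptr, rptr;
};

/* Rings are ordered highest priority first; pick the first non-empty one. */
static struct ring *get_next_ring(struct ring *rings, int nr_rings)
{
	int i;

	for (i = 0; i < nr_rings; i++) {
		if (rings[i].wptr != rings[i].rptr)
			return &rings[i];
	}
	return NULL; /* everything idle */
}

int main(void)
{
	struct ring rings[3] = { { 4, 4 }, { 8, 2 }, { 9, 9 } };
	struct ring *next = get_next_ring(rings, 3);

	printf("ring %d\n", (int)(next - rings)); /* ring 1 */
	return 0;
}
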
a6xx_preempt.c
29 static inline void set_preempt_state(struct a6xx_gpu *gpu, in set_preempt_state() argument
38 atomic_set(&gpu->preempt_state, new); in set_preempt_state()
63 static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu) in get_next_ring() argument
65 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in get_next_ring()
71 for (i = 0; i < gpu->nr_rings; i++) { in get_next_ring()
73 struct msm_ringbuffer *ring = gpu->rb[i]; in get_next_ring()
76 empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring)); in get_next_ring()
92 struct msm_gpu *gpu = &a6xx_gpu->base.base; in a6xx_preempt_timer() local
93 struct drm_device *dev = gpu->dev; in a6xx_preempt_timer()
98 dev_err(dev->dev, "%s: preemption timed out\n", gpu->name); in a6xx_preempt_timer()
[all …]
adreno_device.c
72 struct msm_gpu *gpu = NULL; in adreno_load_gpu() local
77 gpu = dev_to_gpu(&pdev->dev); in adreno_load_gpu()
79 if (!gpu) { in adreno_load_gpu()
84 adreno_gpu = to_adreno_gpu(gpu); in adreno_load_gpu()
96 if (gpu->funcs->ucode_load) { in adreno_load_gpu()
97 ret = gpu->funcs->ucode_load(gpu); in adreno_load_gpu()
115 mutex_lock(&gpu->lock); in adreno_load_gpu()
116 ret = msm_gpu_hw_init(gpu); in adreno_load_gpu()
117 mutex_unlock(&gpu->lock); in adreno_load_gpu()
126 if (gpu->funcs->debugfs_init) { in adreno_load_gpu()
[all …]
a6xx_gpu_state.c
131 static int a6xx_crashdumper_init(struct msm_gpu *gpu, in a6xx_crashdumper_init() argument
134 dumper->ptr = msm_gem_kernel_new(gpu->dev, in a6xx_crashdumper_init()
135 SZ_1M, MSM_BO_WC, gpu->vm, in a6xx_crashdumper_init()
144 static int a6xx_crashdumper_run(struct msm_gpu *gpu, in a6xx_crashdumper_run() argument
147 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_crashdumper_run()
161 gpu_write64(gpu, REG_A6XX_CP_CRASH_DUMP_SCRIPT_BASE, dumper->iova); in a6xx_crashdumper_run()
163 gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1); in a6xx_crashdumper_run()
165 ret = gpu_poll_timeout(gpu, REG_A6XX_CP_CRASH_DUMP_STATUS, val, in a6xx_crashdumper_run()
168 gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 0); in a6xx_crashdumper_run()
174 static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset, in debugbus_read() argument
[all …]
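
Aside: gpu_write64() at line 161 programs a 64-bit iova through 32-bit MMIO. A sketch assuming the common lo/hi register-pair layout; the adjacency is an assumption of this sketch, not a statement about the actual a6xx register spacing:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[16];

static void gpu_write(uint32_t reg, uint32_t data)
{
	regs[reg] = data;
}

/* Low word first, high word in the adjacent register (assumed layout). */
static void gpu_write64(uint32_t reg_lo, uint64_t value)
{
	gpu_write(reg_lo, (uint32_t)value);
	gpu_write(reg_lo + 1, (uint32_t)(value >> 32));
}

int main(void)
{
	gpu_write64(2, 0x12345678abcdef00ull);
	printf("lo=0x%x hi=0x%x\n", regs[2], regs[3]);
	return 0;
}
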
a6xx_gpu.h
235 static inline bool a6xx_has_gbif(struct adreno_gpu *gpu) in a6xx_has_gbif() argument
237 if(adreno_is_a630(gpu)) in a6xx_has_gbif()
261 int a6xx_gmu_resume(struct a6xx_gpu *gpu);
262 int a6xx_gmu_stop(struct a6xx_gpu *gpu);
274 void a6xx_gmu_sysprof_setup(struct msm_gpu *gpu);
276 void a6xx_preempt_init(struct msm_gpu *gpu);
277 void a6xx_preempt_hw_init(struct msm_gpu *gpu);
278 void a6xx_preempt_trigger(struct msm_gpu *gpu);
279 void a6xx_preempt_irq(struct msm_gpu *gpu);
280 void a6xx_preempt_fini(struct msm_gpu *gpu);
[all …]
a5xx_gpu.h
54 void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
138 int a5xx_power_init(struct msm_gpu *gpu);
139 void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
141 static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs, in spin_usecs() argument
146 if ((gpu_read(gpu, reg) & mask) == value) in spin_usecs()
157 bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
158 void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
160 void a5xx_preempt_init(struct msm_gpu *gpu);
161 void a5xx_preempt_hw_init(struct msm_gpu *gpu);
162 void a5xx_preempt_trigger(struct msm_gpu *gpu);
[all …]
/linux/drivers/gpu/drm/amd/amdkfd/
kfd_topology.c
108 return top_dev->gpu; in kfd_device_by_id()
261 if (iolink->gpu && kfd_devcgroup_check_permission(iolink->gpu)) in iolink_show()
303 if (mem->gpu && kfd_devcgroup_check_permission(mem->gpu)) in mem_show()
335 if (cache->gpu && kfd_devcgroup_check_permission(cache->gpu)) in kfd_cache_show()
417 if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu)) in node_show()
426 if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu)) in node_show()
433 if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu)) in node_show()
438 dev->gpu ? dev->node_props.simd_count : 0); in node_show()
462 dev->gpu ? (dev->node_props.array_count * in node_show()
463 NUM_XCC(dev->gpu->xcc_mask)) : 0); in node_show()
[all …]
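
Aside: every sysfs show method here gates its output on a device-cgroup permission check, and CPU-only topology nodes (dev->gpu == NULL) report zero for GPU properties. A compact model of both behaviors (the types and the permission flag are stand-ins):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct gpu_dev {
	uint32_t simd_count;
	bool allowed; /* plays the role of the device-cgroup check */
};

struct node {
	struct gpu_dev *gpu; /* NULL marks a CPU-only topology node */
};

static int node_show_simd_count(const struct node *dev, uint32_t *out)
{
	if (dev->gpu && !dev->gpu->allowed)
		return -1; /* permission denied */
	*out = dev->gpu ? dev->gpu->simd_count : 0; /* CPU-only nodes: 0 */
	return 0;
}

int main(void)
{
	struct gpu_dev g = { 256, true };
	struct node gpu_node = { &g }, cpu_node = { NULL };
	uint32_t v;

	if (!node_show_simd_count(&gpu_node, &v))
		printf("gpu node: %u\n", v);
	if (!node_show_simd_count(&cpu_node, &v))
		printf("cpu node: %u\n", v);
	return 0;
}
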
/linux/drivers/gpu/drm/
Kconfig
31 source "drivers/gpu/drm/Kconfig.debug"
172 source "drivers/gpu/drm/clients/Kconfig"
186 source "drivers/gpu/drm/display/Kconfig"
281 source "drivers/gpu/drm/adp/Kconfig"
282 source "drivers/gpu/drm/amd/amdgpu/Kconfig"
283 source "drivers/gpu/drm/arm/Kconfig"
284 source "drivers/gpu/drm/armada/Kconfig"
285 source "drivers/gpu/drm/aspeed/Kconfig"
286 source "drivers/gpu/drm/ast/Kconfig"
287 source "drivers/gpu/drm/atmel-hlcdc/Kconfig"
[all …]
/linux/Documentation/gpu/
vc4.rst
5 .. kernel-doc:: drivers/gpu/drm/vc4/vc4_drv.c
18 .. kernel-doc:: drivers/gpu/drm/vc4/vc4_crtc.c
24 .. kernel-doc:: drivers/gpu/drm/vc4/vc4_hvs.c
30 .. kernel-doc:: drivers/gpu/drm/vc4/vc4_plane.c
36 .. kernel-doc:: drivers/gpu/drm/vc4/vc4_hdmi.c
42 .. kernel-doc:: drivers/gpu/drm/vc4/vc4_dsi.c
48 .. kernel-doc:: drivers/gpu/drm/vc4/vc4_dpi.c
54 .. kernel-doc:: drivers/gpu/drm/vc4/vc4_vec.c
69 --kunitconfig=drivers/gpu/drm/vc4/tests/.kunitconfig \
84 .. kernel-doc:: drivers/gpu/drm/vc4/vc4_bo.c
[all …]
