/linux/drivers/gpu/drm/msm/adreno/
a8xx_gpu.c
    19: static void a8xx_aperture_slice_set(struct msm_gpu *gpu, enum adreno_pipe pipe, u32 slice)
    21:         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    30:         gpu_write(gpu, REG_A8XX_CP_APERTURE_CNTL_HOST, val);
    35: static void a8xx_aperture_acquire(struct msm_gpu *gpu, enum adreno_pipe pipe, unsigned long *flags)
    37:         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    42:         a8xx_aperture_slice_set(gpu, pipe, 0);
    45: static void a8xx_aperture_release(struct msm_gpu *gpu, unsigned long flags)
    47:         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    53: static void a8xx_aperture_clear(struct msm_gpu *gpu)
    57:         a8xx_aperture_acquire(gpu, PIPE_NONE, &flags);
    [all …]
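The hits above bracket CP aperture programming between an acquire and a release. A minimal sketch of that usage pattern, assuming the acquire takes a lock and selects the pipe while the release restores state; only the function names and REG_A8XX_CP_APERTURE_CNTL_HOST come from the excerpt, and the caller below is hypothetical:

    /* Hypothetical caller showing the acquire/program/release bracket. */
    static void example_aperture_user(struct msm_gpu *gpu, enum adreno_pipe pipe)
    {
            unsigned long flags;

            a8xx_aperture_acquire(gpu, pipe, &flags);  /* lock + select pipe, slice 0 */
            /* ... program registers behind the aperture ... */
            a8xx_aperture_release(gpu, flags);         /* restore aperture, unlock */
    }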
|
a4xx_gpu.c
    22: static void a4xx_dump(struct msm_gpu *gpu);
    23: static bool a4xx_idle(struct msm_gpu *gpu);
    25: static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
    69:         adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);
    76: static void a4xx_enable_hwcg(struct msm_gpu *gpu)
    78:         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    81:         gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202);
    83:         gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222);
    85:         gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TP(i), 0x0E739CE7);
    87:         gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TP(i), 0x00111111);
    [all …]
|
adreno_gpu.h
    82: int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
   287: static inline uint8_t adreno_patchid(const struct adreno_gpu *gpu)
   293:         WARN_ON_ONCE(gpu->info->family >= ADRENO_6XX_GEN1);
   294:         return gpu->chip_id & 0xff;
   297: static inline bool adreno_is_revn(const struct adreno_gpu *gpu, uint32_t revn)
   299:         if (WARN_ON_ONCE(!gpu->info))
   301:         return gpu->info->revn == revn;
   304: static inline bool adreno_has_gmu_wrapper(const struct adreno_gpu *gpu)
   306:         return gpu->gmu_is_wrapper;
   309: static inline bool adreno_is_a2xx(const struct adreno_gpu *gpu)
   [all …]
|
a3xx_gpu.c
    28: static void a3xx_dump(struct msm_gpu *gpu);
    29: static bool a3xx_idle(struct msm_gpu *gpu);
    31: static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
    82:         adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
    85: static bool a3xx_me_init(struct msm_gpu *gpu)
    87:         struct msm_ringbuffer *ring = gpu->rb[0];
   108:         adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
   109:         return a3xx_idle(gpu);
   112: static int a3xx_hw_init(struct msm_gpu *gpu)
   114:         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
   [all …]
|
a5xx_gpu.c
    17: static void a5xx_dump(struct msm_gpu *gpu);
    21: static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
    23:         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    33: void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
    36:         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    46:         update_shadow_rptr(gpu, ring);
    63:         gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
    66: static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
    68:         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
   115:         a5xx_flush(gpu, ring, true);
   [all …]
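a5xx_flush() above pairs a shadow read-pointer update with the write-pointer doorbell. A condensed sketch of that sequence, assuming get_wptr() is a helper that computes the ring's current write pointer (it appears in the a5xx_preempt.c hits below; the rest is from this excerpt):

    /* Sketch: refresh shadow-rptr bookkeeping, then ring the doorbell. */
    static void example_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
    {
            uint32_t wptr;

            update_shadow_rptr(gpu, ring);             /* from the excerpt */
            wptr = get_wptr(ring);                     /* assumed helper */
            gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr); /* doorbell write */
    }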
|
a6xx_gpu.c
    32: static bool fence_status_check(struct msm_gpu *gpu, u32 offset, u32 value, u32 status, u32 mask)
    41:         gpu_write(gpu, offset, value);
    53:         struct msm_gpu *gpu = &adreno_gpu->base;  (in fenced_write)
    57:         gpu_write(gpu, offset, value);
    69:         fence_status_check(gpu, offset, value, status, mask), 0, 1000))
    73:         gpu_write(gpu, offset, value);
    77:         fence_status_check(gpu, offset, value, status, mask), 0, 1000)) {
   110: static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
   112:         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
   120:         if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
   [all …]
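The fenced_write() hits suggest a write/verify/retry shape: write the register, poll fence_status_check() with a bounded timeout, and repeat the write once before failing. A standalone sketch of that shape, with a plain loop and udelay() standing in for the driver's polling macro (the interval and return codes are illustrative):

    static int example_fenced_write(struct msm_gpu *gpu, u32 offset, u32 value,
                                    u32 status, u32 mask)
    {
            int tries, i;

            for (tries = 0; tries < 2; tries++) {      /* initial write + one retry */
                    gpu_write(gpu, offset, value);
                    for (i = 0; i < 1000; i++) {
                            if (fence_status_check(gpu, offset, value, status, mask))
                                    return 0;
                            udelay(1);
                    }
            }
            return -ETIMEDOUT;
    }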
|
a2xx_gpu.c
    10: static void a2xx_dump(struct msm_gpu *gpu);
    11: static bool a2xx_idle(struct msm_gpu *gpu);
    13: static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
    51:         adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
    54: static bool a2xx_me_init(struct msm_gpu *gpu)
    56:         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    58:         struct msm_ringbuffer *ring = gpu->rb[0];
   104:         adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
   105:         return a2xx_idle(gpu);
   108: static int a2xx_hw_init(struct msm_gpu *gpu)
   [all …]
|
a5xx_preempt.c
    25: static inline void set_preempt_state(struct a5xx_gpu *gpu,
    34:         atomic_set(&gpu->preempt_state, new);
    40: static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
    52:         gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
    56: static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
    58:         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    63:         for (i = 0; i < gpu->nr_rings; i++) {
    65:                 struct msm_ringbuffer *ring = gpu->rb[i];
    68:                 empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
    84:         struct msm_gpu *gpu = &a5xx_gpu->base.base;  (in a5xx_preempt_timer)
    [all …]
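get_next_ring() above scans the rings and tests emptiness by comparing each ring's write pointer against the GPU's read pointer. A sketch of that selection loop, assuming rings are ordered highest-priority first and with all ring locking omitted for brevity:

    static struct msm_ringbuffer *example_next_ring(struct msm_gpu *gpu)
    {
            int i;

            for (i = 0; i < gpu->nr_rings; i++) {
                    struct msm_ringbuffer *ring = gpu->rb[i];
                    bool empty = (get_wptr(ring) ==
                                  gpu->funcs->get_rptr(gpu, ring));

                    if (!empty)
                            return ring;   /* first ring with queued work */
            }
            return NULL;                   /* nothing pending anywhere */
    }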
|
a6xx_preempt.c
    29: static inline void set_preempt_state(struct a6xx_gpu *gpu,
    38:         atomic_set(&gpu->preempt_state, new);
    63: static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
    65:         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    71:         for (i = 0; i < gpu->nr_rings; i++) {
    73:                 struct msm_ringbuffer *ring = gpu->rb[i];
    76:                 empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
    92:         struct msm_gpu *gpu = &a6xx_gpu->base.base;  (in a6xx_preempt_timer)
    93:         struct drm_device *dev = gpu->dev;
    98:         dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
    [all …]
|
adreno_device.c
    72:         struct msm_gpu *gpu = NULL;  (in adreno_load_gpu)
    77:         gpu = dev_to_gpu(&pdev->dev);
    79:         if (!gpu) {
    84:         adreno_gpu = to_adreno_gpu(gpu);
    96:         if (gpu->funcs->ucode_load) {
    97:                 ret = gpu->funcs->ucode_load(gpu);
   115:         mutex_lock(&gpu->lock);
   116:         ret = msm_gpu_hw_init(gpu);
   117:         mutex_unlock(&gpu->lock);
   126:         if (gpu->funcs->debugfs_init) {
   [all …]
|
adreno_gpu.c
    30: static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
    33:         struct device *dev = &gpu->pdev->dev;
    78:         ret = request_firmware_direct(&fw, fwname, gpu->dev->dev);
    83:         fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
   133:         if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) {
   169: int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
   171:         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
   172:         struct platform_device *pdev = gpu->pdev;
   184:         return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
   188: adreno_create_vm(struct msm_gpu *gpu,
   [all …]
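zap_shader_load_mdt() above tries request_firmware_direct() first and falls back to the driver's own lookup helper. A sketch of that two-step load (the example_* wrapper is hypothetical; both firmware calls appear in the excerpt, and error handling is trimmed):

    static const struct firmware *example_load_zap_fw(struct msm_gpu *gpu,
                                                      const char *fwname)
    {
            const struct firmware *fw;

            /* try the exact name first, skipping the udev fallback path */
            if (request_firmware_direct(&fw, fwname, gpu->dev->dev) == 0)
                    return fw;

            /* fall back to the adreno search-path helper */
            return adreno_request_fw(to_adreno_gpu(gpu), fwname);
    }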
|
a6xx_gpu_state.c
   131: static int a6xx_crashdumper_init(struct msm_gpu *gpu,
   134:         dumper->ptr = msm_gem_kernel_new(gpu->dev,
   135:                 SZ_1M, MSM_BO_WC, gpu->vm,
   144: static int a6xx_crashdumper_run(struct msm_gpu *gpu,
   147:         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
   161:         gpu_write64(gpu, REG_A6XX_CP_CRASH_DUMP_SCRIPT_BASE, dumper->iova);
   163:         gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);
   165:         ret = gpu_poll_timeout(gpu, REG_A6XX_CP_CRASH_DUMP_STATUS, val,
   168:         gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 0);
   174: static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
   [all …]
|
a6xx_gpu.h
   235: static inline bool a6xx_has_gbif(struct adreno_gpu *gpu)
   237:         if (adreno_is_a630(gpu))
   261: int a6xx_gmu_resume(struct a6xx_gpu *gpu);
   262: int a6xx_gmu_stop(struct a6xx_gpu *gpu);
   274: void a6xx_gmu_sysprof_setup(struct msm_gpu *gpu);
   276: void a6xx_preempt_init(struct msm_gpu *gpu);
   277: void a6xx_preempt_hw_init(struct msm_gpu *gpu);
   278: void a6xx_preempt_trigger(struct msm_gpu *gpu);
   279: void a6xx_preempt_irq(struct msm_gpu *gpu);
   280: void a6xx_preempt_fini(struct msm_gpu *gpu);
   [all …]
|
a5xx_gpu.h
    54: void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
   138: int a5xx_power_init(struct msm_gpu *gpu);
   139: void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
   141: static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
   146:         if ((gpu_read(gpu, reg) & mask) == value)
   157: bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
   158: void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
   160: void a5xx_preempt_init(struct msm_gpu *gpu);
   161: void a5xx_preempt_hw_init(struct msm_gpu *gpu);
   162: void a5xx_preempt_trigger(struct msm_gpu *gpu);
   [all …]
|
/linux/drivers/gpu/drm/msm/
msm_gpu.c
    25: static int enable_pwrrail(struct msm_gpu *gpu)
    27:         struct drm_device *dev = gpu->dev;
    30:         if (gpu->gpu_reg) {
    31:                 ret = regulator_enable(gpu->gpu_reg);
    38:         if (gpu->gpu_cx) {
    39:                 ret = regulator_enable(gpu->gpu_cx);
    49: static int disable_pwrrail(struct msm_gpu *gpu)
    51:         if (gpu->gpu_cx)
    52:                 regulator_disable(gpu->gpu_cx);
    53:         if (gpu->gpu_reg)
    [all …]
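enable_pwrrail()/disable_pwrrail() show the usual regulator bracket: the main GPU rail (gpu_reg) comes up before the CX rail (gpu_cx), and teardown runs in reverse. A self-contained sketch of the enable side, with error unwinding added for illustration (the excerpt does not show how failures are handled):

    static int example_enable_pwrrail(struct msm_gpu *gpu)
    {
            int ret;

            if (gpu->gpu_reg) {
                    ret = regulator_enable(gpu->gpu_reg);
                    if (ret)
                            return ret;
            }

            if (gpu->gpu_cx) {
                    ret = regulator_enable(gpu->gpu_cx);
                    if (ret) {
                            /* assumed unwind: drop the first rail again */
                            if (gpu->gpu_reg)
                                    regulator_disable(gpu->gpu_reg);
                            return ret;
                    }
            }

            return 0;
    }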
|
msm_gpu.h
    49: int (*get_param)(struct msm_gpu *gpu, struct msm_context *ctx,
    51: int (*set_param)(struct msm_gpu *gpu, struct msm_context *ctx,
    53: int (*hw_init)(struct msm_gpu *gpu);
    58: int (*ucode_load)(struct msm_gpu *gpu);
    60: int (*pm_suspend)(struct msm_gpu *gpu);
    61: int (*pm_resume)(struct msm_gpu *gpu);
    62: void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
    63: void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
    65: struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
    66: void (*recover)(struct msm_gpu *gpu);
    [all …]
|
msm_debugfs.c
    37:         struct msm_gpu *gpu = priv->gpu;  (in msm_gpu_show)
    40:         ret = mutex_lock_interruptible(&gpu->lock);
    44:         drm_printf(&p, "%s Status:\n", gpu->name);
    45:         gpu->funcs->show(gpu, show_priv->state, &p);
    47:         mutex_unlock(&gpu->lock);
    57:         struct msm_gpu *gpu = priv->gpu;  (in msm_gpu_release)
    59:         mutex_lock(&gpu->lock);
    60:         gpu->funcs->gpu_state_put(show_priv->state);
    61:         mutex_unlock(&gpu->lock);
    72:         struct msm_gpu *gpu = priv->gpu;  (in msm_gpu_open)
    [all …]
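msm_gpu_show() takes the GPU lock with mutex_lock_interruptible(), so a reader blocked behind a wedged GPU can still be interrupted by a signal. A sketch of that read path (the show_priv indirection from the excerpt is flattened into a state argument here; the state type is assumed from the gpu_state_put() hit):

    static int example_gpu_show(struct msm_gpu *gpu,
                                struct msm_gpu_state *state,
                                struct drm_printer *p)
    {
            int ret;

            ret = mutex_lock_interruptible(&gpu->lock);
            if (ret)
                    return ret;    /* e.g. -EINTR if the reader was signalled */

            drm_printf(p, "%s Status:\n", gpu->name);
            gpu->funcs->show(gpu, state, p);

            mutex_unlock(&gpu->lock);
            return 0;
    }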
|
/linux/drivers/gpu/drm/etnaviv/
etnaviv_sched.c
    29:         dev_dbg(submit->gpu->dev, "skipping bad job\n");  (in etnaviv_sched_run_job)
    38:         struct etnaviv_gpu *gpu = submit->gpu;  (in etnaviv_sched_timedout_job)
    54:         dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
    55:         change = dma_addr - gpu->hangcheck_dma_addr;
    58:         mutex_lock(&gpu->lock);
    59:         gpu_write(gpu, VIVS_MC_PROFILE_CONFIG0,
    62:         primid = gpu_read(gpu, VIVS_MC_PROFILE_FE_READ);
    63:         mutex_unlock(&gpu->lock);
    65:         if (gpu->state == ETNA_GPU_STATE_RUNNING &&
    66:             (gpu->completed_fence != gpu->hangcheck_fence ||
    [all …]
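The timedout-job handler above distinguishes "still running" from "hung" by sampling the front-end DMA address and comparing fences against values saved at the previous check. A sketch of that progress test (field and register names are from the excerpt; the bookkeeping update is an assumption):

    static bool example_gpu_made_progress(struct etnaviv_gpu *gpu)
    {
            u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);

            if (dma_addr != gpu->hangcheck_dma_addr ||
                gpu->completed_fence != gpu->hangcheck_fence) {
                    /* remember where we got to for the next check */
                    gpu->hangcheck_dma_addr = dma_addr;
                    gpu->hangcheck_fence = gpu->completed_fence;
                    return true;   /* forward progress: not a hang */
            }

            return false;          /* nothing moved: treat as hung */
    }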
|
etnaviv_buffer.c
    91: static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
    96:         lockdep_assert_held(&gpu->lock);
   104:         if (gpu->exec_state == ETNA_PIPE_2D)
   106:         else if (gpu->exec_state == ETNA_PIPE_3D)
   117: static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
   123:         dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
   125:                 &gpu->mmu_context->cmdbuf_mapping) +
   153: static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
   160:                 &gpu->mmu_context->cmdbuf_mapping) +
   164: u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
   [all …]
|
etnaviv_gpu.h
    90: void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
   170: static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
   172:         writel(data, gpu->mmio + reg);
   175: static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
   182:         readl(gpu->mmio + reg);
   184:         return readl(gpu->mmio + reg);
   187: static inline u32 gpu_fix_power_address(struct etnaviv_gpu *gpu, u32 reg)
   190:         if (gpu->identity.model == chipModel_GC300 &&
   191:             gpu->identity.revision < 0x2000)
   197: static inline void gpu_write_power(struct etnaviv_gpu *gpu, u32 reg, u32 data)
   [all …]
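gpu_fix_power_address() above remaps power-block registers on early GC300 silicon, and gpu_write_power() is presumably the write-side wrapper over it. A sketch of that composition using only the two helpers shown; whether the real gpu_write_power() is implemented exactly this way is an assumption:

    static inline void example_write_power(struct etnaviv_gpu *gpu, u32 reg,
                                           u32 data)
    {
            /* remap the offset on affected revisions, then a plain MMIO write */
            gpu_write(gpu, gpu_fix_power_address(gpu, reg), data);
    }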
|
etnaviv_iommu_v2.c
   165: static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
   172:         if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
   175:         if (gpu->mmu_context)
   176:                 etnaviv_iommu_context_put(gpu->mmu_context);
   177:         gpu->mmu_context = etnaviv_iommu_context_get(context);
   179:         prefetch = etnaviv_buffer_config_mmuv2(gpu,
   182:         etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
   184:         etnaviv_gpu_wait_idle(gpu, 100);
   186:         gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
   189: static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
   [all …]
|
etnaviv_iommu.c
    89: static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
    95:         if (gpu->mmu_context)
    96:                 etnaviv_iommu_context_put(gpu->mmu_context);
    97:         gpu->mmu_context = etnaviv_iommu_context_get(context);
   100:         gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
   101:         gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
   102:         gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, context->global->memory_base);
   103:         gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, context->global->memory_base);
   104:         gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, context->global->memory_base);
   109:         gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
   [all …]
|
/linux/drivers/gpu/drm/panthor/
panthor_gpu.c
    94:         spin_lock(&ptdev->gpu->reqs_lock);  (in panthor_gpu_irq_handler)
    95:         if (status & ptdev->gpu->pending_reqs) {
    96:                 ptdev->gpu->pending_reqs &= ~status;
    97:                 wake_up_all(&ptdev->gpu->reqs_acked);
    99:         spin_unlock(&ptdev->gpu->reqs_lock);
   101: PANTHOR_IRQ_HANDLER(gpu, GPU, panthor_gpu_irq_handler);
   113:         panthor_gpu_irq_suspend(&ptdev->gpu->irq);  (in panthor_gpu_unplug)
   116:         spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
   117:         ptdev->gpu->pending_reqs = 0;
   118:         wake_up_all(&ptdev->gpu->reqs_acked);
   [all …]
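The IRQ handler above clears acknowledged request bits out of pending_reqs under reqs_lock and wakes reqs_acked. The natural counterpart (sketched here; it is not part of the excerpt) waits on reqs_acked until the request bit drops out of the pending mask:

    static int example_wait_req_acked(struct panthor_device *ptdev,
                                      u32 req_mask, unsigned long timeout_ms)
    {
            long remaining;

            /* wait for the IRQ handler to clear our bit from pending_reqs */
            remaining = wait_event_timeout(ptdev->gpu->reqs_acked,
                            !(READ_ONCE(ptdev->gpu->pending_reqs) & req_mask),
                            msecs_to_jiffies(timeout_ms));

            return remaining ? 0 : -ETIMEDOUT;
    }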
|
/linux/drivers/gpu/drm/amd/amdkfd/
kfd_topology.c
   108:         return top_dev->gpu;  (in kfd_device_by_id)
   261:         if (iolink->gpu && kfd_devcgroup_check_permission(iolink->gpu))  (in iolink_show)
   303:         if (mem->gpu && kfd_devcgroup_check_permission(mem->gpu))  (in mem_show)
   335:         if (cache->gpu && kfd_devcgroup_check_permission(cache->gpu))  (in kfd_cache_show)
   417:         if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))  (in node_show)
   426:         if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
   433:         if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
   438:                 dev->gpu ? dev->node_props.simd_count : 0);
   462:                 dev->gpu ? (dev->node_props.array_count *
   463:                     NUM_XCC(dev->gpu->xcc_mask)) : 0);
   [all …]
|
/linux/drivers/gpu/drm/
Kconfig
    31: source "drivers/gpu/drm/Kconfig.debug"
   172: source "drivers/gpu/drm/clients/Kconfig"
   186: source "drivers/gpu/drm/display/Kconfig"
   268: source "drivers/gpu/drm/sysfb/Kconfig"
   270: source "drivers/gpu/drm/arm/Kconfig"
   272: source "drivers/gpu/drm/radeon/Kconfig"
   274: source "drivers/gpu/drm/amd/amdgpu/Kconfig"
   276: source "drivers/gpu/drm/nouveau/Kconfig"
   278: source "drivers/gpu/drm/nova/Kconfig"
   280: source "drivers/gpu/drm/i915/Kconfig"
   [all …]
|