Lines Matching full:gpu
48 int (*get_param)(struct msm_gpu *gpu, struct msm_context *ctx,
50 int (*set_param)(struct msm_gpu *gpu, struct msm_context *ctx,
52 int (*hw_init)(struct msm_gpu *gpu);
57 int (*ucode_load)(struct msm_gpu *gpu);
59 int (*pm_suspend)(struct msm_gpu *gpu);
60 int (*pm_resume)(struct msm_gpu *gpu);
61 void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
62 void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
64 struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
65 void (*recover)(struct msm_gpu *gpu);
66 void (*destroy)(struct msm_gpu *gpu);
68 /* show GPU status in debugfs: */
69 void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
72 void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
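As a hedged illustration of the show() hook above: the listing truncates its final parameter, so the struct drm_printer argument in this sketch is an assumption based on usual DRM practice, and example_show() is not a function from this header.

/* Sketch only: print a captured GPU state; the drm_printer parameter
 * is assumed, since it is elided from the matching lines above. */
static void example_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
			 struct drm_printer *p)
{
	if (!state)
		return;

	drm_printf(p, "status of %s:\n", gpu->name);
}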
75 u64 (*gpu_busy)(struct msm_gpu *gpu, unsigned long *out_sample_rate);
76 struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
78 unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
80 void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp,
82 struct drm_gpuvm *(*create_vm)(struct msm_gpu *gpu, struct platform_device *pdev);
83 struct drm_gpuvm *(*create_private_vm)(struct msm_gpu *gpu, bool kernel_managed);
84 uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
87 * progress: Has the GPU made progress?
89 * Return true if GPU position in cmdstream has advanced (or changed)
93 bool (*progress)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
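The progress() hook is optional; a minimal, hedged sketch of how a hangcheck path might consult it before declaring a hang (the helper below is illustrative and not part of this header):

/* Sketch only: ask the backend whether the GPU has advanced in the
 * cmdstream since the last check before treating the ring as hung. */
static bool ring_appears_hung(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	if (gpu->funcs->progress && gpu->funcs->progress(gpu, ring))
		return false;	/* still making progress, give it more time */

	return true;
}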
126 * Shadow frequency used while the GPU is idle. From the PoV of
128 * adjust frequency while the GPU is idle, but we use this shadow
129 * value as the GPU is actually clamped to minimum frequency while
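A rough, hedged sketch of the shadow-frequency idea in that comment (the type and helper below are hypothetical, not the driver's devfreq code): while idle the hardware is clamped to its minimum clock, but devfreq keeps seeing the last requested rate.

/* Hypothetical sketch of the shadow value: report it to devfreq while
 * the GPU is idle, so devfreq reasons about the frequency it asked for
 * rather than the clamped idle frequency. */
struct freq_shadow {
	unsigned long idle_freq;	/* nonzero while the GPU is idle */
};

static unsigned long reported_freq(struct freq_shadow *s, unsigned long hw_freq)
{
	return s->idle_freq ? s->idle_freq : hw_freq;
}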
206 * General lock for serializing all the gpu things.
226 /* does gpu need hw_init? */
230 * global_faults: number of GPU hangs not attributed to a particular
255 /* work for handling GPU recovery: */
308 static inline bool msm_gpu_active(struct msm_gpu *gpu)
312 for (i = 0; i < gpu->nr_rings; i++) {
313 struct msm_ringbuffer *ring = gpu->rb[i];
336 * The number of priority levels provided by drm gpu scheduler. The
379 * The per-process GPU address-space. Do not access directly, use
431 * The total (cumulative) elapsed time GPU was busy with rendering
439 * The total (cumulative) GPU cycles elapsed attributed to this
487 * @gpu: the gpu instance
490 * @sched_prio: [out] the gpu scheduler priority level which the userspace
509 static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
521 if (rn >= gpu->nr_rings)
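A hedged sketch of the arithmetic behind msm_gpu_convert_priority(): a flat userspace priority decomposes into a ring number and an in-ring scheduler level. The helper below is illustrative only; in particular, whether the in-ring level must be inverted to match drm_sched's numbering is left to the real function.

/* Sketch only: with 'levels' scheduler priorities per ring, split a
 * flat priority into (ring, level) and reject out-of-range rings. */
static int split_priority(unsigned prio, unsigned levels, unsigned nr_rings,
			  unsigned *ring, unsigned *level)
{
	*ring  = prio / levels;
	*level = prio % levels;

	return (*ring >= nr_rings) ? -EINVAL : 0;
}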
541 * @faults: the number of GPU hangs associated with this submitqueue
614 static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
616 writel(data, gpu->mmio + (reg << 2));
619 static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
621 return readl(gpu->mmio + (reg << 2));
624 static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
626 msm_rmw(gpu->mmio + (reg << 2), mask, or);
629 static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg)
635 * not quad word aligned and 2) the GPU hardware designers have a bit
637 * spins. The longer a GPU family goes the higher the chance that
647 val = (u64) readl(gpu->mmio + (reg << 2));
648 val |= ((u64) readl(gpu->mmio + ((reg + 1) << 2)) << 32);
653 static inline void gpu_write64(struct msm_gpu *gpu, u32 reg, u64 val)
656 writel(lower_32_bits(val), gpu->mmio + (reg << 2));
657 writel(upper_32_bits(val), gpu->mmio + ((reg + 1) << 2));
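A hedged usage sketch for the accessors above: reg is a dword register index, so each helper shifts it left by 2 to form a byte offset into gpu->mmio, and the 64-bit variants are simply two 32-bit accesses to adjacent registers. The REG_EXAMPLE_* indices below are invented for illustration.

/* Sketch only: the register indices are made up. */
static void example_reg_access(struct msm_gpu *gpu)
{
	u64 addr;

	gpu_write(gpu, REG_EXAMPLE_CTRL, 0x1);		/* byte offset = reg << 2 */

	/* read-modify-write: clear the masked bits, then OR in new ones */
	gpu_rmw(gpu, REG_EXAMPLE_CTRL, 0xf, 0x2);

	/* 64-bit value split across two consecutive 32-bit registers */
	addr = gpu_read64(gpu, REG_EXAMPLE_ADDR_LO);
	gpu_write64(gpu, REG_EXAMPLE_ADDR_LO, addr + SZ_4K);
}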
660 int msm_gpu_pm_suspend(struct msm_gpu *gpu);
661 int msm_gpu_pm_resume(struct msm_gpu *gpu);
663 void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_context *ctx,
679 int msm_context_set_sysprof(struct msm_context *ctx, struct msm_gpu *gpu, int sysprof);
694 void msm_devfreq_init(struct msm_gpu *gpu);
695 void msm_devfreq_cleanup(struct msm_gpu *gpu);
696 void msm_devfreq_resume(struct msm_gpu *gpu);
697 void msm_devfreq_suspend(struct msm_gpu *gpu);
698 void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor);
699 void msm_devfreq_active(struct msm_gpu *gpu);
700 void msm_devfreq_idle(struct msm_gpu *gpu);
702 int msm_gpu_hw_init(struct msm_gpu *gpu);
704 void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
705 void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
706 int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
709 void msm_gpu_retire(struct msm_gpu *gpu);
710 void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
713 struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
717 msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
720 void msm_gpu_cleanup(struct msm_gpu *gpu);
733 static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
737 mutex_lock(&gpu->lock);
739 if (gpu->crashstate) {
740 kref_get(&gpu->crashstate->ref);
741 state = gpu->crashstate;
744 mutex_unlock(&gpu->lock);
749 static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
751 mutex_lock(&gpu->lock);
753 if (gpu->crashstate) {
754 if (gpu->funcs->gpu_state_put(gpu->crashstate))
755 gpu->crashstate = NULL;
758 mutex_unlock(&gpu->lock);
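A hedged sketch of how the two helpers above pair up: take a reference to the cached crash state under gpu->lock, consume it, then drop the reference again (assuming the cached state is not replaced in between; the consumer function below is illustrative).

/* Sketch only: consume a captured crash state, if one exists. */
static void example_consume_crashstate(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state = msm_gpu_crashstate_get(gpu);

	if (!state)
		return;

	/* ... read whatever was captured in 'state' ... */

	msm_gpu_crashstate_put(gpu);
}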
761 void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_info *fault_info);
767 #define check_apriv(gpu, flags) \
768 (((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
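A hedged usage note for check_apriv(): kernel-owned buffers route their allocation flags through this macro so that, on hardware with the apriv feature, they are mapped privileged and hidden from user cmdstreams; on older GPUs the flags pass through unchanged. The snippet below only shows the flag computation; the allocation call it would feed is omitted.

/* Sketch only: compute flags for a kernel-internal, write-combined BO,
 * adding MSM_BO_MAP_PRIV when the hardware supports apriv. */
uint32_t flags = check_apriv(gpu, MSM_BO_WC);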