/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */

#ifndef _A6XX_GMU_H_
#define _A6XX_GMU_H_

#include <linux/completion.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/soc/qcom/qcom_aoss.h>
#include "msm_drv.h"
#include "a6xx_hfi.h"

struct a6xx_gmu_bo {
	struct drm_gem_object *obj;
	void *virt;
	size_t size;
	u64 iova;
};

#define GMU_MAX_GX_FREQS 16
#define GMU_MAX_CX_FREQS 4
#define GMU_MAX_BCMS 3

struct a6xx_bcm {
	char *name;
	unsigned int buswidth;
	bool fixed;
	unsigned int perfmode;
	unsigned int perfmode_bw;
};

/*
 * These define the different GMU wake up options - i.e. how both the CPU
 * and the GMU bring up the hardware
 */

/* The GMU has already been booted and the retention registers are active */
#define GMU_WARM_BOOT 0

/* The GMU is coming up for the first time or back from a power collapse */
#define GMU_COLD_BOOT 1

/*
 * These define the level of control that the GMU has - the higher the number
 * the more things that the GMU hardware controls on its own (see the
 * illustrative helper after struct a6xx_gmu below).
 */

/* The GMU does not do any idle state management */
#define GMU_IDLE_STATE_ACTIVE 0

/* The GMU manages SPTP power collapse */
#define GMU_IDLE_STATE_SPTP 2

/* The GMU does automatic IFPC (intra-frame power collapse) */
#define GMU_IDLE_STATE_IFPC 3

struct a6xx_gmu {
	struct device *dev;

	/* For serializing communication with the GMU: */
	struct mutex lock;

	struct msm_gem_address_space *aspace;

	void __iomem *mmio;
	void __iomem *rscc;

	int hfi_irq;
	int gmu_irq;

	struct device *gxpd;
	struct device *cxpd;

	int idle_level;

	struct a6xx_gmu_bo hfi;
	struct a6xx_gmu_bo debug;
	struct a6xx_gmu_bo icache;
	struct a6xx_gmu_bo dcache;
	struct a6xx_gmu_bo dummy;
	struct a6xx_gmu_bo log;

	int nr_clocks;
	struct clk_bulk_data *clocks;
	struct clk *core_clk;
	struct clk *hub_clk;

	/* current performance index set externally */
	int current_perf_index;

	int nr_gpu_freqs;
	unsigned long gpu_freqs[GMU_MAX_GX_FREQS];
	u32 gx_arc_votes[GMU_MAX_GX_FREQS];

	int nr_gpu_bws;
	unsigned long gpu_bw_table[GMU_MAX_GX_FREQS];
	u32 gpu_ib_votes[GMU_MAX_GX_FREQS][GMU_MAX_BCMS];

	int nr_gmu_freqs;
	unsigned long gmu_freqs[GMU_MAX_CX_FREQS];
	u32 cx_arc_votes[GMU_MAX_CX_FREQS];

	unsigned long freq;

	struct a6xx_hfi_queue queues[2];

	bool initialized;
	bool hung;
	bool legacy; /* a618 or a630 */

	/* For power domain callback */
	struct notifier_block pd_nb;
	struct completion pd_gate;

	struct qmp *qmp;
	struct a6xx_hfi_msg_bw_table *bw_table;
};
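
/*
 * Illustrative only (a sketch, not part of the driver API): how the idle
 * levels above are intended to be compared. Higher levels hand more power
 * management over to the GMU, so code that would manually toggle SPTP power
 * (via a6xx_sptprac_enable()/a6xx_sptprac_disable() below) only needs to do
 * so when the configured level is below GMU_IDLE_STATE_SPTP. The helper name
 * is hypothetical.
 */
static inline bool gmu_handles_sptp(const struct a6xx_gmu *gmu)
{
	/* At SPTP level and above the GMU powers SPTP up and down itself */
	return gmu->idle_level >= GMU_IDLE_STATE_SPTP;
}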

/* GMU register offsets are in dwords, hence the (offset << 2) byte scaling */
static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
{
	return readl(gmu->mmio + (offset << 2));
}

static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
{
	writel(value, gmu->mmio + (offset << 2));
}

static inline void
gmu_write_bulk(struct a6xx_gmu *gmu, u32 offset, const u32 *data, u32 size)
{
	memcpy_toio(gmu->mmio + (offset << 2), data, size);
	wmb();
}

static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
{
	u32 val = gmu_read(gmu, reg);

	val &= ~mask;

	gmu_write(gmu, reg, val | or);
}

static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
{
	u64 val;

	val = (u64) readl(gmu->mmio + (lo << 2));
	val |= ((u64) readl(gmu->mmio + (hi << 2)) << 32);

	return val;
}

/* Poll a GMU register until cond is true; interval and timeout are in usec */
#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
	readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
		interval, timeout)

static inline u32 gmu_read_rscc(struct a6xx_gmu *gmu, u32 offset)
{
	return readl(gmu->rscc + (offset << 2));
}

static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value)
{
	writel(value, gmu->rscc + (offset << 2));
}

#define gmu_poll_timeout_rscc(gmu, addr, val, cond, interval, timeout) \
	readl_poll_timeout((gmu)->rscc + ((addr) << 2), val, cond, \
		interval, timeout)

/*
 * These are the available OOB (out of band) requests to the GMU, where "out
 * of band" means that the CPU talks to the GMU directly and not through HFI.
 * Normally this works by writing an ITCM/DTCM register and then triggering an
 * interrupt (the "request" bit) and waiting for an acknowledgment (the "ack"
 * bit). The state is cleared by writing the "clear" bit to the GMU interrupt.
 *
 * These are used to force the GMU/GPU to stay on during a critical sequence or
 * for hardware workarounds.
 */

enum a6xx_gmu_oob_state {
	/*
	 * Let the GMU know that a boot or slumber operation has started. The value in
	 * REG_A6XX_GMU_BOOT_SLUMBER_OPTION lets the GMU know which operation we are
	 * doing
	 */
	GMU_OOB_BOOT_SLUMBER = 0,
	/*
	 * Let the GMU know to not turn off any GPU registers while the CPU is in a
	 * critical section
	 */
	GMU_OOB_GPU_SET,
	/*
	 * Set a new power level for the GPU when the CPU is doing frequency scaling
	 */
	GMU_OOB_DCVS_SET,
	/*
	 * Used to keep the GPU on for CPU-side reads of performance counters.
	 */
	GMU_OOB_PERFCOUNTER_SET,
};
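
/*
 * Illustrative only (a sketch, not the driver's implementation): the
 * request/ack handshake described above. Register names follow the a6xx GMU
 * register map and are assumed to come from a6xx.xml.h; the function name
 * and the bit-number parameters are hypothetical, since the real handshake
 * in a6xx_gmu.c derives the request/ack bit positions per OOB request and
 * per target generation.
 */
static inline int gmu_oob_example(struct a6xx_gmu *gmu, u32 request, u32 ack)
{
	u32 val;
	int ret;

	/* Ring the doorbell: raise the "request" bit toward the GMU */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

	/* Poll (every 100us, up to 10ms) for the GMU to raise the "ack" bit */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & (1 << ack), 100, 10000);

	/* Clear the ack so the next request starts from a clean state */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);

	return ret;
}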

void a6xx_hfi_init(struct a6xx_gmu *gmu);
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu);
int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, u32 perf_index, u32 bw_index);

bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu);
bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu);
void a6xx_sptprac_disable(struct a6xx_gmu *gmu);
int a6xx_sptprac_enable(struct a6xx_gmu *gmu);

#endif