// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */

#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>

#include "panthor_device.h"
#include "panthor_gpu.h"
#include "panthor_hw.h"
#include "panthor_regs.h"

/**
 * struct panthor_gpu - GPU block management data.
 */
struct panthor_gpu {
	/** @irq: GPU irq. */
	struct panthor_irq irq;

	/** @reqs_lock: Lock protecting access to pending_reqs. */
	spinlock_t reqs_lock;

	/** @pending_reqs: Pending GPU requests. */
	u32 pending_reqs;

	/** @reqs_acked: GPU request wait queue. */
	wait_queue_head_t reqs_acked;

	/** @cache_flush_lock: Lock to serialize cache flushes. */
	struct mutex cache_flush_lock;
};

#define GPU_INTERRUPTS_MASK \
	(GPU_IRQ_FAULT | \
	 GPU_IRQ_PROTM_FAULT | \
	 GPU_IRQ_RESET_COMPLETED | \
	 GPU_IRQ_CLEAN_CACHES_COMPLETED)

static void panthor_gpu_coherency_set(struct panthor_device *ptdev)
{
	gpu_write(ptdev, GPU_COHERENCY_PROTOCOL,
		  ptdev->coherent ? GPU_COHERENCY_PROT_BIT(ACE_LITE) : GPU_COHERENCY_NONE);
}

static void panthor_gpu_l2_config_set(struct panthor_device *ptdev)
{
	const struct panthor_soc_data *data = ptdev->soc_data;
	u32 l2_config;
	u32 i;

	if (!data || !data->asn_hash_enable)
		return;

	if (GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id) < 11) {
		drm_err(&ptdev->base, "Custom ASN hash not supported by the device");
		return;
	}

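	/* Program the ASN hash keys, then flip the enable bit in L2_CONFIG. */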
	for (i = 0; i < ARRAY_SIZE(data->asn_hash); i++)
		gpu_write(ptdev, GPU_ASN_HASH(i), data->asn_hash[i]);

	l2_config = gpu_read(ptdev, GPU_L2_CONFIG);
	l2_config |= GPU_L2_CONFIG_ASN_HASH_ENABLE;
	gpu_write(ptdev, GPU_L2_CONFIG, l2_config);
}

static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
{
	gpu_write(ptdev, GPU_INT_CLEAR, status);

	if (status & GPU_IRQ_FAULT) {
		u32 fault_status = gpu_read(ptdev, GPU_FAULT_STATUS);
		u64 address = gpu_read64(ptdev, GPU_FAULT_ADDR);

		drm_warn(&ptdev->base, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
			 fault_status, panthor_exception_name(ptdev, fault_status & 0xFF),
			 address);
	}
	if (status & GPU_IRQ_PROTM_FAULT)
		drm_warn(&ptdev->base, "GPU Fault in protected mode\n");

	spin_lock(&ptdev->gpu->reqs_lock);
	if (status & ptdev->gpu->pending_reqs) {
		ptdev->gpu->pending_reqs &= ~status;
		wake_up_all(&ptdev->gpu->reqs_acked);
	}
	spin_unlock(&ptdev->gpu->reqs_lock);
}
PANTHOR_IRQ_HANDLER(gpu, GPU, panthor_gpu_irq_handler);

/**
 * panthor_gpu_unplug() - Called when the GPU is unplugged.
 * @ptdev: Device to unplug.
 */
void panthor_gpu_unplug(struct panthor_device *ptdev)
{
	unsigned long flags;

	/* Make sure the IRQ handler is not running after that point. */
	if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev))
		panthor_gpu_irq_suspend(&ptdev->gpu->irq);

	/* Wake-up all waiters. */
	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
	ptdev->gpu->pending_reqs = 0;
	wake_up_all(&ptdev->gpu->reqs_acked);
	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
}

/**
 * panthor_gpu_init() - Initialize the GPU block
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_init(struct panthor_device *ptdev)
{
	struct panthor_gpu *gpu;
	u32 pa_bits;
	int ret, irq;

	gpu = drmm_kzalloc(&ptdev->base, sizeof(*gpu), GFP_KERNEL);
	if (!gpu)
		return -ENOMEM;

	spin_lock_init(&gpu->reqs_lock);
	init_waitqueue_head(&gpu->reqs_acked);
	mutex_init(&gpu->cache_flush_lock);
	ptdev->gpu = gpu;

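	/*
	 * No scatter-gather segment-size limit, but constrain DMA addressing
	 * to the physical address width reported in MMU_FEATURES.
	 */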
	dma_set_max_seg_size(ptdev->base.dev, UINT_MAX);
	pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features);
	ret = dma_set_mask_and_coherent(ptdev->base.dev, DMA_BIT_MASK(pa_bits));
	if (ret)
		return ret;

	irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "gpu");
	if (irq < 0)
		return irq;

	ret = panthor_request_gpu_irq(ptdev, &ptdev->gpu->irq, irq, GPU_INTERRUPTS_MASK);
	if (ret)
		return ret;

	return 0;
}

/**
 * panthor_gpu_block_power_off() - Power-off a specific block of the GPU
 * @ptdev: Device.
 * @blk_name: Block name.
 * @pwroff_reg: Power-off register for this block.
 * @pwrtrans_reg: Power transition register for this block.
 * @mask: Sub-elements to power-off.
 * @timeout_us: Timeout in microseconds.
 *
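 * Callers usually go through the panthor_gpu_power_off() wrapper macro from
 * panthor_gpu.h, e.g. panthor_gpu_power_off(ptdev, L2,
 * ptdev->gpu_info.l2_present, 20000) as done in panthor_gpu_l2_power_off().
 *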
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_block_power_off(struct panthor_device *ptdev,
				const char *blk_name,
				u32 pwroff_reg, u32 pwrtrans_reg,
				u64 mask, u32 timeout_us)
{
	u64 val;
	int ret;

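	/* Wait for any in-flight power transition on this block to settle. */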
	ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
					      !(mask & val), 100, timeout_us);
	if (ret) {
		drm_err(&ptdev->base,
			"timeout waiting on %s:%llx power transition", blk_name,
			mask);
		return ret;
	}

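	/* Request the power-off and wait for the transition to complete. */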
	gpu_write64(ptdev, pwroff_reg, mask);

	ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
					      !(mask & val), 100, timeout_us);
	if (ret) {
		drm_err(&ptdev->base,
			"timeout waiting on %s:%llx power transition", blk_name,
			mask);
		return ret;
	}

	return 0;
}

/**
 * panthor_gpu_block_power_on() - Power-on a specific block of the GPU
 * @ptdev: Device.
 * @blk_name: Block name.
 * @pwron_reg: Power-on register for this block.
 * @pwrtrans_reg: Power transition register for this block.
 * @rdy_reg: Power transition ready register.
 * @mask: Sub-elements to power-on.
 * @timeout_us: Timeout in microseconds.
 *
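 * Callers usually go through the panthor_gpu_power_on() wrapper macro from
 * panthor_gpu.h, e.g. panthor_gpu_power_on(ptdev, L2, 1, 20000) as done in
 * panthor_gpu_l2_power_on().
 *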
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_block_power_on(struct panthor_device *ptdev,
			       const char *blk_name,
			       u32 pwron_reg, u32 pwrtrans_reg,
			       u32 rdy_reg, u64 mask, u32 timeout_us)
{
	u64 val;
	int ret;

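	/* Wait for any in-flight power transition on this block to settle. */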
	ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
					      !(mask & val), 100, timeout_us);
	if (ret) {
		drm_err(&ptdev->base,
			"timeout waiting on %s:%llx power transition", blk_name,
			mask);
		return ret;
	}

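	/* Request the power-on and wait for the block to report ready. */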
	gpu_write64(ptdev, pwron_reg, mask);

	ret = gpu_read64_relaxed_poll_timeout(ptdev, rdy_reg, val,
					      (mask & val) == mask,
					      100, timeout_us);
	if (ret) {
		drm_err(&ptdev->base, "timeout waiting on %s:%llx readiness",
			blk_name, mask);
		return ret;
	}

	return 0;
}

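/**
 * panthor_gpu_l2_power_off() - Power-off the L2-cache
 * @ptdev: Device.
 */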
void panthor_gpu_l2_power_off(struct panthor_device *ptdev)
{
	panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000);
}

/**
 * panthor_gpu_l2_power_on() - Power-on the L2-cache
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_l2_power_on(struct panthor_device *ptdev)
{
	if (ptdev->gpu_info.l2_present != 1) {
		/*
		 * Only support one core group now.
		 * ~(l2_present - 1) unsets all bits in l2_present except
		 * the bottom bit. (l2_present - 2) has all the bits in
		 * the first core group set. AND them together to generate
		 * a mask of cores in the first core group.
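		 *
		 * E.g. l2_present == 0x11 (two L2 slices): ~(0x11 - 1) &
		 * (0x11 - 2) == ~0x10 & 0x0f == 0x0f, i.e. the four cores
		 * behind the first L2.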
		 */
		u64 core_mask = ~(ptdev->gpu_info.l2_present - 1) &
				(ptdev->gpu_info.l2_present - 2);
		drm_info_once(&ptdev->base, "using only 1st core group (%lu cores from %lu)\n",
			      hweight64(core_mask),
			      hweight64(ptdev->gpu_info.shader_present));
	}

	/* Set the desired coherency mode and L2 config before powering up the L2. */
	panthor_gpu_coherency_set(ptdev);
	panthor_gpu_l2_config_set(ptdev);

	return panthor_gpu_power_on(ptdev, L2, 1, 20000);
}

/**
 * panthor_gpu_flush_caches() - Flush caches
 * @ptdev: Device.
 * @l2: L2 flush type.
 * @lsc: LSC flush type.
 * @other: Other flush type.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_flush_caches(struct panthor_device *ptdev,
			     u32 l2, u32 lsc, u32 other)
{
	bool timedout = false;
	unsigned long flags;

	/* Serialize cache flush operations. */
	guard(mutex)(&ptdev->gpu->cache_flush_lock);

	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
	if (!drm_WARN_ON(&ptdev->base,
			 ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED)) {
		ptdev->gpu->pending_reqs |= GPU_IRQ_CLEAN_CACHES_COMPLETED;
		gpu_write(ptdev, GPU_CMD, GPU_FLUSH_CACHES(l2, lsc, other));
	}
	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);

	if (!wait_event_timeout(ptdev->gpu->reqs_acked,
				!(ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED),
				msecs_to_jiffies(100))) {
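		/*
		 * The flush may have completed between the wait timing out and
		 * us taking the lock: re-check RAWSTAT under reqs_lock and
		 * only report a timeout if CLEAN_CACHES_COMPLETED never
		 * showed up.
		 */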
		spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
		if ((ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED) != 0 &&
		    !(gpu_read(ptdev, GPU_INT_RAWSTAT) & GPU_IRQ_CLEAN_CACHES_COMPLETED))
			timedout = true;
		else
			ptdev->gpu->pending_reqs &= ~GPU_IRQ_CLEAN_CACHES_COMPLETED;
		spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
	}

	if (timedout) {
		drm_err(&ptdev->base, "Flush caches timeout");
		return -ETIMEDOUT;
	}

	return 0;
}

/**
 * panthor_gpu_soft_reset() - Issue a soft-reset
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_soft_reset(struct panthor_device *ptdev)
{
	bool timedout = false;
	unsigned long flags;

	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
	if (!drm_WARN_ON(&ptdev->base,
			 ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED)) {
		ptdev->gpu->pending_reqs |= GPU_IRQ_RESET_COMPLETED;
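		/* Clear any stale RESET_COMPLETED bit before kicking the reset. */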
		gpu_write(ptdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
		gpu_write(ptdev, GPU_CMD, GPU_SOFT_RESET);
	}
	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);

	if (!wait_event_timeout(ptdev->gpu->reqs_acked,
				!(ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED),
				msecs_to_jiffies(100))) {
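		/*
		 * The reset may have completed between the wait timing out and
		 * us taking the lock: re-check RAWSTAT under reqs_lock and
		 * only report a timeout if RESET_COMPLETED never showed up.
		 */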
		spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
		if ((ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED) != 0 &&
		    !(gpu_read(ptdev, GPU_INT_RAWSTAT) & GPU_IRQ_RESET_COMPLETED))
			timedout = true;
		else
			ptdev->gpu->pending_reqs &= ~GPU_IRQ_RESET_COMPLETED;
		spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
	}

	if (timedout) {
		drm_err(&ptdev->base, "Soft reset timeout");
		return -ETIMEDOUT;
	}

	return 0;
}

/**
 * panthor_gpu_suspend() - Suspend the GPU block.
 * @ptdev: Device.
 *
 * Suspend the GPU irq. This should be called last in the suspend procedure,
 * after all other blocks have been suspended.
 */
void panthor_gpu_suspend(struct panthor_device *ptdev)
{
	/* On a fast reset, simply power down the L2. */
	if (!ptdev->reset.fast)
		panthor_hw_soft_reset(ptdev);
	else
		panthor_hw_l2_power_off(ptdev);

	panthor_gpu_irq_suspend(&ptdev->gpu->irq);
}

/**
 * panthor_gpu_resume() - Resume the GPU block.
 * @ptdev: Device.
 *
 * Resume the IRQ handler and power-on the L2-cache.
 * The FW takes care of powering the other blocks.
 */
void panthor_gpu_resume(struct panthor_device *ptdev)
{
	panthor_gpu_irq_resume(&ptdev->gpu->irq, GPU_INTERRUPTS_MASK);
	panthor_hw_l2_power_on(ptdev);
}