// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */

#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>

#include "panthor_device.h"
#include "panthor_gpu.h"
#include "panthor_regs.h"

/**
 * struct panthor_gpu - GPU block management data.
 */
struct panthor_gpu {
	/** @irq: GPU irq. */
	struct panthor_irq irq;

	/** @reqs_lock: Lock protecting access to pending_reqs. */
	spinlock_t reqs_lock;

	/** @pending_reqs: Pending GPU requests. */
	u32 pending_reqs;

	/** @reqs_acked: GPU request wait queue. */
	wait_queue_head_t reqs_acked;

	/** @cache_flush_lock: Lock used to serialize cache flushes. */
	struct mutex cache_flush_lock;
};

#define GPU_INTERRUPTS_MASK	\
	(GPU_IRQ_FAULT | \
	 GPU_IRQ_PROTM_FAULT | \
	 GPU_IRQ_RESET_COMPLETED | \
	 GPU_IRQ_CLEAN_CACHES_COMPLETED)

static void panthor_gpu_coherency_set(struct panthor_device *ptdev)
{
	gpu_write(ptdev, GPU_COHERENCY_PROTOCOL,
		ptdev->coherent ? GPU_COHERENCY_PROT_BIT(ACE_LITE) : GPU_COHERENCY_NONE);
}

static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
{
	gpu_write(ptdev, GPU_INT_CLEAR, status);

	if (status & GPU_IRQ_FAULT) {
		u32 fault_status = gpu_read(ptdev, GPU_FAULT_STATUS);
		u64 address = gpu_read64(ptdev, GPU_FAULT_ADDR);

		drm_warn(&ptdev->base, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
			 fault_status, panthor_exception_name(ptdev, fault_status & 0xFF),
			 address);
	}
	if (status & GPU_IRQ_PROTM_FAULT)
		drm_warn(&ptdev->base, "GPU Fault in protected mode\n");

	spin_lock(&ptdev->gpu->reqs_lock);
	if (status & ptdev->gpu->pending_reqs) {
		ptdev->gpu->pending_reqs &= ~status;
		wake_up_all(&ptdev->gpu->reqs_acked);
	}
	spin_unlock(&ptdev->gpu->reqs_lock);
}
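
/*
 * PANTHOR_IRQ_HANDLER() (defined in panthor_device.h) wraps the handler
 * above in the common IRQ plumbing: it generates the raw/threaded interrupt
 * handlers as well as the panthor_gpu_irq_suspend()/panthor_gpu_irq_resume()
 * and panthor_request_gpu_irq() helpers used elsewhere in this file.
 */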
PANTHOR_IRQ_HANDLER(gpu, GPU, panthor_gpu_irq_handler);

/**
 * panthor_gpu_unplug() - Called when the GPU is unplugged.
 * @ptdev: Device to unplug.
 */
void panthor_gpu_unplug(struct panthor_device *ptdev)
{
	unsigned long flags;

	/* Make sure the IRQ handler is not running after that point. */
	if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev))
		panthor_gpu_irq_suspend(&ptdev->gpu->irq);

	/* Wake up all waiters. */
	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
	ptdev->gpu->pending_reqs = 0;
	wake_up_all(&ptdev->gpu->reqs_acked);
	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
}

/**
 * panthor_gpu_init() - Initialize the GPU block
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_init(struct panthor_device *ptdev)
{
	struct panthor_gpu *gpu;
	u32 pa_bits;
	int ret, irq;

	gpu = drmm_kzalloc(&ptdev->base, sizeof(*gpu), GFP_KERNEL);
	if (!gpu)
		return -ENOMEM;

	spin_lock_init(&gpu->reqs_lock);
	init_waitqueue_head(&gpu->reqs_acked);
	mutex_init(&gpu->cache_flush_lock);
	ptdev->gpu = gpu;

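	/*
	 * Size the streaming and coherent DMA masks from the number of
	 * physical address bits the GPU MMU reports: e.g. a GPU advertising
	 * 40 PA bits in mmu_features gets DMA_BIT_MASK(40).
	 */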
	dma_set_max_seg_size(ptdev->base.dev, UINT_MAX);
	pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features);
	ret = dma_set_mask_and_coherent(ptdev->base.dev, DMA_BIT_MASK(pa_bits));
	if (ret)
		return ret;

	irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "gpu");
	if (irq < 0)
		return irq;

	ret = panthor_request_gpu_irq(ptdev, &ptdev->gpu->irq, irq, GPU_INTERRUPTS_MASK);
	if (ret)
		return ret;

	return 0;
}

/**
 * panthor_gpu_block_power_off() - Power-off a specific block of the GPU
 * @ptdev: Device.
 * @blk_name: Block name.
 * @pwroff_reg: Power-off register for this block.
 * @pwrtrans_reg: Power transition register for this block.
 * @mask: Sub-elements to power-off.
 * @timeout_us: Timeout in microseconds.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_block_power_off(struct panthor_device *ptdev,
				const char *blk_name,
				u32 pwroff_reg, u32 pwrtrans_reg,
				u64 mask, u32 timeout_us)
{
	u64 val;
	int ret;

	ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
					      !(mask & val), 100, timeout_us);
	if (ret) {
		drm_err(&ptdev->base,
			"timeout waiting on %s:%llx power transition", blk_name,
			mask);
		return ret;
	}

	gpu_write64(ptdev, pwroff_reg, mask);

	ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
					      !(mask & val), 100, timeout_us);
	if (ret) {
		drm_err(&ptdev->base,
			"timeout waiting on %s:%llx power transition", blk_name,
			mask);
		return ret;
	}

	return 0;
}

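/*
 * Callers normally use the panthor_gpu_power_off() helper macro from
 * panthor_gpu.h, which derives the register offsets from the block name,
 * e.g. panthor_gpu_power_off(ptdev, L2, 1, 20000) as done in
 * panthor_gpu_suspend() below.
 */
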
/**
 * panthor_gpu_block_power_on() - Power-on a specific block of the GPU
 * @ptdev: Device.
 * @blk_name: Block name.
 * @pwron_reg: Power-on register for this block.
 * @pwrtrans_reg: Power transition register for this block.
 * @rdy_reg: Power transition ready register.
 * @mask: Sub-elements to power-on.
 * @timeout_us: Timeout in microseconds.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_block_power_on(struct panthor_device *ptdev,
			       const char *blk_name,
			       u32 pwron_reg, u32 pwrtrans_reg,
			       u32 rdy_reg, u64 mask, u32 timeout_us)
{
	u64 val;
	int ret;

	ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
					      !(mask & val), 100, timeout_us);
	if (ret) {
		drm_err(&ptdev->base,
			"timeout waiting on %s:%llx power transition", blk_name,
			mask);
		return ret;
	}

	gpu_write64(ptdev, pwron_reg, mask);

	ret = gpu_read64_relaxed_poll_timeout(ptdev, rdy_reg, val,
					      (mask & val) == mask,
					      100, timeout_us);
	if (ret) {
		drm_err(&ptdev->base, "timeout waiting on %s:%llx readiness",
			blk_name, mask);
		return ret;
	}

	return 0;
}

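/*
 * As with power-off, the panthor_gpu_power_on() helper macro from
 * panthor_gpu.h is the usual entry point, e.g.
 * panthor_gpu_power_on(ptdev, L2, 1, 20000) in panthor_gpu_l2_power_on()
 * below.
 */
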
/**
 * panthor_gpu_l2_power_on() - Power-on the L2-cache
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_l2_power_on(struct panthor_device *ptdev)
{
	if (ptdev->gpu_info.l2_present != 1) {
		/*
		 * Only support one core group now.
		 * ~(l2_present - 1) unsets all bits in l2_present except
		 * the bottom bit. (l2_present - 2) has all the bits in
		 * the first core group set. AND them together to generate
		 * a mask of cores in the first core group.
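		 *
		 * For example, with a hypothetical l2_present = 0x11:
		 * ~(0x11 - 1) = ~0x10 clears bit 4, 0x11 - 2 = 0x0f sets
		 * bits 0-3, and ANDing them yields 0x0f, the cores behind
		 * the first L2 slice.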
		 */
		u64 core_mask = ~(ptdev->gpu_info.l2_present - 1) &
				(ptdev->gpu_info.l2_present - 2);
		drm_info_once(&ptdev->base, "using only 1st core group (%lu cores from %lu)\n",
			      hweight64(core_mask),
			      hweight64(ptdev->gpu_info.shader_present));
	}

	/* Set the desired coherency mode before powering up the L2. */
	panthor_gpu_coherency_set(ptdev);

	return panthor_gpu_power_on(ptdev, L2, 1, 20000);
}

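/*
 * Cache flushes and soft-resets share the same request/ack protocol: the
 * matching GPU_IRQ_* bit is set in pending_reqs before the command is
 * written to GPU_CMD, and panthor_gpu_irq_handler() clears the bit and
 * wakes up reqs_acked waiters once the GPU signals completion.
 */
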
/**
 * panthor_gpu_flush_caches() - Flush caches
 * @ptdev: Device.
 * @l2: L2 flush type.
 * @lsc: LSC flush type.
 * @other: Other flush type.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_flush_caches(struct panthor_device *ptdev,
			     u32 l2, u32 lsc, u32 other)
{
	bool timedout = false;
	unsigned long flags;

	/* Serialize cache flush operations. */
	guard(mutex)(&ptdev->gpu->cache_flush_lock);

	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
	if (!drm_WARN_ON(&ptdev->base,
			 ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED)) {
		ptdev->gpu->pending_reqs |= GPU_IRQ_CLEAN_CACHES_COMPLETED;
		gpu_write(ptdev, GPU_CMD, GPU_FLUSH_CACHES(l2, lsc, other));
	}
	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);

	if (!wait_event_timeout(ptdev->gpu->reqs_acked,
				!(ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED),
				msecs_to_jiffies(100))) {
		spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
		if ((ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED) != 0 &&
		    !(gpu_read(ptdev, GPU_INT_RAWSTAT) & GPU_IRQ_CLEAN_CACHES_COMPLETED))
			timedout = true;
		else
			ptdev->gpu->pending_reqs &= ~GPU_IRQ_CLEAN_CACHES_COMPLETED;
		spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
	}

	if (timedout) {
		drm_err(&ptdev->base, "Flush caches timeout");
		return -ETIMEDOUT;
	}

	return 0;
}

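/*
 * A typical call site cleans and invalidates both the L2 and the LSC,
 * e.g. (assuming the CACHE_CLEAN/CACHE_INV flag definitions from
 * panthor_regs.h):
 *
 *	panthor_gpu_flush_caches(ptdev, CACHE_CLEAN | CACHE_INV,
 *				 CACHE_CLEAN | CACHE_INV, 0);
 */
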
/**
 * panthor_gpu_soft_reset() - Issue a soft-reset
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_soft_reset(struct panthor_device *ptdev)
{
	bool timedout = false;
	unsigned long flags;

	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
	if (!drm_WARN_ON(&ptdev->base,
			 ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED)) {
		ptdev->gpu->pending_reqs |= GPU_IRQ_RESET_COMPLETED;
		gpu_write(ptdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
		gpu_write(ptdev, GPU_CMD, GPU_SOFT_RESET);
	}
	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);

	if (!wait_event_timeout(ptdev->gpu->reqs_acked,
				!(ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED),
				msecs_to_jiffies(100))) {
		spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
		if ((ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED) != 0 &&
		    !(gpu_read(ptdev, GPU_INT_RAWSTAT) & GPU_IRQ_RESET_COMPLETED))
			timedout = true;
		else
			ptdev->gpu->pending_reqs &= ~GPU_IRQ_RESET_COMPLETED;
		spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
	}

	if (timedout) {
		drm_err(&ptdev->base, "Soft reset timeout");
		return -ETIMEDOUT;
	}

	return 0;
}

/**
 * panthor_gpu_suspend() - Suspend the GPU block.
 * @ptdev: Device.
 *
 * Suspend the GPU IRQ. This should be called last in the suspend procedure,
 * after all other blocks have been suspended.
 */
void panthor_gpu_suspend(struct panthor_device *ptdev)
{
	/* On a fast reset, simply power down the L2. */
	if (!ptdev->reset.fast)
		panthor_gpu_soft_reset(ptdev);
	else
		panthor_gpu_power_off(ptdev, L2, 1, 20000);

	panthor_gpu_irq_suspend(&ptdev->gpu->irq);
}

/**
 * panthor_gpu_resume() - Resume the GPU block.
 * @ptdev: Device.
 *
 * Resume the IRQ handler and power-on the L2-cache.
 * The FW takes care of powering the other blocks.
 */
void panthor_gpu_resume(struct panthor_device *ptdev)
{
	panthor_gpu_irq_resume(&ptdev->gpu->irq, GPU_INTERRUPTS_MASK);
	panthor_gpu_l2_power_on(ptdev);
}