// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */

#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>

#include "panthor_device.h"
#include "panthor_gpu.h"
#include "panthor_regs.h"

/**
 * struct panthor_gpu - GPU block management data.
 */
struct panthor_gpu {
	/** @irq: GPU irq. */
	struct panthor_irq irq;

	/** @reqs_lock: Lock protecting access to pending_reqs. */
	spinlock_t reqs_lock;

	/** @pending_reqs: Pending GPU requests. */
	u32 pending_reqs;

	/** @reqs_acked: GPU request wait queue. */
	wait_queue_head_t reqs_acked;
};
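
/*
 * Request/ack handshake (a summary of what the code below does): a
 * requester sets the matching GPU_IRQ_* bit in @pending_reqs under
 * @reqs_lock before writing the command to GPU_CMD, then sleeps on
 * @reqs_acked; the IRQ handler clears the bit and wakes the waiters
 * once the GPU acknowledges. See panthor_gpu_flush_caches() and
 * panthor_gpu_soft_reset() for complete examples.
 */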

/**
 * struct panthor_model - GPU model description
 */
struct panthor_model {
	/** @name: Model name. */
	const char *name;

	/** @arch_major: Major version number of architecture. */
	u8 arch_major;

	/** @product_major: Major version number of product. */
	u8 product_major;
};

/**
 * GPU_MODEL() - Define a GPU model. A GPU product can be uniquely identified
 * by a combination of the major architecture version and the major product
 * version.
 * @_name: Name for the GPU model.
 * @_arch_major: Architecture major.
 * @_product_major: Product major.
 */
#define GPU_MODEL(_name, _arch_major, _product_major) \
{\
	.name = __stringify(_name),				\
	.arch_major = _arch_major,				\
	.product_major = _product_major,			\
}
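
/*
 * For instance, the gpu_models[] entry below, GPU_MODEL(g610, 10, 7),
 * expands to { .name = "g610", .arch_major = 10, .product_major = 7 }.
 */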

static const struct panthor_model gpu_models[] = {
	GPU_MODEL(g610, 10, 7),
	{},
};

#define GPU_INTERRUPTS_MASK	\
	(GPU_IRQ_FAULT | \
	 GPU_IRQ_PROTM_FAULT | \
	 GPU_IRQ_RESET_COMPLETED | \
	 GPU_IRQ_CLEAN_CACHES_COMPLETED)

static void panthor_gpu_coherency_set(struct panthor_device *ptdev)
{
	gpu_write(ptdev, GPU_COHERENCY_PROTOCOL,
		ptdev->coherent ? GPU_COHERENCY_PROT_BIT(ACE_LITE) : GPU_COHERENCY_NONE);
}

static void panthor_gpu_init_info(struct panthor_device *ptdev)
{
	const struct panthor_model *model;
	u32 arch_major, product_major;
	u32 major, minor, status;
	unsigned int i;

	ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID);
	ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID);
	ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID);
	ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES);
	ptdev->gpu_info.l2_features = gpu_read(ptdev, GPU_L2_FEATURES);
	ptdev->gpu_info.tiler_features = gpu_read(ptdev, GPU_TILER_FEATURES);
	ptdev->gpu_info.mem_features = gpu_read(ptdev, GPU_MEM_FEATURES);
	ptdev->gpu_info.mmu_features = gpu_read(ptdev, GPU_MMU_FEATURES);
	ptdev->gpu_info.thread_features = gpu_read(ptdev, GPU_THREAD_FEATURES);
	ptdev->gpu_info.max_threads = gpu_read(ptdev, GPU_THREAD_MAX_THREADS);
	ptdev->gpu_info.thread_max_workgroup_size = gpu_read(ptdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
	ptdev->gpu_info.thread_max_barrier_size = gpu_read(ptdev, GPU_THREAD_MAX_BARRIER_SIZE);
	ptdev->gpu_info.coherency_features = gpu_read(ptdev, GPU_COHERENCY_FEATURES);
	for (i = 0; i < 4; i++)
		ptdev->gpu_info.texture_features[i] = gpu_read(ptdev, GPU_TEXTURE_FEATURES(i));

	ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT);

	ptdev->gpu_info.shader_present = gpu_read(ptdev, GPU_SHADER_PRESENT_LO);
	ptdev->gpu_info.shader_present |= (u64)gpu_read(ptdev, GPU_SHADER_PRESENT_HI) << 32;

	ptdev->gpu_info.tiler_present = gpu_read(ptdev, GPU_TILER_PRESENT_LO);
	ptdev->gpu_info.tiler_present |= (u64)gpu_read(ptdev, GPU_TILER_PRESENT_HI) << 32;

	ptdev->gpu_info.l2_present = gpu_read(ptdev, GPU_L2_PRESENT_LO);
	ptdev->gpu_info.l2_present |= (u64)gpu_read(ptdev, GPU_L2_PRESENT_HI) << 32;

	arch_major = GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id);
	product_major = GPU_PROD_MAJOR(ptdev->gpu_info.gpu_id);
	major = GPU_VER_MAJOR(ptdev->gpu_info.gpu_id);
	minor = GPU_VER_MINOR(ptdev->gpu_info.gpu_id);
	status = GPU_VER_STATUS(ptdev->gpu_info.gpu_id);

	for (model = gpu_models; model->name; model++) {
		if (model->arch_major == arch_major &&
		    model->product_major == product_major)
			break;
	}

	drm_info(&ptdev->base,
		 "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
		 model->name ?: "unknown", ptdev->gpu_info.gpu_id >> 16,
		 major, minor, status);

	drm_info(&ptdev->base,
		 "Features: L2:%#x Tiler:%#x Mem:%#x MMU:%#x AS:%#x",
		 ptdev->gpu_info.l2_features,
		 ptdev->gpu_info.tiler_features,
		 ptdev->gpu_info.mem_features,
		 ptdev->gpu_info.mmu_features,
		 ptdev->gpu_info.as_present);

	drm_info(&ptdev->base,
		 "shader_present=0x%0llx l2_present=0x%0llx tiler_present=0x%0llx",
		 ptdev->gpu_info.shader_present, ptdev->gpu_info.l2_present,
		 ptdev->gpu_info.tiler_present);
}
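
/*
 * Worked example (illustrative, assuming the GPU_ID field layout from
 * panthor_regs.h): a GPU_ID of 0xa8670000 decodes to arch_major == 10
 * and product_major == 7, which matches the g610 entry above, so the
 * first drm_info() line would read "mali-g610 id 0xa867 ...".
 */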

static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
{
	gpu_write(ptdev, GPU_INT_CLEAR, status);

	if (status & GPU_IRQ_FAULT) {
		u32 fault_status = gpu_read(ptdev, GPU_FAULT_STATUS);
		u64 address = ((u64)gpu_read(ptdev, GPU_FAULT_ADDR_HI) << 32) |
			      gpu_read(ptdev, GPU_FAULT_ADDR_LO);

		drm_warn(&ptdev->base, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
			 fault_status, panthor_exception_name(ptdev, fault_status & 0xFF),
			 address);
	}
	if (status & GPU_IRQ_PROTM_FAULT)
		drm_warn(&ptdev->base, "GPU Fault in protected mode\n");

	spin_lock(&ptdev->gpu->reqs_lock);
	if (status & ptdev->gpu->pending_reqs) {
		ptdev->gpu->pending_reqs &= ~status;
		wake_up_all(&ptdev->gpu->reqs_acked);
	}
	spin_unlock(&ptdev->gpu->reqs_lock);
}
PANTHOR_IRQ_HANDLER(gpu, GPU, panthor_gpu_irq_handler);
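
/*
 * The PANTHOR_IRQ_HANDLER() instantiation above generates the
 * panthor_gpu_irq_suspend()/panthor_gpu_irq_resume() and
 * panthor_request_gpu_irq() helpers used in the rest of this file.
 */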

/**
 * panthor_gpu_unplug() - Called when the GPU is unplugged.
 * @ptdev: Device to unplug.
 */
void panthor_gpu_unplug(struct panthor_device *ptdev)
{
	unsigned long flags;

	/* Make sure the IRQ handler is not running after that point. */
	if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev))
		panthor_gpu_irq_suspend(&ptdev->gpu->irq);

	/* Wake-up all waiters. */
	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
	ptdev->gpu->pending_reqs = 0;
	wake_up_all(&ptdev->gpu->reqs_acked);
	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
}

/**
 * panthor_gpu_init() - Initialize the GPU block
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_init(struct panthor_device *ptdev)
{
	struct panthor_gpu *gpu;
	u32 pa_bits;
	int ret, irq;

	gpu = drmm_kzalloc(&ptdev->base, sizeof(*gpu), GFP_KERNEL);
	if (!gpu)
		return -ENOMEM;

	spin_lock_init(&gpu->reqs_lock);
	init_waitqueue_head(&gpu->reqs_acked);
	ptdev->gpu = gpu;
	panthor_gpu_init_info(ptdev);

	dma_set_max_seg_size(ptdev->base.dev, UINT_MAX);
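	/*
	 * Size the DMA mask to the physical address range the GPU MMU
	 * reports, e.g. a GPU advertising 40 PA bits gets a 40-bit mask.
	 */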
	pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features);
	ret = dma_set_mask_and_coherent(ptdev->base.dev, DMA_BIT_MASK(pa_bits));
	if (ret)
		return ret;

	irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "gpu");
	if (irq < 0)
		return irq;

	ret = panthor_request_gpu_irq(ptdev, &ptdev->gpu->irq, irq, GPU_INTERRUPTS_MASK);
	if (ret)
		return ret;

	return 0;
}

/**
 * panthor_gpu_block_power_off() - Power-off a specific block of the GPU
 * @ptdev: Device.
 * @blk_name: Block name.
 * @pwroff_reg: Power-off register for this block.
 * @pwrtrans_reg: Power transition register for this block.
 * @mask: Sub-elements to power-off.
 * @timeout_us: Timeout in microseconds.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_block_power_off(struct panthor_device *ptdev,
				const char *blk_name,
				u32 pwroff_reg, u32 pwrtrans_reg,
				u64 mask, u32 timeout_us)
{
	u32 val, i;
	int ret;

	for (i = 0; i < 2; i++) {
		u32 mask32 = mask >> (i * 32);

		if (!mask32)
			continue;

		ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
						 val, !(mask32 & val),
						 100, timeout_us);
		if (ret) {
			drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
				blk_name, mask);
			return ret;
		}
	}

	if (mask & GENMASK(31, 0))
		gpu_write(ptdev, pwroff_reg, mask);

	if (mask >> 32)
		gpu_write(ptdev, pwroff_reg + 4, mask >> 32);

	for (i = 0; i < 2; i++) {
		u32 mask32 = mask >> (i * 32);

		if (!mask32)
			continue;

		ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
						 val, !(mask32 & val),
						 100, timeout_us);
		if (ret) {
			drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
				blk_name, mask);
			return ret;
		}
	}

	return 0;
}
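
/*
 * Callers normally go through the panthor_gpu_power_off() wrapper from
 * panthor_gpu.h, which pastes the register names from the block type;
 * e.g. the panthor_gpu_power_off(ptdev, L2, 1, 20000) call in
 * panthor_gpu_suspend() below expands to
 * panthor_gpu_block_power_off(ptdev, "L2", L2_PWROFF_LO,
 * L2_PWRTRANS_LO, 1, 20000).
 */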

/**
 * panthor_gpu_block_power_on() - Power-on a specific block of the GPU
 * @ptdev: Device.
 * @blk_name: Block name.
 * @pwron_reg: Power-on register for this block.
 * @pwrtrans_reg: Power transition register for this block.
 * @rdy_reg: Power transition ready register.
 * @mask: Sub-elements to power-on.
 * @timeout_us: Timeout in microseconds.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_block_power_on(struct panthor_device *ptdev,
			       const char *blk_name,
			       u32 pwron_reg, u32 pwrtrans_reg,
			       u32 rdy_reg, u64 mask, u32 timeout_us)
{
	u32 val, i;
	int ret;

	for (i = 0; i < 2; i++) {
		u32 mask32 = mask >> (i * 32);

		if (!mask32)
			continue;

		ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
						 val, !(mask32 & val),
						 100, timeout_us);
		if (ret) {
			drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
				blk_name, mask);
			return ret;
		}
	}

	if (mask & GENMASK(31, 0))
		gpu_write(ptdev, pwron_reg, mask);

	if (mask >> 32)
		gpu_write(ptdev, pwron_reg + 4, mask >> 32);

	for (i = 0; i < 2; i++) {
		u32 mask32 = mask >> (i * 32);

		if (!mask32)
			continue;

		ret = readl_relaxed_poll_timeout(ptdev->iomem + rdy_reg + (i * 4),
						 val, (mask32 & val) == mask32,
						 100, timeout_us);
		if (ret) {
			drm_err(&ptdev->base, "timeout waiting on %s:%llx readiness",
				blk_name, mask);
			return ret;
		}
	}

	return 0;
}
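
/*
 * As with the power-off path, this is normally reached through the
 * panthor_gpu_power_on() wrapper from panthor_gpu.h; the
 * panthor_gpu_power_on(ptdev, L2, 1, 20000) call in
 * panthor_gpu_l2_power_on() below expands to
 * panthor_gpu_block_power_on(ptdev, "L2", L2_PWRON_LO, L2_PWRTRANS_LO,
 * L2_READY_LO, 1, 20000).
 */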

/**
 * panthor_gpu_l2_power_on() - Power-on the L2-cache
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_l2_power_on(struct panthor_device *ptdev)
{
	if (ptdev->gpu_info.l2_present != 1) {
		/*
		 * Only support one core group now.
		 * ~(l2_present - 1) unsets all bits in l2_present except
		 * the bottom bit. (l2_present - 2) has all the bits in
		 * the first core group set. AND them together to generate
		 * a mask of cores in the first core group.
		 */
		u64 core_mask = ~(ptdev->gpu_info.l2_present - 1) &
				(ptdev->gpu_info.l2_present - 2);
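		/* e.g. l2_present == 0x11 gives core_mask == 0xf (group 0). */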
		drm_info_once(&ptdev->base, "using only 1st core group (%lu cores from %lu)\n",
			      hweight64(core_mask),
			      hweight64(ptdev->gpu_info.shader_present));
	}

	/* Set the desired coherency mode before the power up of L2 */
	panthor_gpu_coherency_set(ptdev);

	return panthor_gpu_power_on(ptdev, L2, 1, 20000);
}

/**
 * panthor_gpu_flush_caches() - Flush caches
 * @ptdev: Device.
 * @l2: L2 flush type.
 * @lsc: LSC flush type.
 * @other: Other flush type.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_flush_caches(struct panthor_device *ptdev,
			     u32 l2, u32 lsc, u32 other)
{
	bool timedout = false;
	unsigned long flags;

	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
	if (!drm_WARN_ON(&ptdev->base,
			 ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED)) {
		ptdev->gpu->pending_reqs |= GPU_IRQ_CLEAN_CACHES_COMPLETED;
		gpu_write(ptdev, GPU_CMD, GPU_FLUSH_CACHES(l2, lsc, other));
	}
	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);

	if (!wait_event_timeout(ptdev->gpu->reqs_acked,
				!(ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED),
				msecs_to_jiffies(100))) {
		spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
		if ((ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED) != 0 &&
		    !(gpu_read(ptdev, GPU_INT_RAWSTAT) & GPU_IRQ_CLEAN_CACHES_COMPLETED))
			timedout = true;
		else
			ptdev->gpu->pending_reqs &= ~GPU_IRQ_CLEAN_CACHES_COMPLETED;
		spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
	}

	if (timedout) {
		drm_err(&ptdev->base, "Flush caches timeout");
		return -ETIMEDOUT;
	}

	return 0;
}
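
/*
 * A typical call (illustrative; see panthor_regs.h for the flush-type
 * bits) would be a clean+invalidate of both L2 and LSC:
 *
 *	panthor_gpu_flush_caches(ptdev, CACHE_CLEAN | CACHE_INV,
 *				 CACHE_CLEAN | CACHE_INV, 0);
 */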

/**
 * panthor_gpu_soft_reset() - Issue a soft-reset
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_soft_reset(struct panthor_device *ptdev)
{
	bool timedout = false;
	unsigned long flags;

	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
	if (!drm_WARN_ON(&ptdev->base,
			 ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED)) {
		ptdev->gpu->pending_reqs |= GPU_IRQ_RESET_COMPLETED;
		gpu_write(ptdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
		gpu_write(ptdev, GPU_CMD, GPU_SOFT_RESET);
	}
	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);

	if (!wait_event_timeout(ptdev->gpu->reqs_acked,
				!(ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED),
				msecs_to_jiffies(100))) {
		spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
		if ((ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED) != 0 &&
		    !(gpu_read(ptdev, GPU_INT_RAWSTAT) & GPU_IRQ_RESET_COMPLETED))
			timedout = true;
		else
			ptdev->gpu->pending_reqs &= ~GPU_IRQ_RESET_COMPLETED;
		spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
	}

	if (timedout) {
		drm_err(&ptdev->base, "Soft reset timeout");
		return -ETIMEDOUT;
	}

	return 0;
}

/**
 * panthor_gpu_suspend() - Suspend the GPU block.
 * @ptdev: Device.
 *
 * Suspend the GPU irq. This should be called last in the suspend procedure,
 * after all other blocks have been suspended.
 */
void panthor_gpu_suspend(struct panthor_device *ptdev)
{
	/* On a fast reset, simply power down the L2. */
	if (!ptdev->reset.fast)
		panthor_gpu_soft_reset(ptdev);
	else
		panthor_gpu_power_off(ptdev, L2, 1, 20000);

	panthor_gpu_irq_suspend(&ptdev->gpu->irq);
}

/**
 * panthor_gpu_resume() - Resume the GPU block.
 * @ptdev: Device.
 *
 * Resume the IRQ handler and power-on the L2-cache.
 * The FW takes care of powering the other blocks.
 */
void panthor_gpu_resume(struct panthor_device *ptdev)
{
	panthor_gpu_irq_resume(&ptdev->gpu->irq, GPU_INTERRUPTS_MASK);
	panthor_gpu_l2_power_on(ptdev);
}

/**
 * panthor_gpu_read_64bit_counter() - Read a 64-bit counter at a given offset.
 * @ptdev: Device.
 * @reg: The offset of the register to read.
 *
 * Return: The counter value.
 */
static u64
panthor_gpu_read_64bit_counter(struct panthor_device *ptdev, u32 reg)
{
	u32 hi, lo;

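	/*
	 * The 64-bit counter is read as two 32-bit halves and may tick
	 * between the reads, so re-read the high word until it is stable.
	 */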
	do {
		hi = gpu_read(ptdev, reg + 0x4);
		lo = gpu_read(ptdev, reg);
	} while (hi != gpu_read(ptdev, reg + 0x4));

	return ((u64)hi << 32) | lo;
}

/**
 * panthor_gpu_read_timestamp() - Read the timestamp register.
 * @ptdev: Device.
 *
 * Return: The GPU timestamp value.
 */
u64 panthor_gpu_read_timestamp(struct panthor_device *ptdev)
{
	return panthor_gpu_read_64bit_counter(ptdev, GPU_TIMESTAMP_LO);
}

/**
 * panthor_gpu_read_timestamp_offset() - Read the timestamp offset register.
 * @ptdev: Device.
 *
 * Return: The GPU timestamp offset value.
 */
u64 panthor_gpu_read_timestamp_offset(struct panthor_device *ptdev)
{
	u32 hi, lo;

	hi = gpu_read(ptdev, GPU_TIMESTAMP_OFFSET_HI);
	lo = gpu_read(ptdev, GPU_TIMESTAMP_OFFSET_LO);

	return ((u64)hi << 32) | lo;
}
543