xref: /linux/drivers/gpu/drm/imagination/pvr_device.c (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
2 /* Copyright (c) 2023 Imagination Technologies Ltd. */
3 
4 #include "pvr_device.h"
5 #include "pvr_device_info.h"
6 
7 #include "pvr_fw.h"
8 #include "pvr_power.h"
9 #include "pvr_queue.h"
10 #include "pvr_rogue_cr_defs.h"
11 #include "pvr_stream.h"
12 #include "pvr_vm.h"
13 
14 #include <drm/drm_print.h>
15 
16 #include <linux/bitfield.h>
17 #include <linux/clk.h>
18 #include <linux/compiler_attributes.h>
19 #include <linux/compiler_types.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/err.h>
22 #include <linux/firmware.h>
23 #include <linux/gfp.h>
24 #include <linux/interrupt.h>
25 #include <linux/of.h>
26 #include <linux/platform_device.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/reset.h>
29 #include <linux/slab.h>
30 #include <linux/stddef.h>
31 #include <linux/types.h>
32 #include <linux/workqueue.h>
33 
34 #include <kunit/visibility.h>
35 
36 /* Major number for the supported version of the firmware. */
37 #define PVR_FW_VERSION_MAJOR 1
38 
39 /**
40  * pvr_device_reg_init() - Initialize kernel access to a PowerVR device's
41  * control registers.
42  * @pvr_dev: Target PowerVR device.
43  *
44  * Sets struct pvr_device->regs.
45  *
46  * This method of mapping the device control registers into memory ensures that
47  * they are unmapped when the driver is detached (i.e. no explicit cleanup is
48  * required).
49  *
50  * Return:
51  *  * 0 on success, or
52  *  * Any error returned by devm_platform_get_and_ioremap_resource().
53  */
54 static int
55 pvr_device_reg_init(struct pvr_device *pvr_dev)
56 {
57 	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
58 	struct platform_device *plat_dev = to_platform_device(drm_dev->dev);
59 	struct resource *regs_resource;
60 	void __iomem *regs;
61 
62 	pvr_dev->regs_resource = NULL;
63 	pvr_dev->regs = NULL;
64 
65 	regs = devm_platform_get_and_ioremap_resource(plat_dev, 0, &regs_resource);
66 	if (IS_ERR(regs))
67 		return dev_err_probe(drm_dev->dev, PTR_ERR(regs),
68 				     "failed to ioremap gpu registers\n");
69 
70 	pvr_dev->regs = regs;
71 	pvr_dev->regs_resource = regs_resource;
72 
73 	return 0;
74 }
75 
76 /**
77  * pvr_device_clk_init() - Initialize clocks required by a PowerVR device
78  * @pvr_dev: Target PowerVR device.
79  *
80  * Sets struct pvr_device->core_clk, struct pvr_device->sys_clk and
81  * struct pvr_device->mem_clk.
82  *
83  * Three clocks are required by the PowerVR device: core, sys and mem. On
84  * return, this function guarantees that the clocks are in one of the following
85  * states:
86  *
87  *  * All successfully initialized,
88  *  * Core errored, sys and mem uninitialized,
89  *  * Core deinitialized, sys errored, mem uninitialized, or
90  *  * Core and sys deinitialized, mem errored.
91  *
92  * Return:
93  *  * 0 on success,
94  *  * Any error returned by devm_clk_get(), or
95  *  * Any error returned by devm_clk_get_optional().
96  */
97 static int pvr_device_clk_init(struct pvr_device *pvr_dev)
98 {
99 	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
100 	struct clk *core_clk;
101 	struct clk *sys_clk;
102 	struct clk *mem_clk;
103 
104 	core_clk = devm_clk_get(drm_dev->dev, "core");
105 	if (IS_ERR(core_clk))
106 		return dev_err_probe(drm_dev->dev, PTR_ERR(core_clk),
107 				     "failed to get core clock\n");
108 
109 	sys_clk = devm_clk_get_optional(drm_dev->dev, "sys");
110 	if (IS_ERR(sys_clk))
111 		return dev_err_probe(drm_dev->dev, PTR_ERR(sys_clk),
112 				     "failed to get sys clock\n");
113 
114 	mem_clk = devm_clk_get_optional(drm_dev->dev, "mem");
115 	if (IS_ERR(mem_clk))
116 		return dev_err_probe(drm_dev->dev, PTR_ERR(mem_clk),
117 				     "failed to get mem clock\n");
118 
119 	pvr_dev->core_clk = core_clk;
120 	pvr_dev->sys_clk = sys_clk;
121 	pvr_dev->mem_clk = mem_clk;
122 
123 	return 0;
124 }
125 
126 /**
127  * pvr_device_process_active_queues() - Process all queue related events.
128  * @pvr_dev: PowerVR device to check
129  *
130  * This is called any time we receive a FW event. It iterates over all
131  * active queues and calls pvr_queue_process() on them.
132  */
static void pvr_device_process_active_queues(struct pvr_device *pvr_dev)
{
	struct pvr_queue *queue, *tmp_queue;
	LIST_HEAD(active_queues);

	/* queues.lock protects the active list across the splice and walk. */
	mutex_lock(&pvr_dev->queues.lock);

	/* Move all active queues to a temporary list. Queues that remain
	 * active after we're done processing them are re-inserted to
	 * the queues.active list by pvr_queue_process().
	 */
	list_splice_init(&pvr_dev->queues.active, &active_queues);

	/* _safe variant: pvr_queue_process() may unlink the current entry. */
	list_for_each_entry_safe(queue, tmp_queue, &active_queues, node)
		pvr_queue_process(queue);

	mutex_unlock(&pvr_dev->queues.lock);
}
151 
152 static bool pvr_device_safety_irq_pending(struct pvr_device *pvr_dev)
153 {
154 	u32 events;
155 
156 	WARN_ON_ONCE(!pvr_dev->has_safety_events);
157 
158 	events = pvr_cr_read32(pvr_dev, ROGUE_CR_EVENT_STATUS);
159 
160 	return (events & ROGUE_CR_EVENT_STATUS_SAFETY_EN) != 0;
161 }
162 
static void pvr_device_safety_irq_clear(struct pvr_device *pvr_dev)
{
	/* Only meaningful on cores that can raise safety events. */
	WARN_ON_ONCE(!pvr_dev->has_safety_events);

	/* Acknowledge the top-level safety event bit. */
	pvr_cr_write32(pvr_dev, ROGUE_CR_EVENT_CLEAR,
		       ROGUE_CR_EVENT_CLEAR_SAFETY_EN);
}
170 
static void pvr_device_handle_safety_events(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	u32 events;

	/* Only meaningful on cores that can raise safety events. */
	WARN_ON_ONCE(!pvr_dev->has_safety_events);

	events = pvr_cr_read32(pvr_dev, ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE);

	/* Handle only these events on the host and leave the rest to the FW. */
	events &= ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN |
		ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN;

	/* Acknowledge the host-handled events before reporting them. */
	pvr_cr_write32(pvr_dev, ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE, events);

	if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN) {
		/* Read and clear the detailed FW fault status register. */
		u32 fault_fw = pvr_cr_read32(pvr_dev, ROGUE_CR_FAULT_FW_STATUS);

		pvr_cr_write32(pvr_dev, ROGUE_CR_FAULT_FW_CLEAR, fault_fw);

		drm_info(drm_dev, "Safety event: FW fault (mask=0x%08x)\n", fault_fw);
	}

	if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN) {
		/*
		 * The watchdog timer is disabled by the driver so this event
		 * should never be fired.
		 */
		drm_info(drm_dev, "Safety event: Watchdog timeout\n");
	}
}
202 
static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data)
{
	struct pvr_device *pvr_dev = data;
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	irqreturn_t ret = IRQ_NONE;

	/* We are in the threaded handler, we can keep dequeuing events until we
	 * don't see any. This should allow us to reduce the number of interrupts
	 * when the GPU is receiving a massive amount of short jobs.
	 */
	while (pvr_fw_irq_pending(pvr_dev)) {
		pvr_fw_irq_clear(pvr_dev);

		/* The FW CCB and queues are only serviced once the FW has booted. */
		if (pvr_dev->fw_dev.booted) {
			pvr_fwccb_process(pvr_dev);
			pvr_kccb_wake_up_waiters(pvr_dev);
			pvr_device_process_active_queues(pvr_dev);
		}

		/* Refresh the runtime-PM idle timestamp while events arrive. */
		pm_runtime_mark_last_busy(drm_dev->dev);

		ret = IRQ_HANDLED;
	}

	if (pvr_dev->has_safety_events) {
		int err;

		/*
		 * Ensure the GPU is powered on since some safety events (such
		 * as ECC faults) can happen outside of job submissions, which
		 * are otherwise the only time a power reference is held.
		 */
		err = pvr_power_get(pvr_dev);
		if (err) {
			drm_err_ratelimited(drm_dev,
					    "%s: could not take power reference (%d)\n",
					    __func__, err);
			return ret;
		}

		/* Drain safety events the same way as FW events above. */
		while (pvr_device_safety_irq_pending(pvr_dev)) {
			pvr_device_safety_irq_clear(pvr_dev);
			pvr_device_handle_safety_events(pvr_dev);

			ret = IRQ_HANDLED;
		}

		pvr_power_put(pvr_dev);
	}

	return ret;
}
255 
256 static irqreturn_t pvr_device_irq_handler(int irq, void *data)
257 {
258 	struct pvr_device *pvr_dev = data;
259 	bool safety_irq_pending = false;
260 
261 	if (pvr_dev->has_safety_events)
262 		safety_irq_pending = pvr_device_safety_irq_pending(pvr_dev);
263 
264 	if (!pvr_fw_irq_pending(pvr_dev) && !safety_irq_pending)
265 		return IRQ_NONE; /* Spurious IRQ - ignore. */
266 
267 	return IRQ_WAKE_THREAD;
268 }
269 
270 static void pvr_device_safety_irq_init(struct pvr_device *pvr_dev)
271 {
272 	u32 num_ecc_rams = 0;
273 
274 	/*
275 	 * Safety events are an optional feature of the RogueXE platform. They
276 	 * are only enabled if at least one of ECC memory or the watchdog timer
277 	 * are present in HW. While safety events can be generated by other
278 	 * systems, that will never happen if the above mentioned hardware is
279 	 * not present.
280 	 */
281 	if (!PVR_HAS_FEATURE(pvr_dev, roguexe)) {
282 		pvr_dev->has_safety_events = false;
283 		return;
284 	}
285 
286 	PVR_FEATURE_VALUE(pvr_dev, ecc_rams, &num_ecc_rams);
287 
288 	pvr_dev->has_safety_events =
289 		num_ecc_rams > 0 || PVR_HAS_FEATURE(pvr_dev, watchdog_timer);
290 }
291 
292 /**
293  * pvr_device_irq_init() - Initialise IRQ required by a PowerVR device
294  * @pvr_dev: Target PowerVR device.
295  *
296  * Returns:
297  *  * 0 on success,
298  *  * Any error returned by platform_get_irq_byname(), or
299  *  * Any error returned by request_irq().
300  */
301 static int
302 pvr_device_irq_init(struct pvr_device *pvr_dev)
303 {
304 	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
305 	struct platform_device *plat_dev = to_platform_device(drm_dev->dev);
306 
307 	init_waitqueue_head(&pvr_dev->kccb.rtn_q);
308 
309 	pvr_device_safety_irq_init(pvr_dev);
310 
311 	pvr_dev->irq = platform_get_irq(plat_dev, 0);
312 	if (pvr_dev->irq < 0)
313 		return pvr_dev->irq;
314 
315 	/* Clear any pending events before requesting the IRQ line. */
316 	pvr_fw_irq_clear(pvr_dev);
317 
318 	if (pvr_dev->has_safety_events)
319 		pvr_device_safety_irq_clear(pvr_dev);
320 
321 	/*
322 	 * The ONESHOT flag ensures IRQs are masked while the thread handler is
323 	 * running.
324 	 */
325 	return request_threaded_irq(pvr_dev->irq, pvr_device_irq_handler,
326 				    pvr_device_irq_thread_handler,
327 				    IRQF_SHARED | IRQF_ONESHOT, "gpu", pvr_dev);
328 }
329 
330 /**
331  * pvr_device_irq_fini() - Deinitialise IRQ required by a PowerVR device
332  * @pvr_dev: Target PowerVR device.
333  */
334 static void
335 pvr_device_irq_fini(struct pvr_device *pvr_dev)
336 {
337 	free_irq(pvr_dev->irq, pvr_dev);
338 }
339 
340 /**
341  * pvr_build_firmware_filename() - Construct a PowerVR firmware filename
342  * @pvr_dev: Target PowerVR device.
343  * @base: First part of the filename.
344  * @major: Major version number.
345  *
346  * A PowerVR firmware filename consists of three parts separated by underscores
347  * (``'_'``) along with a '.fw' file suffix. The first part is the exact value
348  * of @base, the second part is the hardware version string derived from @pvr_fw
349  * and the final part is the firmware version number constructed from @major with
350  * a 'v' prefix, e.g. powervr/rogue_4.40.2.51_v1.fw.
351  *
352  * The returned string will have been slab allocated and must be freed with
353  * kfree().
354  *
355  * Return:
356  *  * The constructed filename on success, or
357  *  * Any error returned by kasprintf().
358  */
359 static char *
360 pvr_build_firmware_filename(struct pvr_device *pvr_dev, const char *base,
361 			    u8 major)
362 {
363 	struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id;
364 
365 	return kasprintf(GFP_KERNEL, "%s_%d.%d.%d.%d_v%d.fw", base, gpu_id->b,
366 			 gpu_id->v, gpu_id->n, gpu_id->c, major);
367 }
368 
/* devm action callback: drop the firmware reference taken at probe time. */
static void
pvr_release_firmware(void *data)
{
	struct pvr_device *pvr_dev = data;

	release_firmware(pvr_dev->fw_dev.firmware);
}
376 
377 /**
378  * pvr_request_firmware() - Load firmware for a PowerVR device
379  * @pvr_dev: Target PowerVR device.
380  *
381  * See pvr_build_firmware_filename() for details on firmware file naming.
382  *
383  * Return:
384  *  * 0 on success,
385  *  * Any error returned by pvr_build_firmware_filename(), or
386  *  * Any error returned by request_firmware().
387  */
388 static int
389 pvr_request_firmware(struct pvr_device *pvr_dev)
390 {
391 	struct drm_device *drm_dev = &pvr_dev->base;
392 	char *filename;
393 	const struct firmware *fw;
394 	int err;
395 
396 	filename = pvr_build_firmware_filename(pvr_dev, "powervr/rogue",
397 					       PVR_FW_VERSION_MAJOR);
398 	if (!filename)
399 		return -ENOMEM;
400 
401 	/*
402 	 * This function takes a copy of &filename, meaning we can free our
403 	 * instance before returning.
404 	 */
405 	err = request_firmware(&fw, filename, pvr_dev->base.dev);
406 	if (err) {
407 		drm_err(drm_dev, "failed to load firmware %s (err=%d)\n",
408 			filename, err);
409 		goto err_free_filename;
410 	}
411 
412 	drm_info(drm_dev, "loaded firmware %s\n", filename);
413 	kfree(filename);
414 
415 	pvr_dev->fw_dev.firmware = fw;
416 
417 	return devm_add_action_or_reset(drm_dev->dev, pvr_release_firmware, pvr_dev);
418 
419 err_free_filename:
420 	kfree(filename);
421 
422 	return err;
423 }
424 
425 /**
426  * pvr_gpuid_decode_reg() - Decode the GPU ID from GPU register
427  *
428  * Sets the b, v, n, c fields of struct pvr_dev.gpu_id.
429  *
430  * @pvr_dev: Target PowerVR device.
431  * @gpu_id: Output to be updated with the GPU ID.
432  */
433 static void
434 pvr_gpuid_decode_reg(const struct pvr_device *pvr_dev, struct pvr_gpu_id *gpu_id)
435 {
436 	/*
437 	 * Try reading the BVNC using the newer (cleaner) method first. If the
438 	 * B value is zero, fall back to the older method.
439 	 */
440 	u64 bvnc = pvr_cr_read64(pvr_dev, ROGUE_CR_CORE_ID__PBVNC);
441 
442 	gpu_id->b = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__BRANCH_ID);
443 	if (gpu_id->b != 0) {
444 		gpu_id->v = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__VERSION_ID);
445 		gpu_id->n = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS);
446 		gpu_id->c = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__CONFIG_ID);
447 	} else {
448 		u32 core_rev = pvr_cr_read32(pvr_dev, ROGUE_CR_CORE_REVISION);
449 		u32 core_id = pvr_cr_read32(pvr_dev, ROGUE_CR_CORE_ID);
450 		u16 core_id_config = PVR_CR_FIELD_GET(core_id, CORE_ID_CONFIG);
451 
452 		gpu_id->b = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MAJOR);
453 		gpu_id->v = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MINOR);
454 		gpu_id->n = FIELD_GET(0xFF00, core_id_config);
455 		gpu_id->c = FIELD_GET(0x00FF, core_id_config);
456 	}
457 }
458 
459 /**
460  * pvr_gpuid_decode_string() - Decode the GPU ID from a module input string
461  *
462  * Sets the b, v, n, c fields of struct pvr_dev.gpu_id.
463  *
464  * @pvr_dev: Target PowerVR device.
465  * @param_bvnc: GPU ID (BVNC) module parameter.
466  * @gpu_id: Output to be updated with the GPU ID.
467  */
468 VISIBLE_IF_KUNIT int
469 pvr_gpuid_decode_string(const struct pvr_device *pvr_dev,
470 			const char *param_bvnc, struct pvr_gpu_id *gpu_id)
471 {
472 	const struct drm_device *drm_dev = &pvr_dev->base;
473 	char str_cpy[PVR_GPUID_STRING_MAX_LENGTH];
474 	char *pos, *tkn;
475 	int ret, idx = 0;
476 	u16 user_bvnc_u16[4];
477 	u8 dot_cnt = 0;
478 
479 	ret = strscpy(str_cpy, param_bvnc);
480 
481 	/*
482 	 * strscpy() should return at least a size 7 for the input to be valid.
483 	 * Returns -E2BIG for the case when the string is empty or too long.
484 	 */
485 	if (ret < PVR_GPUID_STRING_MIN_LENGTH) {
486 		drm_info(drm_dev,
487 			 "Invalid size of the input GPU ID (BVNC): %s",
488 			 str_cpy);
489 		return -EINVAL;
490 	}
491 
492 	while (*param_bvnc) {
493 		if (*param_bvnc == '.')
494 			dot_cnt++;
495 		param_bvnc++;
496 	}
497 
498 	if (dot_cnt != 3) {
499 		drm_info(drm_dev,
500 			 "Invalid format of the input GPU ID (BVNC): %s",
501 			 str_cpy);
502 		return -EINVAL;
503 	}
504 
505 	pos = str_cpy;
506 
507 	while ((tkn = strsep(&pos, ".")) != NULL && idx < 4) {
508 		/* kstrtou16() will also handle the case of consecutive dots */
509 		ret = kstrtou16(tkn, 10, &user_bvnc_u16[idx]);
510 		if (ret) {
511 			drm_info(drm_dev,
512 				 "Invalid format of the input GPU ID (BVNC): %s",
513 				 str_cpy);
514 			return -EINVAL;
515 		}
516 		idx++;
517 	}
518 
519 	gpu_id->b = user_bvnc_u16[0];
520 	gpu_id->v = user_bvnc_u16[1];
521 	gpu_id->n = user_bvnc_u16[2];
522 	gpu_id->c = user_bvnc_u16[3];
523 
524 	return 0;
525 }
526 EXPORT_SYMBOL_IF_KUNIT(pvr_gpuid_decode_string);
527 
/* Opt-in override allowing experimental or unknown GPU cores to probe. */
static bool pvr_exp_hw_support;
module_param_named(exp_hw_support, pvr_exp_hw_support, bool, 0600);
MODULE_PARM_DESC(exp_hw_support, "Bypass runtime checks for fully supported GPU cores. WARNING: enabling this option may result in a buggy, insecure, or otherwise unusable driver.");
531 
532 /**
533  * enum pvr_gpu_support_level - The level of support for a gpu_id in the current
534  * version of the driver.
535  *
536  * @PVR_GPU_UNKNOWN: Cores that are unknown to the driver. These may not even exist.
537  * @PVR_GPU_EXPERIMENTAL: Cores that have experimental support.
538  * @PVR_GPU_SUPPORTED: Cores that are supported and maintained.
539  */
540 enum pvr_gpu_support_level {
541 	PVR_GPU_UNKNOWN,
542 	PVR_GPU_EXPERIMENTAL,
543 	PVR_GPU_SUPPORTED,
544 };
545 
/* Map a GPU ID (BVNC) to the driver's support level for that core. */
static enum pvr_gpu_support_level
pvr_gpu_support_level(const struct pvr_gpu_id *gpu_id)
{
	switch (pvr_gpu_id_to_packed_bvnc(gpu_id)) {
	case PVR_PACKED_BVNC(33, 15, 11, 3):
	case PVR_PACKED_BVNC(36, 53, 104, 796):
		return PVR_GPU_SUPPORTED;

	case PVR_PACKED_BVNC(36, 52, 104, 182):
		return PVR_GPU_EXPERIMENTAL;

	default:
		return PVR_GPU_UNKNOWN;
	}
}
561 
562 static int
563 pvr_check_gpu_supported(struct pvr_device *pvr_dev,
564 			const struct pvr_gpu_id *gpu_id)
565 {
566 	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
567 
568 	switch (pvr_gpu_support_level(gpu_id)) {
569 	case PVR_GPU_SUPPORTED:
570 		if (pvr_exp_hw_support)
571 			drm_info(drm_dev, "Module parameter 'exp_hw_support' was set, but this hardware is fully supported by the current driver.");
572 
573 		break;
574 
575 	case PVR_GPU_EXPERIMENTAL:
576 		if (!pvr_exp_hw_support) {
577 			drm_err(drm_dev, "Unsupported GPU! Set 'exp_hw_support' to bypass this check.");
578 			return -ENODEV;
579 		}
580 
581 		drm_warn(drm_dev, "Running on unsupported hardware; you may encounter bugs!");
582 		break;
583 
584 	/* NOTE: This code path may indicate misbehaving hardware. */
585 	case PVR_GPU_UNKNOWN:
586 	default:
587 		if (!pvr_exp_hw_support) {
588 			drm_err(drm_dev, "Unknown GPU! Set 'exp_hw_support' to bypass this check.");
589 			return -ENODEV;
590 		}
591 
592 		drm_warn(drm_dev, "Running on unknown hardware; expect issues.");
593 		break;
594 	}
595 
596 	return 0;
597 }
598 
/* Optional "B.V.N.C" string used instead of the GPU ID read from hardware. */
static char *pvr_gpuid_override;
module_param_named(gpuid, pvr_gpuid_override, charp, 0400);
MODULE_PARM_DESC(gpuid, "GPU ID (BVNC) to be used instead of the value read from hardware.");
602 
603 /**
604  * pvr_load_gpu_id() - Load a PowerVR device's GPU ID (BVNC) from control
605  * registers or input parameter. The input parameter is processed instead
606  * of the GPU register if provided.
607  *
608  * Sets the arch field of struct pvr_dev.gpu_id.
609  *
610  * @pvr_dev: Target PowerVR device.
611  */
612 static int
613 pvr_load_gpu_id(struct pvr_device *pvr_dev)
614 {
615 	struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id;
616 
617 	if (!pvr_gpuid_override || !pvr_gpuid_override[0]) {
618 		pvr_gpuid_decode_reg(pvr_dev, gpu_id);
619 	} else {
620 		drm_warn(from_pvr_device(pvr_dev),
621 			 "Using custom GPU ID (BVNC) provided by the user!");
622 
623 		int err = pvr_gpuid_decode_string(pvr_dev, pvr_gpuid_override,
624 						  gpu_id);
625 		if (err)
626 			return err;
627 	}
628 
629 	return pvr_check_gpu_supported(pvr_dev, gpu_id);
630 }
631 
632 /**
633  * pvr_set_dma_info() - Set PowerVR device DMA information
634  * @pvr_dev: Target PowerVR device.
635  *
636  * Sets the DMA mask and max segment size for the PowerVR device.
637  *
638  * Return:
639  *  * 0 on success,
640  *  * Any error returned by PVR_FEATURE_VALUE(), or
641  *  * Any error returned by dma_set_mask().
642  */
643 
644 static int
645 pvr_set_dma_info(struct pvr_device *pvr_dev)
646 {
647 	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
648 	u16 phys_bus_width;
649 	int err;
650 
651 	err = PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width);
652 	if (err) {
653 		drm_err(drm_dev, "Failed to get device physical bus width\n");
654 		return err;
655 	}
656 
657 	err = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(phys_bus_width));
658 	if (err) {
659 		drm_err(drm_dev, "Failed to set DMA mask (err=%d)\n", err);
660 		return err;
661 	}
662 
663 	dma_set_max_seg_size(drm_dev->dev, UINT_MAX);
664 
665 	return 0;
666 }
667 
668 /**
669  * pvr_device_gpu_init() - GPU-specific initialization for a PowerVR device
670  * @pvr_dev: Target PowerVR device.
671  *
672  * The following steps are taken to ensure the device is ready:
673  *
674  *  1. Read the hardware version information from control registers,
675  *  2. Initialise the hardware feature information,
676  *  3. Setup the device DMA information,
677  *  4. Setup the device-scoped memory context, and
678  *  5. Load firmware into the device.
679  *
680  * Return:
681  *  * 0 on success,
682  *  * -%ENODEV if the GPU is not supported,
683  *  * Any error returned by pvr_set_dma_info(),
684  *  * Any error returned by pvr_memory_context_init(), or
685  *  * Any error returned by pvr_request_firmware().
686  */
687 static int
688 pvr_device_gpu_init(struct pvr_device *pvr_dev)
689 {
690 	int err;
691 
692 	err = pvr_load_gpu_id(pvr_dev);
693 	if (err)
694 		return err;
695 
696 	err = pvr_request_firmware(pvr_dev);
697 	if (err)
698 		return err;
699 
700 	err = pvr_fw_validate_init_device_info(pvr_dev);
701 	if (err)
702 		return err;
703 
704 	if (PVR_HAS_FEATURE(pvr_dev, meta))
705 		pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_META;
706 	else if (PVR_HAS_FEATURE(pvr_dev, mips))
707 		pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_MIPS;
708 	else if (PVR_HAS_FEATURE(pvr_dev, riscv_fw_processor))
709 		pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_RISCV;
710 	else
711 		return -EINVAL;
712 
713 	pvr_stream_create_musthave_masks(pvr_dev);
714 
715 	err = pvr_set_dma_info(pvr_dev);
716 	if (err)
717 		return err;
718 
719 	if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
720 		pvr_dev->kernel_vm_ctx = pvr_vm_create_context(pvr_dev, false);
721 		if (IS_ERR(pvr_dev->kernel_vm_ctx))
722 			return PTR_ERR(pvr_dev->kernel_vm_ctx);
723 	}
724 
725 	err = pvr_fw_init(pvr_dev);
726 	if (err)
727 		goto err_vm_ctx_put;
728 
729 	return 0;
730 
731 err_vm_ctx_put:
732 	if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
733 		pvr_vm_context_put(pvr_dev->kernel_vm_ctx);
734 		pvr_dev->kernel_vm_ctx = NULL;
735 	}
736 
737 	return err;
738 }
739 
740 /**
741  * pvr_device_gpu_fini() - GPU-specific deinitialization for a PowerVR device
742  * @pvr_dev: Target PowerVR device.
743  */
744 static void
745 pvr_device_gpu_fini(struct pvr_device *pvr_dev)
746 {
747 	pvr_fw_fini(pvr_dev);
748 
749 	if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
750 		WARN_ON(!pvr_vm_context_put(pvr_dev->kernel_vm_ctx));
751 		pvr_dev->kernel_vm_ctx = NULL;
752 	}
753 }
754 
755 /**
756  * pvr_device_init() - Initialize a PowerVR device
757  * @pvr_dev: Target PowerVR device.
758  *
759  * If this function returns successfully, the device will have been fully
760  * initialized. Otherwise, any parts of the device initialized before an error
761  * occurs will be de-initialized before returning.
762  *
763  * NOTE: The initialization steps currently taken are the bare minimum required
764  *       to read from the control registers. The device is unlikely to function
765  *       until further initialization steps are added. [This note should be
766  *       removed when that happens.]
767  *
768  * Return:
769  *  * 0 on success,
770  *  * Any error returned by pvr_device_reg_init(),
771  *  * Any error returned by pvr_device_clk_init(), or
772  *  * Any error returned by pvr_device_gpu_init().
773  */
774 int
775 pvr_device_init(struct pvr_device *pvr_dev)
776 {
777 	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
778 	struct device *dev = drm_dev->dev;
779 	int err;
780 
781 	/* Get the platform-specific data based on the compatible string. */
782 	pvr_dev->device_data = of_device_get_match_data(dev);
783 
784 	/* Enable and initialize clocks required for the device to operate. */
785 	err = pvr_device_clk_init(pvr_dev);
786 	if (err)
787 		return err;
788 
789 	err = pvr_dev->device_data->pwr_ops->init(pvr_dev);
790 	if (err)
791 		return err;
792 
793 	/* Explicitly power the GPU so we can access control registers before the FW is booted. */
794 	err = pm_runtime_resume_and_get(dev);
795 	if (err)
796 		return err;
797 
798 	/* Map the control registers into memory. */
799 	err = pvr_device_reg_init(pvr_dev);
800 	if (err)
801 		goto err_pm_runtime_put;
802 
803 	/* Perform GPU-specific initialization steps. */
804 	err = pvr_device_gpu_init(pvr_dev);
805 	if (err)
806 		goto err_pm_runtime_put;
807 
808 	err = pvr_device_irq_init(pvr_dev);
809 	if (err)
810 		goto err_device_gpu_fini;
811 
812 	pm_runtime_put(dev);
813 
814 	return 0;
815 
816 err_device_gpu_fini:
817 	pvr_device_gpu_fini(pvr_dev);
818 
819 err_pm_runtime_put:
820 	pm_runtime_put_sync_suspend(dev);
821 
822 	return err;
823 }
824 
825 /**
826  * pvr_device_fini() - Deinitialize a PowerVR device
827  * @pvr_dev: Target PowerVR device.
828  */
829 void
830 pvr_device_fini(struct pvr_device *pvr_dev)
831 {
832 	/*
833 	 * Deinitialization stages are performed in reverse order compared to
834 	 * the initialization stages in pvr_device_init().
835 	 */
836 	pvr_device_irq_fini(pvr_dev);
837 	pvr_device_gpu_fini(pvr_dev);
838 }
839 
840 bool
841 pvr_device_has_uapi_quirk(struct pvr_device *pvr_dev, u32 quirk)
842 {
843 	switch (quirk) {
844 	case 47217:
845 		return PVR_HAS_QUIRK(pvr_dev, 47217);
846 	case 48545:
847 		return PVR_HAS_QUIRK(pvr_dev, 48545);
848 	case 49927:
849 		return PVR_HAS_QUIRK(pvr_dev, 49927);
850 	case 51764:
851 		return PVR_HAS_QUIRK(pvr_dev, 51764);
852 	case 62269:
853 		return PVR_HAS_QUIRK(pvr_dev, 62269);
854 	default:
855 		return false;
856 	};
857 }
858 
859 bool
860 pvr_device_has_uapi_enhancement(struct pvr_device *pvr_dev, u32 enhancement)
861 {
862 	switch (enhancement) {
863 	case 35421:
864 		return PVR_HAS_ENHANCEMENT(pvr_dev, 35421);
865 	case 42064:
866 		return PVR_HAS_ENHANCEMENT(pvr_dev, 42064);
867 	default:
868 		return false;
869 	};
870 }
871 
872 /**
873  * pvr_device_has_feature() - Look up device feature based on feature definition
874  * @pvr_dev: Device pointer.
875  * @feature: Feature to look up. Should be one of %PVR_FEATURE_*.
876  *
877  * Returns:
878  *  * %true if feature is present on device, or
879  *  * %false if feature is not present on device.
880  */
881 bool
882 pvr_device_has_feature(struct pvr_device *pvr_dev, u32 feature)
883 {
884 	switch (feature) {
885 	case PVR_FEATURE_CLUSTER_GROUPING:
886 		return PVR_HAS_FEATURE(pvr_dev, cluster_grouping);
887 
888 	case PVR_FEATURE_COMPUTE_MORTON_CAPABLE:
889 		return PVR_HAS_FEATURE(pvr_dev, compute_morton_capable);
890 
891 	case PVR_FEATURE_FB_CDC_V4:
892 		return PVR_HAS_FEATURE(pvr_dev, fb_cdc_v4);
893 
894 	case PVR_FEATURE_GPU_MULTICORE_SUPPORT:
895 		return PVR_HAS_FEATURE(pvr_dev, gpu_multicore_support);
896 
897 	case PVR_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE:
898 		return PVR_HAS_FEATURE(pvr_dev, isp_zls_d24_s8_packing_ogl_mode);
899 
900 	case PVR_FEATURE_S7_TOP_INFRASTRUCTURE:
901 		return PVR_HAS_FEATURE(pvr_dev, s7_top_infrastructure);
902 
903 	case PVR_FEATURE_TESSELLATION:
904 		return PVR_HAS_FEATURE(pvr_dev, tessellation);
905 
906 	case PVR_FEATURE_TPU_DM_GLOBAL_REGISTERS:
907 		return PVR_HAS_FEATURE(pvr_dev, tpu_dm_global_registers);
908 
909 	case PVR_FEATURE_VDM_DRAWINDIRECT:
910 		return PVR_HAS_FEATURE(pvr_dev, vdm_drawindirect);
911 
912 	case PVR_FEATURE_VDM_OBJECT_LEVEL_LLS:
913 		return PVR_HAS_FEATURE(pvr_dev, vdm_object_level_lls);
914 
915 	case PVR_FEATURE_ZLS_SUBTILE:
916 		return PVR_HAS_FEATURE(pvr_dev, zls_subtile);
917 
918 	/* Derived features. */
919 	case PVR_FEATURE_CDM_USER_MODE_QUEUE: {
920 		u8 cdm_control_stream_format = 0;
921 
922 		PVR_FEATURE_VALUE(pvr_dev, cdm_control_stream_format, &cdm_control_stream_format);
923 		return (cdm_control_stream_format >= 2 && cdm_control_stream_format <= 4);
924 	}
925 
926 	case PVR_FEATURE_REQUIRES_FB_CDC_ZLS_SETUP:
927 		if (PVR_HAS_FEATURE(pvr_dev, fbcdc_algorithm)) {
928 			u8 fbcdc_algorithm = 0;
929 
930 			PVR_FEATURE_VALUE(pvr_dev, fbcdc_algorithm, &fbcdc_algorithm);
931 			return (fbcdc_algorithm < 3 || PVR_HAS_FEATURE(pvr_dev, fb_cdc_v4));
932 		}
933 		return false;
934 
935 	default:
936 		WARN(true, "Looking up undefined feature %u\n", feature);
937 		return false;
938 	}
939 }
940