// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_device.h"
#include "pvr_device_info.h"

#include "pvr_fw.h"
#include "pvr_params.h"
#include "pvr_power.h"
#include "pvr_queue.h"
#include "pvr_rogue_cr_defs.h"
#include "pvr_stream.h"
#include "pvr_vm.h"

#include <drm/drm_print.h>

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/compiler_attributes.h>
#include <linux/compiler_types.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/* Major number for the supported version of the firmware. */
#define PVR_FW_VERSION_MAJOR 1

/**
 * pvr_device_reg_init() - Initialize kernel access to a PowerVR device's
 * control registers.
 * @pvr_dev: Target PowerVR device.
 *
 * Sets struct pvr_device->regs.
 *
 * This method of mapping the device control registers into memory ensures that
 * they are unmapped when the driver is detached (i.e. no explicit cleanup is
 * required).
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by devm_platform_get_and_ioremap_resource().
 */
static int
pvr_device_reg_init(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct platform_device *plat_dev = to_platform_device(drm_dev->dev);
	struct resource *regs_resource;
	void __iomem *regs;

	pvr_dev->regs_resource = NULL;
	pvr_dev->regs = NULL;

	regs = devm_platform_get_and_ioremap_resource(plat_dev, 0, &regs_resource);
	if (IS_ERR(regs))
		return dev_err_probe(drm_dev->dev, PTR_ERR(regs),
				     "failed to ioremap gpu registers\n");

	pvr_dev->regs = regs;
	pvr_dev->regs_resource = regs_resource;

	return 0;
}
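
/*
 * Because the mapping above is devm-managed, register accessors can simply
 * offset into pvr_dev->regs without any lifetime handling of their own. A
 * minimal sketch of such an accessor, assuming the mapping established here
 * (the real pvr_cr_read32()/pvr_cr_write32() helpers are declared elsewhere
 * in the driver and may differ):
 *
 *	static __always_inline u32
 *	example_cr_read32(struct pvr_device *pvr_dev, u32 reg)
 *	{
 *		return ioread32(pvr_dev->regs + reg);
 *	}
 */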

/**
 * pvr_device_clk_init() - Initialize clocks required by a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * Sets struct pvr_device->core_clk, struct pvr_device->sys_clk and
 * struct pvr_device->mem_clk.
 *
 * Three clocks are used by the PowerVR device: core, sys and mem. Only the
 * core clock is mandatory; sys and mem are optional. On return, this function
 * guarantees that the clocks are in one of the following states:
 *
 *  * All successfully initialized,
 *  * Core errored, sys and mem uninitialized,
 *  * Core deinitialized, sys errored, mem uninitialized, or
 *  * Core and sys deinitialized, mem errored.
 *
 * Return:
 *  * 0 on success,
 *  * Any error returned by devm_clk_get(), or
 *  * Any error returned by devm_clk_get_optional().
 */
static int pvr_device_clk_init(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct clk *core_clk;
	struct clk *sys_clk;
	struct clk *mem_clk;

	core_clk = devm_clk_get(drm_dev->dev, "core");
	if (IS_ERR(core_clk))
		return dev_err_probe(drm_dev->dev, PTR_ERR(core_clk),
				     "failed to get core clock\n");

	sys_clk = devm_clk_get_optional(drm_dev->dev, "sys");
	if (IS_ERR(sys_clk))
		return dev_err_probe(drm_dev->dev, PTR_ERR(sys_clk),
				     "failed to get sys clock\n");

	mem_clk = devm_clk_get_optional(drm_dev->dev, "mem");
	if (IS_ERR(mem_clk))
		return dev_err_probe(drm_dev->dev, PTR_ERR(mem_clk),
				     "failed to get mem clock\n");

	pvr_dev->core_clk = core_clk;
	pvr_dev->sys_clk = sys_clk;
	pvr_dev->mem_clk = mem_clk;

	return 0;
}
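
/*
 * The clocks returned by devm_clk_get() and devm_clk_get_optional() are
 * neither prepared nor enabled; turning them on is left to the power
 * management code. An illustrative power-up sequence, assuming this is done
 * elsewhere in the driver (clk_prepare_enable() treats a NULL clock as a
 * no-op, so the optional clocks need no special casing):
 *
 *	err = clk_prepare_enable(pvr_dev->core_clk);
 *	if (err)
 *		return err;
 *
 *	err = clk_prepare_enable(pvr_dev->sys_clk);
 *	if (err)
 *		goto err_disable_core;
 *
 *	err = clk_prepare_enable(pvr_dev->mem_clk);
 *	if (err)
 *		goto err_disable_sys;
 */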

static int pvr_device_reset_init(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct reset_control *reset;

	reset = devm_reset_control_get_optional_exclusive(drm_dev->dev, NULL);
	if (IS_ERR(reset))
		return dev_err_probe(drm_dev->dev, PTR_ERR(reset),
				     "failed to get gpu reset line\n");

	pvr_dev->reset = reset;

	return 0;
}
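
/*
 * devm_reset_control_get_optional_exclusive() returns NULL when no reset
 * line is described for the device; the reset_control_*() API treats a NULL
 * handle as a no-op, so a hypothetical power-on path could use the handle
 * unconditionally:
 *
 *	err = reset_control_reset(pvr_dev->reset);
 *	if (err)
 *		return err;
 */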

/**
 * pvr_device_process_active_queues() - Process all queue-related events.
 * @pvr_dev: PowerVR device to check.
 *
 * This is called any time we receive a FW event. It iterates over all
 * active queues and calls pvr_queue_process() on them.
 */
static void pvr_device_process_active_queues(struct pvr_device *pvr_dev)
{
	struct pvr_queue *queue, *tmp_queue;
	LIST_HEAD(active_queues);

	mutex_lock(&pvr_dev->queues.lock);

	/* Move all active queues to a temporary list. Queues that remain
	 * active after we're done processing them are re-inserted to
	 * the queues.active list by pvr_queue_process().
	 */
	list_splice_init(&pvr_dev->queues.active, &active_queues);

	list_for_each_entry_safe(queue, tmp_queue, &active_queues, node)
		pvr_queue_process(queue);

	mutex_unlock(&pvr_dev->queues.lock);
}
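
/*
 * Splicing onto a local list head before iterating is the standard pattern
 * when the per-entry callback may re-insert entries into the source list;
 * iterating pvr_dev->queues.active directly while pvr_queue_process()
 * re-adds still-active queues to it could revisit the same queue forever.
 * The generic shape of the pattern:
 *
 *	LIST_HEAD(tmp);
 *
 *	list_splice_init(&src, &tmp);
 *	list_for_each_entry_safe(entry, next, &tmp, node)
 *		process(entry);		(may re-add entry to &src)
 */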

static bool pvr_device_safety_irq_pending(struct pvr_device *pvr_dev)
{
	u32 events;

	WARN_ON_ONCE(!pvr_dev->has_safety_events);

	events = pvr_cr_read32(pvr_dev, ROGUE_CR_EVENT_STATUS);

	return (events & ROGUE_CR_EVENT_STATUS_SAFETY_EN) != 0;
}

static void pvr_device_safety_irq_clear(struct pvr_device *pvr_dev)
{
	WARN_ON_ONCE(!pvr_dev->has_safety_events);

	pvr_cr_write32(pvr_dev, ROGUE_CR_EVENT_CLEAR,
		       ROGUE_CR_EVENT_CLEAR_SAFETY_EN);
}

static void pvr_device_handle_safety_events(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	u32 events;

	WARN_ON_ONCE(!pvr_dev->has_safety_events);

	events = pvr_cr_read32(pvr_dev, ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE);

	/* Handle only these events on the host and leave the rest to the FW. */
	events &= ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN |
		ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN;

	pvr_cr_write32(pvr_dev, ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE, events);

	if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN) {
		u32 fault_fw = pvr_cr_read32(pvr_dev, ROGUE_CR_FAULT_FW_STATUS);

		pvr_cr_write32(pvr_dev, ROGUE_CR_FAULT_FW_CLEAR, fault_fw);

		drm_info(drm_dev, "Safety event: FW fault (mask=0x%08x)\n", fault_fw);
	}

	if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN) {
		/*
		 * The watchdog timer is disabled by the driver, so this event
		 * should never fire.
		 */
		drm_info(drm_dev, "Safety event: Watchdog timeout\n");
	}
}

static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data)
{
	struct pvr_device *pvr_dev = data;
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	irqreturn_t ret = IRQ_NONE;

	/* Since we are in the threaded handler, we can keep dequeuing events
	 * until none are pending. This should reduce the number of interrupts
	 * when the GPU is processing a large number of short jobs.
	 */
	while (pvr_fw_irq_pending(pvr_dev)) {
		pvr_fw_irq_clear(pvr_dev);

		if (pvr_dev->fw_dev.booted) {
			pvr_fwccb_process(pvr_dev);
			pvr_kccb_wake_up_waiters(pvr_dev);
			pvr_device_process_active_queues(pvr_dev);
		}

		pm_runtime_mark_last_busy(drm_dev->dev);

		ret = IRQ_HANDLED;
	}

	if (pvr_dev->has_safety_events) {
		int err;

		/*
		 * Ensure the GPU is powered on since some safety events (such
		 * as ECC faults) can happen outside of job submissions, which
		 * are otherwise the only time a power reference is held.
		 */
		err = pvr_power_get(pvr_dev);
		if (err) {
			drm_err_ratelimited(drm_dev,
					    "%s: could not take power reference (%d)\n",
					    __func__, err);
			return ret;
		}

		while (pvr_device_safety_irq_pending(pvr_dev)) {
			pvr_device_safety_irq_clear(pvr_dev);
			pvr_device_handle_safety_events(pvr_dev);

			ret = IRQ_HANDLED;
		}

		pvr_power_put(pvr_dev);
	}

	return ret;
}

static irqreturn_t pvr_device_irq_handler(int irq, void *data)
{
	struct pvr_device *pvr_dev = data;
	bool safety_irq_pending = false;

	if (pvr_dev->has_safety_events)
		safety_irq_pending = pvr_device_safety_irq_pending(pvr_dev);

	if (!pvr_fw_irq_pending(pvr_dev) && !safety_irq_pending)
		return IRQ_NONE; /* Spurious IRQ - ignore. */

	return IRQ_WAKE_THREAD;
}

static void pvr_device_safety_irq_init(struct pvr_device *pvr_dev)
{
	u32 num_ecc_rams = 0;

	/*
	 * Safety events are an optional feature of the RogueXE platform. They
	 * are only enabled if at least one of ECC memory or the watchdog timer
	 * is present in hardware. While safety events can be generated by
	 * other systems, that will never happen if the above-mentioned
	 * hardware is not present.
	 */
	if (!PVR_HAS_FEATURE(pvr_dev, roguexe)) {
		pvr_dev->has_safety_events = false;
		return;
	}

	PVR_FEATURE_VALUE(pvr_dev, ecc_rams, &num_ecc_rams);

	pvr_dev->has_safety_events =
		num_ecc_rams > 0 || PVR_HAS_FEATURE(pvr_dev, watchdog_timer);
}

/**
 * pvr_device_irq_init() - Initialize the IRQ required by a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * Return:
 *  * 0 on success,
 *  * Any error returned by platform_get_irq(), or
 *  * Any error returned by request_threaded_irq().
 */
static int
pvr_device_irq_init(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct platform_device *plat_dev = to_platform_device(drm_dev->dev);

	init_waitqueue_head(&pvr_dev->kccb.rtn_q);

	pvr_device_safety_irq_init(pvr_dev);

	pvr_dev->irq = platform_get_irq(plat_dev, 0);
	if (pvr_dev->irq < 0)
		return pvr_dev->irq;

	/* Clear any pending events before requesting the IRQ line. */
	pvr_fw_irq_clear(pvr_dev);

	if (pvr_dev->has_safety_events)
		pvr_device_safety_irq_clear(pvr_dev);

	/*
	 * The ONESHOT flag ensures IRQs are masked while the thread handler is
	 * running.
	 */
	return request_threaded_irq(pvr_dev->irq, pvr_device_irq_handler,
				    pvr_device_irq_thread_handler,
				    IRQF_SHARED | IRQF_ONESHOT, "gpu", pvr_dev);
}
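
/*
 * Note that IRQF_ONESHOT keeps the interrupt line masked from the moment
 * pvr_device_irq_handler() returns IRQ_WAKE_THREAD until
 * pvr_device_irq_thread_handler() completes, so a level-triggered line
 * cannot retrigger while the thread is running. A devm-managed request
 * would look like the sketch below (illustrative only; this driver frees
 * the IRQ explicitly in pvr_device_irq_fini() instead):
 *
 *	err = devm_request_threaded_irq(drm_dev->dev, pvr_dev->irq,
 *					pvr_device_irq_handler,
 *					pvr_device_irq_thread_handler,
 *					IRQF_SHARED | IRQF_ONESHOT,
 *					"gpu", pvr_dev);
 */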

/**
 * pvr_device_irq_fini() - Deinitialize the IRQ required by a PowerVR device
 * @pvr_dev: Target PowerVR device.
 */
static void
pvr_device_irq_fini(struct pvr_device *pvr_dev)
{
	free_irq(pvr_dev->irq, pvr_dev);
}

/**
 * pvr_build_firmware_filename() - Construct a PowerVR firmware filename
 * @pvr_dev: Target PowerVR device.
 * @base: First part of the filename.
 * @major: Major version number.
 *
 * A PowerVR firmware filename consists of three parts separated by underscores
 * (``'_'``) along with a '.fw' file suffix. The first part is the exact value
 * of @base, the second part is the hardware version string derived from
 * @pvr_dev and the final part is the firmware version number constructed from
 * @major with a 'v' prefix, e.g. powervr/rogue_4.40.2.51_v1.fw.
 *
 * The returned string will have been slab allocated and must be freed with
 * kfree().
 *
 * Return:
 *  * The constructed filename on success, or
 *  * NULL if the allocation in kasprintf() fails.
 */
static char *
pvr_build_firmware_filename(struct pvr_device *pvr_dev, const char *base,
			    u8 major)
{
	struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id;

	return kasprintf(GFP_KERNEL, "%s_%d.%d.%d.%d_v%d.fw", base, gpu_id->b,
			 gpu_id->v, gpu_id->n, gpu_id->c, major);
}
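
/*
 * For example, a hypothetical GPU ID of 33.15.11.3 combined with
 * PVR_FW_VERSION_MAJOR (currently 1) produces
 * "powervr/rogue_33.15.11.3_v1.fw":
 *
 *	filename = pvr_build_firmware_filename(pvr_dev, "powervr/rogue",
 *					       PVR_FW_VERSION_MAJOR);
 *	...
 *	kfree(filename);
 */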

static void
pvr_release_firmware(void *data)
{
	struct pvr_device *pvr_dev = data;

	release_firmware(pvr_dev->fw_dev.firmware);
}

/**
 * pvr_request_firmware() - Load firmware for a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * See pvr_build_firmware_filename() for details on firmware file naming.
 *
 * Return:
 *  * 0 on success,
 *  * -%ENOMEM if pvr_build_firmware_filename() fails, or
 *  * Any error returned by request_firmware().
 */
static int
pvr_request_firmware(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = &pvr_dev->base;
	char *filename;
	const struct firmware *fw;
	int err;

	filename = pvr_build_firmware_filename(pvr_dev, "powervr/rogue",
					       PVR_FW_VERSION_MAJOR);
	if (!filename)
		return -ENOMEM;

	/*
	 * request_firmware() takes its own copy of the filename, meaning we
	 * can free our instance before returning.
	 */
	err = request_firmware(&fw, filename, pvr_dev->base.dev);
	if (err) {
		drm_err(drm_dev, "failed to load firmware %s (err=%d)\n",
			filename, err);
		goto err_free_filename;
	}

	drm_info(drm_dev, "loaded firmware %s\n", filename);
	kfree(filename);

	pvr_dev->fw_dev.firmware = fw;

	return devm_add_action_or_reset(drm_dev->dev, pvr_release_firmware, pvr_dev);

err_free_filename:
	kfree(filename);

	return err;
}
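
/*
 * devm_add_action_or_reset() registers pvr_release_firmware() to run when
 * the device is unbound; if the registration itself fails, it calls the
 * action immediately, so the firmware blob is never leaked. Open-coded, the
 * tail of pvr_request_firmware() is roughly equivalent to:
 *
 *	err = devm_add_action(drm_dev->dev, pvr_release_firmware, pvr_dev);
 *	if (err)
 *		pvr_release_firmware(pvr_dev);
 *	return err;
 */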

/**
 * pvr_load_gpu_id() - Load a PowerVR device's GPU ID (BVNC) from control registers.
 * @pvr_dev: Target PowerVR device.
 *
 * Sets struct pvr_device->gpu_id.
 */
static void
pvr_load_gpu_id(struct pvr_device *pvr_dev)
{
	struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id;
	u64 bvnc;

	/*
	 * Try reading the BVNC using the newer (cleaner) method first. If the
	 * B value is zero, fall back to the older method.
	 */
	bvnc = pvr_cr_read64(pvr_dev, ROGUE_CR_CORE_ID__PBVNC);

	gpu_id->b = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__BRANCH_ID);
	if (gpu_id->b != 0) {
		gpu_id->v = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__VERSION_ID);
		gpu_id->n = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS);
		gpu_id->c = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__CONFIG_ID);
	} else {
		u32 core_rev = pvr_cr_read32(pvr_dev, ROGUE_CR_CORE_REVISION);
		u32 core_id = pvr_cr_read32(pvr_dev, ROGUE_CR_CORE_ID);
		u16 core_id_config = PVR_CR_FIELD_GET(core_id, CORE_ID_CONFIG);

		gpu_id->b = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MAJOR);
		gpu_id->v = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MINOR);
		gpu_id->n = FIELD_GET(0xFF00, core_id_config);
		gpu_id->c = FIELD_GET(0x00FF, core_id_config);
	}
}
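
/*
 * In the legacy fallback above, the N and C values share a single 16-bit
 * config field: N occupies bits 15:8 and C bits 7:0. For a hypothetical
 * core_id_config value of 0x0b03, FIELD_GET() therefore yields:
 *
 *	FIELD_GET(0xFF00, 0x0b03) == 0x0b	(N = 11)
 *	FIELD_GET(0x00FF, 0x0b03) == 0x03	(C = 3)
 */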

/**
 * pvr_set_dma_info() - Set PowerVR device DMA information
 * @pvr_dev: Target PowerVR device.
 *
 * Sets the DMA mask and max segment size for the PowerVR device.
 *
 * Return:
 *  * 0 on success,
 *  * Any error returned by PVR_FEATURE_VALUE(), or
 *  * Any error returned by dma_set_mask().
 */
static int
pvr_set_dma_info(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	u16 phys_bus_width;
	int err;

	err = PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width);
	if (err) {
		drm_err(drm_dev, "Failed to get device physical bus width\n");
		return err;
	}

	err = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(phys_bus_width));
	if (err) {
		drm_err(drm_dev, "Failed to set DMA mask (err=%d)\n", err);
		return err;
	}

	dma_set_max_seg_size(drm_dev->dev, UINT_MAX);

	return 0;
}
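
/*
 * DMA_BIT_MASK(n) expands to a mask with the low n bits set, i.e.
 * ((1ULL << n) - 1) for n < 64. A device reporting a hypothetical 36-bit
 * physical bus width would therefore end up with:
 *
 *	dma_set_mask(drm_dev->dev, DMA_BIT_MASK(36));	(mask == 0xFFFFFFFFFULL)
 */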

/**
 * pvr_device_gpu_init() - GPU-specific initialization for a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * The following steps are taken to ensure the device is ready:
 *
 *  1. Read the hardware version information from control registers,
 *  2. Initialize the hardware feature information,
 *  3. Set up the device DMA information,
 *  4. Set up the device-scoped memory context, and
 *  5. Load firmware into the device.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if the firmware processor type is not supported,
 *  * Any error returned by pvr_set_dma_info(),
 *  * Any error returned by pvr_memory_context_init(), or
 *  * Any error returned by pvr_request_firmware().
 */
static int
pvr_device_gpu_init(struct pvr_device *pvr_dev)
{
	int err;

	pvr_load_gpu_id(pvr_dev);

	err = pvr_request_firmware(pvr_dev);
	if (err)
		return err;

	err = pvr_fw_validate_init_device_info(pvr_dev);
	if (err)
		return err;

	if (PVR_HAS_FEATURE(pvr_dev, meta))
		pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_META;
	else if (PVR_HAS_FEATURE(pvr_dev, mips))
		pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_MIPS;
	else if (PVR_HAS_FEATURE(pvr_dev, riscv_fw_processor))
		pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_RISCV;
	else
		return -EINVAL;

	pvr_stream_create_musthave_masks(pvr_dev);

	err = pvr_set_dma_info(pvr_dev);
	if (err)
		return err;

	if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
		pvr_dev->kernel_vm_ctx = pvr_vm_create_context(pvr_dev, false);
		if (IS_ERR(pvr_dev->kernel_vm_ctx))
			return PTR_ERR(pvr_dev->kernel_vm_ctx);
	}

	err = pvr_fw_init(pvr_dev);
	if (err)
		goto err_vm_ctx_put;

	return 0;

err_vm_ctx_put:
	if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
		pvr_vm_context_put(pvr_dev->kernel_vm_ctx);
		pvr_dev->kernel_vm_ctx = NULL;
	}

	return err;
}

/**
 * pvr_device_gpu_fini() - GPU-specific deinitialization for a PowerVR device
 * @pvr_dev: Target PowerVR device.
 */
static void
pvr_device_gpu_fini(struct pvr_device *pvr_dev)
{
	pvr_fw_fini(pvr_dev);

	if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
		WARN_ON(!pvr_vm_context_put(pvr_dev->kernel_vm_ctx));
		pvr_dev->kernel_vm_ctx = NULL;
	}
}

/**
 * pvr_device_init() - Initialize a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * If this function returns successfully, the device will have been fully
 * initialized. Otherwise, any parts of the device initialized before an error
 * occurs will be de-initialized before returning.
 *
 * NOTE: The initialization steps currently taken are the bare minimum required
 *       to read from the control registers. The device is unlikely to function
 *       until further initialization steps are added. [This note should be
 *       removed when that happens.]
 *
 * Return:
 *  * 0 on success,
 *  * Any error returned by pvr_device_reg_init(),
 *  * Any error returned by pvr_device_clk_init(), or
 *  * Any error returned by pvr_device_gpu_init().
 */
int
pvr_device_init(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct device *dev = drm_dev->dev;
	int err;

	/*
	 * Set up device parameters. We do this first in case other steps
	 * depend on them.
	 */
	err = pvr_device_params_init(&pvr_dev->params);
	if (err)
		return err;

	/* Initialize clocks required for the device to operate. */
	err = pvr_device_clk_init(pvr_dev);
	if (err)
		return err;

	/* Get the reset line for the GPU. */
	err = pvr_device_reset_init(pvr_dev);
	if (err)
		return err;

	/* Explicitly power the GPU so we can access control registers before the FW is booted. */
	err = pm_runtime_resume_and_get(dev);
	if (err)
		return err;

	/* Map the control registers into memory. */
	err = pvr_device_reg_init(pvr_dev);
	if (err)
		goto err_pm_runtime_put;

	/* Perform GPU-specific initialization steps. */
	err = pvr_device_gpu_init(pvr_dev);
	if (err)
		goto err_pm_runtime_put;

	err = pvr_device_irq_init(pvr_dev);
	if (err)
		goto err_device_gpu_fini;

	pm_runtime_put(dev);

	return 0;

err_device_gpu_fini:
	pvr_device_gpu_fini(pvr_dev);

err_pm_runtime_put:
	pm_runtime_put_sync_suspend(dev);

	return err;
}
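
/*
 * A minimal sketch of the expected call pattern from a probe/remove pair
 * (the real caller lives elsewhere in the driver and is more involved;
 * error handling here is elided):
 *
 *	err = pvr_device_init(pvr_dev);
 *	if (err)
 *		return err;
 *	...
 *	pvr_device_fini(pvr_dev);	(on the teardown path)
 */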

/**
 * pvr_device_fini() - Deinitialize a PowerVR device
 * @pvr_dev: Target PowerVR device.
 */
void
pvr_device_fini(struct pvr_device *pvr_dev)
{
	/*
	 * Deinitialization stages are performed in reverse order compared to
	 * the initialization stages in pvr_device_init().
	 */
	pvr_device_irq_fini(pvr_dev);
	pvr_device_gpu_fini(pvr_dev);
}

bool
pvr_device_has_uapi_quirk(struct pvr_device *pvr_dev, u32 quirk)
{
	switch (quirk) {
	case 47217:
		return PVR_HAS_QUIRK(pvr_dev, 47217);
	case 48545:
		return PVR_HAS_QUIRK(pvr_dev, 48545);
	case 49927:
		return PVR_HAS_QUIRK(pvr_dev, 49927);
	case 51764:
		return PVR_HAS_QUIRK(pvr_dev, 51764);
	case 62269:
		return PVR_HAS_QUIRK(pvr_dev, 62269);
	default:
		return false;
	}
}

bool
pvr_device_has_uapi_enhancement(struct pvr_device *pvr_dev, u32 enhancement)
{
	switch (enhancement) {
	case 35421:
		return PVR_HAS_ENHANCEMENT(pvr_dev, 35421);
	case 42064:
		return PVR_HAS_ENHANCEMENT(pvr_dev, 42064);
	default:
		return false;
	}
}
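
/*
 * These helpers let the UAPI layer validate user-supplied BRN/ERN numbers
 * without exposing the feature macros directly. A hypothetical query
 * handler (illustrative only) might fill a caller-provided array like so:
 *
 *	for (i = 0; i < num_quirks; i++)
 *		supported[i] = pvr_device_has_uapi_quirk(pvr_dev, quirks[i]);
 */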

/**
 * pvr_device_has_feature() - Look up device feature based on feature definition
 * @pvr_dev: Device pointer.
 * @feature: Feature to look up. Should be one of %PVR_FEATURE_*.
 *
 * Return:
 *  * %true if feature is present on device, or
 *  * %false if feature is not present on device.
 */
bool
pvr_device_has_feature(struct pvr_device *pvr_dev, u32 feature)
{
	switch (feature) {
	case PVR_FEATURE_CLUSTER_GROUPING:
		return PVR_HAS_FEATURE(pvr_dev, cluster_grouping);

	case PVR_FEATURE_COMPUTE_MORTON_CAPABLE:
		return PVR_HAS_FEATURE(pvr_dev, compute_morton_capable);

	case PVR_FEATURE_FB_CDC_V4:
		return PVR_HAS_FEATURE(pvr_dev, fb_cdc_v4);

	case PVR_FEATURE_GPU_MULTICORE_SUPPORT:
		return PVR_HAS_FEATURE(pvr_dev, gpu_multicore_support);

	case PVR_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE:
		return PVR_HAS_FEATURE(pvr_dev, isp_zls_d24_s8_packing_ogl_mode);

	case PVR_FEATURE_S7_TOP_INFRASTRUCTURE:
		return PVR_HAS_FEATURE(pvr_dev, s7_top_infrastructure);

	case PVR_FEATURE_TESSELLATION:
		return PVR_HAS_FEATURE(pvr_dev, tessellation);

	case PVR_FEATURE_TPU_DM_GLOBAL_REGISTERS:
		return PVR_HAS_FEATURE(pvr_dev, tpu_dm_global_registers);

	case PVR_FEATURE_VDM_DRAWINDIRECT:
		return PVR_HAS_FEATURE(pvr_dev, vdm_drawindirect);

	case PVR_FEATURE_VDM_OBJECT_LEVEL_LLS:
		return PVR_HAS_FEATURE(pvr_dev, vdm_object_level_lls);

	case PVR_FEATURE_ZLS_SUBTILE:
		return PVR_HAS_FEATURE(pvr_dev, zls_subtile);

	/* Derived features. */
	case PVR_FEATURE_CDM_USER_MODE_QUEUE: {
		u8 cdm_control_stream_format = 0;

		PVR_FEATURE_VALUE(pvr_dev, cdm_control_stream_format, &cdm_control_stream_format);
		return (cdm_control_stream_format >= 2 && cdm_control_stream_format <= 4);
	}

	case PVR_FEATURE_REQUIRES_FB_CDC_ZLS_SETUP:
		if (PVR_HAS_FEATURE(pvr_dev, fbcdc_algorithm)) {
			u8 fbcdc_algorithm = 0;

			PVR_FEATURE_VALUE(pvr_dev, fbcdc_algorithm, &fbcdc_algorithm);
			return (fbcdc_algorithm < 3 || PVR_HAS_FEATURE(pvr_dev, fb_cdc_v4));
		}
		return false;

	default:
		WARN(true, "Looking up undefined feature %u\n", feature);
		return false;
	}
}