xref: /linux/drivers/gpu/drm/imagination/pvr_device.c (revision fd073dffef041d6a2d11f00cd6cbd8ff46083396)
1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
2 /* Copyright (c) 2023 Imagination Technologies Ltd. */
3 
4 #include "pvr_device.h"
5 #include "pvr_device_info.h"
6 
7 #include "pvr_fw.h"
8 #include "pvr_params.h"
9 #include "pvr_power.h"
10 #include "pvr_queue.h"
11 #include "pvr_rogue_cr_defs.h"
12 #include "pvr_stream.h"
13 #include "pvr_vm.h"
14 
15 #include <drm/drm_print.h>
16 
17 #include <linux/bitfield.h>
18 #include <linux/clk.h>
19 #include <linux/compiler_attributes.h>
20 #include <linux/compiler_types.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/err.h>
23 #include <linux/firmware.h>
24 #include <linux/gfp.h>
25 #include <linux/interrupt.h>
26 #include <linux/platform_device.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/slab.h>
29 #include <linux/stddef.h>
30 #include <linux/types.h>
31 #include <linux/workqueue.h>
32 
/*
 * Major version number of the firmware supported by this driver; used when
 * constructing the firmware filename (see pvr_build_firmware_filename()).
 */
#define PVR_FW_VERSION_MAJOR 1
35 
36 /**
37  * pvr_device_reg_init() - Initialize kernel access to a PowerVR device's
38  * control registers.
39  * @pvr_dev: Target PowerVR device.
40  *
41  * Sets struct pvr_device->regs.
42  *
43  * This method of mapping the device control registers into memory ensures that
44  * they are unmapped when the driver is detached (i.e. no explicit cleanup is
45  * required).
46  *
47  * Return:
48  *  * 0 on success, or
49  *  * Any error returned by devm_platform_ioremap_resource().
50  */
51 static int
52 pvr_device_reg_init(struct pvr_device *pvr_dev)
53 {
54 	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
55 	struct platform_device *plat_dev = to_platform_device(drm_dev->dev);
56 	struct resource *regs_resource;
57 	void __iomem *regs;
58 
59 	pvr_dev->regs_resource = NULL;
60 	pvr_dev->regs = NULL;
61 
62 	regs = devm_platform_get_and_ioremap_resource(plat_dev, 0, &regs_resource);
63 	if (IS_ERR(regs))
64 		return dev_err_probe(drm_dev->dev, PTR_ERR(regs),
65 				     "failed to ioremap gpu registers\n");
66 
67 	pvr_dev->regs = regs;
68 	pvr_dev->regs_resource = regs_resource;
69 
70 	return 0;
71 }
72 
73 /**
74  * pvr_device_clk_init() - Initialize clocks required by a PowerVR device
75  * @pvr_dev: Target PowerVR device.
76  *
77  * Sets struct pvr_device->core_clk, struct pvr_device->sys_clk and
78  * struct pvr_device->mem_clk.
79  *
80  * Three clocks are required by the PowerVR device: core, sys and mem. On
81  * return, this function guarantees that the clocks are in one of the following
82  * states:
83  *
84  *  * All successfully initialized,
85  *  * Core errored, sys and mem uninitialized,
86  *  * Core deinitialized, sys errored, mem uninitialized, or
87  *  * Core and sys deinitialized, mem errored.
88  *
89  * Return:
90  *  * 0 on success,
91  *  * Any error returned by devm_clk_get(), or
92  *  * Any error returned by devm_clk_get_optional().
93  */
94 static int pvr_device_clk_init(struct pvr_device *pvr_dev)
95 {
96 	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
97 	struct clk *core_clk;
98 	struct clk *sys_clk;
99 	struct clk *mem_clk;
100 
101 	core_clk = devm_clk_get(drm_dev->dev, "core");
102 	if (IS_ERR(core_clk))
103 		return dev_err_probe(drm_dev->dev, PTR_ERR(core_clk),
104 				     "failed to get core clock\n");
105 
106 	sys_clk = devm_clk_get_optional(drm_dev->dev, "sys");
107 	if (IS_ERR(sys_clk))
108 		return dev_err_probe(drm_dev->dev, PTR_ERR(sys_clk),
109 				     "failed to get sys clock\n");
110 
111 	mem_clk = devm_clk_get_optional(drm_dev->dev, "mem");
112 	if (IS_ERR(mem_clk))
113 		return dev_err_probe(drm_dev->dev, PTR_ERR(mem_clk),
114 				     "failed to get mem clock\n");
115 
116 	pvr_dev->core_clk = core_clk;
117 	pvr_dev->sys_clk = sys_clk;
118 	pvr_dev->mem_clk = mem_clk;
119 
120 	return 0;
121 }
122 
123 /**
124  * pvr_device_process_active_queues() - Process all queue related events.
125  * @pvr_dev: PowerVR device to check
126  *
127  * This is called any time we receive a FW event. It iterates over all
128  * active queues and calls pvr_queue_process() on them.
129  */
130 static void pvr_device_process_active_queues(struct pvr_device *pvr_dev)
131 {
132 	struct pvr_queue *queue, *tmp_queue;
133 	LIST_HEAD(active_queues);
134 
135 	mutex_lock(&pvr_dev->queues.lock);
136 
137 	/* Move all active queues to a temporary list. Queues that remain
138 	 * active after we're done processing them are re-inserted to
139 	 * the queues.active list by pvr_queue_process().
140 	 */
141 	list_splice_init(&pvr_dev->queues.active, &active_queues);
142 
143 	list_for_each_entry_safe(queue, tmp_queue, &active_queues, node)
144 		pvr_queue_process(queue);
145 
146 	mutex_unlock(&pvr_dev->queues.lock);
147 }
148 
149 static bool pvr_device_safety_irq_pending(struct pvr_device *pvr_dev)
150 {
151 	u32 events;
152 
153 	WARN_ON_ONCE(!pvr_dev->has_safety_events);
154 
155 	events = pvr_cr_read32(pvr_dev, ROGUE_CR_EVENT_STATUS);
156 
157 	return (events & ROGUE_CR_EVENT_STATUS_SAFETY_EN) != 0;
158 }
159 
/* Acknowledge the aggregate safety event bit in the main event register. */
static void pvr_device_safety_irq_clear(struct pvr_device *pvr_dev)
{
	/* Only meaningful on parts where safety events were detected
	 * (see pvr_device_safety_irq_init()).
	 */
	WARN_ON_ONCE(!pvr_dev->has_safety_events);

	pvr_cr_write32(pvr_dev, ROGUE_CR_EVENT_CLEAR,
		       ROGUE_CR_EVENT_CLEAR_SAFETY_EN);
}
167 
/* Read, acknowledge and report the safety events handled on the host. */
static void pvr_device_handle_safety_events(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	u32 events;

	/* Only meaningful on parts where safety events were detected. */
	WARN_ON_ONCE(!pvr_dev->has_safety_events);

	events = pvr_cr_read32(pvr_dev, ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE);

	/* Handle only these events on the host and leave the rest to the FW. */
	events &= ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN |
		ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN;

	/* Acknowledge the host-handled events before reporting them. */
	pvr_cr_write32(pvr_dev, ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE, events);

	if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN) {
		/* Fetch the detailed FW fault mask, then clear exactly those bits. */
		u32 fault_fw = pvr_cr_read32(pvr_dev, ROGUE_CR_FAULT_FW_STATUS);

		pvr_cr_write32(pvr_dev, ROGUE_CR_FAULT_FW_CLEAR, fault_fw);

		drm_info(drm_dev, "Safety event: FW fault (mask=0x%08x)\n", fault_fw);
	}

	if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN) {
		/*
		 * The watchdog timer is disabled by the driver so this event
		 * should never be fired.
		 */
		drm_info(drm_dev, "Safety event: Watchdog timeout\n");
	}
}
199 
/*
 * Threaded half of the GPU interrupt handler (hard half:
 * pvr_device_irq_handler()). Runs with the IRQ line masked (IRQF_ONESHOT),
 * so it can drain all pending FW and safety events before returning.
 */
static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data)
{
	struct pvr_device *pvr_dev = data;
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	irqreturn_t ret = IRQ_NONE;

	/* We are in the threaded handler, we can keep dequeuing events until we
	 * don't see any. This should allow us to reduce the number of interrupts
	 * when the GPU is receiving a massive amount of short jobs.
	 */
	while (pvr_fw_irq_pending(pvr_dev)) {
		pvr_fw_irq_clear(pvr_dev);

		/* FW CCB processing is only meaningful once the FW has booted. */
		if (pvr_dev->fw_dev.booted) {
			pvr_fwccb_process(pvr_dev);
			pvr_kccb_wake_up_waiters(pvr_dev);
			pvr_device_process_active_queues(pvr_dev);
		}

		/* Push back the runtime-PM autosuspend deadline. */
		pm_runtime_mark_last_busy(drm_dev->dev);

		ret = IRQ_HANDLED;
	}

	if (pvr_dev->has_safety_events) {
		int err;

		/*
		 * Ensure the GPU is powered on since some safety events (such
		 * as ECC faults) can happen outside of job submissions, which
		 * are otherwise the only time a power reference is held.
		 */
		err = pvr_power_get(pvr_dev);
		if (err) {
			drm_err_ratelimited(drm_dev,
					    "%s: could not take power reference (%d)\n",
					    __func__, err);
			return ret;
		}

		while (pvr_device_safety_irq_pending(pvr_dev)) {
			pvr_device_safety_irq_clear(pvr_dev);
			pvr_device_handle_safety_events(pvr_dev);

			ret = IRQ_HANDLED;
		}

		pvr_power_put(pvr_dev);
	}

	return ret;
}
252 
253 static irqreturn_t pvr_device_irq_handler(int irq, void *data)
254 {
255 	struct pvr_device *pvr_dev = data;
256 	bool safety_irq_pending = false;
257 
258 	if (pvr_dev->has_safety_events)
259 		safety_irq_pending = pvr_device_safety_irq_pending(pvr_dev);
260 
261 	if (!pvr_fw_irq_pending(pvr_dev) && !safety_irq_pending)
262 		return IRQ_NONE; /* Spurious IRQ - ignore. */
263 
264 	return IRQ_WAKE_THREAD;
265 }
266 
267 static void pvr_device_safety_irq_init(struct pvr_device *pvr_dev)
268 {
269 	u32 num_ecc_rams = 0;
270 
271 	/*
272 	 * Safety events are an optional feature of the RogueXE platform. They
273 	 * are only enabled if at least one of ECC memory or the watchdog timer
274 	 * are present in HW. While safety events can be generated by other
275 	 * systems, that will never happen if the above mentioned hardware is
276 	 * not present.
277 	 */
278 	if (!PVR_HAS_FEATURE(pvr_dev, roguexe)) {
279 		pvr_dev->has_safety_events = false;
280 		return;
281 	}
282 
283 	PVR_FEATURE_VALUE(pvr_dev, ecc_rams, &num_ecc_rams);
284 
285 	pvr_dev->has_safety_events =
286 		num_ecc_rams > 0 || PVR_HAS_FEATURE(pvr_dev, watchdog_timer);
287 }
288 
/**
 * pvr_device_irq_init() - Initialise IRQ required by a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * A single platform IRQ line carries both FW events and (when present)
 * safety events; any stale events are cleared before the line is
 * requested so they cannot fire immediately.
 *
 * Returns:
 *  * 0 on success,
 *  * Any error returned by platform_get_irq(), or
 *  * Any error returned by request_threaded_irq().
 */
static int
pvr_device_irq_init(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct platform_device *plat_dev = to_platform_device(drm_dev->dev);

	init_waitqueue_head(&pvr_dev->kccb.rtn_q);

	/* Determine pvr_dev->has_safety_events before it is consulted below. */
	pvr_device_safety_irq_init(pvr_dev);

	pvr_dev->irq = platform_get_irq(plat_dev, 0);
	if (pvr_dev->irq < 0)
		return pvr_dev->irq;

	/* Clear any pending events before requesting the IRQ line. */
	pvr_fw_irq_clear(pvr_dev);

	if (pvr_dev->has_safety_events)
		pvr_device_safety_irq_clear(pvr_dev);

	/*
	 * The ONESHOT flag ensures IRQs are masked while the thread handler is
	 * running.
	 */
	return request_threaded_irq(pvr_dev->irq, pvr_device_irq_handler,
				    pvr_device_irq_thread_handler,
				    IRQF_SHARED | IRQF_ONESHOT, "gpu", pvr_dev);
}
326 
/**
 * pvr_device_irq_fini() - Deinitialise IRQ required by a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * Releases the IRQ line obtained by pvr_device_irq_init().
 */
static void
pvr_device_irq_fini(struct pvr_device *pvr_dev)
{
	free_irq(pvr_dev->irq, pvr_dev);
}
336 
337 /**
338  * pvr_build_firmware_filename() - Construct a PowerVR firmware filename
339  * @pvr_dev: Target PowerVR device.
340  * @base: First part of the filename.
341  * @major: Major version number.
342  *
343  * A PowerVR firmware filename consists of three parts separated by underscores
344  * (``'_'``) along with a '.fw' file suffix. The first part is the exact value
345  * of @base, the second part is the hardware version string derived from @pvr_fw
346  * and the final part is the firmware version number constructed from @major with
347  * a 'v' prefix, e.g. powervr/rogue_4.40.2.51_v1.fw.
348  *
349  * The returned string will have been slab allocated and must be freed with
350  * kfree().
351  *
352  * Return:
353  *  * The constructed filename on success, or
354  *  * Any error returned by kasprintf().
355  */
356 static char *
357 pvr_build_firmware_filename(struct pvr_device *pvr_dev, const char *base,
358 			    u8 major)
359 {
360 	struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id;
361 
362 	return kasprintf(GFP_KERNEL, "%s_%d.%d.%d.%d_v%d.fw", base, gpu_id->b,
363 			 gpu_id->v, gpu_id->n, gpu_id->c, major);
364 }
365 
/* devm action callback: release the firmware loaded by pvr_request_firmware(). */
static void
pvr_release_firmware(void *data)
{
	struct pvr_device *pvr_dev = data;

	release_firmware(pvr_dev->fw_dev.firmware);
}
373 
374 /**
375  * pvr_request_firmware() - Load firmware for a PowerVR device
376  * @pvr_dev: Target PowerVR device.
377  *
378  * See pvr_build_firmware_filename() for details on firmware file naming.
379  *
380  * Return:
381  *  * 0 on success,
382  *  * Any error returned by pvr_build_firmware_filename(), or
383  *  * Any error returned by request_firmware().
384  */
385 static int
386 pvr_request_firmware(struct pvr_device *pvr_dev)
387 {
388 	struct drm_device *drm_dev = &pvr_dev->base;
389 	char *filename;
390 	const struct firmware *fw;
391 	int err;
392 
393 	filename = pvr_build_firmware_filename(pvr_dev, "powervr/rogue",
394 					       PVR_FW_VERSION_MAJOR);
395 	if (!filename)
396 		return -ENOMEM;
397 
398 	/*
399 	 * This function takes a copy of &filename, meaning we can free our
400 	 * instance before returning.
401 	 */
402 	err = request_firmware(&fw, filename, pvr_dev->base.dev);
403 	if (err) {
404 		drm_err(drm_dev, "failed to load firmware %s (err=%d)\n",
405 			filename, err);
406 		goto err_free_filename;
407 	}
408 
409 	drm_info(drm_dev, "loaded firmware %s\n", filename);
410 	kfree(filename);
411 
412 	pvr_dev->fw_dev.firmware = fw;
413 
414 	return devm_add_action_or_reset(drm_dev->dev, pvr_release_firmware, pvr_dev);
415 
416 err_free_filename:
417 	kfree(filename);
418 
419 	return err;
420 }
421 
/**
 * pvr_load_gpu_id() - Load a PowerVR device's GPU ID (BVNC) from control registers.
 * @pvr_dev: Target PowerVR device.
 *
 * Sets struct pvr_dev.gpu_id.
 */
static void
pvr_load_gpu_id(struct pvr_device *pvr_dev)
{
	struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id;
	u64 bvnc;

	/*
	 * Try reading the BVNC using the newer (cleaner) method first. If the
	 * B value is zero, fall back to the older method.
	 */
	bvnc = pvr_cr_read64(pvr_dev, ROGUE_CR_CORE_ID__PBVNC);

	gpu_id->b = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__BRANCH_ID);
	if (gpu_id->b != 0) {
		gpu_id->v = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__VERSION_ID);
		gpu_id->n = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS);
		gpu_id->c = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__CONFIG_ID);
	} else {
		/* Legacy path: B/V come from CORE_REVISION, N/C from CORE_ID. */
		u32 core_rev = pvr_cr_read32(pvr_dev, ROGUE_CR_CORE_REVISION);
		u32 core_id = pvr_cr_read32(pvr_dev, ROGUE_CR_CORE_ID);
		u16 core_id_config = PVR_CR_FIELD_GET(core_id, CORE_ID_CONFIG);

		gpu_id->b = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MAJOR);
		gpu_id->v = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MINOR);
		/* Legacy config field packs N in bits [15:8] and C in bits [7:0]. */
		gpu_id->n = FIELD_GET(0xFF00, core_id_config);
		gpu_id->c = FIELD_GET(0x00FF, core_id_config);
	}
}
457 
458 /**
459  * pvr_set_dma_info() - Set PowerVR device DMA information
460  * @pvr_dev: Target PowerVR device.
461  *
462  * Sets the DMA mask and max segment size for the PowerVR device.
463  *
464  * Return:
465  *  * 0 on success,
466  *  * Any error returned by PVR_FEATURE_VALUE(), or
467  *  * Any error returned by dma_set_mask().
468  */
469 
470 static int
471 pvr_set_dma_info(struct pvr_device *pvr_dev)
472 {
473 	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
474 	u16 phys_bus_width;
475 	int err;
476 
477 	err = PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width);
478 	if (err) {
479 		drm_err(drm_dev, "Failed to get device physical bus width\n");
480 		return err;
481 	}
482 
483 	err = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(phys_bus_width));
484 	if (err) {
485 		drm_err(drm_dev, "Failed to set DMA mask (err=%d)\n", err);
486 		return err;
487 	}
488 
489 	dma_set_max_seg_size(drm_dev->dev, UINT_MAX);
490 
491 	return 0;
492 }
493 
/**
 * pvr_device_gpu_init() - GPU-specific initialization for a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * The following steps are taken to ensure the device is ready:
 *
 *  1. Read the hardware version information (BVNC) from control registers,
 *  2. Load the firmware and validate/initialise the device info it carries,
 *  3. Determine the FW processor type and stream must-have masks,
 *  4. Setup the device DMA information,
 *  5. Create the kernel VM context (non-MIPS FW processors only), and
 *  6. Initialise the firmware subsystem.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if the firmware processor type is not recognised,
 *  * Any error returned by pvr_request_firmware(),
 *  * Any error returned by pvr_fw_validate_init_device_info(),
 *  * Any error returned by pvr_set_dma_info(),
 *  * Any error returned by pvr_vm_create_context(), or
 *  * Any error returned by pvr_fw_init().
 */
static int
pvr_device_gpu_init(struct pvr_device *pvr_dev)
{
	int err;

	pvr_load_gpu_id(pvr_dev);

	err = pvr_request_firmware(pvr_dev);
	if (err)
		return err;

	err = pvr_fw_validate_init_device_info(pvr_dev);
	if (err)
		return err;

	if (PVR_HAS_FEATURE(pvr_dev, meta))
		pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_META;
	else if (PVR_HAS_FEATURE(pvr_dev, mips))
		pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_MIPS;
	else if (PVR_HAS_FEATURE(pvr_dev, riscv_fw_processor))
		pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_RISCV;
	else
		return -EINVAL;

	pvr_stream_create_musthave_masks(pvr_dev);

	err = pvr_set_dma_info(pvr_dev);
	if (err)
		return err;

	/* NOTE(review): MIPS FW cores appear not to use a kernel VM context
	 * (mirrored in pvr_device_gpu_fini()) — confirm against pvr_vm/pvr_fw.
	 */
	if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
		pvr_dev->kernel_vm_ctx = pvr_vm_create_context(pvr_dev, false);
		if (IS_ERR(pvr_dev->kernel_vm_ctx))
			return PTR_ERR(pvr_dev->kernel_vm_ctx);
	}

	err = pvr_fw_init(pvr_dev);
	if (err)
		goto err_vm_ctx_put;

	return 0;

err_vm_ctx_put:
	if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
		pvr_vm_context_put(pvr_dev->kernel_vm_ctx);
		pvr_dev->kernel_vm_ctx = NULL;
	}

	return err;
}
563 
/**
 * pvr_device_gpu_fini() - GPU-specific deinitialization for a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * Reverses pvr_device_gpu_init(): tears down the firmware state and, on
 * non-MIPS parts, drops the kernel VM context reference created there.
 */
static void
pvr_device_gpu_fini(struct pvr_device *pvr_dev)
{
	pvr_fw_fini(pvr_dev);

	if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
		/* NOTE(review): presumably a false return means something still
		 * holds a reference to the kernel VM context — confirm.
		 */
		WARN_ON(!pvr_vm_context_put(pvr_dev->kernel_vm_ctx));
		pvr_dev->kernel_vm_ctx = NULL;
	}
}
578 
/**
 * pvr_device_init() - Initialize a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * If this function returns successfully, the device will have been fully
 * initialized. Otherwise, any parts of the device initialized before an error
 * occurs will be de-initialized before returning.
 *
 * Return:
 *  * 0 on success,
 *  * Any error returned by pvr_device_params_init(),
 *  * Any error returned by pvr_device_clk_init(),
 *  * Any error returned by pm_runtime_resume_and_get(),
 *  * Any error returned by pvr_device_reg_init(),
 *  * Any error returned by pvr_device_gpu_init(), or
 *  * Any error returned by pvr_device_irq_init().
 */
int
pvr_device_init(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct device *dev = drm_dev->dev;
	int err;

	/*
	 * Setup device parameters. We do this first in case other steps
	 * depend on them.
	 */
	err = pvr_device_params_init(&pvr_dev->params);
	if (err)
		return err;

	/* Enable and initialize clocks required for the device to operate. */
	err = pvr_device_clk_init(pvr_dev);
	if (err)
		return err;

	/* Explicitly power the GPU so we can access control registers before the FW is booted. */
	err = pm_runtime_resume_and_get(dev);
	if (err)
		return err;

	/* Map the control registers into memory. */
	err = pvr_device_reg_init(pvr_dev);
	if (err)
		goto err_pm_runtime_put;

	/* Perform GPU-specific initialization steps. */
	err = pvr_device_gpu_init(pvr_dev);
	if (err)
		goto err_pm_runtime_put;

	err = pvr_device_irq_init(pvr_dev);
	if (err)
		goto err_device_gpu_fini;

	/* Drop the probe-time power reference; jobs take their own. */
	pm_runtime_put(dev);

	return 0;

err_device_gpu_fini:
	pvr_device_gpu_fini(pvr_dev);

err_pm_runtime_put:
	pm_runtime_put_sync_suspend(dev);

	return err;
}
649 
/**
 * pvr_device_fini() - Deinitialize a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * Only the IRQ and GPU state need explicit teardown here; the register
 * mapping, clocks and firmware were acquired via device-managed (devm_*)
 * helpers in pvr_device_init() and are released automatically.
 */
void
pvr_device_fini(struct pvr_device *pvr_dev)
{
	/*
	 * Deinitialization stages are performed in reverse order compared to
	 * the initialization stages in pvr_device_init().
	 */
	pvr_device_irq_fini(pvr_dev);
	pvr_device_gpu_fini(pvr_dev);
}
664 
665 bool
666 pvr_device_has_uapi_quirk(struct pvr_device *pvr_dev, u32 quirk)
667 {
668 	switch (quirk) {
669 	case 47217:
670 		return PVR_HAS_QUIRK(pvr_dev, 47217);
671 	case 48545:
672 		return PVR_HAS_QUIRK(pvr_dev, 48545);
673 	case 49927:
674 		return PVR_HAS_QUIRK(pvr_dev, 49927);
675 	case 51764:
676 		return PVR_HAS_QUIRK(pvr_dev, 51764);
677 	case 62269:
678 		return PVR_HAS_QUIRK(pvr_dev, 62269);
679 	default:
680 		return false;
681 	};
682 }
683 
684 bool
685 pvr_device_has_uapi_enhancement(struct pvr_device *pvr_dev, u32 enhancement)
686 {
687 	switch (enhancement) {
688 	case 35421:
689 		return PVR_HAS_ENHANCEMENT(pvr_dev, 35421);
690 	case 42064:
691 		return PVR_HAS_ENHANCEMENT(pvr_dev, 42064);
692 	default:
693 		return false;
694 	};
695 }
696 
/**
 * pvr_device_has_feature() - Look up device feature based on feature definition
 * @pvr_dev: Device pointer.
 * @feature: Feature to look up. Should be one of %PVR_FEATURE_*.
 *
 * Returns:
 *  * %true if feature is present on device, or
 *  * %false if feature is not present on device.
 */
bool
pvr_device_has_feature(struct pvr_device *pvr_dev, u32 feature)
{
	switch (feature) {
	case PVR_FEATURE_CLUSTER_GROUPING:
		return PVR_HAS_FEATURE(pvr_dev, cluster_grouping);

	case PVR_FEATURE_COMPUTE_MORTON_CAPABLE:
		return PVR_HAS_FEATURE(pvr_dev, compute_morton_capable);

	case PVR_FEATURE_FB_CDC_V4:
		return PVR_HAS_FEATURE(pvr_dev, fb_cdc_v4);

	case PVR_FEATURE_GPU_MULTICORE_SUPPORT:
		return PVR_HAS_FEATURE(pvr_dev, gpu_multicore_support);

	case PVR_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE:
		return PVR_HAS_FEATURE(pvr_dev, isp_zls_d24_s8_packing_ogl_mode);

	case PVR_FEATURE_S7_TOP_INFRASTRUCTURE:
		return PVR_HAS_FEATURE(pvr_dev, s7_top_infrastructure);

	case PVR_FEATURE_TESSELLATION:
		return PVR_HAS_FEATURE(pvr_dev, tessellation);

	case PVR_FEATURE_TPU_DM_GLOBAL_REGISTERS:
		return PVR_HAS_FEATURE(pvr_dev, tpu_dm_global_registers);

	case PVR_FEATURE_VDM_DRAWINDIRECT:
		return PVR_HAS_FEATURE(pvr_dev, vdm_drawindirect);

	case PVR_FEATURE_VDM_OBJECT_LEVEL_LLS:
		return PVR_HAS_FEATURE(pvr_dev, vdm_object_level_lls);

	case PVR_FEATURE_ZLS_SUBTILE:
		return PVR_HAS_FEATURE(pvr_dev, zls_subtile);

	/* Derived features. */
	case PVR_FEATURE_CDM_USER_MODE_QUEUE: {
		u8 cdm_control_stream_format = 0;

		/* Return value deliberately ignored: on failure the out-param
		 * keeps its 0 default, which makes the range check fail safe.
		 */
		PVR_FEATURE_VALUE(pvr_dev, cdm_control_stream_format, &cdm_control_stream_format);
		return (cdm_control_stream_format >= 2 && cdm_control_stream_format <= 4);
	}

	case PVR_FEATURE_REQUIRES_FB_CDC_ZLS_SETUP:
		if (PVR_HAS_FEATURE(pvr_dev, fbcdc_algorithm)) {
			u8 fbcdc_algorithm = 0;

			/* Return value ignored; 0 default keeps the check fail safe. */
			PVR_FEATURE_VALUE(pvr_dev, fbcdc_algorithm, &fbcdc_algorithm);
			return (fbcdc_algorithm < 3 || PVR_HAS_FEATURE(pvr_dev, fb_cdc_v4));
		}
		return false;

	default:
		WARN(true, "Looking up undefined feature %u\n", feature);
		return false;
	}
}
765